1 /*
2  * %CopyrightBegin%
3  *
4  * Copyright Ericsson AB 2002-2020. All Rights Reserved.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *
18  * %CopyrightEnd%
19  */
20 
21 
22 /*
23  * Description:	A memory allocator utility. This utility provides
24  *              management of (multiple) memory segments, coalescing
25  *              of free blocks, etc. An allocator is implemented by
26  *              providing a callback interface which is called by
27  *              this utility. The only task the callback module has to
28  *              perform is to supervise the free blocks.
29  *
30  * Author: 	Rickard Green
31  */
32 
33 /*
34  * Alloc util will enforce 8 byte alignment if sys_alloc and mseg_alloc at
35  * least enforce 8 byte alignment. If sys_alloc only enforces 4 byte
36  * alignment, then alloc util will do so too.
37  */
38 
39 #ifdef HAVE_CONFIG_H
40 #  include "config.h"
41 #endif
42 
43 #include "global.h"
44 #include "big.h"
45 #include "erl_mmap.h"
46 #include "erl_mtrace.h"
47 #define GET_ERL_ALLOC_UTIL_IMPL
48 #include "erl_alloc_util.h"
49 #include "erl_mseg.h"
50 #include "erl_threads.h"
51 #include "erl_thr_progress.h"
52 #include "erl_bif_unique.h"
53 #include "erl_nif.h"
54 
55 #ifdef ERTS_ENABLE_LOCK_COUNT
56 #include "erl_lock_count.h"
57 #endif
58 #include "lttng-wrapper.h"
59 
60 #if defined(ERTS_ALLOC_UTIL_HARD_DEBUG) && defined(__GNUC__)
61 #warning "* * * * * * * * * *"
62 #warning "* * * * * * * * * *"
63 #warning "* * NOTE:       * *"
64 #warning "* * Hard debug  * *"
65 #warning "* * is enabled! * *"
66 #warning "* * * * * * * * * *"
67 #warning "* * * * * * * * * *"
68 #endif
69 
70 #define ERTS_ALCU_DD_OPS_LIM_HIGH 20
71 #define ERTS_ALCU_DD_OPS_LIM_LOW 2
72 
73 /* Fix alloc limit */
74 #define ERTS_ALCU_FIX_MAX_LIST_SZ 1000
75 #define ERTS_ALC_FIX_MAX_SHRINK_OPS 30
76 
77 #define ALLOC_ZERO_EQ_NULL 0
78 
79 #ifndef ERTS_MSEG_FLG_2POW
80 #  define ERTS_MSEG_FLG_2POW 0
81 #endif
82 #ifndef ERTS_MSEG_FLG_NONE
83 #  define ERTS_MSEG_FLG_NONE 0
84 #endif
85 
86 static int atoms_initialized = 0;
87 static int initialized = 0;
88 
89 #define INV_SYS_ALLOC_CARRIER_MASK	((UWord) (sys_alloc_carrier_size - 1))
90 #define SYS_ALLOC_CARRIER_MASK		(~INV_SYS_ALLOC_CARRIER_MASK)
91 #define SYS_ALLOC_CARRIER_FLOOR(X)	((X) & SYS_ALLOC_CARRIER_MASK)
92 #define SYS_ALLOC_CARRIER_CEILING(X) \
93   SYS_ALLOC_CARRIER_FLOOR((X) + INV_SYS_ALLOC_CARRIER_MASK)
94 #define SYS_PAGE_SIZE                   (sys_page_size)
95 #define SYS_PAGE_SZ_MASK                ((UWord)(SYS_PAGE_SIZE - 1))
96 
97 #if 0
98 /* Can be useful for debugging */
99 #define MBC_REALLOC_ALWAYS_MOVES
100 #endif
101 
102 /* alloc_util global parameters */
103 static Uint sys_alloc_carrier_size;
104 static Uint sys_page_size;
105 
106 #if HAVE_ERTS_MSEG
107 static Uint max_mseg_carriers;
108 #endif
109 static int allow_sys_alloc_carriers;
110 
111 #define ONE_GIGA (1000000000)
112 
113 #define ERTS_ALC_CC_GIGA_VAL(CC) ((CC) / ONE_GIGA)
114 #define ERTS_ALC_CC_VAL(CC) ((CC) % ONE_GIGA)
115 
116 #define INC_CC(CC) ((CC)++)
117 
118 #define DEC_CC(CC) ((CC)--)
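
/* The ERTS_ALC_CC_* macros above split a call count CC into a (giga, rest)
 * pair, e.g. for reporting. Illustrative arithmetic (not part of the
 * build): CC == 3200000005 gives ERTS_ALC_CC_GIGA_VAL(CC) == 3 and
 * ERTS_ALC_CC_VAL(CC) == 200000005, i.e. CC == 3*ONE_GIGA + 200000005. */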
119 
120 /* Multi block carrier (MBC) memory layout in OTP 22:
121 
122 Empty MBC:
123 [Carrier_t|pad|Block_t L0T0|fhdr| free... ]
124 
125 MBC after allocating first block:
126 [Carrier_t|pad|Block_t 0000|        udata        |pad|Block_t L0T0|fhdr| free... ]
127 
128 MBC after allocating second block:
129 [Carrier_t|pad|Block_t 0000|        udata        |pad|Block_t 0000|   udata   |pad|Block_t L0T0|fhdr| free... ]
130 
131 MBC after deallocating first block:
132 [Carrier_t|pad|Block_t 00T0|fhdr| free  |FreeBlkFtr_t|Block_t 0P00|   udata   |pad|Block_t L0T0|fhdr| free... ]
133 
134 MBC after allocating first block, with allocation tagging enabled:
135 [Carrier_t|pad|Block_t 000A|        udata        |atag|pad|Block_t L0T0|fhdr| free... ]
136 
137     udata = Allocated user data
138     atag  = A tag with basic metadata about this allocation
139     pad   = Padding to ensure correct alignment for user data
140     fhdr  = Allocator specific header to keep track of free block
141     free  = Unused free memory
142     T     = This block is free (THIS_FREE_BLK_HDR_FLG)
143     P     = Previous block is free (PREV_FREE_BLK_HDR_FLG)
144     L     = Last block in carrier (LAST_BLK_HDR_FLG)
145     A     = Block has an allocation tag footer, only valid for allocated blocks
146             (ATAG_BLK_HDR_FLG)
147 */
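
/* A compiled-out sketch of how the layout above can be walked in a multi
 * block carrier: each block header leads to the next block until a header
 * with the L (LAST_BLK_HDR_FLG) bit is reached. Illustrative only; it
 * relies on the block macros defined further down in this file. */
#if 0
static void walk_mbc_blocks_example(Allctr_t *allctr, Carrier_t *crr)
{
    Block_t *blk = MBC_TO_FIRST_BLK(allctr, crr);
    for (;;) {
        UWord sz = MBC_BLK_SZ(blk);        /* header + payload + padding */
        int is_free = !!IS_FREE_BLK(blk);  /* T flag in the picture above */
        (void) sz; (void) is_free;
        if (IS_LAST_BLK(blk))              /* L flag: end of the carrier */
            break;
        blk = NXT_BLK(blk);
    }
}
#endif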
148 
149 /* Single block carrier (SBC):
150 [Carrier_t|pad|Block_t 1110| udata... ]
151 [Carrier_t|pad|Block_t 111A| udata | atag]
152 */
153 
154 /* Allocation tags ...
155  *
156  * These are added to the footer of every block when enabled. Currently they
157  * consist of the allocation type and an atom identifying the allocating
158  * driver/nif (or 'system' if that can't be determined), but the format is not
159  * supposed to be set in stone.
160  *
161  * The packing scheme requires that the atom values are small enough to fit
162  * into a word with ERTS_ALC_N_BITS to spare. Users must check for overflow
163  * before MAKE_ATAG(). */
164 
165 typedef UWord alcu_atag_t;
166 
167 #define MAKE_ATAG(IdAtom, TypeNum) \
168     (ASSERT((TypeNum) >= ERTS_ALC_N_MIN && (TypeNum) <= ERTS_ALC_N_MAX), \
169      ASSERT(atom_val(IdAtom) <= MAX_ATAG_ATOM_ID), \
170      (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (TypeNum))
171 
172 #define ATAG_ID(AT) (make_atom((AT) >> ERTS_ALC_N_BITS))
173 #define ATAG_TYPE(AT) ((AT) & ERTS_ALC_N_MASK)
174 
175 #define MAX_ATAG_ATOM_ID (ERTS_UWORD_MAX >> ERTS_ALC_N_BITS)
176 
177 #define DBG_IS_VALID_ATAG(AT) \
178     (ATAG_TYPE(AT) >= ERTS_ALC_N_MIN && \
179      ATAG_TYPE(AT) <= ERTS_ALC_N_MAX && \
180      ATAG_ID(AT) <= MAX_ATAG_ATOM_ID)
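
/* Compiled-out round-trip sketch of the packing above: the atom index is
 * stored in the high bits and the type number in the low ERTS_ALC_N_BITS.
 * Purely illustrative; am_system is just an example identifier. */
#if 0
static void atag_roundtrip_example(void)
{
    alcu_atag_t tag = MAKE_ATAG(am_system, ERTS_ALC_N_MIN);
    ASSERT(ATAG_ID(tag) == am_system);
    ASSERT(ATAG_TYPE(tag) == ERTS_ALC_N_MIN);
}
#endif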
181 
182 /* Blocks ... */
183 
184 #define UNUSED0_BLK_FTR_FLG	(((UWord) 1) << 0)
185 #define UNUSED1_BLK_FTR_FLG	(((UWord) 1) << 1)
186 #define UNUSED2_BLK_FTR_FLG	(((UWord) 1) << 2)
187 
188 #if MBC_ABLK_OFFSET_BITS
189 #  define ABLK_HDR_SZ (offsetof(Block_t,u))
190 #else
191 #  define ABLK_HDR_SZ (sizeof(Block_t))
192 #endif
193 #define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t))
194 
195 #define BLK_HAS_ATAG(B) \
196     (!!((B)->bhdr & ATAG_BLK_HDR_FLG))
197 
198 #define GET_BLK_ATAG(B) \
199     (ASSERT(BLK_HAS_ATAG(B)), \
200      ((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1])
201 #define SET_BLK_ATAG(B, T) \
202     ((B)->bhdr |= ATAG_BLK_HDR_FLG, \
203      ((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T))
204 
205 #define BLK_ATAG_SZ(AP) ((AP)->atags ? sizeof(alcu_atag_t) : 0)
206 
207 #define UMEMSZ2BLKSZ(AP, SZ)						\
208   (ABLK_HDR_SZ + BLK_ATAG_SZ(AP) + (SZ) <= (AP)->min_block_size		\
209    ? (AP)->min_block_size						\
210    : UNIT_CEILING(ABLK_HDR_SZ + BLK_ATAG_SZ(AP) + (SZ)))
211 
212 #define UMEM2BLK(P) ((Block_t *) (((char *) (P)) - ABLK_HDR_SZ))
213 #define BLK2UMEM(P) ((void *)    (((char *) (P)) + ABLK_HDR_SZ))
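
/* Pointer arithmetic example for the two macros above (illustrative
 * numbers only): with ABLK_HDR_SZ == 8, a block header at 0x1000 has its
 * user memory at 0x1008, so UMEM2BLK((void *) 0x1008) == (Block_t *) 0x1000
 * and BLK2UMEM((Block_t *) 0x1000) == (void *) 0x1008. */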
214 
215 #define PREV_BLK_SZ(B) 		((UWord) (((FreeBlkFtr_t *)(B))[-1]))
216 
217 #define SET_BLK_SZ_FTR(B, SZ) \
218   (((FreeBlkFtr_t *) (((char *) (B)) + (SZ)))[-1] = (SZ))
219 
220 #define SET_MBC_ABLK_SZ(B, SZ) \
221   (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \
222    (B)->bhdr = (((B)->bhdr) & ~MBC_ABLK_SZ_MASK) | (SZ))
223 #define SET_MBC_FBLK_SZ(B, SZ) \
224   (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \
225    (B)->bhdr = (((B)->bhdr) & ~MBC_FBLK_SZ_MASK) | (SZ))
226 #define SET_SBC_BLK_SZ(B, SZ) \
227   (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \
228    (B)->bhdr = (((B)->bhdr) & ~SBC_BLK_SZ_MASK) | (SZ))
229 #define SET_PREV_BLK_FREE(AP,B) \
230   (ASSERT(!IS_MBC_FIRST_BLK(AP,B)), \
231    ASSERT(!IS_FREE_BLK(B)), \
232    (B)->bhdr |= PREV_FREE_BLK_HDR_FLG)
233 #define SET_PREV_BLK_ALLOCED(B) \
234   ((B)->bhdr &= ~PREV_FREE_BLK_HDR_FLG)
235 #define SET_LAST_BLK(B) \
236   ((B)->bhdr |= LAST_BLK_HDR_FLG)
237 #define SET_NOT_LAST_BLK(B) \
238   ((B)->bhdr &= ~LAST_BLK_HDR_FLG)
239 
240 #define SBH_THIS_FREE		THIS_FREE_BLK_HDR_FLG
241 #define SBH_PREV_FREE		PREV_FREE_BLK_HDR_FLG
242 #define SBH_LAST_BLK		LAST_BLK_HDR_FLG
243 
244 
245 #if MBC_ABLK_OFFSET_BITS
246 
247 #  define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << ERTS_SUPER_ALIGN_BITS)
248 
249 #  define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> ERTS_SACRR_UNIT_SHIFT)
250 
251 #  define SET_MBC_ABLK_HDR(B, Sz, F, C) \
252     (ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \
253      ASSERT(!((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
254      (B)->bhdr = ((Sz) | (F) | (BLK_CARRIER_OFFSET(B,C) << MBC_ABLK_OFFSET_SHIFT)))
255 
256 #  define SET_MBC_FBLK_HDR(B, Sz, F, C) \
257     (ASSERT(((Sz) & ~MBC_FBLK_SZ_MASK) == 0), \
258      ASSERT(((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
259      (B)->bhdr = ((Sz) | (F)), \
260      (B)->u.carrier = (C))
261 
262 #  define IS_MBC_FIRST_ABLK(AP,B) \
263   ((((UWord)(B) & ~ERTS_SACRR_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
264    && ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0)
265 
266 #  define IS_MBC_FIRST_FBLK(AP,B) \
267   ((char*)(B) == (char*)((B)->u.carrier) + MBC_HEADER_SIZE(AP))
268 
269 #  define IS_MBC_FIRST_BLK(AP,B) \
270   (IS_FREE_BLK(B) ? IS_MBC_FIRST_FBLK(AP,B) : IS_MBC_FIRST_ABLK(AP,B))
271 
272 #  define SET_BLK_FREE(B) \
273   (ASSERT(!IS_PREV_BLK_FREE(B)), \
274    (B)->u.carrier = ABLK_TO_MBC(B), \
275    (B)->bhdr &= (MBC_ABLK_SZ_MASK|LAST_BLK_HDR_FLG), \
276    (B)->bhdr |= THIS_FREE_BLK_HDR_FLG)
277 
278 #  define SET_BLK_ALLOCED(B) \
279   (ASSERT(((B)->bhdr & (MBC_ABLK_OFFSET_MASK|THIS_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
280    (B)->bhdr &= ~THIS_FREE_BLK_HDR_FLG, \
281    (B)->bhdr |= (BLK_CARRIER_OFFSET(B,(B)->u.carrier) << MBC_ABLK_OFFSET_SHIFT))
282 
283 #else /* !MBC_ABLK_OFFSET_BITS */
284 
285 #  define MBC_SZ_MAX_LIMIT ((UWord)~0)
286 
287 #  define SET_MBC_ABLK_HDR(B, Sz, F, C) \
288     (ASSERT(((Sz) & BLK_FLG_MASK) == 0), \
289      ASSERT(((F) & ~BLK_FLG_MASK) == 0), \
290      ASSERT(!((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
291      (B)->bhdr = ((Sz) | (F)), \
292      (B)->carrier = (C))
293 
294 #  define SET_MBC_FBLK_HDR(B, Sz, F, C) \
295     (ASSERT(((Sz) & BLK_FLG_MASK) == 0), \
296      ASSERT(((F) & ~BLK_FLG_MASK) == 0), \
297      ASSERT(((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
298      (B)->bhdr = ((Sz) | (F)), \
299      (B)->carrier = (C))
300 
301 #  define IS_MBC_FIRST_BLK(AP,B) \
302   ((char*)(B) == (char*)((B)->carrier) + MBC_HEADER_SIZE(AP))
303 #  define IS_MBC_FIRST_ABLK(AP,B) IS_MBC_FIRST_BLK(AP,B)
304 #  define IS_MBC_FIRST_FBLK(AP,B) IS_MBC_FIRST_BLK(AP,B)
305 
306 #  define SET_BLK_FREE(B) \
307   (ASSERT(!IS_PREV_BLK_FREE(B)), \
308    (B)->bhdr |= THIS_FREE_BLK_HDR_FLG)
309 
310 #  define SET_BLK_ALLOCED(B) \
311   ((B)->bhdr &= ~THIS_FREE_BLK_HDR_FLG)
312 
313 #endif /* !MBC_ABLK_OFFSET_BITS */
314 
315 #define SET_SBC_BLK_HDR(B, Sz) \
316   (ASSERT(((Sz) & BLK_FLG_MASK) == 0), (B)->bhdr = ((Sz) | (SBC_BLK_HDR_FLG)))
317 
318 
319 #define BLK_UMEM_SZ(B) \
320   (BLK_SZ(B) - (ABLK_HDR_SZ))
321 #define IS_PREV_BLK_FREE(B) \
322   ((B)->bhdr & PREV_FREE_BLK_HDR_FLG)
323 #define IS_PREV_BLK_ALLOCED(B) \
324   (!IS_PREV_BLK_FREE((B)))
325 #define IS_ALLOCED_BLK(B) \
326   (!IS_FREE_BLK((B)))
327 #define IS_LAST_BLK(B) \
328   ((B)->bhdr & LAST_BLK_HDR_FLG)
329 #define IS_NOT_LAST_BLK(B) \
330   (!IS_LAST_BLK((B)))
331 
332 #define GET_LAST_BLK_HDR_FLG(B) \
333   ((B)->bhdr & LAST_BLK_HDR_FLG)
334 #define GET_THIS_FREE_BLK_HDR_FLG(B) \
335   ((B)->bhdr & THIS_FREE_BLK_HDR_FLG)
336 #define GET_PREV_FREE_BLK_HDR_FLG(B) \
337   ((B)->bhdr & PREV_FREE_BLK_HDR_FLG)
338 #define GET_BLK_HDR_FLGS(B) \
339   ((B)->bhdr & BLK_FLG_MASK)
340 
341 #define NXT_BLK(B) \
342   (ASSERT(IS_MBC_BLK(B)), \
343    (Block_t *) (((char *) (B)) + MBC_BLK_SZ((B))))
344 #define PREV_BLK(B) \
345   ((Block_t *) (((char *) (B)) - PREV_BLK_SZ((B))))
346 
347 #define BLK_AFTER(B,Sz) \
348   ((Block_t *) (((char *) (B)) + (Sz)))
349 
350 #define BLK_SZ(B) ((B)->bhdr & (((B)->bhdr & THIS_FREE_BLK_HDR_FLG) ? MBC_FBLK_SZ_MASK : MBC_ABLK_SZ_MASK))
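
/* Note on BLK_SZ: free MBC blocks use the wider MBC_FBLK_SZ_MASK since
 * they keep a carrier pointer in the block body, while allocated blocks
 * (when MBC_ABLK_OFFSET_BITS != 0) reuse the high header bits for the
 * carrier offset and therefore use the narrower MBC_ABLK_SZ_MASK. */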
351 
352 /* Carriers ... */
353 
354 /* #define ERTS_ALC_CPOOL_DEBUG */
355 
356 #if defined(DEBUG) && !defined(ERTS_ALC_CPOOL_DEBUG)
357 #  define ERTS_ALC_CPOOL_DEBUG
358 #endif
359 
360 
361 #ifdef ERTS_ALC_CPOOL_DEBUG
362 #  define ERTS_ALC_CPOOL_ASSERT(A)				\
363     ((void) ((A)						\
364 	     ? 1						\
365 	     : (erts_alcu_assert_failed(#A,			\
366 					(char *) __FILE__,	\
367 					__LINE__,		\
368 					(char *) __func__),	\
369 		0)))
370 #else
371 #  define ERTS_ALC_CPOOL_ASSERT(A) ((void) 1)
372 #endif
373 
374 #define ERTS_ALC_IS_CPOOL_ENABLED(A)	((A)->cpool.util_limit)
375 
376 
377 #define ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON		1000
378 #define ERTS_ALC_CPOOL_ALLOC_OP_INC			8
379 #define ERTS_ALC_CPOOL_FREE_OP_DEC			10
380 
381 #define ERTS_ALC_CPOOL_ALLOC_OP(A)						\
382 do {										\
383     if ((A)->cpool.disable_abandon < ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON) { 	\
384 	(A)->cpool.disable_abandon += ERTS_ALC_CPOOL_ALLOC_OP_INC;		\
385 	if ((A)->cpool.disable_abandon > ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON)	\
386 	    (A)->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON;	\
387     }										\
388 } while (0)
389 
390 
391 #if ERTS_ALC_CPOOL_ALLOC_OP_INC >= ERTS_ALC_CPOOL_FREE_OP_DEC
392 #  error "Implementation assumes ERTS_ALC_CPOOL_ALLOC_OP_INC < ERTS_ALC_CPOOL_FREE_OP_DEC"
393 #endif
394 
395 #define ERTS_ALC_CPOOL_REALLOC_OP(A)						\
396 do {										\
397     if ((A)->cpool.disable_abandon) {						\
398 	(A)->cpool.disable_abandon -= (ERTS_ALC_CPOOL_FREE_OP_DEC		\
399 				       - ERTS_ALC_CPOOL_ALLOC_OP_INC);		\
400 	if ((A)->cpool.disable_abandon < 0)					\
401 	    (A)->cpool.disable_abandon = 0;					\
402     }										\
403 } while (0)
404 
405 #define ERTS_ALC_CPOOL_FREE_OP(A)						\
406 do {										\
407     if ((A)->cpool.disable_abandon) {						\
408 	(A)->cpool.disable_abandon -= ERTS_ALC_CPOOL_FREE_OP_DEC;		\
409 	if ((A)->cpool.disable_abandon < 0)					\
410 	    (A)->cpool.disable_abandon = 0;					\
411     }										\
412 } while (0)
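
/* Worked example of the disable_abandon accounting above (illustrative
 * numbers): starting at 0, three allocations raise the counter by
 * 3*ERTS_ALC_CPOOL_ALLOC_OP_INC == 24 (capped at 1000); one free then
 * lowers it by 10 to 14 and one realloc by the net 10 - 8 == 2 to 12.
 * The intent is that bursts of allocation keep the counter above zero
 * and thereby temporarily hold back abandoning of carriers. */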
413 
414 
415 #define ERTS_CRR_ALCTR_FLG_IN_POOL	(((erts_aint_t) 1) << 0)
416 #define ERTS_CRR_ALCTR_FLG_BUSY		(((erts_aint_t) 1) << 1)
417 #define ERTS_CRR_ALCTR_FLG_HOMECOMING	(((erts_aint_t) 1) << 2)
418 #define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \
419                                  ERTS_CRR_ALCTR_FLG_BUSY |    \
420                                  ERTS_CRR_ALCTR_FLG_HOMECOMING)
421 
422 #define SBC_HEADER_SIZE	   						\
423     (UNIT_CEILING(offsetof(Carrier_t, cpool)                            \
424 	          + ABLK_HDR_SZ)	                                \
425      - ABLK_HDR_SZ)
426 #define MBC_HEADER_SIZE(AP) ((AP)->mbc_header_size)
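
/* Size sketch for SBC_HEADER_SIZE (purely assumed example values): with
 * 8 byte units, offsetof(Carrier_t, cpool) == 16 and ABLK_HDR_SZ == 8,
 * the header becomes UNIT_CEILING(16 + 8) - 8 == 16 bytes. The point of
 * the expression is that an SBC does not use the cpool part of Carrier_t,
 * and that the user data of the single block ends up unit aligned. */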
427 
428 
429 #define MSEG_CARRIER_HDR_FLAG		(((UWord) 1) << 0)
430 #define SBC_CARRIER_HDR_FLAG		(((UWord) 1) << 1)
431 
432 #define SCH_SYS_ALLOC			0
433 #define SCH_MSEG			MSEG_CARRIER_HDR_FLAG
434 #define SCH_MBC				0
435 #define SCH_SBC				SBC_CARRIER_HDR_FLAG
436 
437 #define SET_CARRIER_HDR(C, Sz, F, AP) \
438   (ASSERT(((Sz) & CRR_FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \
439    erts_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP)))
440 
441 #define BLK_TO_SBC(B) \
442   ((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE))
443 #define FIRST_BLK_TO_MBC(AP, B) \
444   ((Carrier_t *) (((char *) (B)) - MBC_HEADER_SIZE(AP)))
445 
446 #define MBC_TO_FIRST_BLK(AP, P) \
447   ((Block_t *) (((char *) (P)) + MBC_HEADER_SIZE(AP)))
448 #define SBC2BLK(AP, P) \
449   ((Block_t *) (((char *) (P)) + SBC_HEADER_SIZE))
450 #define SBC2UMEM(AP, P) \
451   ((void *) (((char *) (P)) + (SBC_HEADER_SIZE + ABLK_HDR_SZ)))
452 
453 #define IS_MSEG_CARRIER(C) \
454   ((C)->chdr & MSEG_CARRIER_HDR_FLAG)
455 #define IS_SYS_ALLOC_CARRIER(C) \
456   (!IS_MSEG_CARRIER((C)))
457 #define IS_SB_CARRIER(C) \
458   ((C)->chdr & SBC_CARRIER_HDR_FLAG)
459 #define IS_MB_CARRIER(C) \
460   (!IS_SB_CARRIER((C)))
461 
462 #define SET_CARRIER_SZ(C, SZ) \
463   (ASSERT(((SZ) & CRR_FLG_MASK) == 0), \
464    ((C)->chdr = ((C)->chdr & CRR_FLG_MASK) | (SZ)))
465 
466 #define CFLG_SBC				(1 << 0)
467 #define CFLG_MBC				(1 << 1)
468 #define CFLG_FORCE_MSEG				(1 << 2)
469 #define CFLG_FORCE_SYS_ALLOC			(1 << 3)
470 #define CFLG_FORCE_SIZE				(1 << 4)
471 #define CFLG_MAIN_CARRIER			(1 << 5)
472 #define CFLG_NO_CPOOL				(1 << 6)
473 
474 #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
475 static void check_blk_carrier(Allctr_t *, Block_t *);
476 #define HARD_CHECK_BLK_CARRIER(A, B)	check_blk_carrier((A), (B))
477 #else
478 #define HARD_CHECK_BLK_CARRIER(A, B)
479 #endif
480 
481 /* Statistics updating ... */
482 
483 #ifdef DEBUG
484 #define DEBUG_CHECK_CARRIER_NO_SZ(AP)					\
485     ASSERT(((AP)->sbcs.curr.norm.mseg.no				\
486 	    && (AP)->sbcs.curr.norm.mseg.size)				\
487 	   || (!(AP)->sbcs.curr.norm.mseg.no				\
488 	       && !(AP)->sbcs.curr.norm.mseg.size));			\
489     ASSERT(((AP)->sbcs.curr.norm.sys_alloc.no				\
490 	    && (AP)->sbcs.curr.norm.sys_alloc.size)			\
491 	   || (!(AP)->sbcs.curr.norm.sys_alloc.no			\
492 	       && !(AP)->sbcs.curr.norm.sys_alloc.size));		\
493     ASSERT(((AP)->mbcs.curr.norm.mseg.no				\
494 	    && (AP)->mbcs.curr.norm.mseg.size)				\
495 	   || (!(AP)->mbcs.curr.norm.mseg.no				\
496 	       && !(AP)->mbcs.curr.norm.mseg.size));			\
497     ASSERT(((AP)->mbcs.curr.norm.sys_alloc.no				\
498 	    && (AP)->mbcs.curr.norm.sys_alloc.size)			\
499 	   || (!(AP)->mbcs.curr.norm.sys_alloc.no			\
500 	       && !(AP)->mbcs.curr.norm.sys_alloc.size));
501 
502 #else
503 #define DEBUG_CHECK_CARRIER_NO_SZ(AP)
504 #endif
505 
506 #define STAT_SBC_ALLOC(AP, BSZ)						\
507     (AP)->sbcs.blocks.curr.size += (BSZ);				\
508     if ((AP)->sbcs.blocks.max.size < (AP)->sbcs.blocks.curr.size)	\
509 	(AP)->sbcs.blocks.max.size = (AP)->sbcs.blocks.curr.size;	\
510     if ((AP)->sbcs.max.no < ((AP)->sbcs.curr.norm.mseg.no		\
511 			     + (AP)->sbcs.curr.norm.sys_alloc.no))	\
512 	(AP)->sbcs.max.no = ((AP)->sbcs.curr.norm.mseg.no		\
513 			     + (AP)->sbcs.curr.norm.sys_alloc.no);	\
514     if ((AP)->sbcs.max.size < ((AP)->sbcs.curr.norm.mseg.size		\
515 			       + (AP)->sbcs.curr.norm.sys_alloc.size))	\
516 	(AP)->sbcs.max.size = ((AP)->sbcs.curr.norm.mseg.size		\
517 			       + (AP)->sbcs.curr.norm.sys_alloc.size)
518 
519 #define STAT_MSEG_SBC_ALLOC(AP, CSZ, BSZ)				\
520 do {									\
521     (AP)->sbcs.curr.norm.mseg.no++;					\
522     (AP)->sbcs.curr.norm.mseg.size += (CSZ);				\
523     STAT_SBC_ALLOC((AP), (BSZ));					\
524     DEBUG_CHECK_CARRIER_NO_SZ((AP));					\
525 } while (0)
526 
527 #define STAT_SYS_ALLOC_SBC_ALLOC(AP, CSZ, BSZ)				\
528 do {									\
529     (AP)->sbcs.curr.norm.sys_alloc.no++;				\
530     (AP)->sbcs.curr.norm.sys_alloc.size += (CSZ);			\
531     STAT_SBC_ALLOC((AP), (BSZ));					\
532     DEBUG_CHECK_CARRIER_NO_SZ((AP));					\
533 } while (0)
534 
535 
536 #define STAT_SBC_FREE(AP, BSZ)						\
537     ASSERT((AP)->sbcs.blocks.curr.size >= (BSZ));			\
538     (AP)->sbcs.blocks.curr.size -= (BSZ)
539 
540 #define STAT_MSEG_SBC_FREE(AP, CSZ, BSZ)				\
541 do {									\
542     ASSERT((AP)->sbcs.curr.norm.mseg.no > 0);				\
543     (AP)->sbcs.curr.norm.mseg.no--;					\
544     ASSERT((AP)->sbcs.curr.norm.mseg.size >= (CSZ));			\
545     (AP)->sbcs.curr.norm.mseg.size -= (CSZ);				\
546     STAT_SBC_FREE((AP), (BSZ));						\
547     DEBUG_CHECK_CARRIER_NO_SZ((AP));					\
548 } while (0)
549 
550 #define STAT_SYS_ALLOC_SBC_FREE(AP, CSZ, BSZ)				\
551 do {									\
552     ASSERT((AP)->sbcs.curr.norm.sys_alloc.no > 0);			\
553     (AP)->sbcs.curr.norm.sys_alloc.no--;				\
554     ASSERT((AP)->sbcs.curr.norm.sys_alloc.size >= (CSZ));		\
555     (AP)->sbcs.curr.norm.sys_alloc.size -= (CSZ);			\
556     STAT_SBC_FREE((AP), (BSZ));						\
557     DEBUG_CHECK_CARRIER_NO_SZ((AP));					\
558 } while (0)
559 
560 #define STAT_MBC_ALLOC(AP)						\
561     if ((AP)->mbcs.max.no < ((AP)->mbcs.curr.norm.mseg.no		\
562 			     + (AP)->mbcs.curr.norm.sys_alloc.no))	\
563 	(AP)->mbcs.max.no = ((AP)->mbcs.curr.norm.mseg.no		\
564 			     + (AP)->mbcs.curr.norm.sys_alloc.no);	\
565     if ((AP)->mbcs.max.size < ((AP)->mbcs.curr.norm.mseg.size		\
566 			       + (AP)->mbcs.curr.norm.sys_alloc.size))	\
567 	(AP)->mbcs.max.size = ((AP)->mbcs.curr.norm.mseg.size		\
568 			       + (AP)->mbcs.curr.norm.sys_alloc.size)
569 
570 
571 #define STAT_MSEG_MBC_ALLOC(AP, CSZ)					\
572 do {									\
573     (AP)->mbcs.curr.norm.mseg.no++;					\
574     (AP)->mbcs.curr.norm.mseg.size += (CSZ);				\
575     STAT_MBC_ALLOC((AP));						\
576     DEBUG_CHECK_CARRIER_NO_SZ((AP));					\
577 } while (0)
578 
579 #define STAT_SYS_ALLOC_MBC_ALLOC(AP, CSZ)				\
580 do {									\
581     (AP)->mbcs.curr.norm.sys_alloc.no++;				\
582     (AP)->mbcs.curr.norm.sys_alloc.size += (CSZ);			\
583     STAT_MBC_ALLOC((AP));						\
584     DEBUG_CHECK_CARRIER_NO_SZ((AP));					\
585 } while (0)
586 
587 #define STAT_MBC_CPOOL_FETCH(AP, CRR)					\
588 do {									\
589     UWord csz__ = CARRIER_SZ((CRR));					\
590     if (IS_MSEG_CARRIER((CRR)))						\
591 	STAT_MSEG_MBC_ALLOC((AP), csz__);				\
592     else								\
593 	STAT_SYS_ALLOC_MBC_ALLOC((AP), csz__);				\
594     set_new_allctr_abandon_limit(AP);                                   \
595     (AP)->mbcs.blocks.curr.no += (CRR)->cpool.blocks[(AP)->alloc_no];   \
596     if ((AP)->mbcs.blocks.max.no < (AP)->mbcs.blocks.curr.no)		\
597 	(AP)->mbcs.blocks.max.no = (AP)->mbcs.blocks.curr.no;		\
598     (AP)->mbcs.blocks.curr.size +=                                      \
599        (CRR)->cpool.blocks_size[(AP)->alloc_no];                        \
600     if ((AP)->mbcs.blocks.max.size < (AP)->mbcs.blocks.curr.size)	\
601 	(AP)->mbcs.blocks.max.size = (AP)->mbcs.blocks.curr.size;	\
602 } while (0)
603 
604 #define STAT_MSEG_MBC_FREE(AP, CSZ)					\
605 do {									\
606     ASSERT((AP)->mbcs.curr.norm.mseg.no > 0);				\
607     (AP)->mbcs.curr.norm.mseg.no--;					\
608     ASSERT((AP)->mbcs.curr.norm.mseg.size >= (CSZ));			\
609     (AP)->mbcs.curr.norm.mseg.size -= (CSZ);				\
610     DEBUG_CHECK_CARRIER_NO_SZ((AP));					\
611 } while (0)
612 
613 #define STAT_SYS_ALLOC_MBC_FREE(AP, CSZ)				\
614 do {									\
615     ASSERT((AP)->mbcs.curr.norm.sys_alloc.no > 0);			\
616     (AP)->mbcs.curr.norm.sys_alloc.no--;				\
617     ASSERT((AP)->mbcs.curr.norm.sys_alloc.size >= (CSZ));		\
618     (AP)->mbcs.curr.norm.sys_alloc.size -= (CSZ);			\
619     DEBUG_CHECK_CARRIER_NO_SZ((AP));					\
620 } while (0)
621 
622 #define STAT_MBC_FREE(AP, CRR)                                               \
623 do {                                                                         \
624     UWord csz__ = CARRIER_SZ((CRR));                                         \
625     if (IS_MSEG_CARRIER((CRR))) {                                            \
626         STAT_MSEG_MBC_FREE((AP), csz__);                                     \
627     } else {                                                                 \
628         STAT_SYS_ALLOC_MBC_FREE((AP), csz__);                                \
629     }                                                                        \
630     set_new_allctr_abandon_limit(AP);                                        \
631 } while (0)
632 
633 #define STAT_MBC_ABANDON(AP, CRR)                                            \
634 do {                                                                         \
635     STAT_MBC_FREE(AP, CRR);                                                  \
636     ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.no                          \
637                           >= (CRR)->cpool.blocks[(AP)->alloc_no]);           \
638     (AP)->mbcs.blocks.curr.no -= (CRR)->cpool.blocks[(AP)->alloc_no];        \
639     ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.size                        \
640                           >= (CRR)->cpool.blocks_size[(AP)->alloc_no]);      \
641     (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size[(AP)->alloc_no]; \
642 } while (0)
643 
644 #define STAT_MBC_BLK_ALLOC_CRR(AP, CRR, BSZ)				\
645 do {									\
646     (CRR)->cpool.blocks[(AP)->alloc_no]++;				\
647     (CRR)->cpool.blocks_size[(AP)->alloc_no] += (BSZ);			\
648     (CRR)->cpool.total_blocks_size += (BSZ);				\
649 } while (0)
650 
651 #define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS)	       			\
652 do {									\
653     CarriersStats_t *cstats__ = &(AP)->mbcs;			        \
654     cstats__->blocks.curr.no++;						\
655     if (cstats__->blocks.max.no < cstats__->blocks.curr.no)		\
656 	cstats__->blocks.max.no = cstats__->blocks.curr.no;		\
657     cstats__->blocks.curr.size += (BSZ);				\
658     if (cstats__->blocks.max.size < cstats__->blocks.curr.size)		\
659 	cstats__->blocks.max.size = cstats__->blocks.curr.size;		\
660     STAT_MBC_BLK_ALLOC_CRR((AP), (CRR), (BSZ));				\
661 } while (0)
662 
663 static ERTS_INLINE int
664 stat_cpool_mbc_blk_free(Allctr_t *allctr,
665                         ErtsAlcType_t type,
666 			Carrier_t *crr,
667 			Carrier_t **busy_pcrr_pp,
668 			UWord blksz)
669 {
670     Allctr_t *orig_allctr;
671     int alloc_no;
672 
673     alloc_no = ERTS_ALC_T2A(type);
674 
675     ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks[alloc_no] > 0);
676     crr->cpool.blocks[alloc_no]--;
677     ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size[alloc_no] >= blksz);
678     crr->cpool.blocks_size[alloc_no] -= blksz;
679     ERTS_ALC_CPOOL_ASSERT(crr->cpool.total_blocks_size >= blksz);
680     crr->cpool.total_blocks_size -= blksz;
681 
682     if (allctr->alloc_no == alloc_no && (!busy_pcrr_pp || !*busy_pcrr_pp)) {
683         /* This is a local block, so we should not update the pool
684          * statistics. */
685         return 0;
686     }
687 
688     /* This is either a foreign block that's been fetched from the pool, or any
689      * block that's in the pool. The carrier's owner keeps the statistics for
690      * both pooled and foreign blocks. */
691 
692     orig_allctr = crr->cpool.orig_allctr;
693 
694     ERTS_ALC_CPOOL_ASSERT(alloc_no != allctr->alloc_no ||
695         (crr == *busy_pcrr_pp && allctr == orig_allctr));
696 
697 #ifdef ERTS_ALC_CPOOL_DEBUG
698     ERTS_ALC_CPOOL_ASSERT(
699 	erts_atomic_dec_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]) >= 0);
700     ERTS_ALC_CPOOL_ASSERT(
701 	erts_atomic_add_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
702 				 -((erts_aint_t) blksz)) >= 0);
703 #else
704     erts_atomic_dec_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]);
705     erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
706 			-((erts_aint_t) blksz));
707 #endif
708 
709     return 1;
710 }
711 
712 #define STAT_MBC_BLK_FREE(AP, TYPE, CRR, BPCRRPP, BSZ, FLGS)               \
713 do {                                                                       \
714     if (!stat_cpool_mbc_blk_free((AP), (TYPE), (CRR), (BPCRRPP), (BSZ))) { \
715         CarriersStats_t *cstats__ = &(AP)->mbcs;                           \
716         ASSERT(cstats__->blocks.curr.no > 0);                              \
717         cstats__->blocks.curr.no--;                                        \
718         ASSERT(cstats__->blocks.curr.size >= (BSZ));                       \
719         cstats__->blocks.curr.size -= (BSZ);                               \
720     }                                                                      \
721 } while (0)
722 
723 /* Debug stuff... */
724 #ifdef DEBUG
725 static UWord carrier_alignment;
726 #define DEBUG_SAVE_ALIGNMENT(C)						\
727 do {									\
728     UWord algnmnt__ = sizeof(Unit_t) - (((UWord) (C)) % sizeof(Unit_t));\
729     carrier_alignment = MIN(carrier_alignment, algnmnt__);		\
730     ASSERT(((UWord) (C)) % sizeof(UWord) == 0);				\
731 } while (0)
732 #define DEBUG_CHECK_ALIGNMENT(P)					\
733 do {									\
734     ASSERT(sizeof(Unit_t) - (((UWord) (P)) % sizeof(Unit_t))		\
735 	   >= carrier_alignment);					\
736     ASSERT(((UWord) (P)) % sizeof(UWord) == 0);				\
737 } while (0)
738 
739 #else
740 #define DEBUG_SAVE_ALIGNMENT(C)
741 #define DEBUG_CHECK_ALIGNMENT(P)
742 #endif
743 
744 #ifdef DEBUG
745 #  define IS_ACTUALLY_BLOCKING (erts_thr_progress_is_blocking())
746 #define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)					\
747 do {									\
748     if (!(A)->thread_safe && !IS_ACTUALLY_BLOCKING) {                   \
749 	if (!(A)->debug.saved_tid) {                                    \
750 	    (A)->debug.tid = erts_thr_self();				\
751 	    (A)->debug.saved_tid = 1;					\
752 	}								\
753 	else {								\
754 	    ERTS_LC_ASSERT(						\
755 		ethr_equal_tids((A)->debug.tid, erts_thr_self()));	\
756 	}								\
757     }									\
758 } while (0)
759 #else
760 #define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
761 #endif
762 
763 static void make_name_atoms(Allctr_t *allctr);
764 
765 static Block_t *create_carrier(Allctr_t *, Uint, UWord);
766 static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **);
767 static void mbc_free(Allctr_t *allctr, ErtsAlcType_t type, void *p, Carrier_t **busy_pcrr_pp);
768 static void dealloc_block(Allctr_t *, ErtsAlcType_t, Uint32, void *, ErtsAlcFixList_t *);
769 
770 static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type)
771 {
772     ErtsSchedulerData *esdp;
773     Eterm id;
774 
775     ERTS_CT_ASSERT(_unchecked_atom_val(am_system) <= MAX_ATAG_ATOM_ID);
776     ASSERT(allocator->atags);
777 
778     esdp = erts_get_scheduler_data();
779     id = am_system;
780 
781     if (esdp) {
782         if (esdp->current_nif) {
783             Module *mod = erts_nif_get_module((esdp->current_nif)->mod_nif);
784 
785             /* Mod can be NULL if a resource destructor allocates memory after
786              * the module has been unloaded. */
787             if (mod) {
788                 id = make_atom(mod->module);
789             }
790         } else if (esdp->current_port) {
791             Port *p = esdp->current_port;
792             id = (p->drv_ptr)->name_atom;
793         }
794 
795         /* We fall back to 'system' if we can't pack the driver/NIF name into
796          * the tag. This may be a bit misleading but we've made no promises
797          * that the information is complete.
798          *
799          * This can only happen on 32-bit emulators when a new driver/NIF has
800          * been loaded *after* 16 million atoms have been used, and supporting
801          * that fringe case is not worth an extra word. 64-bit emulators are
802          * unaffected since the atom cache limits atom indexes to 32 bits. */
803         if(MAX_ATOM_TABLE_SIZE > MAX_ATAG_ATOM_ID) {
804             if (atom_val(id) > MAX_ATAG_ATOM_ID) {
805                 id = am_system;
806             }
807         }
808     }
809 
810     return MAKE_ATAG(id, ERTS_ALC_T2N(type));
811 }
812 
813 static void set_alloc_tag(Allctr_t *allocator, void *p, alcu_atag_t tag)
814 {
815     Block_t *block;
816 
817     ASSERT(DBG_IS_VALID_ATAG(tag));
818     ASSERT(allocator->atags && p);
819     (void)allocator;
820 
821     block = UMEM2BLK(p);
822 
823     SET_BLK_ATAG(block, tag);
824 }
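
/* Compiled-out sketch of how the two helpers above are meant to be
 * combined (an assumption based on their signatures, not a verbatim copy
 * of the real call sites): compute the tag before allocating and attach
 * it once the allocation has succeeded. */
#if 0
static void *alloc_with_atag_example(Allctr_t *allctr, ErtsAlcType_t type,
                                     Uint size)
{
    alcu_atag_t tag = determine_alloc_tag(allctr, type);
    void *p = mbc_alloc(allctr, size); /* SBCs work the same way */
    if (p)
        set_alloc_tag(allctr, p, tag);
    return p;
}
#endif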
825 
826 /* internal data... */
827 
828 #if 0
829 
830 static ERTS_INLINE void *
831 internal_alloc(UWord size)
832 {
833     void *res = erts_sys_alloc(0, NULL, size);
834     if (!res)
835 	erts_alloc_enomem(ERTS_ALC_T_UNDEF, size);
836     return res;
837 }
838 
839 static ERTS_INLINE void *
840 internal_realloc(void *ptr, UWord size)
841 {
842     void *res = erts_sys_realloc(0, NULL, ptr, size);
843     if (!res)
844 	erts_alloc_enomem(ERTS_ALC_T_UNDEF, size);
845     return res;
846 }
847 
848 static ERTS_INLINE void
849 internal_free(void *ptr)
850 {
851     erts_sys_free(0, NULL, ptr);
852 }
853 
854 #endif
855 
856 #ifdef ARCH_32
857 
858 /*
859  * Bit vector for the entire 32-bit virtual address space
860  * with one bit for each super aligned memory segment.
861  */
862 
863 #define VSPACE_MAP_BITS  (1 << (32 - ERTS_MMAP_SUPERALIGNED_BITS))
864 #define VSPACE_MAP_SZ    (VSPACE_MAP_BITS / ERTS_VSPACE_WORD_BITS)
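
/* Size example (assuming ERTS_MMAP_SUPERALIGNED_BITS == 18, i.e. 256 kB
 * segments, and 32 bit words): VSPACE_MAP_BITS == 1 << 14 == 16384, so
 * VSPACE_MAP_SZ == 16384 / 32 == 512 words == 2 kB to cover the whole
 * 32 bit address space. */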
865 
866 static ERTS_INLINE void set_bit(UWord* map, Uint ix)
867 {
868     ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
869     map[ix / ERTS_VSPACE_WORD_BITS]
870         |= ((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
871 }
872 
873 static ERTS_INLINE void clr_bit(UWord* map, Uint ix)
874 {
875     ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
876     map[ix / ERTS_VSPACE_WORD_BITS]
877         &= ~((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
878 }
879 
880 #ifdef DEBUG
881 
882 static ERTS_INLINE int is_bit_set(UWord* map, Uint ix)
883 {
884     ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
885     return map[ix / ERTS_VSPACE_WORD_BITS]
886         & ((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
887 }
888 
889 #endif
890 
891 UWord erts_literal_vspace_map[VSPACE_MAP_SZ];
892 
893 static void set_literal_range(void* start, Uint size)
894 {
895     Uint ix = (UWord)start >> ERTS_MMAP_SUPERALIGNED_BITS;
896     Uint n = size >> ERTS_MMAP_SUPERALIGNED_BITS;
897 
898     ASSERT(!((UWord)start & ERTS_INV_SUPERALIGNED_MASK));
899     ASSERT(!((UWord)size & ERTS_INV_SUPERALIGNED_MASK));
900     ASSERT(n);
901     while (n--) {
902         ASSERT(!is_bit_set(erts_literal_vspace_map, ix));
903         set_bit(erts_literal_vspace_map, ix);
904         ix++;
905     }
906 }
907 
908 static void clear_literal_range(void* start, Uint size)
909 {
910     Uint ix = (UWord)start >> ERTS_MMAP_SUPERALIGNED_BITS;
911     Uint n = size >> ERTS_MMAP_SUPERALIGNED_BITS;
912 
913     ASSERT(!((UWord)start & ERTS_INV_SUPERALIGNED_MASK));
914     ASSERT(!((UWord)size & ERTS_INV_SUPERALIGNED_MASK));
915     ASSERT(n);
916     while (n--) {
917         ASSERT(is_bit_set(erts_literal_vspace_map, ix));
918         clr_bit(erts_literal_vspace_map, ix);
919         ix++;
920     }
921 }
922 
923 #endif /* ARCH_32 */
924 
925 /* mseg ... */
926 
927 #if HAVE_ERTS_MSEG
928 
929 static void*
930 erts_alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
931 {
932     void *res;
933     UWord size = (UWord) *size_p;
934     res = erts_mseg_alloc_opt(allctr->alloc_no, &size, flags, &allctr->mseg_opt);
935     *size_p = (Uint) size;
936     INC_CC(allctr->calls.mseg_alloc);
937     return res;
938 }
939 
940 static void*
941 erts_alcu_mseg_realloc(Allctr_t *allctr, void *seg,
942                        Uint old_size, Uint *new_size_p)
943 {
944     void *res;
945     UWord new_size = (UWord) *new_size_p;
946     res = erts_mseg_realloc_opt(allctr->alloc_no, seg, (UWord) old_size, &new_size,
947 				ERTS_MSEG_FLG_NONE, &allctr->mseg_opt);
948     *new_size_p = (Uint) new_size;
949     INC_CC(allctr->calls.mseg_realloc);
950     return res;
951 }
952 
953 static void
954 erts_alcu_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
955 {
956     erts_mseg_dealloc_opt(allctr->alloc_no, seg, (UWord) size, flags, &allctr->mseg_opt);
957     INC_CC(allctr->calls.mseg_dealloc);
958 }
959 
960 
961 #if defined(ARCH_32)
962 
963 void*
964 erts_alcu_literal_32_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
965 {
966     void* res;
967     Uint sz = ERTS_SUPERALIGNED_CEILING(*size_p);
968     ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
969                    allctr->t == 0);
970     ERTS_LC_ASSERT(allctr->thread_safe);
971 
972     res = erts_alcu_mseg_alloc(allctr, &sz, flags);
973     if (res) {
974         set_literal_range(res, sz);
975         *size_p = sz;
976     }
977     return res;
978 }
979 
980 void*
981 erts_alcu_literal_32_mseg_realloc(Allctr_t *allctr, void *seg,
982                                   Uint old_size, Uint *new_size_p)
983 {
984     void* res;
985     Uint new_sz = ERTS_SUPERALIGNED_CEILING(*new_size_p);
986     ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
987                    allctr->t == 0);
988     ERTS_LC_ASSERT(allctr->thread_safe);
989 
990     if (seg && old_size)
991         clear_literal_range(seg, old_size);
992     res = erts_alcu_mseg_realloc(allctr, seg, old_size, &new_sz);
993     if (res) {
994         set_literal_range(res, new_sz);
995         *new_size_p = new_sz;
996     }
997     return res;
998 }
999 
1000 void
1001 erts_alcu_literal_32_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
1002                                Uint flags)
1003 {
1004     ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
1005                    allctr->t == 0);
1006     ERTS_LC_ASSERT(allctr->thread_safe);
1007 
1008     erts_alcu_mseg_dealloc(allctr, seg, size, flags);
1009 
1010     clear_literal_range(seg, size);
1011 }
1012 
1013 #elif defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
1014 
1015 /* For allocators that have their own mmapper (super carrier),
1016  * like literal_alloc.
1017  */
1018 void*
1019 erts_alcu_mmapper_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
1020 {
1021     void* res;
1022     UWord size = (UWord) *size_p;
1023     Uint32 mmap_flags = ERTS_MMAPFLG_SUPERCARRIER_ONLY;
1024     if (flags & ERTS_MSEG_FLG_2POW)
1025         mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
1026 
1027     res = erts_mmap(allctr->mseg_mmapper, mmap_flags, &size);
1028     *size_p = (Uint)size;
1029     INC_CC(allctr->calls.mseg_alloc);
1030     return res;
1031 }
1032 
1033 void*
1034 erts_alcu_mmapper_mseg_realloc(Allctr_t *allctr, void *seg,
1035                                Uint old_size, Uint *new_size_p)
1036 {
1037     void *res;
1038     UWord new_size = (UWord) *new_size_p;
1039     res = erts_mremap(allctr->mseg_mmapper, ERTS_MSEG_FLG_NONE, seg, old_size, &new_size);
1040     *new_size_p = (Uint) new_size;
1041     INC_CC(allctr->calls.mseg_realloc);
1042     return res;
1043 }
1044 
1045 void
1046 erts_alcu_mmapper_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
1047                                Uint flags)
1048 {
1049     Uint32 mmap_flags = ERTS_MMAPFLG_SUPERCARRIER_ONLY;
1050     if (flags & ERTS_MSEG_FLG_2POW)
1051         mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED;
1052 
1053     erts_munmap(allctr->mseg_mmapper, mmap_flags, seg, (UWord)size);
1054     INC_CC(allctr->calls.mseg_dealloc);
1055 }
1056 #endif /* ARCH_64 && ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION */
1057 
1058 #if defined(ERTS_ALC_A_EXEC)
1059 
1060 /*
1061  * For exec_alloc, which needs memory with PROT_EXEC.
1062  */
1063 void*
1064 erts_alcu_exec_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
1065 {
1066     void* res = erts_alcu_mseg_alloc(allctr, size_p, flags);
1067 
1068     if (res) {
1069         int r = mprotect(res, *size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
1070         ASSERT(r == 0); (void)r;
1071     }
1072     return res;
1073 }
1074 
1075 void*
1076 erts_alcu_exec_mseg_realloc(Allctr_t *allctr, void *seg,
1077                             Uint old_size, Uint *new_size_p)
1078 {
1079     void *res;
1080 
1081     if (seg && old_size) {
1082         int r = mprotect(seg, old_size, PROT_READ | PROT_WRITE);
1083         ASSERT(r == 0); (void)r;
1084     }
1085     res = erts_alcu_mseg_realloc(allctr, seg, old_size, new_size_p);
1086     if (res) {
1087         int r = mprotect(res, *new_size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
1088         ASSERT(r == 0); (void)r;
1089     }
1090     return res;
1091 }
1092 
1093 void
1094 erts_alcu_exec_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
1095 {
1096     int r = mprotect(seg, size, PROT_READ | PROT_WRITE);
1097     ASSERT(r == 0); (void)r;
1098     erts_alcu_mseg_dealloc(allctr, seg, size, flags);
1099 }
1100 #endif /* ERTS_ALC_A_EXEC */
1101 
1102 #endif /* HAVE_ERTS_MSEG */
1103 
1104 static void*
1105 erts_alcu_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
1106 {
1107     void *res;
1108     const Uint size = *size_p;
1109 #if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
1110     if (superalign)
1111 	res = erts_sys_aligned_alloc(ERTS_SACRR_UNIT_SZ, size);
1112     else
1113 #endif
1114 	res = erts_sys_alloc(0, NULL, size);
1115     INC_CC(allctr->calls.sys_alloc);
1116     if (erts_mtrace_enabled)
1117 	erts_mtrace_crr_alloc(res, allctr->alloc_no, ERTS_ALC_A_SYSTEM, size);
1118     return res;
1119 }
1120 
1121 static void*
1122 erts_alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size, int superalign)
1123 {
1124     void *res;
1125     const Uint size = *size_p;
1126 
1127 #if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
1128     if (superalign)
1129 	res = erts_sys_aligned_realloc(ERTS_SACRR_UNIT_SZ, ptr, size, old_size);
1130     else
1131 #endif
1132 	res = erts_sys_realloc(0, NULL, ptr, size);
1133     INC_CC(allctr->calls.sys_realloc);
1134     if (erts_mtrace_enabled)
1135 	erts_mtrace_crr_realloc(res,
1136 				allctr->alloc_no,
1137 				ERTS_ALC_A_SYSTEM,
1138 				ptr,
1139 				size);
1140     return res;
1141 }
1142 
1143 static void
1144 erts_alcu_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int superalign)
1145 {
1146 #if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
1147     if (superalign)
1148 	erts_sys_aligned_free(ERTS_SACRR_UNIT_SZ, ptr);
1149     else
1150 #endif
1151 	erts_sys_free(0, NULL, ptr);
1152     INC_CC(allctr->calls.sys_free);
1153     if (erts_mtrace_enabled)
1154 	erts_mtrace_crr_free(allctr->alloc_no, ERTS_ALC_A_SYSTEM, ptr);
1155 }
1156 
1157 #ifdef ARCH_32
1158 
1159 void*
1160 erts_alcu_literal_32_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
1161 {
1162     void* res;
1163     Uint size = ERTS_SUPERALIGNED_CEILING(*size_p);
1164     ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
1165                    allctr->t == 0);
1166     ERTS_LC_ASSERT(allctr->thread_safe);
1167 
1168     res = erts_alcu_sys_alloc(allctr, &size, 1);
1169     if (res) {
1170         set_literal_range(res, size);
1171         *size_p = size;
1172     }
1173     return res;
1174 }
1175 
1176 void*
1177 erts_alcu_literal_32_sys_realloc(Allctr_t *allctr, void *ptr, Uint* size_p, Uint old_size, int superalign)
1178 {
1179     void* res;
1180     Uint size = ERTS_SUPERALIGNED_CEILING(*size_p);
1181 
1182     ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
1183                    allctr->t == 0);
1184     ERTS_LC_ASSERT(allctr->thread_safe);
1185 
1186     if (ptr && old_size)
1187         clear_literal_range(ptr, old_size);
1188     res = erts_alcu_sys_realloc(allctr, ptr, &size, old_size, 1);
1189     if (res) {
1190         set_literal_range(res, size);
1191         *size_p = size;
1192     }
1193     return res;
1194 }
1195 
1196 void
1197 erts_alcu_literal_32_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int superalign)
1198 {
1199     ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
1200                    allctr->t == 0);
1201     ERTS_LC_ASSERT(allctr->thread_safe);
1202 
1203     erts_alcu_sys_dealloc(allctr, ptr, size, 1);
1204 
1205     clear_literal_range(ptr, size);
1206 }
1207 
1208 #endif /* ARCH_32 */
1209 
1210 static Uint
1211 get_next_mbc_size(Allctr_t *allctr)
1212 {
1213     Uint size;
1214     int cs = (allctr->mbcs.curr.norm.mseg.no
1215 	      + allctr->mbcs.curr.norm.sys_alloc.no
1216 	      - (allctr->main_carrier ? 1 : 0));
1217 
1218     ASSERT(cs >= 0);
1219     ASSERT(allctr->largest_mbc_size >= allctr->smallest_mbc_size);
1220 
1221     if (cs >= allctr->mbc_growth_stages)
1222 	size = allctr->largest_mbc_size;
1223     else
1224 	size = ((cs*(allctr->largest_mbc_size - allctr->smallest_mbc_size)
1225 		 / allctr->mbc_growth_stages)
1226 		+ allctr->smallest_mbc_size);
1227 
1228     if (size < allctr->min_mbc_size)
1229 	size = allctr->min_mbc_size;
1230 
1231     return size;
1232 }
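
/* Worked example of the growth schedule in get_next_mbc_size()
 * (illustrative numbers): with smallest_mbc_size == 1 MB,
 * largest_mbc_size == 10 MB and mbc_growth_stages == 10, the carrier
 * allocated when cs == 3 gets 3*(10 - 1)/10 + 1 == 3.7 MB, and every
 * carrier from cs >= 10 on gets the full 10 MB. */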
1233 
1234 static ERTS_INLINE void
1235 link_carrier(CarrierList_t *cl, Carrier_t *crr)
1236 {
1237     crr->next = NULL;
1238     if (!cl->last) {
1239 	ASSERT(!cl->first);
1240 	cl->first = cl->last = crr;
1241 	crr->prev = NULL;
1242     }
1243     else {
1244 	ASSERT(cl->first);
1245 	ASSERT(!cl->first->prev);
1246 	ASSERT(cl->last);
1247 	ASSERT(!cl->last->next);
1248 	crr->prev = cl->last;
1249 	cl->last->next = crr;
1250 	cl->last = crr;
1251     }
1252     ASSERT(crr->next != crr);
1253     ASSERT(crr->prev != crr);
1254 }
1255 
1256 static ERTS_INLINE void
1257 relink_carrier(CarrierList_t *cl, Carrier_t *crr)
1258 {
1259     if (crr->next) {
1260 	if (crr->next->prev != crr)
1261 	    crr->next->prev = crr;
1262     }
1263     else if (cl->last != crr)
1264 	cl->last = crr;
1265 
1266     if (crr->prev) {
1267 	if (crr->prev->next != crr)
1268 	    crr->prev->next = crr;
1269     }
1270     else if (cl->first != crr)
1271 	cl->first = crr;
1272 }
1273 
1274 static ERTS_INLINE void
1275 unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
1276 {
1277     ASSERT(crr->next != crr);
1278     ASSERT(crr->prev != crr);
1279 
1280     if (cl->first == crr) {
1281 	ASSERT(!crr->prev);
1282 	cl->first = crr->next;
1283     }
1284     else {
1285 	ASSERT(crr->prev);
1286 	crr->prev->next = crr->next;
1287     }
1288 
1289     if (cl->last == crr) {
1290 	ASSERT(!crr->next);
1291 	cl->last = crr->prev;
1292     }
1293     else {
1294 	ASSERT(crr->next);
1295 	crr->next->prev = crr->prev;
1296     }
1297 #ifdef DEBUG
1298     crr->next = crr;
1299     crr->prev = crr;
1300 #endif
1301 }
1302 
1303 static ERTS_INLINE int is_abandoned(Carrier_t *crr)
1304 {
1305     return crr->cpool.state != ERTS_MBC_IS_HOME;
1306 }
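
/* A carrier counts as abandoned as soon as it has left the
 * ERTS_MBC_IS_HOME state, i.e. when it has been put in (or has passed
 * through) the carrier pool rather than sitting in its owner's carrier
 * list. */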
1307 
1308 static ERTS_INLINE void
1309 unlink_abandoned_carrier(Carrier_t *crr)
1310 {
1311     if (crr->cpool.state == ERTS_MBC_WAS_POOLED) {
1312         aoff_remove_pooled_mbc(crr->cpool.orig_allctr, crr);
1313     }
1314 }
1315 
1316 static ERTS_INLINE void
1317 clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
1318 {
1319     if (crr) {
1320 	erts_aint_t max_size;
1321 	erts_aint_t iallctr;
1322 
1323 	max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr);
1324 	erts_atomic_set_nob(&crr->cpool.max_size, max_size);
1325 
1326         iallctr = erts_atomic_read_nob(&crr->allctr);
1327         ERTS_ALC_CPOOL_ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
1328                               == ((erts_aint_t)allctr |
1329                                   ERTS_CRR_ALCTR_FLG_IN_POOL |
1330                                   ERTS_CRR_ALCTR_FLG_BUSY));
1331 
1332 	iallctr &= ~ERTS_CRR_ALCTR_FLG_BUSY;
1333 	erts_atomic_set_relb(&crr->allctr, iallctr);
1334     }
1335 }
1336 
1337 
1338 #if 0
1339 #define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B)			\
1340     do { if ((FIX)) chk_fix_list((A), (FIX), (IX), (B)); } while (0)
1341 static void
1342 chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before)
1343 {
1344     void *p;
1345     int n;
1346     for (n = 0, p = fix[ix].list; p; p = *((void **) p))
1347 	n++;
1348     if (n != fix[ix].list_size) {
1349 	erts_fprintf(stderr, "FOUND IT ts=%d, sched=%d, ix=%d, n=%d, ls=%d %s!\n",
1350 		     allctr->thread_safe, allctr->ix, ix, n, fix[ix].list_size, before ? "before" : "after");
1351 	abort();
1352     }
1353 }
1354 #else
1355 #define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B)
1356 #endif
1357 
1358 static ERTS_INLINE Allctr_t *get_pref_allctr(void *extra);
1359 static void *mbc_alloc(Allctr_t *allctr, Uint size);
1360 
1361 static ERTS_INLINE void
1362 sched_fix_shrink(Allctr_t *allctr, int on)
1363 {
1364     if (on && !allctr->fix_shrink_scheduled) {
1365 	allctr->fix_shrink_scheduled = 1;
1366 	erts_set_aux_work_timeout(allctr->ix,
1367 				  (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
1368 				   | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
1369 				  1);
1370     }
1371     else if (!on && allctr->fix_shrink_scheduled) {
1372 	allctr->fix_shrink_scheduled = 0;
1373 	erts_set_aux_work_timeout(allctr->ix,
1374 				  (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
1375 				   | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
1376 				  0);
1377     }
1378 }
1379 
1380 static ERTS_INLINE void
1381 fix_cpool_check_shrink(Allctr_t *allctr,
1382 		       ErtsAlcType_t type,
1383 		       ErtsAlcFixList_t *fix,
1384 		       Carrier_t **busy_pcrr_pp)
1385 {
1386     if (fix->u.cpool.shrink_list > 0) {
1387 	if (fix->list_size == 0)
1388 	    fix->u.cpool.shrink_list = 0;
1389 	else {
1390 	    void *p;
1391 	    if (busy_pcrr_pp) {
1392 		clear_busy_pool_carrier(allctr, *busy_pcrr_pp);
1393 		*busy_pcrr_pp = NULL;
1394 	    }
1395 	    fix->u.cpool.shrink_list--;
1396 	    p = fix->list;
1397 	    fix->list = *((void **) p);
1398 	    fix->list_size--;
1399 	    if (fix->u.cpool.min_list_size > fix->list_size)
1400 		fix->u.cpool.min_list_size = fix->list_size;
1401 
1402 	    dealloc_block(allctr, type, DEALLOC_FLG_FIX_SHRINK, p, fix);
1403 	}
1404     }
1405 }
1406 
1407 static ERTS_INLINE void *
1408 fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
1409 {
1410     void *res;
1411     ErtsAlcFixList_t *fix;
1412 
1413     fix = &allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)];
1414     ASSERT(type == fix->type && size == fix->type_size);
1415     ASSERT(size >= sizeof(ErtsAllctrDDBlock_t));
1416 
1417     res = fix->list;
1418     if (res) {
1419 	fix->list = *((void **) res);
1420 	fix->list_size--;
1421 	if (fix->u.cpool.min_list_size > fix->list_size)
1422 	    fix->u.cpool.min_list_size = fix->list_size;
1423 	fix->u.cpool.used++;
1424 	fix_cpool_check_shrink(allctr, type, fix, NULL);
1425 	return res;
1426     }
1427     if (size >= allctr->sbc_threshold) {
1428 	Block_t *blk;
1429 	blk = create_carrier(allctr, size, CFLG_SBC);
1430 	res = blk ? BLK2UMEM(blk) : NULL;
1431     }
1432     else
1433 	res = mbc_alloc(allctr, size);
1434     if (res) {
1435 	fix->u.cpool.used++;
1436 	fix->u.cpool.allocated++;
1437     }
1438     return res;
1439 }
1440 
1441 static ERTS_INLINE void
1442 fix_cpool_free(Allctr_t *allctr,
1443 	       ErtsAlcType_t type,
1444                Uint32 flags,
1445 	       void *p,
1446 	       Carrier_t **busy_pcrr_pp)
1447 {
1448     ErtsAlcFixList_t *fix;
1449     Allctr_t *fix_allctr;
1450 
1451     /* If this isn't a fix allocator we need to update the fix list of our
1452      * neighboring fix_alloc to keep the statistics consistent. */
1453     if (!allctr->fix) {
1454         ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
1455         fix_allctr = get_pref_allctr(tspec);
1456         ASSERT(!fix_allctr->thread_safe);
1457         ASSERT(allctr != fix_allctr);
1458     }
1459     else {
1460         fix_allctr = allctr;
1461     }
1462 
1463     ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(fix_allctr));
1464     ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr));
1465 
1466     fix = &fix_allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)];
1467     ASSERT(type == fix->type);
1468 
1469     if (!(flags & DEALLOC_FLG_FIX_SHRINK)) {
1470         fix->u.cpool.used--;
1471     }
1472 
1473     /* We don't want foreign blocks to be long-lived, so we skip recycling if
1474      * allctr != fix_allctr. */
1475     if (allctr == fix_allctr
1476         && (!busy_pcrr_pp || !*busy_pcrr_pp)
1477 	&& !fix->u.cpool.shrink_list
1478 	&& fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
1479 	*((void **) p) = fix->list;
1480 	fix->list = p;
1481 	fix->list_size++;
1482 	sched_fix_shrink(allctr, 1);
1483     }
1484     else {
1485 	Block_t *blk = UMEM2BLK(p);
1486 	if (IS_SBC_BLK(blk))
1487 	    destroy_carrier(allctr, blk, NULL);
1488 	else
1489 	    mbc_free(allctr, type, p, busy_pcrr_pp);
1490 	fix->u.cpool.allocated--;
1491 	fix_cpool_check_shrink(allctr, type, fix, busy_pcrr_pp);
1492     }
1493 }
1494 
1495 static ERTS_INLINE erts_aint32_t
1496 fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
1497 {
1498     int all_empty = 1;
1499     erts_aint32_t res = 0;
1500     int ix, o;
1501     int flush = flgs == 0;
1502 
1503     if (allctr->thread_safe)
1504 	erts_mtx_lock(&allctr->mutex);
1505 
1506     for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
1507 	ErtsAlcFixList_t *fix = &allctr->fix[ix];
1508 	ErtsAlcType_t type;
1509 	ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
1510 	if (flush)
1511 	    fix->u.cpool.shrink_list = fix->list_size;
1512 	else if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) {
1513 	    fix->u.cpool.shrink_list = fix->u.cpool.min_list_size;
1514 	    fix->u.cpool.min_list_size = fix->list_size;
1515 	}
1516 	type = ERTS_ALC_N2T((ErtsAlcType_t) (ix + ERTS_ALC_N_MIN_A_FIXED_SIZE));
1517 	for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) {
1518 	    void *ptr;
1519 
1520 	    if (fix->u.cpool.shrink_list == 0)
1521 		break;
1522 	    if (fix->list_size == 0) {
1523 		fix->u.cpool.shrink_list = 0;
1524 		break;
1525 	    }
1526 	    ptr = fix->list;
1527 	    fix->list = *((void **) ptr);
1528 	    fix->list_size--;
1529 	    fix->u.cpool.shrink_list--;
1530 	    dealloc_block(allctr, type, DEALLOC_FLG_FIX_SHRINK, ptr, fix);
1531 	}
1532 	if (fix->u.cpool.min_list_size > fix->list_size)
1533 	    fix->u.cpool.min_list_size = fix->list_size;
1534 	if (fix->list_size != 0) {
1535 	    if (fix->u.cpool.shrink_list > 0)
1536 		res |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
1537 	    all_empty = 0;
1538 	}
1539     }
1540 
1541     if (all_empty)
1542 	sched_fix_shrink(allctr, 0);
1543 
1544     if (allctr->thread_safe)
1545 	erts_mtx_unlock(&allctr->mutex);
1546 
1547     return res;
1548 }
1549 
1550 static ERTS_INLINE void *
1551 fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
1552 {
1553     ErtsAlcFixList_t *fix;
1554     void *res;
1555 
1556     fix = &allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)];
1557     ASSERT(type == fix->type && size == fix->type_size);
1558     ASSERT(size >= sizeof(ErtsAllctrDDBlock_t));
1559 
1560     ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
1561     fix->u.nocpool.used++;
1562     res = fix->list;
1563     if (res) {
1564 	fix->list_size--;
1565 	fix->list = *((void **) res);
1566 	if (fix->list && fix->u.nocpool.allocated > fix->u.nocpool.limit) {
1567 	    Block_t *blk;
1568 	    void *p = fix->list;
1569 	    fix->list = *((void **) p);
1570 	    fix->list_size--;
1571 	    blk = UMEM2BLK(p);
1572 	    if (IS_SBC_BLK(blk))
1573 		destroy_carrier(allctr, blk, NULL);
1574 	    else
1575 		mbc_free(allctr, type, p, NULL);
1576 	    fix->u.nocpool.allocated--;
1577 	}
1578 	ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
1579 	return res;
1580     }
1581     if (fix->u.nocpool.limit < fix->u.nocpool.used)
1582 	fix->u.nocpool.limit = fix->u.nocpool.used;
1583     if (fix->u.nocpool.max_used < fix->u.nocpool.used)
1584 	fix->u.nocpool.max_used = fix->u.nocpool.used;
1585     fix->u.nocpool.allocated++;
1586 
1587     if (size >= allctr->sbc_threshold) {
1588 	Block_t *blk;
1589 	blk = create_carrier(allctr, size, CFLG_SBC);
1590 	res = blk ? BLK2UMEM(blk) : NULL;
1591     }
1592     else
1593 	res = mbc_alloc(allctr, size);
1594 
1595     if (!res) {
1596 	fix->u.nocpool.allocated--;
1597 	fix->u.nocpool.used--;
1598     }
1599     return res;
1600 }
1601 
1602 static ERTS_INLINE void
1603 fix_nocpool_free(Allctr_t *allctr,
1604 		 ErtsAlcType_t type,
1605 		 void *p)
1606 {
1607     Block_t *blk;
1608     ErtsAlcFixList_t *fix;
1609 
1610     fix = &allctr->fix[ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE];
1611     ASSERT(fix->type == type);
1612 
1613     ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
1614     fix->u.nocpool.used--;
1615     if (fix->u.nocpool.allocated < fix->u.nocpool.limit
1616 	&& fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
1617 	*((void **) p) = fix->list;
1618 	fix->list = p;
1619 	fix->list_size++;
1620 	sched_fix_shrink(allctr, 1);
1621 	ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
1622 	return;
1623     }
1624     fix->u.nocpool.allocated--;
1625     if (fix->list && fix->u.nocpool.allocated > fix->u.nocpool.limit) {
1626 	blk = UMEM2BLK(p);
1627 	if (IS_SBC_BLK(blk))
1628 	    destroy_carrier(allctr, blk, NULL);
1629 	else
1630 	    mbc_free(allctr, type, p, NULL);
1631 	p = fix->list;
1632 	fix->list = *((void **) p);
1633 	fix->list_size--;
1634 	fix->u.nocpool.allocated--;
1635     }
1636 
1637     blk = UMEM2BLK(p);
1638     if (IS_SBC_BLK(blk))
1639 	destroy_carrier(allctr, blk, NULL);
1640     else
1641 	mbc_free(allctr, type, p, NULL);
1642     ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
1643 }
1644 
1645 static ERTS_INLINE erts_aint32_t
1646 fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
1647 {
1648     int all_empty = 1;
1649     erts_aint32_t res = 0;
1650     int ix, o;
1651     int flush = flgs == 0;
1652 
1653     if (allctr->thread_safe)
1654 	erts_mtx_lock(&allctr->mutex);
1655 
1656     for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
1657 	ErtsAlcFixList_t *fix = &allctr->fix[ix];
1658 	ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
1659 	if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) {
1660 	    fix->u.nocpool.limit = fix->u.nocpool.max_used;
1661 	    if (fix->u.nocpool.limit < fix->u.nocpool.used)
1662 		fix->u.nocpool.limit = fix->u.nocpool.used;
1663 	    fix->u.nocpool.max_used = fix->u.nocpool.used;
1664 	    ASSERT(fix->u.nocpool.limit >= 0);
1665 
1666 	}
1667 	if (flush) {
1668 	    fix->u.nocpool.limit = 0;
1669 	    fix->u.nocpool.max_used = fix->u.nocpool.used;
1670 	    ASSERT(fix->u.nocpool.limit >= 0);
1671 	}
1672 	for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) {
1673 	    void *ptr;
1674 
1675 	    if (!flush && fix->u.nocpool.limit >= fix->u.nocpool.allocated)
1676 		break;
1677 	    if (fix->list_size == 0)
1678 		break;
1679 	    ptr = fix->list;
1680 	    fix->list = *((void **) ptr);
1681 	    fix->list_size--;
1682 	    dealloc_block(allctr, fix->type, 0, ptr, NULL);
1683 	    fix->u.nocpool.allocated--;
1684 	}
1685 	if (fix->list_size != 0) {
1686 	    if (fix->u.nocpool.limit < fix->u.nocpool.allocated)
1687 		res |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
1688 	    all_empty = 0;
1689 	}
1690 	ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
1691     }
1692 
1693     if (all_empty)
1694 	sched_fix_shrink(allctr, 0);
1695 
1696     if (allctr->thread_safe)
1697 	erts_mtx_unlock(&allctr->mutex);
1698 
1699     return res;
1700 }
1701 
1702 erts_aint32_t
1703 erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
1704 {
1705     if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
1706 	return fix_cpool_alloc_shrink(allctr, flgs);
1707     else
1708 	return fix_nocpool_alloc_shrink(allctr, flgs);
1709 }
1710 
1711 static void dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned);
1712 
1713 static ERTS_INLINE void
1714 dealloc_mbc(Allctr_t *allctr, Carrier_t *crr)
1715 {
1716     ASSERT(IS_MB_CARRIER(crr));
1717     if (allctr->destroying_mbc)
1718         allctr->destroying_mbc(allctr, crr);
1719 
1720     dealloc_carrier(allctr, crr, 1);
1721 }
1722 
1723 
1724 static UWord allctr_abandon_limit(Allctr_t *allctr);
1725 static void set_new_allctr_abandon_limit(Allctr_t*);
1726 static void abandon_carrier(Allctr_t*, Carrier_t*);
1727 static void poolify_my_carrier(Allctr_t*, Carrier_t*);
1728 static void enqueue_homecoming(Allctr_t*, Carrier_t*);
1729 
1730 static ERTS_INLINE Allctr_t*
1731 get_pref_allctr(void *extra)
1732 {
1733     ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
1734     int pref_ix;
1735 
1736     pref_ix = ERTS_ALC_GET_THR_IX();
1737 
1738     ERTS_CT_ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
1739     ASSERT(0 <= pref_ix && pref_ix < tspec->size);
1740 
1741     return tspec->allctr[pref_ix];
1742 }
1743 
1744 #define ERTS_ALC_TS_PREF_LOCK_IF_USED	(1)
1745 #define ERTS_ALC_TS_PREF_LOCK_NO	(0)
1746 
1747 /* SMP note:
1748  * get_used_allctr() must be safe WITHOUT locking the allocator while
1749  * concurrent threads may be updating adjacent blocks.
1750  * We rely on getting a consistent result (without atomic op) when reading
1751  * the block header word even if a concurrent thread is updating
1752  * the "PREV_FREE" flag bit.
1753  */
1754 static ERTS_INLINE Allctr_t*
1755 get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
1756 		Carrier_t **busy_pcrr_pp)
1757 {
1758     Block_t* blk = UMEM2BLK(p);
1759     Carrier_t *crr;
1760     erts_aint_t iallctr;
1761     Allctr_t *used_allctr;
1762 
1763     *busy_pcrr_pp = NULL;
1764 
1765     if (IS_SBC_BLK(blk)) {
1766 	crr = BLK_TO_SBC(blk);
1767 	if (sizep)
1768 	    *sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ;
1769 	iallctr = erts_atomic_read_dirty(&crr->allctr);
1770     }
1771     else {
1772 	crr = ABLK_TO_MBC(blk);
1773 
1774 	if (sizep)
1775 	    *sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ;
1776 	if (!ERTS_ALC_IS_CPOOL_ENABLED(pref_allctr))
1777 	    iallctr = erts_atomic_read_dirty(&crr->allctr);
1778 	else {
1779 	    int locked_pref_allctr = 0;
1780 	    iallctr = erts_atomic_read_ddrb(&crr->allctr);
1781 
1782 	    if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock
1783 		&& pref_allctr->thread_safe) {
1784 		used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK);
1785 		if (pref_allctr == used_allctr) {
1786 		    erts_mtx_lock(&pref_allctr->mutex);
1787 		    locked_pref_allctr = 1;
1788 		}
1789 	    }
1790 
1791 	    while ((iallctr & ((~ERTS_CRR_ALCTR_FLG_MASK)|ERTS_CRR_ALCTR_FLG_IN_POOL))
1792 		   == (((erts_aint_t) pref_allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL)) {
1793 		erts_aint_t act;
1794 
1795 		ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
1796                 if (iallctr & ERTS_CRR_ALCTR_FLG_HOMECOMING) {
1797                     /*
1798                      * This carrier has just been given back to us by writing
1799                      * to crr->allctr with a write barrier (see abandon_carrier).
1800                      *
1801                      * We need a matching read barrier to guarantee a correct view
1802                      * of the carrier for deallocation work.
1803                      */
1804                     act = erts_atomic_cmpxchg_rb(&crr->allctr,
1805                                                  iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
1806                                                  iallctr);
1807                 }
1808                 else {
1809                     act = erts_atomic_cmpxchg_ddrb(&crr->allctr,
1810                                                    iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
1811                                                    iallctr);
1812                 }
1813 		if (act == iallctr) {
1814 		    *busy_pcrr_pp = crr;
1815 		    break;
1816 		}
1817 		iallctr = act;
1818 	    }
1819 
1820 	    used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK);
1821 
1822 	    if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock) {
1823 		if (locked_pref_allctr && used_allctr != pref_allctr) {
1824 		    /* Was taken out of pool; now owned by someone else */
1825 		    erts_mtx_unlock(&pref_allctr->mutex);
1826 		}
1827 	    }
1828 	    return used_allctr;
1829 	}
1830     }
1831 
1832     used_allctr = (Allctr_t *) (iallctr & ~ERTS_CRR_ALCTR_FLG_MASK);
1833 
1834     if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock
1835 	&& used_allctr == pref_allctr
1836 	&& pref_allctr->thread_safe) {
1837 	erts_mtx_lock(&pref_allctr->mutex);
1838     }
1839 
1840     return used_allctr;
1841 }
1842 
1843 static void
1844 init_dd_queue(ErtsAllctrDDQueue_t *ddq)
1845 {
1846     erts_atomic_init_nob(&ddq->tail.data.marker.u.atmc_next, ERTS_AINT_NULL);
1847     erts_atomic_init_nob(&ddq->tail.data.last,
1848 			 (erts_aint_t) &ddq->tail.data.marker);
1849     erts_atomic_init_nob(&ddq->tail.data.um_refc[0], 0);
1850     erts_atomic_init_nob(&ddq->tail.data.um_refc[1], 0);
1851     erts_atomic32_init_nob(&ddq->tail.data.um_refc_ix, 0);
1852     ddq->head.first = &ddq->tail.data.marker;
1853     ddq->head.unref_end = &ddq->tail.data.marker;
1854     ddq->head.next.thr_progress = erts_thr_progress_current();
1855     ddq->head.next.thr_progress_reached = 1;
1856     ddq->head.next.um_refc_ix = 1;
1857     ddq->head.next.unref_end = &ddq->tail.data.marker;
1858     ddq->head.used_marker = 1;
1859 }
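
/* The delayed dealloc (dd) queue initialized above is a lock-free
 * multiple-producer, single-consumer queue. Producers link elements
 * in at tail.data.last using atomic compare-and-swap; only the owning
 * thread reads from head.first. The statically allocated marker
 * element is a dummy node used as a safe end-of-queue placeholder,
 * and head.unref_end marks how far dequeued elements are known to be
 * unreferenced by concurrent enqueuers (see ddq_check_incoming()). */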
1860 
1861 static ERTS_INLINE int
1862 ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
1863 {
1864     erts_aint_t itmp;
1865     ErtsAllctrDDBlock_t *enq, *this = ptr;
1866 
1867     erts_atomic_init_nob(&this->u.atmc_next, ERTS_AINT_NULL);
1868     /* Enqueue at end of list... */
1869 
1870     enq = (ErtsAllctrDDBlock_t *) erts_atomic_read_nob(&ddq->tail.data.last);
1871     itmp = erts_atomic_cmpxchg_relb(&enq->u.atmc_next,
1872 				    (erts_aint_t) this,
1873 				    ERTS_AINT_NULL);
1874     if (itmp == ERTS_AINT_NULL) {
1875 	/* We are required to move the last pointer */
1876 #ifdef DEBUG
1877 	ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->u.atmc_next));
1878 	ASSERT(((erts_aint_t) enq)
1879 	       == erts_atomic_xchg_relb(&ddq->tail.data.last,
1880 					(erts_aint_t) this));
1881 #else
1882 	erts_atomic_set_relb(&ddq->tail.data.last, (erts_aint_t) this);
1883 #endif
1884 	return 1;
1885     }
1886     else {
1887 	/*
1888 	 * We *need* to insert the element somewhere between the
1889 	 * last element we read earlier and the actual last element.
1890 	 */
1891 	int i = cinit;
1892 
1893 	while (1) {
1894 	    erts_aint_t itmp2;
1895 	    erts_atomic_set_nob(&this->u.atmc_next, itmp);
1896 	    itmp2 = erts_atomic_cmpxchg_relb(&enq->u.atmc_next,
1897 					     (erts_aint_t) this,
1898 					     itmp);
1899 	    if (itmp == itmp2)
1900 		return 0; /* inserted this */
1901 	    if ((i & 1) == 0)
1902 		itmp = itmp2;
1903 	    else {
1904 		enq = (ErtsAllctrDDBlock_t *) itmp2;
1905 		itmp = erts_atomic_read_acqb(&enq->u.atmc_next);
1906 		ASSERT(itmp != ERTS_AINT_NULL);
1907 	    }
1908 	    i++;
1909 	}
1910     }
1911 }
1912 
1913 static ERTS_INLINE erts_aint_t
1914 check_insert_marker(ErtsAllctrDDQueue_t *ddq, erts_aint_t ilast)
1915 {
1916     if (!ddq->head.used_marker
1917 	&& ddq->head.unref_end == (ErtsAllctrDDBlock_t *) ilast) {
1918 	erts_aint_t itmp;
1919 	ErtsAllctrDDBlock_t *last = (ErtsAllctrDDBlock_t *) ilast;
1920 
1921 	erts_atomic_init_nob(&ddq->tail.data.marker.u.atmc_next, ERTS_AINT_NULL);
1922 	itmp = erts_atomic_cmpxchg_relb(&last->u.atmc_next,
1923 					(erts_aint_t) &ddq->tail.data.marker,
1924 					ERTS_AINT_NULL);
1925 	if (itmp == ERTS_AINT_NULL) {
1926 	    ilast = (erts_aint_t) &ddq->tail.data.marker;
1927 	    ddq->head.used_marker = !0;
1928 	    erts_atomic_set_relb(&ddq->tail.data.last, ilast);
1929 	}
1930     }
1931     return ilast;
1932 }
1933 
1934 static ERTS_INLINE int
1935 ddq_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
1936 {
1937     int last_elem;
1938     int um_refc_ix = 0;
1939     int managed_thread = erts_thr_progress_is_managed_thread();
1940     if (!managed_thread) {
1941 	um_refc_ix = erts_atomic32_read_acqb(&ddq->tail.data.um_refc_ix);
1942 	while (1) {
1943 	    int tmp_um_refc_ix;
1944 	    erts_atomic_inc_acqb(&ddq->tail.data.um_refc[um_refc_ix]);
1945 	    tmp_um_refc_ix = erts_atomic32_read_acqb(&ddq->tail.data.um_refc_ix);
1946 	    if (tmp_um_refc_ix == um_refc_ix)
1947 		break;
1948 	    erts_atomic_dec_relb(&ddq->tail.data.um_refc[um_refc_ix]);
1949 	    um_refc_ix = tmp_um_refc_ix;
1950 	}
1951     }
1952 
1953     last_elem = ddq_managed_thread_enqueue(ddq, ptr, cinit);
1954 
1955     if (!managed_thread)
1956 	erts_atomic_dec_relb(&ddq->tail.data.um_refc[um_refc_ix]);
1957     return last_elem;
1958 }
1959 
1960 static ERTS_INLINE void *
1961 ddq_dequeue(ErtsAllctrDDQueue_t *ddq)
1962 {
1963     ErtsAllctrDDBlock_t *blk;
1964 
1965     if (ddq->head.first == ddq->head.unref_end)
1966 	return NULL;
1967 
1968     blk = ddq->head.first;
1969     if (blk == &ddq->tail.data.marker) {
1970 	ASSERT(ddq->head.used_marker);
1971 	ddq->head.used_marker = 0;
1972 	blk = ((ErtsAllctrDDBlock_t *)
1973 	       erts_atomic_read_nob(&blk->u.atmc_next));
1974 	if (blk == ddq->head.unref_end) {
1975 	    ddq->head.first = blk;
1976 	    return NULL;
1977 	}
1978     }
1979 
1980     ddq->head.first = ((ErtsAllctrDDBlock_t *)
1981 		       erts_atomic_read_nob(&blk->u.atmc_next));
1982 
1983     ASSERT(ddq->head.first);
1984 
1985     return (void *) blk;
1986 }
1987 
1988 static int
1989 ddq_check_incoming(ErtsAllctrDDQueue_t *ddq)
1990 {
1991     erts_aint_t ilast = erts_atomic_read_nob(&ddq->tail.data.last);
1992     if (((ErtsAllctrDDBlock_t *) ilast) == &ddq->tail.data.marker
1993 	&& ddq->head.first == &ddq->tail.data.marker) {
1994 	/* Nothing more to do... */
1995 	return 0;
1996     }
1997 
1998     if (ddq->head.next.thr_progress_reached
1999 	|| erts_thr_progress_has_reached(ddq->head.next.thr_progress)) {
2000 	int um_refc_ix;
2001 	ddq->head.next.thr_progress_reached = 1;
2002 	um_refc_ix = ddq->head.next.um_refc_ix;
2003 	if (erts_atomic_read_nob(&ddq->tail.data.um_refc[um_refc_ix]) == 0) {
2004 	    /* Move unreferenced end pointer forward... */
2005 
2006 	    ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
2007 
2008 	    ddq->head.unref_end = ddq->head.next.unref_end;
2009 
2010 	    ilast = check_insert_marker(ddq, ilast);
2011 
2012 	    if (ddq->head.unref_end != (ErtsAllctrDDBlock_t *) ilast) {
2013 		ddq->head.next.unref_end = (ErtsAllctrDDBlock_t *) ilast;
2014 		ddq->head.next.thr_progress = erts_thr_progress_later(NULL);
2015 		erts_atomic32_set_relb(&ddq->tail.data.um_refc_ix,
2016 				       um_refc_ix);
2017 		ddq->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
2018 		ddq->head.next.thr_progress_reached = 0;
2019 	    }
2020 	}
2021     }
2022     return 1;
2023 }
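
/* A sketch of why the protocol above is safe: an enqueuer may still
 * dereference the element it read from tail.data.last after its
 * cmpxchg fails, so elements behind the last pointer cannot be
 * reclaimed immediately. ddq_check_incoming() therefore snapshots the
 * last pointer into head.next.unref_end and waits until (1) all
 * managed threads have passed a new thread-progress value and (2) the
 * um_refc counter of the retired epoch has dropped to zero for
 * unmanaged threads. Only then is head.unref_end advanced, making the
 * snapshotted prefix safe for ddq_dequeue() to hand out for
 * deallocation. */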
2024 
2025 static ERTS_INLINE void
2026 store_earliest_thr_prgr(ErtsThrPrgrVal *prev_val, ErtsAllctrDDQueue_t *ddq)
2027 {
2028     if (!ddq->head.next.thr_progress_reached
2029 	&& (*prev_val == ERTS_THR_PRGR_INVALID
2030 	    || erts_thr_progress_cmp(ddq->head.next.thr_progress,
2031 				     *prev_val) < 0)) {
2032 	*prev_val = ddq->head.next.thr_progress;
2033     }
2034 }
2035 
2036 static void
2037 check_pending_dealloc_carrier(Allctr_t *allctr,
2038 			      int *need_thr_progress,
2039 			      ErtsThrPrgrVal *thr_prgr_p,
2040 			      int *need_more_work);
2041 
2042 static void
2043 handle_delayed_fix_dealloc(Allctr_t *allctr, ErtsAlcType_t type, Uint32 flags,
2044                            void *ptr)
2045 {
2046     ASSERT(ERTS_ALC_IS_FIX_TYPE(type));
2047 
2048     if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
2049 	fix_nocpool_free(allctr, type, ptr);
2050     else {
2051 	Block_t *blk = UMEM2BLK(ptr);
2052 	Carrier_t *busy_pcrr_p;
2053 	Allctr_t *used_allctr;
2054 
2055 	if (IS_SBC_BLK(blk)) {
2056 	    busy_pcrr_p = NULL;
2057 	    goto doit;
2058 	}
2059 
2060 	used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr,
2061 				      NULL, &busy_pcrr_p);
2062 	if (used_allctr == allctr) {
2063 	doit:
2064 	    fix_cpool_free(allctr, type, flags, ptr, &busy_pcrr_p);
2065 	    clear_busy_pool_carrier(allctr, busy_pcrr_p);
2066 	}
2067 	else {
2068 	    /* Carrier migrated; need to redirect block to new owner... */
2069             ErtsAllctrDDBlock_t *dd_block;
2070             int cinit;
2071 
2072             dd_block = (ErtsAllctrDDBlock_t*)ptr;
2073             dd_block->flags = flags;
2074             dd_block->type = type;
2075 
2076             ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
2077 
2078             DEC_CC(allctr->calls.this_free);
2079 
2080             cinit = used_allctr->dd.ix - allctr->dd.ix;
2081 
2082 	    if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
2083 		erts_alloc_notify_delayed_dealloc(used_allctr->ix);
2084 	}
2085     }
2086 }
2087 
2088 static void schedule_dealloc_carrier(Allctr_t*, Carrier_t*);
2089 static void dealloc_my_carrier(Allctr_t*, Carrier_t*);
2090 
2091 
2092 static ERTS_INLINE int
2093 handle_delayed_dealloc(Allctr_t *allctr,
2094 		       int allctr_locked,
2095 		       int use_limit,
2096 		       int ops_limit,
2097 		       int *need_thr_progress,
2098 		       ErtsThrPrgrVal *thr_prgr_p,
2099 		       int *need_more_work)
2100 {
2101     int need_thr_prgr = 0;
2102     int need_mr_wrk = 0;
2103     int have_checked_incoming = 0;
2104     int ops = 0;
2105     int res;
2106     ErtsAllctrDDQueue_t *ddq;
2107 
2108     if (allctr->thread_safe && !allctr_locked)
2109 	erts_mtx_lock(&allctr->mutex);
2110 
2111     ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
2112 
2113     ddq = &allctr->dd.q;
2114 
2115     res = 0;
2116 
2117     while (1) {
2118 	Block_t *blk;
2119 	void *ptr;
2120 
2121 	if (use_limit && ++ops > ops_limit) {
2122 	    if (ddq->head.first != ddq->head.unref_end) {
2123 		need_mr_wrk = 1;
2124 		if (need_more_work)
2125 		    *need_more_work |= 1;
2126 	    }
2127 	    break;
2128 	}
2129 
2130     dequeue:
2131 	ptr = ddq_dequeue(ddq);
2132 	if (!ptr) {
2133 	    if (have_checked_incoming)
2134 		break;
2135 	    need_thr_prgr = ddq_check_incoming(ddq);
2136 	    if (need_thr_progress) {
2137 		*need_thr_progress |= need_thr_prgr;
2138 		if (need_thr_prgr)
2139 		    store_earliest_thr_prgr(thr_prgr_p, ddq);
2140 
2141 	    }
2142 	    have_checked_incoming = 1;
2143 	    goto dequeue;
2144 	}
2145 
2146 	res = 1;
2147 
2148 	blk = UMEM2BLK(ptr);
2149 	if (blk->bhdr == HOMECOMING_MBC_BLK_HDR) {
2150 	    /*
2151 	     * A multiblock carrier that previously migrated away
2152              * from us was sent back to us, either because
2153              * - it became empty and we need to deallocate it, or
2154              * - it was inserted into the pool and we need to update our pooled_tree.
2155 	     */
2156 	    Carrier_t *crr = ErtsContainerStruct(blk, Carrier_t,
2157                                                  cpool.homecoming_dd.blk);
2158             Block_t* first_blk = MBC_TO_FIRST_BLK(allctr, crr);
2159             erts_aint_t iallctr;
2160 
2161 	    ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr));
2162 	    ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
2163 
2164             iallctr = erts_atomic_read_nob(&crr->allctr);
2165             ASSERT(iallctr & ERTS_CRR_ALCTR_FLG_HOMECOMING);
2166             while (1) {
2167                 if ((iallctr & (~ERTS_CRR_ALCTR_FLG_MASK |
2168                                 ERTS_CRR_ALCTR_FLG_IN_POOL))
2169                     == (erts_aint_t)allctr) {
2170                     /*
2171                      * Carrier is home (mine and not in pool)
2172                      */
2173                     ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
2174                     erts_atomic_set_nob(&crr->allctr, (erts_aint_t)allctr);
2175                     if (IS_FREE_LAST_MBC_BLK(first_blk))
2176                         dealloc_my_carrier(allctr, crr);
2177                     else
2178                         ASSERT(crr->cpool.state == ERTS_MBC_IS_HOME);
2179                 }
2180                 else {
2181                     erts_aint_t exp = iallctr;
2182                     erts_aint_t want = iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING;
2183 
2184                     iallctr = erts_atomic_cmpxchg_nob(&crr->allctr,
2185                                                           want,
2186                                                           exp);
2187                     if (iallctr != exp)
2188                         continue; /* retry */
2189 
2190                     ASSERT(crr->cpool.state != ERTS_MBC_IS_HOME);
2191                     unlink_abandoned_carrier(crr);
2192                     if (iallctr & ERTS_CRR_ALCTR_FLG_IN_POOL)
2193                         poolify_my_carrier(allctr, crr);
2194                     else
2195                         crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
2196                 }
2197                 break;
2198             }
2199 	}
2200 	else {
2201             ErtsAllctrDDBlock_t *dd_block;
2202             ErtsAlcType_t type;
2203             Uint32 flags;
2204 
2205             dd_block = (ErtsAllctrDDBlock_t*)ptr;
2206             flags = dd_block->flags;
2207             type = dd_block->type;
2208 
2209             flags |= DEALLOC_FLG_REDIRECTED;
2210 
2211             ASSERT(IS_SBC_BLK(blk) || (ABLK_TO_MBC(blk) !=
2212                                        ErtsContainerStruct(blk, Carrier_t,
2213                                                            cpool.homecoming_dd.blk)));
2214 
2215 	    INC_CC(allctr->calls.this_free);
2216 
2217 	    if (ERTS_ALC_IS_FIX_TYPE(type)) {
2218 		handle_delayed_fix_dealloc(allctr, type, flags, ptr);
2219 	    } else {
2220 		dealloc_block(allctr, type, flags, ptr, NULL);
2221             }
2222 	}
2223     }
2224 
2225     if (need_thr_progress && !(need_thr_prgr | need_mr_wrk)) {
2226 	need_thr_prgr = ddq_check_incoming(ddq);
2227 	*need_thr_progress |= need_thr_prgr;
2228 	if (need_thr_prgr)
2229 	    store_earliest_thr_prgr(thr_prgr_p, ddq);
2230     }
2231 
2232     if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
2233 	check_pending_dealloc_carrier(allctr,
2234 				      need_thr_progress,
2235 				      thr_prgr_p,
2236 				      need_more_work);
2237 
2238     if (allctr->thread_safe && !allctr_locked)
2239 	erts_mtx_unlock(&allctr->mutex);
2240     return res;
2241 }
2242 
2243 static ERTS_INLINE void
2244 enqueue_dealloc_other_instance(ErtsAlcType_t type,
2245 			       Allctr_t *allctr,
2246 			       void *ptr,
2247 			       int cinit)
2248 {
2249     ErtsAllctrDDBlock_t *dd_block = ((ErtsAllctrDDBlock_t*)ptr);
2250 
2251     dd_block->type = type;
2252     dd_block->flags = 0;
2253 
2254     if (ddq_enqueue(&allctr->dd.q, ptr, cinit))
2255 	erts_alloc_notify_delayed_dealloc(allctr->ix);
2256 }
2257 
2258 static ERTS_INLINE void
2259 update_pooled_tree(Allctr_t *allctr, Carrier_t *crr, Uint blk_sz)
2260 {
2261     if (allctr == crr->cpool.orig_allctr && crr->cpool.state == ERTS_MBC_WAS_POOLED) {
2262 	/*
2263 	 * Update pooled_tree with a potentially new (larger) max_sz
2264          */
2265         AOFF_RBTree_t* crr_node = &crr->cpool.pooled;
2266         if (blk_sz > crr_node->hdr.bhdr) {
2267             crr_node->hdr.bhdr = blk_sz;
2268             erts_aoff_larger_max_size(crr_node);
2269         }
2270     }
2271 }
2272 
2273 static ERTS_INLINE void
2274 check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
2275 {
2276     Carrier_t *crr;
2277     UWord ncrr_in_pool, largest_fblk;
2278 
2279     if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
2280 	return;
2281 
2282     ASSERT(allctr->cpool.abandon_limit == allctr_abandon_limit(allctr));
2283     ASSERT(erts_thr_progress_is_managed_thread());
2284 
2285     if (allctr->cpool.disable_abandon)
2286 	return;
2287 
2288     if (allctr->mbcs.blocks.curr.size > allctr->cpool.abandon_limit)
2289 	return;
2290 
2291     ncrr_in_pool = erts_atomic_read_nob(&allctr->cpool.stat.no_carriers);
2292     if (ncrr_in_pool >= allctr->cpool.in_pool_limit)
2293         return;
2294 
2295     crr = FBLK_TO_MBC(fblk);
2296 
2297     if (allctr->main_carrier == crr)
2298 	return;
2299 
2300     if (crr->cpool.total_blocks_size > crr->cpool.abandon_limit)
2301 	return;
2302 
2303     if (crr->cpool.thr_prgr != ERTS_THR_PRGR_INVALID
2304         && !erts_thr_progress_has_reached(crr->cpool.thr_prgr))
2305         return;
2306 
2307     largest_fblk = allctr->largest_fblk_in_mbc(allctr, crr);
2308     if (largest_fblk < allctr->cpool.fblk_min_limit)
2309         return;
2310 
2311     erts_atomic_set_nob(&crr->cpool.max_size, largest_fblk);
2312     abandon_carrier(allctr, crr);
2313 }
2314 
2315 void
2316 erts_alcu_check_delayed_dealloc(Allctr_t *allctr,
2317 				int limit,
2318 				int *need_thr_progress,
2319 				ErtsThrPrgrVal *thr_prgr_p,
2320 				int *more_work)
2321 {
2322     handle_delayed_dealloc(allctr,
2323 			   0,
2324 			   limit,
2325 			   ERTS_ALCU_DD_OPS_LIM_HIGH,
2326 			   need_thr_progress,
2327 			   thr_prgr_p,
2328 			   more_work);
2329 }
2330 
2331 #define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked)			\
2332     handle_delayed_dealloc((Allctr), (Locked), 1, 			\
2333 			   ERTS_ALCU_DD_OPS_LIM_LOW, NULL, NULL, NULL)
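
/* Note the two operation limits: the aux-work path above
 * (erts_alcu_check_delayed_dealloc()) runs with the high limit
 * ERTS_ALCU_DD_OPS_LIM_HIGH, while ERTS_ALCU_HANDLE_DD_IN_OP is used
 * from within allocation operations and therefore runs with the low
 * limit ERTS_ALCU_DD_OPS_LIM_LOW, so that a single alloc/free call
 * never stalls on a long dd queue. */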
2334 
2335 static void
2336 dealloc_block(Allctr_t *allctr, ErtsAlcType_t type, Uint32 flags, void *ptr,
2337               ErtsAlcFixList_t *fix)
2338 {
2339     Block_t *blk = UMEM2BLK(ptr);
2340 
2341     ASSERT(!fix || type == fix->type);
2342 
2343     ERTS_LC_ASSERT(!allctr->thread_safe
2344 		       || erts_lc_mtx_is_locked(&allctr->mutex));
2345 
2346     if (IS_SBC_BLK(blk)) {
2347 	destroy_carrier(allctr, blk, NULL);
2348 	if (fix && ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
2349 	    if (!(flags & DEALLOC_FLG_FIX_SHRINK))
2350 		fix->u.cpool.used--;
2351 	    fix->u.cpool.allocated--;
2352 	}
2353     }
2354     else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
2355 	mbc_free(allctr, type, ptr, NULL);
2356     else {
2357 	Carrier_t *busy_pcrr_p;
2358 	Allctr_t *used_allctr;
2359 
2360 	used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr,
2361 				      NULL, &busy_pcrr_p);
2362 	if (used_allctr == allctr) {
2363 	    if (fix) {
2364 	        if (!(flags & DEALLOC_FLG_FIX_SHRINK))
2365 		    fix->u.cpool.used--;
2366 		fix->u.cpool.allocated--;
2367 	    }
2368 	    mbc_free(allctr, type, ptr, &busy_pcrr_p);
2369 	    clear_busy_pool_carrier(allctr, busy_pcrr_p);
2370 	}
2371 	else {
2372 	    /* Carrier migrated; need to redirect block to new owner... */
2373             ErtsAllctrDDBlock_t *dd_block;
2374             int cinit;
2375 
2376             dd_block = (ErtsAllctrDDBlock_t*)ptr;
2377             dd_block->flags = flags;
2378             dd_block->type = type;
2379 
2380             ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
2381 
2382             if (flags & DEALLOC_FLG_REDIRECTED)
2383                 DEC_CC(allctr->calls.this_free);
2384 
2385             cinit = used_allctr->dd.ix - allctr->dd.ix;
2386 
2387 	    if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
2388 		erts_alloc_notify_delayed_dealloc(used_allctr->ix);
2389 	}
2390     }
2391 }
2392 
2393 /* Multi block carrier alloc/realloc/free ... */
2394 
2395 /* NOTE! mbc_alloc() may, in case of memory shortage, place the requested
2396  * block in an sbc.
2397  */
2398 static ERTS_INLINE void *
2399 mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp)
2400 {
2401     Block_t *blk;
2402     Uint get_blk_sz;
2403 
2404     ASSERT(size);
2405     ASSERT(size < allctr->sbc_threshold);
2406 
2407     *blk_szp = get_blk_sz = UMEMSZ2BLKSZ(allctr, size);
2408 
2409     blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0);
2410 
2411     if (!blk) {
2412 	blk = create_carrier(allctr, get_blk_sz, CFLG_MBC);
2413 #if !ERTS_SUPER_ALIGNED_MSEG_ONLY
2414 	if (!blk) {
2415 	    /* Emergency! We couldn't create the carrier as we wanted.
2416 	       Try to place it in a sys_alloced sbc. */
2417 	    blk = create_carrier(allctr,
2418 				 size,
2419 				 (CFLG_SBC
2420 				  | CFLG_FORCE_SIZE
2421 				  | CFLG_FORCE_SYS_ALLOC));
2422 	}
2423 #endif
2424     }
2425 
2426 #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
2427     if (IS_MBC_BLK(blk)) {
2428 	(*allctr->link_free_block)(allctr, blk);
2429 	HARD_CHECK_BLK_CARRIER(allctr, blk);
2430 	(*allctr->unlink_free_block)(allctr, blk);
2431     }
2432 #endif
2433 
2434     return blk;
2435 }
2436 
2437 static ERTS_INLINE void
2438 mbc_alloc_finalize(Allctr_t *allctr,
2439 		   Block_t *blk,
2440 		   Uint org_blk_sz,
2441 		   UWord flags,
2442 		   Carrier_t *crr,
2443 		   Uint want_blk_sz,
2444 		   int valid_blk_info)
2445 {
2446     Uint blk_sz;
2447     Uint nxt_blk_sz;
2448     Block_t *nxt_blk;
2449     UWord prev_free_flg = flags & PREV_FREE_BLK_HDR_FLG;
2450 
2451     ASSERT(org_blk_sz >= want_blk_sz);
2452     ASSERT(blk);
2453 
2454 #ifdef DEBUG
2455     nxt_blk = NULL;
2456 #endif
2457 
2458     if (org_blk_sz - allctr->min_block_size >= want_blk_sz) {
2459 	/* Shrink block... */
2460 	blk_sz = want_blk_sz;
2461 	nxt_blk_sz = org_blk_sz - blk_sz;
2462 	SET_MBC_ABLK_HDR(blk, blk_sz, prev_free_flg, crr);
2463 
2464 	nxt_blk = BLK_AFTER(blk, blk_sz);
2465 	SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz,
2466 			 SBH_THIS_FREE|(flags & LAST_BLK_HDR_FLG),
2467 			 crr);
2468 
2469 	if (!(flags & LAST_BLK_HDR_FLG)) {
2470 	    SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
2471 	    if (!valid_blk_info) {
2472 		Block_t *nxt_nxt_blk = BLK_AFTER(nxt_blk, nxt_blk_sz);
2473 		SET_PREV_BLK_FREE(allctr, nxt_nxt_blk);
2474 	    }
2475 	}
2476 	(*allctr->link_free_block)(allctr, nxt_blk);
2477 
2478 	ASSERT(IS_NOT_LAST_BLK(blk));
2479 	ASSERT(IS_FREE_BLK(nxt_blk));
2480 	ASSERT((flags & LAST_BLK_HDR_FLG)
2481 	       ? IS_LAST_BLK(nxt_blk)
2482 	       : IS_NOT_LAST_BLK(nxt_blk));
2483 	ASSERT((flags & LAST_BLK_HDR_FLG)
2484 	       || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
2485 	ASSERT((flags & LAST_BLK_HDR_FLG)
2486 	       || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
2487 	ASSERT(nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
2488 	ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0);
2489 	ASSERT(nxt_blk_sz >= allctr->min_block_size);
2490 	ASSERT(ABLK_TO_MBC(blk) == crr);
2491 	ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
2492     }
2493     else {
2494 	ASSERT(org_blk_sz <= MBC_ABLK_SZ_MASK);
2495 	blk_sz = org_blk_sz;
2496 	if (flags & LAST_BLK_HDR_FLG) {
2497 	    if (valid_blk_info)
2498 		SET_BLK_ALLOCED(blk);
2499 	    else
2500 		SET_MBC_ABLK_HDR(blk, blk_sz, SBH_LAST_BLK|prev_free_flg, crr);
2501 	}
2502 	else {
2503 	    if (valid_blk_info)
2504 		SET_BLK_ALLOCED(blk);
2505 	    else
2506 		SET_MBC_ABLK_HDR(blk, blk_sz, prev_free_flg, crr);
2507 	    nxt_blk = BLK_AFTER(blk, blk_sz);
2508 	    SET_PREV_BLK_ALLOCED(nxt_blk);
2509 	}
2510 
2511 	ASSERT((flags & LAST_BLK_HDR_FLG)
2512 	       ? IS_LAST_BLK(blk)
2513 	       : IS_NOT_LAST_BLK(blk));
2514 	ASSERT(ABLK_TO_MBC(blk) == crr);
2515     }
2516 
2517     ERTS_ALC_CPOOL_ALLOC_OP(allctr);
2518     STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
2519 
2520     ASSERT(IS_ALLOCED_BLK(blk));
2521     ASSERT(blk_sz == MBC_BLK_SZ(blk));
2522     ASSERT(blk_sz % sizeof(Unit_t) == 0);
2523     ASSERT(blk_sz >= allctr->min_block_size);
2524     ASSERT(blk_sz >= want_blk_sz);
2525     ASSERT(IS_MBC_BLK(blk));
2526 
2527     ASSERT(!nxt_blk || IS_PREV_BLK_ALLOCED(nxt_blk));
2528     ASSERT(!nxt_blk || IS_MBC_BLK(nxt_blk));
2529 
2530     HARD_CHECK_BLK_CARRIER(allctr, blk);
2531 }
2532 
2533 static void *
2534 mbc_alloc(Allctr_t *allctr, Uint size)
2535 {
2536     Block_t *blk;
2537     Uint blk_sz;
2538     blk = mbc_alloc_block(allctr, size, &blk_sz);
2539     if (!blk)
2540 	return NULL;
2541     if (IS_MBC_BLK(blk))
2542 	mbc_alloc_finalize(allctr,
2543 			   blk,
2544 			   MBC_FBLK_SZ(blk),
2545 			   GET_BLK_HDR_FLGS(blk),
2546 			   FBLK_TO_MBC(blk),
2547 			   blk_sz,
2548 			   1);
2549     return BLK2UMEM(blk);
2550 }
2551 
2552 typedef struct {
2553     char *ptr;
2554     UWord size;
2555 } ErtsMemDiscardRegion;
2556 
2557 /* Construct a discard region for the user memory of a free block, letting the
2558  * OS reclaim its physical memory when required.
2559  *
2560  * Note that we're ignoring both the footer and everything that comes before
2561  * the minimum block size as the allocator uses those areas to manage the
2562  * block. */
2563 static void ERTS_INLINE
2564 mem_discard_start(Allctr_t *allocator, Block_t *block,
2565                   ErtsMemDiscardRegion *out)
2566 {
2567     UWord size = BLK_SZ(block);
2568 
2569     ASSERT(size >= allocator->min_block_size);
2570 
2571     if (size > (allocator->min_block_size + FBLK_FTR_SZ)) {
2572         out->size = size - allocator->min_block_size - FBLK_FTR_SZ;
2573     } else {
2574         out->size = 0;
2575     }
2576 
2577     out->ptr = (char*)block + allocator->min_block_size;
2578 }
2579 
2580 /* Expands a discard region into a neighboring free block, allowing us to
2581  * discard the block header and first page.
2582  *
2583  * This is very important in small-allocation scenarios where no single block
2584  * is large enough to be discarded on its own. */
2585 static void ERTS_INLINE
2586 mem_discard_coalesce(Allctr_t *allocator, Block_t *neighbor,
2587                      ErtsMemDiscardRegion *region)
2588 {
2589     char *neighbor_start;
2590 
2591     ASSERT(IS_FREE_BLK(neighbor));
2592 
2593     neighbor_start = (char*)neighbor;
2594 
2595     if (region->ptr >= neighbor_start) {
2596         char *region_start_page;
2597 
2598         region_start_page = region->ptr - SYS_PAGE_SIZE;
2599         region_start_page = (char*)((UWord)region_start_page & ~SYS_PAGE_SZ_MASK);
2600 
2601         /* Expand if our first page begins within the previous free block's
2602          * unused data. */
2603         if (region_start_page >= (neighbor_start + allocator->min_block_size)) {
2604             region->size += (region->ptr - region_start_page) - FBLK_FTR_SZ;
2605             region->ptr = region_start_page;
2606         }
2607     } else {
2608         char *region_end_page;
2609         UWord neighbor_size;
2610 
2611         ASSERT(region->ptr <= neighbor_start);
2612 
2613         region_end_page = region->ptr + region->size + SYS_PAGE_SIZE;
2614         region_end_page = (char*)((UWord)region_end_page & ~SYS_PAGE_SZ_MASK);
2615 
2616         neighbor_size = BLK_SZ(neighbor) - FBLK_FTR_SZ;
2617 
2618         /* Expand if our last page ends anywhere within the next free block,
2619          * sans the footer we'll inherit. */
2620         if (region_end_page < neighbor_start + neighbor_size) {
2621             region->size += region_end_page - (region->ptr + region->size);
2622         }
2623     }
2624 }
2625 
2626 static void ERTS_INLINE
2627 mem_discard_finish(Allctr_t *allocator, Block_t *block,
2628                    ErtsMemDiscardRegion *region)
2629 {
2630 #ifdef DEBUG
2631     char *block_start, *block_end;
2632     UWord block_size;
2633 
2634     block_size = BLK_SZ(block);
2635 
2636     /* Ensure that the region is completely covered by the legal area of the
2637      * free block. This must hold even when the region is too small to be
2638      * discarded. */
2639     if (region->size > 0) {
2640         ASSERT(block_size > allocator->min_block_size + FBLK_FTR_SZ);
2641 
2642         block_start = (char*)block + allocator->min_block_size;
2643         block_end = (char*)block + block_size - FBLK_FTR_SZ;
2644 
2645         ASSERT(region->size == 0 ||
2646             (region->ptr + region->size <= block_end &&
2647              region->ptr >= block_start &&
2648              region->size <= block_size));
2649     }
2650 #else
2651     (void)allocator;
2652     (void)block;
2653 #endif
2654 
2655     if (region->size > SYS_PAGE_SIZE) {
2656         UWord align_offset, size;
2657         char *ptr;
2658 
2659         align_offset = SYS_PAGE_SIZE - ((UWord)region->ptr & SYS_PAGE_SZ_MASK);
2660 
2661         size = (region->size - align_offset) & ~SYS_PAGE_SZ_MASK;
2662         ptr = region->ptr + align_offset;
2663 
2664         if (size > 0) {
2665             ASSERT(!((UWord)ptr & SYS_PAGE_SZ_MASK));
2666             ASSERT(!(size & SYS_PAGE_SZ_MASK));
2667 
2668             erts_mem_discard(ptr, size);
2669         }
2670     }
2671 }
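
/* Worked example for the alignment step above, assuming a 4 KiB page
 * size (the addresses are hypothetical): with region->ptr == 0x1010
 * and region->size == 0x3000, align_offset becomes 0x1000 - 0x10 =
 * 0xff0, ptr becomes 0x2000, and size becomes (0x3000 - 0xff0) &
 * ~0xfff == 0x2000. Exactly the two fully covered pages [0x2000,
 * 0x4000) are discarded; the partial pages at either end are left
 * untouched. */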
2672 
2673 static void
2674 carrier_mem_discard_free_blocks(Allctr_t *allocator, Carrier_t *carrier)
2675 {
2676     static const int MAX_BLOCKS_TO_DISCARD = 100;
2677     Block_t *block;
2678     int i;
2679 
2680     block = allocator->first_fblk_in_mbc(allocator, carrier);
2681     i = 0;
2682 
2683     while (block != NULL && i < MAX_BLOCKS_TO_DISCARD) {
2684         ErtsMemDiscardRegion region;
2685 
2686         ASSERT(IS_FREE_BLK(block));
2687 
2688         mem_discard_start(allocator, block, &region);
2689         mem_discard_finish(allocator, block, &region);
2690 
2691         block = allocator->next_fblk_in_mbc(allocator, carrier, block);
2692         i++;
2693     }
2694 }
2695 
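/* mbc_free() coalesces the freed block with any free neighbors, so a
 * carrier never contains two adjacent free blocks: a free previous
 * block absorbs the freed block, and a free next block is unlinked
 * and absorbed in turn. If the resulting free block covers the whole
 * carrier and the carrier is not the main carrier, the carrier itself
 * is destroyed instead of being kept on the free list. */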
2696 static void
2697 mbc_free(Allctr_t *allctr, ErtsAlcType_t type, void *p, Carrier_t **busy_pcrr_pp)
2698 {
2699     ErtsMemDiscardRegion discard_region = {0};
2700     int discard;
2701     Uint is_first_blk;
2702     Uint is_last_blk;
2703     Uint blk_sz;
2704     Block_t *blk;
2705     Block_t *nxt_blk;
2706     Carrier_t *crr;
2707 
2708     ASSERT(p);
2709 
2710     blk = UMEM2BLK(p);
2711     blk_sz = MBC_ABLK_SZ(blk);
2712 
2713     ASSERT(IS_MBC_BLK(blk));
2714     ASSERT(blk_sz >= allctr->min_block_size);
2715 
2716 #ifndef DEBUG
2717     /* We want to mark freed blocks as reclaimable to the OS, but it's a fairly
2718      * expensive operation which doesn't do much good if we use it again soon
2719      * after, so we limit it to deallocations on pooled carriers. */
2720     discard = busy_pcrr_pp && *busy_pcrr_pp;
2721 #else
2722     /* Always discard in debug mode, regardless of whether we're in the pool or
2723      * not. */
2724     discard = 1;
2725 #endif
2726 
2727     if (discard) {
2728         mem_discard_start(allctr, blk, &discard_region);
2729     }
2730 
2731     HARD_CHECK_BLK_CARRIER(allctr, blk);
2732 
2733     crr = ABLK_TO_MBC(blk);
2734 
2735     ERTS_ALC_CPOOL_FREE_OP(allctr);
2736 
2737     STAT_MBC_BLK_FREE(allctr, type, crr, busy_pcrr_pp, blk_sz, alcu_flgs);
2738 
2739     is_first_blk = IS_MBC_FIRST_ABLK(allctr, blk);
2740     is_last_blk = IS_LAST_BLK(blk);
2741 
2742     if (IS_PREV_BLK_FREE(blk)) {
2743 	ASSERT(!is_first_blk);
2744 	/* Coalesce with previous block... */
2745 	blk = PREV_BLK(blk);
2746 	(*allctr->unlink_free_block)(allctr, blk);
2747 
2748         if (discard) {
2749             mem_discard_coalesce(allctr, blk, &discard_region);
2750         }
2751 
2752 	blk_sz += MBC_FBLK_SZ(blk);
2753 	is_first_blk = IS_MBC_FIRST_FBLK(allctr, blk);
2754 	SET_MBC_FBLK_SZ(blk, blk_sz);
2755     }
2756     else {
2757 	SET_BLK_FREE(blk);
2758     }
2759 
2760     if (is_last_blk)
2761 	SET_LAST_BLK(blk);
2762     else {
2763 	nxt_blk = BLK_AFTER(blk, blk_sz);
2764 	if (IS_FREE_BLK(nxt_blk)) {
2765 	    /* Coalesce with next block... */
2766 	    (*allctr->unlink_free_block)(allctr, nxt_blk);
2767 
2768             if (discard) {
2769                 mem_discard_coalesce(allctr, nxt_blk, &discard_region);
2770             }
2771 
2772 	    blk_sz += MBC_FBLK_SZ(nxt_blk);
2773 	    SET_MBC_FBLK_SZ(blk, blk_sz);
2774 
2775 	    is_last_blk = IS_LAST_BLK(nxt_blk);
2776 	    if (is_last_blk)
2777 		SET_LAST_BLK(blk);
2778 	    else {
2779 		SET_NOT_LAST_BLK(blk);
2780 		SET_BLK_SZ_FTR(blk, blk_sz);
2781 	    }
2782 	}
2783 	else {
2784 	    SET_PREV_BLK_FREE(allctr, nxt_blk);
2785 	    SET_NOT_LAST_BLK(blk);
2786 	    SET_BLK_SZ_FTR(blk, blk_sz);
2787 	}
2788 
2789     }
2790 
2791     ASSERT(IS_FREE_BLK(blk));
2792     ASSERT(!is_last_blk  == !IS_LAST_BLK(blk));
2793     ASSERT(!is_first_blk == !IS_MBC_FIRST_FBLK(allctr, blk));
2794     ASSERT(is_first_blk || IS_PREV_BLK_ALLOCED(blk));
2795     ASSERT(is_last_blk  || IS_PREV_BLK_FREE(NXT_BLK(blk)));
2796     ASSERT(blk_sz == MBC_BLK_SZ(blk));
2797     ASSERT(is_last_blk || blk == PREV_BLK(NXT_BLK(blk)));
2798     ASSERT(blk_sz % sizeof(Unit_t) == 0);
2799     ASSERT(IS_MBC_BLK(blk));
2800 
2801     if (is_first_blk && is_last_blk && crr != allctr->main_carrier) {
2802         destroy_carrier(allctr, blk, busy_pcrr_pp);
2803     }
2804     else {
2805 	(*allctr->link_free_block)(allctr, blk);
2806 	HARD_CHECK_BLK_CARRIER(allctr, blk);
2807 
2808         if (discard) {
2809             mem_discard_finish(allctr, blk, &discard_region);
2810         }
2811 
2812         if (busy_pcrr_pp && *busy_pcrr_pp) {
2813             update_pooled_tree(allctr, crr, blk_sz);
2814         } else {
2815             check_abandon_carrier(allctr, blk, busy_pcrr_pp);
2816         }
2817     }
2818 }
2819 
2820 static void *
2821 mbc_realloc(Allctr_t *allctr, ErtsAlcType_t type, void *p, Uint size,
2822             Uint32 alcu_flgs, Carrier_t **busy_pcrr_pp)
2823 {
2824     void *new_p;
2825     Uint old_blk_sz;
2826     Block_t *blk;
2827 #ifndef MBC_REALLOC_ALWAYS_MOVES
2828     Block_t *new_blk, *cand_blk;
2829     Uint cand_blk_sz;
2830     Uint blk_sz, get_blk_sz;
2831     Block_t *nxt_blk;
2832     Uint nxt_blk_sz;
2833     Uint is_last_blk;
2834 #endif /* #ifndef MBC_REALLOC_ALWAYS_MOVES */
2835 
2836     ASSERT(p);
2837     ASSERT(size);
2838     ASSERT(size < allctr->sbc_threshold);
2839 
2840     blk = (Block_t *) UMEM2BLK(p);
2841     old_blk_sz = MBC_ABLK_SZ(blk);
2842 
2843     ASSERT(old_blk_sz >= allctr->min_block_size);
2844 
2845 #ifdef MBC_REALLOC_ALWAYS_MOVES
2846     if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
2847 	return NULL;
2848 #else /* !MBC_REALLOC_ALWAYS_MOVES */
2849 
2850     if (busy_pcrr_pp && *busy_pcrr_pp) {
2851         /*
2852          * Don't want to use carrier in pool
2853          */
2854         new_p = mbc_alloc(allctr, size);
2855         if (!new_p)
2856             return NULL;
2857         new_blk = UMEM2BLK(new_p);
2858         ASSERT(!(IS_MBC_BLK(new_blk) && ABLK_TO_MBC(new_blk) == *busy_pcrr_pp));
2859         sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
2860         mbc_free(allctr, type, p, busy_pcrr_pp);
2861         return new_p;
2862     }
2863 
2864     get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size);
2865 
2866     ASSERT(IS_ALLOCED_BLK(blk));
2867     ASSERT(IS_MBC_BLK(blk));
2868 
2869     is_last_blk = IS_LAST_BLK(blk);
2870 
2871     if (old_blk_sz == blk_sz)
2872 	return p;
2873     else if (blk_sz < old_blk_sz) {
2874 	/* Shrink block... */
2875 	Carrier_t* crr;
2876 	Block_t *nxt_nxt_blk;
2877 	Uint diff_sz_val = old_blk_sz - blk_sz;
2878 	Uint old_blk_sz_val = old_blk_sz;
2879 
2880 	if (get_blk_sz >= old_blk_sz)
2881 	    return p;
2882 
2883 	if (diff_sz_val >= (~((Uint) 0) / 100)) {
2884 	    /* div both by 128 */
2885 	    old_blk_sz_val >>= 7;
2886 	    diff_sz_val >>= 7;
2887 	}
2888 
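	/* Worked example (hypothetical numbers): with
	 * mbc_move_threshold = 80, shrinking a 1000 byte block to a
	 * 100 byte one gives diff_sz_val = 900, and since 100*900 >
	 * 80*1000 the block is moved rather than shrunk in place. The
	 * >>7 rescaling above only guards 100*diff_sz_val against
	 * overflow; up to truncation it leaves the comparison
	 * unchanged. */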
2889 	/* Avoid fragmentation by moving the block if it is shrunk by a large fraction */
2890 	if (100*diff_sz_val > allctr->mbc_move_threshold*old_blk_sz_val) {
2891 	    if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
2892 		return NULL;
2893 
2894 	    cand_blk_sz = old_blk_sz;
2895 	    if (!IS_PREV_BLK_FREE(blk)) {
2896 		cand_blk = blk;
2897 	    }
2898 	    else {
2899 		ASSERT(!IS_MBC_FIRST_ABLK(allctr, blk));
2900 		cand_blk = PREV_BLK(blk);
2901 		cand_blk_sz += PREV_BLK_SZ(blk);
2902 	    }
2903 	    if (!is_last_blk) {
2904 		nxt_blk = BLK_AFTER(blk, old_blk_sz);
2905 		if (IS_FREE_BLK(nxt_blk))
2906 		    cand_blk_sz += MBC_FBLK_SZ(nxt_blk);
2907 	    }
2908 
2909 	    new_blk = (*allctr->get_free_block)(allctr,
2910 						get_blk_sz,
2911 						cand_blk,
2912 						cand_blk_sz);
2913 	    if (new_blk || cand_blk != blk)
2914 		goto move_into_new_blk;
2915 	}
2916 
2917 	/* Shrink at current location */
2918 
2919 	nxt_blk_sz = old_blk_sz - blk_sz;
2920 
2921 	if ((is_last_blk || IS_ALLOCED_BLK(BLK_AFTER(blk,old_blk_sz)))
2922 	    && (nxt_blk_sz < allctr->min_block_size))
2923 	    return p;
2924 
2925 	HARD_CHECK_BLK_CARRIER(allctr, blk);
2926 
2927 	nxt_nxt_blk = BLK_AFTER(blk, old_blk_sz);
2928 
2929 	SET_MBC_ABLK_SZ(blk, blk_sz);
2930 	SET_NOT_LAST_BLK(blk);
2931 
2932 	nxt_blk = BLK_AFTER(blk, blk_sz);
2933 
2934 	crr = ABLK_TO_MBC(blk);
2935 
2936 	ERTS_ALC_CPOOL_REALLOC_OP(allctr);
2937 	STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);
2938 	STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
2939 
2940 	ASSERT(MBC_BLK_SZ(blk) >= allctr->min_block_size);
2941 
2942 	if (!is_last_blk) {
2943 	    if (IS_FREE_BLK(nxt_nxt_blk)) {
2944 		/* Coalesce with next free block... */
2945 		nxt_blk_sz += MBC_FBLK_SZ(nxt_nxt_blk);
2946 		(*allctr->unlink_free_block)(allctr, nxt_nxt_blk);
2947 
2948 		is_last_blk = GET_LAST_BLK_HDR_FLG(nxt_nxt_blk);
2949 	    }
2950 	    else {
2951 		SET_PREV_BLK_FREE(allctr, nxt_nxt_blk);
2952 	    }
2953 	    SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
2954 	}
2955 
2956 	SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz,
2957 			SBH_THIS_FREE | (is_last_blk ? SBH_LAST_BLK : 0),
2958 			crr);
2959 
2960 	(*allctr->link_free_block)(allctr, nxt_blk);
2961 
2962 
2963 	ASSERT(IS_ALLOCED_BLK(blk));
2964 	ASSERT(blk_sz == MBC_BLK_SZ(blk));
2965 	ASSERT(blk_sz % sizeof(Unit_t) == 0);
2966 	ASSERT(blk_sz >= allctr->min_block_size);
2967 	ASSERT(blk_sz >= size + ABLK_HDR_SZ);
2968 	ASSERT(IS_MBC_BLK(blk));
2969 
2970 	ASSERT(IS_FREE_BLK(nxt_blk));
2971 	ASSERT(IS_PREV_BLK_ALLOCED(nxt_blk));
2972 	ASSERT(nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
2973 	ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0);
2974 	ASSERT(nxt_blk_sz >= allctr->min_block_size);
2975 	ASSERT(IS_MBC_BLK(nxt_blk));
2976 	ASSERT(is_last_blk ? IS_LAST_BLK(nxt_blk) : IS_NOT_LAST_BLK(nxt_blk));
2977 	ASSERT(is_last_blk || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
2978 	ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
2979 	ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
2980 
2981 	HARD_CHECK_BLK_CARRIER(allctr, blk);
2982 
2983 	check_abandon_carrier(allctr, nxt_blk, NULL);
2984 
2985 	return p;
2986     }
2987 
2988     /* Need larger block... */
2989 
2990     if (!is_last_blk) {
2991 	nxt_blk = BLK_AFTER(blk, old_blk_sz);
2992 	nxt_blk_sz = MBC_BLK_SZ(nxt_blk);
2993 	if (IS_FREE_BLK(nxt_blk) && get_blk_sz <= old_blk_sz + nxt_blk_sz) {
2994 	    Carrier_t* crr = ABLK_TO_MBC(blk);
2995 	    /* Grow into next block... */
2996 
2997 	    HARD_CHECK_BLK_CARRIER(allctr, blk);
2998 
2999 	    (*allctr->unlink_free_block)(allctr, nxt_blk);
3000 	    nxt_blk_sz -= blk_sz - old_blk_sz;
3001 
3002 	    is_last_blk = IS_LAST_BLK(nxt_blk);
3003 	    if (nxt_blk_sz < allctr->min_block_size) {
3004 		blk_sz += nxt_blk_sz;
3005 
3006 		SET_MBC_ABLK_SZ(blk, blk_sz);
3007 
3008 		if (is_last_blk) {
3009 		    SET_LAST_BLK(blk);
3010 #ifdef DEBUG
3011 		    nxt_blk = NULL;
3012 #endif
3013 		}
3014 		else {
3015 		    nxt_blk = BLK_AFTER(blk, blk_sz);
3016 		    SET_PREV_BLK_ALLOCED(nxt_blk);
3017 #ifdef DEBUG
3018 		    is_last_blk = IS_LAST_BLK(nxt_blk);
3019 		    nxt_blk_sz = MBC_BLK_SZ(nxt_blk);
3020 #endif
3021 		}
3022 	    }
3023 	    else {
3024 		SET_MBC_ABLK_SZ(blk, blk_sz);
3025 
3026 		nxt_blk = BLK_AFTER(blk, blk_sz);
3027 		SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz, SBH_THIS_FREE, crr);
3028 
3029 		if (is_last_blk)
3030 		    SET_LAST_BLK(nxt_blk);
3031 		else
3032 		    SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
3033 
3034 		(*allctr->link_free_block)(allctr, nxt_blk);
3035 
3036 		ASSERT(IS_FREE_BLK(nxt_blk));
3037 		ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
3038 	    }
3039 
3040 	    ERTS_ALC_CPOOL_REALLOC_OP(allctr);
3041 	    STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);
3042 	    STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
3043 
3044 	    ASSERT(IS_ALLOCED_BLK(blk));
3045 	    ASSERT(blk_sz == MBC_BLK_SZ(blk));
3046 	    ASSERT(blk_sz % sizeof(Unit_t) == 0);
3047 	    ASSERT(blk_sz >= allctr->min_block_size);
3048 	    ASSERT(blk_sz >= size + ABLK_HDR_SZ);
3049 	    ASSERT(IS_MBC_BLK(blk));
3050 
3051 	    ASSERT(!nxt_blk || IS_PREV_BLK_ALLOCED(nxt_blk));
3052 	    ASSERT(!nxt_blk || nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
3053 	    ASSERT(!nxt_blk || nxt_blk_sz % sizeof(Unit_t) == 0);
3054 	    ASSERT(!nxt_blk || nxt_blk_sz >= allctr->min_block_size);
3055 	    ASSERT(!nxt_blk || IS_MBC_BLK(nxt_blk));
3056 	    ASSERT(!nxt_blk || (is_last_blk
3057 				? IS_LAST_BLK(nxt_blk)
3058 				: IS_NOT_LAST_BLK(nxt_blk)));
3059 	    ASSERT(!nxt_blk || is_last_blk
3060 		   || IS_ALLOCED_BLK(nxt_blk)
3061 		   || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
3062 	    ASSERT(!nxt_blk || is_last_blk
3063 		   || IS_ALLOCED_BLK(nxt_blk)
3064 		   || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
3065 
3066 	    HARD_CHECK_BLK_CARRIER(allctr, blk);
3067 
3068 	    return p;
3069 	}
3070     }
3071 
3072     if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
3073 	return NULL;
3074 
3075     /* Need to grow in another block */
3076 
3077     if (!IS_PREV_BLK_FREE(blk)) {
3078 	cand_blk = NULL;
3079 	cand_blk_sz = 0;
3080     }
3081     else {
3082 	ASSERT(!IS_MBC_FIRST_ABLK(allctr, blk));
3083 	cand_blk = PREV_BLK(blk);
3084 	cand_blk_sz = old_blk_sz + PREV_BLK_SZ(blk);
3085 
3086 	if (!is_last_blk) {
3087 	    nxt_blk = BLK_AFTER(blk, old_blk_sz);
3088 	    if (IS_FREE_BLK(nxt_blk))
3089 		cand_blk_sz += MBC_FBLK_SZ(nxt_blk);
3090 	}
3091     }
3092 
3093     if (cand_blk_sz < get_blk_sz) {
3094 	/* We won't fit in cand_blk; get a new one */
3095 
3096 #endif /* !MBC_REALLOC_ALWAYS_MOVES */
3097 
3098 	new_p = mbc_alloc(allctr, size);
3099 	if (!new_p)
3100 	    return NULL;
3101 	sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
3102 	mbc_free(allctr, type, p, busy_pcrr_pp);
3103 
3104 	return new_p;
3105 
3106 #ifndef MBC_REALLOC_ALWAYS_MOVES
3107 
3108     }
3109     else {
3110 	/* We will at least fit in cand_blk */
3111 
3112 	new_blk = (*allctr->get_free_block)(allctr,
3113 					    get_blk_sz,
3114 					    cand_blk,
3115 					    cand_blk_sz);
3116     move_into_new_blk:
3117 	/*
3118 	 * new_blk and cand_blk have to be correctly set
3119 	 * when jumping to this label.
3120 	 */
3121 
3122 	if (new_blk) {
3123 	    mbc_alloc_finalize(allctr,
3124 			       new_blk,
3125 			       MBC_FBLK_SZ(new_blk),
3126 			       GET_BLK_HDR_FLGS(new_blk),
3127 			       FBLK_TO_MBC(new_blk),
3128 			       blk_sz,
3129 			       1);
3130 	    new_p = BLK2UMEM(new_blk);
3131 	    sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
3132 	    mbc_free(allctr, type, p, NULL);
3133 	    return new_p;
3134 	}
3135 	else {
3136 	    Carrier_t* crr;
3137 	    Uint new_blk_sz;
3138 	    UWord new_blk_flgs;
3139 	    Uint prev_blk_sz;
3140 	    Uint blk_cpy_sz;
3141 
3142 	    ASSERT(IS_PREV_BLK_FREE(blk));
3143 	    ASSERT(cand_blk == PREV_BLK(blk));
3144 
3145 	    prev_blk_sz = PREV_BLK_SZ(blk);
3146 	    new_blk = cand_blk;
3147 	    new_blk_sz = prev_blk_sz + old_blk_sz;
3148 	    new_blk_flgs = GET_BLK_HDR_FLGS(new_blk);
3149 
3150 	    HARD_CHECK_BLK_CARRIER(allctr, blk);
3151 
3152 	    (*allctr->unlink_free_block)(allctr, new_blk); /* prev */
3153 
3154 	    if (is_last_blk)
3155 		new_blk_flgs |= LAST_BLK_HDR_FLG;
3156 	    else {
3157 		nxt_blk = BLK_AFTER(blk, old_blk_sz);
3158 		if (IS_FREE_BLK(nxt_blk)) {
3159 		    new_blk_flgs |= GET_LAST_BLK_HDR_FLG(nxt_blk);
3160 		    new_blk_sz += MBC_FBLK_SZ(nxt_blk);
3161 		    (*allctr->unlink_free_block)(allctr, nxt_blk);
3162 		}
3163 	    }
3164 
3165 	    /*
3166 	     * Copy user-data then update new blocks in mbc_alloc_finalize().
3167 	     * mbc_alloc_finalize() may write headers at old location of
3168 	     * user data; therefore, the order is important.
3169 	     */
3170 
3171 	    new_p = BLK2UMEM(new_blk);
3172 	    blk_cpy_sz = MIN(blk_sz, old_blk_sz);
3173 	    crr = FBLK_TO_MBC(new_blk);
3174 
3175 	    if (prev_blk_sz >= blk_cpy_sz)
3176 		sys_memcpy(new_p, p, blk_cpy_sz - ABLK_HDR_SZ);
3177 	    else
3178 		sys_memmove(new_p, p, blk_cpy_sz - ABLK_HDR_SZ);
3179 
3180 	    mbc_alloc_finalize(allctr,
3181 			       new_blk,
3182 			       new_blk_sz,
3183 			       new_blk_flgs,
3184 			       crr,
3185 			       blk_sz,
3186 			       0);
3187 
3188 	    ERTS_ALC_CPOOL_FREE_OP(allctr);
3189 	    STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);
3190 
3191 	    return new_p;
3192 	}
3193     }
3194 #endif /* !MBC_REALLOC_ALWAYS_MOVES */
3195 }
3196 
3197 
3198 #define ERTS_ALC_MAX_DEALLOC_CARRIER		10
3199 #define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT	100
3200 #define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS	3
3201 
3202 #define ERTS_ALC_CPOOL_PTR_MOD_MRK		(((erts_aint_t) 1) << 0)
3203 #define ERTS_ALC_CPOOL_PTR_DEL_MRK		(((erts_aint_t) 1) << 1)
3204 
3205 #define ERTS_ALC_CPOOL_PTR_MRKS \
3206     (ERTS_ALC_CPOOL_PTR_MOD_MRK | ERTS_ALC_CPOOL_PTR_DEL_MRK)
3207 
3208 /*
3209  * When setting multiple mod markers, we always set
3210  * them in pointer order, and always on next
3211  * pointers before prev pointers.
3212  */
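
/* Both markers live in the two least significant bits of the next and
 * prev pointers. A sketch of why that is safe: every
 * ErtsAlcCPoolData_t is at least word aligned (the sentinel pool
 * elements below are even cache-line aligned), so the low bits of a
 * real pointer are always zero and cpool_aint2cpd() recovers the
 * pointer by simply masking the marker bits off:
 *
 *     tagged = ((erts_aint_t) cpdp) | ERTS_ALC_CPOOL_PTR_MOD_MRK;
 *     cpdp   = cpool_aint2cpd(tagged);    (marker bits cleared)
 */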
3213 
3214 typedef union {
3215     ErtsAlcCPoolData_t sentinel;
3216     char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAlcCPoolData_t))];
3217 } ErtsAlcCrrPool_t;
3218 
3219 static ErtsAlcCrrPool_t firstfit_carrier_pools[ERTS_ALC_NO_CPOOLS] erts_align_attribute(ERTS_CACHE_LINE_SIZE);
3220 
3221 #define ERTS_ALC_CPOOL_MAX_BACKOFF (1 << 8)
3222 
3223 static int
3224 backoff(int n)
3225 {
3226     int i;
3227 
3228     for (i = 0; i < n; i++)
3229 	ERTS_SPIN_BODY;
3230 
3231     if (n >= ERTS_ALC_CPOOL_MAX_BACKOFF)
3232 	return ERTS_ALC_CPOOL_MAX_BACKOFF;
3233     else
3234 	return n << 1;
3235 }
3236 
3237 static int
3238 cpool_dbg_is_in_pool(Allctr_t *allctr, Carrier_t *crr)
3239 {
3240     ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
3241     ErtsAlcCPoolData_t *cpdp = sentinel;
3242     Carrier_t *tmp_crr;
3243 
3244     while (1) {
3245 	cpdp = (ErtsAlcCPoolData_t *) (erts_atomic_read_ddrb(&cpdp->next) & ~CRR_FLG_MASK);
3246 	if (cpdp == sentinel)
3247 	    return 0;
3248 	tmp_crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool));
3249 	if (tmp_crr == crr)
3250 	    return 1;
3251     }
3252 }
3253 
3254 static int
3255 cpool_is_empty(Allctr_t *allctr)
3256 {
3257     ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
3258     return ((erts_atomic_read_rb(&sentinel->next) == (erts_aint_t) sentinel)
3259 	    && (erts_atomic_read_rb(&sentinel->prev) == (erts_aint_t) sentinel));
3260 }
3261 
3262 static ERTS_INLINE ErtsAlcCPoolData_t *
3263 cpool_aint2cpd(erts_aint_t aint)
3264 {
3265     return (ErtsAlcCPoolData_t *) (aint & ~ERTS_ALC_CPOOL_PTR_MRKS);
3266 }
3267 
3268 static ERTS_INLINE erts_aint_t
3269 cpool_read(erts_atomic_t *aptr)
3270 {
3271     return erts_atomic_read_acqb(aptr);
3272 }
3273 
3274 static ERTS_INLINE void
3275 cpool_init(erts_atomic_t *aptr, erts_aint_t val)
3276 {
3277     erts_atomic_set_nob(aptr, val);
3278 }
3279 
3280 static ERTS_INLINE void
3281 cpool_set_mod_marked(erts_atomic_t *aptr, erts_aint_t new, erts_aint_t old)
3282 {
3283 #ifdef ERTS_ALC_CPOOL_DEBUG
3284     erts_aint_t act = erts_atomic_xchg_relb(aptr, new);
3285     ERTS_ALC_CPOOL_ASSERT(act == (old | ERTS_ALC_CPOOL_PTR_MOD_MRK));
3286 #else
3287     erts_atomic_set_relb(aptr, new);
3288 #endif
3289 }
3290 
3291 
3292 static ERTS_INLINE erts_aint_t
3293 cpool_try_mod_mark_exp(erts_atomic_t *aptr, erts_aint_t exp)
3294 {
3295     ERTS_ALC_CPOOL_ASSERT((exp & ERTS_ALC_CPOOL_PTR_MOD_MRK) == 0);
3296     return erts_atomic_cmpxchg_nob(aptr, exp | ERTS_ALC_CPOOL_PTR_MOD_MRK, exp);
3297 }
3298 
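/*
 * Like cpool_try_mod_mark_exp(), but spin (with backoff) for as long as
 * the pointer still equals 'exp' apart from a mod marker held by some
 * other thread. Returns 'exp' on success, or the new pointer value if
 * it changed away from 'exp'.
 */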
3299 static ERTS_INLINE erts_aint_t
3300 cpool_mod_mark_exp(erts_atomic_t *aptr, erts_aint_t exp)
3301 {
3302     int b;
3303     erts_aint_t act;
3304     ERTS_ALC_CPOOL_ASSERT((exp & ERTS_ALC_CPOOL_PTR_MOD_MRK) == 0);
3305     while (1) {
3306 	act = erts_atomic_cmpxchg_nob(aptr,
3307 				      exp | ERTS_ALC_CPOOL_PTR_MOD_MRK,
3308 				      exp);
3309 	if (act == exp)
3310 	    return exp;
3311 	b = 1;
3312 	do {
3313 	    if ((act & ~ERTS_ALC_CPOOL_PTR_MOD_MRK) != exp)
3314 		return act;
3315 	    b = backoff(b);
3316 	    act = erts_atomic_read_nob(aptr);
3317 	} while (act != exp);
3318     }
3319 }
3320 
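/*
 * Set the mod marker on '*aptr' whatever its current value, spinning
 * while another thread holds the marker. Returns the unmarked value
 * that was current when we succeeded.
 */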
3321 static ERTS_INLINE erts_aint_t
3322 cpool_mod_mark(erts_atomic_t *aptr)
3323 {
3324     int b;
3325     erts_aint_t act, exp;
3326     act = cpool_read(aptr);
3327     while (1) {
3328 	b = 1;
3329 	while (act & ERTS_ALC_CPOOL_PTR_MOD_MRK) {
3330 	    b = backoff(b);
3331 	    act = erts_atomic_read_nob(aptr);
3332 	}
3333 	exp = act;
3334 	act = erts_atomic_cmpxchg_acqb(aptr,
3335 				       exp | ERTS_ALC_CPOOL_PTR_MOD_MRK,
3336 				       exp);
3337 	if (act == exp)
3338 	    return exp;
3339     }
3340 }
3341 
3342 static void
3343 cpool_insert(Allctr_t *allctr, Carrier_t *crr)
3344 {
3345     ErtsAlcCPoolData_t *cpd1p, *cpd2p;
3346     erts_aint_t val;
3347     ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
3348     Allctr_t *orig_allctr = crr->cpool.orig_allctr;
3349 
3350     ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */
3351 			  || erts_thr_progress_is_managed_thread());
3352 
3353     {
3354         int alloc_no = allctr->alloc_no;
3355 
3356         ERTS_ALC_CPOOL_ASSERT(
3357             erts_atomic_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no]) >= 0 &&
3358             crr->cpool.blocks_size[alloc_no] >= 0);
3359 
3360         ERTS_ALC_CPOOL_ASSERT(
3361             erts_atomic_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]) >= 0 &&
3362             crr->cpool.blocks[alloc_no] >= 0);
3363 
3364         /* We only modify the counter for our current type since the others are
3365          * conceptually still in the pool. */
3366         erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
3367                             ((erts_aint_t) crr->cpool.blocks_size[alloc_no]));
3368         erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no],
3369                             ((erts_aint_t) crr->cpool.blocks[alloc_no]));
3370     }
3371 
3372     erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size,
3373 			(erts_aint_t) CARRIER_SZ(crr));
3374     erts_atomic_inc_nob(&orig_allctr->cpool.stat.no_carriers);
3375 
3376     /*
3377      * We search in 'next' direction and begin by passing
3378      * one element before trying to insert. This is done to
3379      * avoid contention with threads fetching elements.
3380      */
3381 
3382     val = cpool_read(&sentinel->next);
3383 
3384     /* Find a predecessor-to-be, and set the mod marker on its next ptr */
3385 
3386     while (1) {
3387 	cpd1p = cpool_aint2cpd(val);
3388 	if (cpd1p == sentinel) {
3389 	    val = cpool_mod_mark(&cpd1p->next);
3390 	    break;
3391 	}
3392 	val = cpool_read(&cpd1p->next);
3393 	if (!(val & ERTS_ALC_CPOOL_PTR_MRKS)) {
3394 	    erts_aint_t tmp = cpool_try_mod_mark_exp(&cpd1p->next, val);
3395 	    if (tmp == val) {
3396 		val = tmp;
3397 		break;
3398 	    }
3399 	    val = tmp;
3400 	}
3401     }
3402 
3403     /* Set the mod marker on the prev ptr of the successor-to-be */
3404 
3405     cpd2p = cpool_aint2cpd(val);
3406 
3407     cpool_init(&crr->cpool.next, (erts_aint_t) cpd2p);
3408     cpool_init(&crr->cpool.prev, (erts_aint_t) cpd1p);
3409 
3410     val = (erts_aint_t) cpd1p;
3411 
3412     while (1) {
3413 	int b;
3414 	erts_aint_t tmp;
3415 
3416 	tmp = cpool_mod_mark_exp(&cpd2p->prev, val);
3417 	if (tmp == val)
3418 	    break;
3419 	b = 1;
3420 	do {
3421 	    b = backoff(b);
3422 	    tmp = cpool_read(&cpd2p->prev);
3423 	} while (tmp != val);
3424     }
3425 
3426     /* Write pointers to this element in successor and predecessor */
3427 
3428     cpool_set_mod_marked(&cpd1p->next,
3429 			 (erts_aint_t) &crr->cpool,
3430 			 (erts_aint_t) cpd2p);
3431     cpool_set_mod_marked(&cpd2p->prev,
3432 			 (erts_aint_t) &crr->cpool,
3433 			 (erts_aint_t) cpd1p);
3434 
3435     LTTNG3(carrier_pool_put, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, CARRIER_SZ(crr));
3436 }
3437 
3438 static void
3439 cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)
3440 {
3441     ErtsAlcCPoolData_t *cpd1p, *cpd2p;
3442     erts_aint_t val;
3443 #ifdef ERTS_ALC_CPOOL_DEBUG
3444     ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
3445 #endif
3446 
3447     ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */
3448 			  || erts_thr_progress_is_managed_thread());
3449     ERTS_ALC_CPOOL_ASSERT(sentinel != &crr->cpool);
3450 
3451     /* Set mod marker on next ptr of our predecessor */
3452 
3453     val = (erts_aint_t) &crr->cpool;
3454     while (1) {
3455 	erts_aint_t tmp;
3456 	cpd1p = cpool_aint2cpd(cpool_read(&crr->cpool.prev));
3457 	tmp = cpool_mod_mark_exp(&cpd1p->next, val);
3458 	if (tmp == val)
3459 	    break;
3460     }
3461 
3462     /* Set mod marker on our next ptr */
3463 
3464     val = cpool_mod_mark(&crr->cpool.next);
3465 
3466     /* Set mod marker on the prev ptr of our successor */
3467 
3468     cpd2p = cpool_aint2cpd(val);
3469 
3470     val = (erts_aint_t) &crr->cpool;
3471 
3472     while (1) {
3473 	int b;
3474 	erts_aint_t tmp;
3475 
3476 	tmp = cpool_mod_mark_exp(&cpd2p->prev, val);
3477 	if (tmp == val)
3478 	    break;
3479 	b = 1;
3480 	do {
3481 	    b = backoff(b);
3482 	    tmp = cpool_read(&cpd2p->prev);
3483 	} while (tmp != val);
3484     }
3485 
3486     /* Set mod marker on our prev ptr */
3487 
3488     val = (erts_aint_t) cpd1p;
3489 
3490     while (1) {
3491 	int b;
3492 	erts_aint_t tmp;
3493 
3494 	tmp = cpool_mod_mark_exp(&crr->cpool.prev, val);
3495 	if (tmp == val)
3496 	    break;
3497 	b = 1;
3498 	do {
3499 	    b = backoff(b);
3500 	    tmp = cpool_read(&cpd2p->prev);
3501 	} while (tmp != val);
3502     }
3503 
3504     /* Write pointers past this element in predecessor and successor */
3505 
3506     cpool_set_mod_marked(&cpd1p->next,
3507 			 (erts_aint_t) cpd2p,
3508 			 (erts_aint_t) &crr->cpool);
3509     cpool_set_mod_marked(&cpd2p->prev,
3510 			 (erts_aint_t) cpd1p,
3511 			 (erts_aint_t) &crr->cpool);
3512 
3513 	    /* Replace mod markers with delete markers on this element */
3514     cpool_set_mod_marked(&crr->cpool.next,
3515 			 ((erts_aint_t) cpd2p) | ERTS_ALC_CPOOL_PTR_DEL_MRK,
3516 			 ((erts_aint_t) cpd2p) | ERTS_ALC_CPOOL_PTR_MOD_MRK);
3517     cpool_set_mod_marked(&crr->cpool.prev,
3518 			 ((erts_aint_t) cpd1p) | ERTS_ALC_CPOOL_PTR_DEL_MRK,
3519 			 ((erts_aint_t) cpd1p) | ERTS_ALC_CPOOL_PTR_MOD_MRK);
3520 
3521     crr->cpool.thr_prgr = erts_thr_progress_later(NULL);
3522 
3523     {
3524         Allctr_t *orig_allctr = crr->cpool.orig_allctr;
3525         int alloc_no = allctr->alloc_no;
3526 
3527         ERTS_ALC_CPOOL_ASSERT(orig_allctr == prev_allctr);
3528 
3529         ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size[alloc_no] <=
3530             erts_atomic_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no]));
3531 
3532         ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks[alloc_no] <=
3533             erts_atomic_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]));
3534 
3535         /* We only modify the counters for our current type since the others
3536          * were, conceptually, never taken out of the pool. */
3537         erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
3538                             -((erts_aint_t) crr->cpool.blocks_size[alloc_no]));
3539         erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no],
3540                             -((erts_aint_t) crr->cpool.blocks[alloc_no]));
3541 
3542         erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size,
3543 			-((erts_aint_t) CARRIER_SZ(crr)));
3544         erts_atomic_dec_wb(&orig_allctr->cpool.stat.no_carriers);
3545     }
3546 
3547 }
3548 
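/*
 * Fetch a carrier with at least 'size' bytes of free space. We search,
 * in order: our own pooled_tree, the shared pool (entered via one of
 * our pooled carriers if possible, otherwise via the sentinel), and
 * finally our pending dealloc list. At most
 * ERTS_ALC_CPOOL_MAX_FETCH_INSPECT carriers are inspected in total
 * before giving up.
 */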
3549 static Carrier_t *
3550 cpool_fetch(Allctr_t *allctr, UWord size)
3551 {
3552     int i, seen_sentinel;
3553     Carrier_t *crr;
3554     Carrier_t *reinsert_crr = NULL;
3555     ErtsAlcCPoolData_t *cpdp;
3556     ErtsAlcCPoolData_t *cpool_entrance = NULL;
3557     ErtsAlcCPoolData_t *sentinel;
3558 
3559     ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */
3560 			  || erts_thr_progress_is_managed_thread());
3561 
3562     i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT;
3563 
3564     LTTNG3(carrier_pool_get, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, (unsigned long)size);
3565     /*
3566      * Search my own pooled_tree,
3567      * i.e. my abandoned carriers that were in the pool last time I checked.
3568      */
3569     do {
3570         erts_aint_t exp, act;
3571 
3572         crr = aoff_lookup_pooled_mbc(allctr, size);
3573         if (!crr)
3574             break;
3575 
3576         ASSERT(crr->cpool.state == ERTS_MBC_WAS_POOLED);
3577         ASSERT(crr->cpool.orig_allctr == allctr);
3578 
3579         aoff_remove_pooled_mbc(allctr, crr);
3580 
3581         exp = erts_atomic_read_nob(&crr->allctr);
3582         if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
3583             ASSERT((exp & ~ERTS_CRR_ALCTR_FLG_MASK) == (erts_aint_t)allctr);
3584             if (erts_atomic_read_nob(&crr->cpool.max_size) < size) {
3585                 /*
3586                  * This carrier has been fetched and inserted back again
3587                  * by a foreign allocator. That's why it has a stale search size.
3588                  */
3589                 ASSERT(exp & ERTS_CRR_ALCTR_FLG_HOMECOMING);
3590                 crr->cpool.pooled.hdr.bhdr = erts_atomic_read_nob(&crr->cpool.max_size);
3591                 aoff_add_pooled_mbc(allctr, crr);
3592                 INC_CC(allctr->cpool.stat.skip_size);
3593                 continue;
3594             }
3595             else if (exp & ERTS_CRR_ALCTR_FLG_BUSY) {
3596                 /*
3597                  * This must be our own carrier as part of a realloc call.
3598                  * Skip it to make things simpler.
3599                  * We must wait to re-insert it so it is not found again by the lookup.
3600                  */
3601                 ASSERT(!reinsert_crr);
3602                 reinsert_crr = crr;
3603                 INC_CC(allctr->cpool.stat.skip_busy);
3604                 continue;
3605             }
3606 
3607             /* Try to fetch it... */
3608             act = erts_atomic_cmpxchg_mb(&crr->allctr,
3609                                          exp & ~ERTS_CRR_ALCTR_FLG_IN_POOL,
3610                                          exp);
3611             if (act == exp) {
3612                 cpool_delete(allctr, allctr, crr);
3613                 crr->cpool.state = ERTS_MBC_IS_HOME;
3614 
3615                 if (reinsert_crr)
3616                     aoff_add_pooled_mbc(allctr, reinsert_crr);
3617                 return crr;
3618             }
3619             exp = act;
3620             INC_CC(allctr->cpool.stat.skip_race);
3621         }
3622         else
3623             INC_CC(allctr->cpool.stat.skip_not_pooled);
3624 
3625         /* Not in pool anymore */
3626         ASSERT(!(exp & ERTS_CRR_ALCTR_FLG_BUSY));
3627         crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
3628 
3629     } while (--i > 0);
3630 
3631     if (reinsert_crr)
3632         aoff_add_pooled_mbc(allctr, reinsert_crr);
3633 
3634     /*
3635      * Try to find a nice cpool_entrance
3636      */
3637     while (allctr->cpool.pooled_tree) {
3638         erts_aint_t iallctr;
3639 
3640         crr = ErtsContainerStruct(allctr->cpool.pooled_tree, Carrier_t, cpool.pooled);
3641         iallctr = erts_atomic_read_nob(&crr->allctr);
3642         if (iallctr & ERTS_CRR_ALCTR_FLG_IN_POOL) {
3643             cpool_entrance = &crr->cpool;
3644             break;
3645         }
3646         /* Not in pool anymore */
3647         ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
3648         aoff_remove_pooled_mbc(allctr, crr);
3649         crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
3650 
3651         if (--i <= 0) {
3652             INC_CC(allctr->cpool.stat.fail_pooled);
3653             return NULL;
3654         }
3655     }
3656 
3657 
3658     /*
3659      * Finally, search the shared pool and try to employ foreign carriers
3660      */
3661     sentinel = allctr->cpool.sentinel;
3662     if (cpool_entrance) {
3663         /*
3664          * We saw a pooled carrier above; use it as the entrance into the pool
3665 	 */
3666     }
3667     else {
3668         /*
3669          * No pooled carrier seen above. Start search at cpool sentinel,
3670 	 * but begin by passing one element before trying to fetch.
3671 	 * This is done to avoid contention with threads inserting elements.
3672 	 */
3673         cpool_entrance = cpool_aint2cpd(cpool_read(&sentinel->prev));
3674 	if (cpool_entrance == sentinel)
3675 	    goto check_dc_list;
3676     }
3677 
3678     cpdp = cpool_entrance;
3679     seen_sentinel = 0;
3680     do {
3681 	erts_aint_t exp;
3682 	cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
3683         if (cpdp == sentinel) {
3684 	    if (seen_sentinel) {
3685 		/* We have been here before; cpool_entrance must have been removed */
3686                 INC_CC(allctr->cpool.stat.entrance_removed);
3687 		break;
3688 	    }
3689             seen_sentinel = 1;
3690             continue;
3691 	}
3692         ASSERT(cpdp != cpool_entrance || seen_sentinel);
3693 
3694 	crr = ErtsContainerStruct(cpdp, Carrier_t, cpool);
3695 	exp = erts_atomic_read_rb(&crr->allctr);
3696 
3697         if (erts_atomic_read_nob(&cpdp->max_size) < size) {
3698             INC_CC(allctr->cpool.stat.skip_size);
3699         }
3700         else if ((exp & (ERTS_CRR_ALCTR_FLG_IN_POOL | ERTS_CRR_ALCTR_FLG_BUSY))
3701                   == ERTS_CRR_ALCTR_FLG_IN_POOL) {
3702 	    erts_aint_t act;
3703             erts_aint_t want = (((erts_aint_t) allctr)
3704                                 | (exp & ERTS_CRR_ALCTR_FLG_HOMECOMING));
3705             /* Try to fetch it... */
3706 	    act = erts_atomic_cmpxchg_mb(&crr->allctr, want, exp);
3707 	    if (act == exp) {
3708 		cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
3709 		if (crr->cpool.orig_allctr == allctr) {
3710 		    unlink_abandoned_carrier(crr);
3711                     crr->cpool.state = ERTS_MBC_IS_HOME;
3712                 }
3713 		return crr;
3714 	    }
3715 	}
3716 
3717         if (exp & ERTS_CRR_ALCTR_FLG_BUSY)
3718             INC_CC(allctr->cpool.stat.skip_busy);
3719         else
3720             INC_CC(allctr->cpool.stat.skip_race);
3721 
3722 	if (--i <= 0) {
3723             INC_CC(allctr->cpool.stat.fail_shared);
3724 	    return NULL;
3725         }
3726     } while (cpdp != cpool_entrance);
3727 
3728 check_dc_list:
3729     /* Last, check our own pending dealloc carrier list... */
3730     crr = allctr->cpool.dc_list.last;
3731     while (crr) {
3732 	if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
3733 	    Block_t* blk;
3734 	    unlink_carrier(&allctr->cpool.dc_list, crr);
3735 	    ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr)
3736                                   == ((erts_aint_t) allctr));
3737 	    blk = MBC_TO_FIRST_BLK(allctr, crr);
3738 	    ASSERT(FBLK_TO_MBC(blk) == crr);
3739 	    allctr->link_free_block(allctr, blk);
3740 	    return crr;
3741 	}
3742 	crr = crr->prev;
3743 	if (--i <= 0) {
3744             INC_CC(allctr->cpool.stat.fail_pend_dealloc);
3745 	    return NULL;
3746         }
3747     }
3748 
3749     if (i != ERTS_ALC_CPOOL_MAX_FETCH_INSPECT)
3750         INC_CC(allctr->cpool.stat.fail);
3751 
3752     return NULL;
3753 }
3754 
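/*
 * Deallocate carriers on the pending dealloc list whose thread progress
 * deadline has been reached, at most ERTS_ALC_MAX_DEALLOC_CARRIER of
 * them per call. If work remains, request either more work or a wakeup
 * at a later thread progress value via the out-parameters.
 */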
3755 static void
3756 check_pending_dealloc_carrier(Allctr_t *allctr,
3757 			      int *need_thr_progress,
3758 			      ErtsThrPrgrVal *thr_prgr_p,
3759 			      int *need_more_work)
3760 {
3761     Carrier_t *crr = allctr->cpool.dc_list.first;
3762 
3763     if (crr) {
3764 	ErtsThrPrgrVal current = erts_thr_progress_current();
3765 	int i = 0;
3766 
3767 	do {
3768 	    Carrier_t *dcrr;
3769 
3770 	    if (!erts_thr_progress_has_reached_this(current, crr->cpool.thr_prgr))
3771 		break;
3772 
3773 	    dcrr = crr;
3774 	    crr = crr->next;
3775 	    dealloc_mbc(allctr, dcrr);
3776 	    i++;
3777 	} while (crr && i < ERTS_ALC_MAX_DEALLOC_CARRIER);
3778 
3779 	allctr->cpool.dc_list.first = crr;
3780 	if (!crr)
3781 	    allctr->cpool.dc_list.last = NULL;
3782 	else {
3783 	    crr->prev = NULL;
3784 
3785 	    if (need_more_work) {
3786 		ERTS_ALC_CPOOL_ASSERT(need_thr_progress && thr_prgr_p);
3787 		if (erts_thr_progress_has_reached_this(current, crr->cpool.thr_prgr))
3788 		    *need_more_work = 1;
3789 		else {
3790 		    *need_thr_progress = 1;
3791 		    if (*thr_prgr_p == ERTS_THR_PRGR_INVALID
3792 			|| erts_thr_progress_cmp(crr->cpool.thr_prgr,
3793 						 *thr_prgr_p) < 0) {
3794 			*thr_prgr_p = crr->cpool.thr_prgr;
3795 		    }
3796 		}
3797 	    }
3798 	}
3799     }
3800 }
3801 
3802 static void
3803 schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
3804 {
3805     Allctr_t *orig_allctr;
3806 
3807     ASSERT(IS_MB_CARRIER(crr));
3808 
3809     if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
3810 	dealloc_mbc(allctr, crr);
3811 	return;
3812     }
3813 
3814     orig_allctr = crr->cpool.orig_allctr;
3815 
3816     if (allctr == orig_allctr) {
3817         if (!(erts_atomic_read_nob(&crr->allctr) & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
3818             dealloc_my_carrier(allctr, crr);
3819         }
3820         /*else
3821          * Carrier was abandoned earlier by another thread and
3822          * is still waiting for us in the dd-queue.
3823          * handle_delayed_dealloc() will handle it when crr is dequeued.
3824          */
3825     }
3826     else {
3827 	/*
3828 	 * We send the carrier to its origin for deallocation.
3829 	 * We do this in order:
3830 	 * - not to complicate things for the thread specific
3831 	 *   instances of mseg_alloc, and
3832 	 * - to ensure that we always only reuse empty carriers
3833 	 *   originating from our own thread specific mseg_alloc
3834 	 *   instance which is beneficial on NUMA systems.
3835 	 */
3836         erts_aint_t iallctr;
3837 #ifdef ERTS_ALC_CPOOL_DEBUG
3838 	Block_t* first_blk = MBC_TO_FIRST_BLK(allctr, crr);
3839 	ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(first_blk));
3840 
3841 	ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, first_blk));
3842 	ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(first_blk));
3843 	ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, first_blk));
3844 	ERTS_ALC_CPOOL_ASSERT((erts_atomic_read_nob(&crr->allctr)
3845                                & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
3846                               == (erts_aint_t) allctr);
3847 #endif
3848 
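        /*
         * Tag the carrier with the HOMECOMING flag and point it back at
         * its origin. The atomic xchg guarantees that only the thread
         * that first sets the flag enqueues the carrier.
         */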
3849         iallctr = (erts_aint_t)orig_allctr | ERTS_CRR_ALCTR_FLG_HOMECOMING;
3850         if (!(erts_atomic_xchg_nob(&crr->allctr, iallctr)
3851               & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
3852             enqueue_homecoming(allctr, crr);
3853         }
3854     }
3855 }
3856 
3857 static void dealloc_my_carrier(Allctr_t *allctr, Carrier_t *crr)
3858 {
3859     Block_t *blk;
3860     int check_pending_dealloc;
3861     erts_aint_t max_size;
3862 
3863     ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
3864     if (is_abandoned(crr)) {
3865         unlink_abandoned_carrier(crr);
3866         crr->cpool.state = ERTS_MBC_IS_HOME;
3867     }
3868 
3869     if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID
3870 	|| erts_thr_progress_has_reached(crr->cpool.thr_prgr)) {
3871 	dealloc_mbc(allctr, crr);
3872 	return;
3873     }
3874 
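    /*
     * Some thread may still be referencing the carrier; park it on the
     * pending dealloc list (where cpool_fetch() may still reuse it)
     * until thread progress guarantees that no such references remain.
     */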
3875     blk = MBC_TO_FIRST_BLK(allctr, crr);
3876     ASSERT(IS_FREE_LAST_MBC_BLK(blk));
3877     max_size = (erts_aint_t) MBC_FBLK_SZ(blk);
3878     erts_atomic_set_nob(&crr->cpool.max_size, max_size);
3879 
3880     crr->next = NULL;
3881     crr->prev = allctr->cpool.dc_list.last;
3882     if (allctr->cpool.dc_list.last) {
3883 	check_pending_dealloc = 1;
3884 	allctr->cpool.dc_list.last->next = crr;
3885     }
3886     else {
3887 	check_pending_dealloc = 0;
3888 	allctr->cpool.dc_list.first = crr;
3889     }
3890     allctr->cpool.dc_list.last = crr;
3891     if (check_pending_dealloc)
3892 	check_pending_dealloc_carrier(allctr, NULL, NULL, NULL);
3893     erts_alloc_ensure_handle_delayed_dealloc_call(allctr->ix);
3894 }
3895 
3896 static ERTS_INLINE void
3897 cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
3898 {
3899     crr->cpool.homecoming_dd.blk.bhdr = HOMECOMING_MBC_BLK_HDR;
3900     erts_atomic_init_nob(&crr->cpool.next, ERTS_AINT_NULL);
3901     erts_atomic_init_nob(&crr->cpool.prev, ERTS_AINT_NULL);
3902     crr->cpool.orig_allctr = allctr;
3903     crr->cpool.thr_prgr = ERTS_THR_PRGR_INVALID;
3904     erts_atomic_init_nob(&crr->cpool.max_size, 0);
3905     sys_memset(&crr->cpool.blocks_size, 0, sizeof(crr->cpool.blocks_size));
3906     sys_memset(&crr->cpool.blocks, 0, sizeof(crr->cpool.blocks));
3907     crr->cpool.total_blocks_size = 0;
3908     if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
3909 	crr->cpool.abandon_limit = 0;
3910     else {
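	/* Same overflow-safe percentage computation as in
	 * allctr_abandon_limit(). */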
3911 	UWord csz = CARRIER_SZ(crr);
3912 	UWord limit = csz*allctr->cpool.util_limit;
3913 	if (limit > csz)
3914 	    limit /= 100;
3915 	else
3916 	    limit = (csz/100)*allctr->cpool.util_limit;
3917 	crr->cpool.abandon_limit = limit;
3918     }
3919     crr->cpool.state = ERTS_MBC_IS_HOME;
3920 }
3921 
3922 
3923 
3924 static UWord
3925 allctr_abandon_limit(Allctr_t *allctr)
3926 {
3927     UWord limit;
3928     UWord csz;
3929 
3930     csz = allctr->mbcs.curr.norm.mseg.size;
3931     csz += allctr->mbcs.curr.norm.sys_alloc.size;
3932 
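    /*
     * The limit is csz * util_limit / 100. If the multiplication did
     * not grow the value it is assumed to have wrapped, and we divide
     * first instead, trading a little precision for safety. For
     * example, csz = 1048576 and util_limit = 70 (70 %) gives an
     * abandon limit of 734003 bytes.
     */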
3933     limit = csz*allctr->cpool.util_limit;
3934     if (limit > csz)
3935 	limit /= 100;
3936     else
3937 	limit = (csz/100)*allctr->cpool.util_limit;
3938 
3939     return limit;
3940 }
3941 
3942 static void ERTS_INLINE
3943 set_new_allctr_abandon_limit(Allctr_t *allctr)
3944 {
3945     allctr->cpool.abandon_limit = allctr_abandon_limit(allctr);
3946 }
3947 
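/*
 * Abandon a carrier to the pool: unlink it from our local structures,
 * let the OS reclaim its free pages, publish it in the shared pool, and
 * finally update the carrier's allctr word so other threads may fetch it.
 */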
3948 static void
3949 abandon_carrier(Allctr_t *allctr, Carrier_t *crr)
3950 {
3951     erts_aint_t iallctr;
3952 
3953     STAT_MBC_ABANDON(allctr, crr);
3954 
3955     unlink_carrier(&allctr->mbc_list, crr);
3956     allctr->remove_mbc(allctr, crr);
3957 
3958     /* Mark our free blocks as unused and reclaimable to the OS. */
3959     carrier_mem_discard_free_blocks(allctr, crr);
3960 
3961     cpool_insert(allctr, crr);
3962 
3963 
3964     iallctr = erts_atomic_read_nob(&crr->allctr);
3965     if (allctr == crr->cpool.orig_allctr) {
3966         /* preserve HOMECOMING flag */
3967         ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) == (erts_aint_t)allctr);
3968         erts_atomic_set_wb(&crr->allctr, iallctr | ERTS_CRR_ALCTR_FLG_IN_POOL);
3969         poolify_my_carrier(allctr, crr);
3970     }
3971     else {
3972         ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) == (erts_aint_t)allctr);
3973         iallctr = ((erts_aint_t)crr->cpool.orig_allctr |
3974                    ERTS_CRR_ALCTR_FLG_HOMECOMING |
3975                    ERTS_CRR_ALCTR_FLG_IN_POOL);
3976         if (!(erts_atomic_xchg_wb(&crr->allctr, iallctr)
3977               & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
3978 
3979             enqueue_homecoming(allctr, crr);
3980         }
3981     }
3982 }
3983 
3984 static void
3985 enqueue_homecoming(Allctr_t* allctr, Carrier_t* crr)
3986 {
3987     Allctr_t* orig_allctr = crr->cpool.orig_allctr;
3988     const int cinit = orig_allctr->dd.ix - allctr->dd.ix;
3989     Block_t* dd_blk = &crr->cpool.homecoming_dd.blk;
3990 
3991     /*
3992      * The receiver will recognize this as a carrier
3993      * (and not a block which is the common case)
3994      * since the block header is HOMECOMING_MBC_BLK_HDR.
3995      */
3996     ASSERT(dd_blk->bhdr == HOMECOMING_MBC_BLK_HDR);
3997     if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(dd_blk), cinit))
3998         erts_alloc_notify_delayed_dealloc(orig_allctr->ix);
3999 }
4000 
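/*
 * Keep a reference to our own abandoned carrier in the local
 * pooled_tree, keyed on its largest free block size (stored in the
 * block header word of the embedded pooled node), so that a later
 * cpool_fetch() can find it without walking the shared pool.
 */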
4001 static void
4002 poolify_my_carrier(Allctr_t *allctr, Carrier_t *crr)
4003 {
4004     ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
4005 
4006     crr->cpool.pooled.hdr.bhdr = erts_atomic_read_nob(&crr->cpool.max_size);
4007     aoff_add_pooled_mbc(allctr, crr);
4008     crr->cpool.state = ERTS_MBC_WAS_POOLED;
4009 }
4010 
4011 static void
4012 cpool_read_stat(Allctr_t *allctr, int alloc_no,
4013                 UWord *nocp, UWord *cszp, UWord *nobp, UWord *bszp)
4014 {
4015     int i;
4016     UWord noc = 0, csz = 0, nob = 0, bsz = 0;
4017 
4018     /*
4019      * We try to get consistent values, but after
4020      * ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS failed
4021      * tries we give up and present what we got...
4022      */
4023     for (i = 0; i <= ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS; i++) {
4024 	UWord tnoc, tcsz, tnob, tbsz;
4025 
4026 	tnoc = (UWord) (nocp
4027 			? erts_atomic_read_nob(&allctr->cpool.stat.no_carriers)
4028 			: 0);
4029 	tcsz = (UWord) (cszp
4030 			? erts_atomic_read_nob(&allctr->cpool.stat.carriers_size)
4031 			: 0);
4032 	tnob = (UWord) (nobp
4033 			? erts_atomic_read_nob(&allctr->cpool.stat.no_blocks[alloc_no])
4034 			: 0);
4035 	tbsz = (UWord) (bszp
4036 			? erts_atomic_read_nob(&allctr->cpool.stat.blocks_size[alloc_no])
4037 			: 0);
4038 	if (tnoc == noc && tcsz == csz && tnob == nob && tbsz == bsz)
4039 	    break;
4040 	noc = tnoc;
4041 	csz = tcsz;
4042 	nob = tnob;
4043 	bsz = tbsz;
4044 	ERTS_THR_READ_MEMORY_BARRIER;
4045     }
4046 
4047     if (nocp)
4048 	*nocp = noc;
4049     if (cszp)
4050 	*cszp = csz;
4051     if (nobp)
4052 	*nobp = nob;
4053     if (bszp)
4054 	*bszp = bsz;
4055 }
4056 
4057 
4058 
4059 #ifdef DEBUG
4060 
4061 #if ERTS_SA_MB_CARRIERS
4062 #define ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % ERTS_SACRR_UNIT_SZ == 0)
4063 #else
4064 #define ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE(CSZ)
4065 #endif
4066 
4067 static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C,
4068 			       UWord CSZ, Block_t* B, UWord BSZ)
4069 {
4070     ASSERT(IS_LAST_BLK((B)));
4071     ASSERT((CSZ) == CARRIER_SZ((C)));
4072     ASSERT((BSZ) % sizeof(Unit_t) == 0);
4073     if ((SBC)) {
4074 	ASSERT((BSZ) == SBC_BLK_SZ((B)));
4075 	ASSERT((char*)B == (char*)C + SBC_HEADER_SIZE);
4076 	ASSERT(IS_SBC_BLK((B)));
4077 	ASSERT(IS_SB_CARRIER((C)));
4078     }
4079     else {
4080 	ASSERT(IS_FREE_BLK(B));
4081 	ASSERT((BSZ) == MBC_FBLK_SZ((B)));
4082 	ASSERT(IS_MBC_FIRST_FBLK(A, (B)));
4083 	ASSERT(IS_MBC_BLK((B)));
4084 	ASSERT(IS_MB_CARRIER((C)));
4085 	ASSERT(FBLK_TO_MBC(B) == (C));
4086 	if ((MSEGED)) {
4087 	    ASSERT_ERTS_SACRR_UNIT_SIZE_MULTIPLE((CSZ));
4088 	}
4089     }
4090     if ((MSEGED)) {
4091 	ASSERT(IS_MSEG_CARRIER((C)));
4092     }
4093     else {
4094 	ASSERT(IS_SYS_ALLOC_CARRIER((C)));
4095 	ASSERT((CSZ) % sizeof(Unit_t) == 0);
4096     }
4097 }
4098 
4099 #else
4100 #define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ)
4101 #endif
4102 
4103 static Block_t *
4104 create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
4105 {
4106     Block_t *blk;
4107     Carrier_t *crr;
4108     Uint blk_sz, bcrr_sz, crr_sz;
4109 #if HAVE_ERTS_MSEG
4110     int have_tried_sys_alloc = 0, have_tried_mseg = 0;
4111     Uint mseg_flags;
4112 #endif
4113 #ifdef DEBUG
4114     int is_mseg = 0;
4115 #endif
4116 
4117     if ((ERTS_SUPER_ALIGNED_MSEG_ONLY && (flags & CFLG_MBC))
4118 	|| !allow_sys_alloc_carriers) {
4119 	flags |= CFLG_FORCE_MSEG;
4120 	flags &= ~CFLG_FORCE_SYS_ALLOC;
4121 #if !HAVE_ERTS_MSEG
4122 	return NULL;
4123 #endif
4124     }
4125     flags |= allctr->crr_set_flgs;
4126     flags &= ~allctr->crr_clr_flgs;
4127 
4128     ASSERT((flags & CFLG_SBC && !(flags & CFLG_MBC))
4129 	   || (flags & CFLG_MBC && !(flags & CFLG_SBC)));
4130 
4131     ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC));
4132 
4133     if (umem_sz > (ERTS_UINT_MAX - ERTS_UINT_MAX/100)) {
4134 	/* Do an overly conservative _overflow_ check here so we don't
4135 	 * have to deal with it from here on. I guess we could be more accurate
4136 	 * but I don't think the need to allocate over 99% of the address space
4137 	 * will ever arise on any machine, whether 32 or 64 bit.
4138 	 */
4139 	return NULL;
4140     }
4141 
4142     if (flags & CFLG_MAIN_CARRIER) {
4143         ASSERT(flags & CFLG_MBC);
4144         ASSERT(flags & CFLG_NO_CPOOL);
4145         ASSERT(umem_sz == allctr->main_carrier_size);
4146         ERTS_UNDEF(blk_sz, 0);
4147 
4148         if (allctr->main_carrier_size < allctr->min_mbc_size)
4149             allctr->main_carrier_size = allctr->min_mbc_size;
4150         crr_sz = bcrr_sz = allctr->main_carrier_size;
4151     }
4152     else {
4153         ERTS_UNDEF(bcrr_sz, 0);
4154 	ERTS_UNDEF(crr_sz, 0);
4155         blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
4156     }
4157 
4158     allctr->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON;
4159 
4160     if ((flags & (CFLG_MBC|CFLG_NO_CPOOL)) == CFLG_MBC
4161 	&& ERTS_ALC_IS_CPOOL_ENABLED(allctr)
4162 	&& erts_thr_progress_is_managed_thread()) {
4163 	crr = cpool_fetch(allctr, blk_sz);
4164 	if (crr) {
4165 	    STAT_MBC_CPOOL_FETCH(allctr, crr);
4166             INC_CC(allctr->cpool.stat.fetch);
4167 	    link_carrier(&allctr->mbc_list, crr);
4168 	    (*allctr->add_mbc)(allctr, crr);
4169 	    blk = (*allctr->get_free_block)(allctr, blk_sz, NULL, 0);
4170 	    ASSERT(blk);
4171 	    return blk;
4172 	}
4173     }
4174 
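    /*
     * Carrier memory preferably comes from mseg_alloc; we fall back to
     * sys_alloc when forced to, when a global or per-type mseg carrier
     * limit has been reached, or when mseg_alloc fails -- and back to
     * mseg_alloc again if sys_alloc fails.
     */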
4175 #if HAVE_ERTS_MSEG
4176 
4177     if (flags & CFLG_FORCE_SYS_ALLOC)
4178 	goto try_sys_alloc;
4179     if (flags & CFLG_FORCE_MSEG)
4180 	goto try_mseg;
4181     if (erts_mseg_no(&allctr->mseg_opt) >= max_mseg_carriers)
4182 	goto try_sys_alloc;
4183     if (flags & CFLG_SBC) {
4184 	if (allctr->sbcs.curr.norm.mseg.no >= allctr->max_mseg_sbcs)
4185 	    goto try_sys_alloc;
4186     }
4187 #if !ERTS_SUPER_ALIGNED_MSEG_ONLY
4188     else {
4189 	if (allctr->mbcs.curr.norm.mseg.no >= allctr->max_mseg_mbcs)
4190 	    goto try_sys_alloc;
4191     }
4192 #endif
4193 
4194  try_mseg:
4195 
4196     if (flags & CFLG_SBC) {
4197 	crr_sz = blk_sz + SBC_HEADER_SIZE;
4198 	mseg_flags = ERTS_MSEG_FLG_NONE;
4199     }
4200     else {
4201         if (!(flags & CFLG_MAIN_CARRIER)) {
4202             crr_sz = (*allctr->get_next_mbc_size)(allctr);
4203             if (crr_sz < MBC_HEADER_SIZE(allctr) + blk_sz)
4204                 crr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
4205         }
4206         mseg_flags = ERTS_MSEG_FLG_2POW;
4207     }
4208 
4209     crr = (Carrier_t *) allctr->mseg_alloc(allctr, &crr_sz, mseg_flags);
4210     if (!crr) {
4211 	have_tried_mseg = 1;
4212 	if (!(have_tried_sys_alloc || flags & CFLG_FORCE_MSEG))
4213 	    goto try_sys_alloc;
4214 	return NULL;
4215     }
4216 
4217 #ifdef DEBUG
4218     is_mseg = 1;
4219 #endif
4220     if (flags & CFLG_SBC) {
4221 	SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_SBC, allctr);
4222 	STAT_MSEG_SBC_ALLOC(allctr, crr_sz, blk_sz);
4223 	goto sbc_final_touch;
4224     }
4225     else {
4226 #ifndef ARCH_64
4227 	ASSERT(crr_sz <= MBC_SZ_MAX_LIMIT);
4228 #endif
4229 	SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_MBC, allctr);
4230 	STAT_MSEG_MBC_ALLOC(allctr, crr_sz);
4231 	goto mbc_final_touch;
4232     }
4233 
4234  try_sys_alloc:
4235 
4236 #endif /* #if HAVE_ERTS_MSEG */
4237 
4238     if (flags & CFLG_SBC) {
4239 	bcrr_sz = blk_sz + SBC_HEADER_SIZE;
4240     }
4241     else if (!(flags & CFLG_MAIN_CARRIER)) {
4242 	bcrr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
4243 	if (bcrr_sz < allctr->smallest_mbc_size)
4244             bcrr_sz = allctr->smallest_mbc_size;
4245     }
4246 
4247     crr_sz = (flags & CFLG_FORCE_SIZE
4248 	      ? UNIT_CEILING(bcrr_sz)
4249 	      : SYS_ALLOC_CARRIER_CEILING(bcrr_sz));
4250 
4251     crr = (Carrier_t *) allctr->sys_alloc(allctr, &crr_sz, flags & CFLG_MBC);
4252 
4253     if (!crr) {
4254 	if (crr_sz > UNIT_CEILING(bcrr_sz)) {
4255 	    crr_sz = UNIT_CEILING(bcrr_sz);
4256 	    crr = (Carrier_t *) allctr->sys_alloc(allctr, &crr_sz, flags & CFLG_MBC);
4257 	}
4258 	if (!crr) {
4259 #if HAVE_ERTS_MSEG
4260 	    have_tried_sys_alloc = 1;
4261 	    if (!(have_tried_mseg || flags & CFLG_FORCE_SYS_ALLOC))
4262 		goto try_mseg;
4263 #endif
4264 	    return NULL;
4265 	}
4266     }
4267     if (flags & CFLG_SBC) {
4268 	SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_SBC, allctr);
4269 	STAT_SYS_ALLOC_SBC_ALLOC(allctr, crr_sz, blk_sz);
4270 
4271 #if HAVE_ERTS_MSEG
4272     sbc_final_touch:
4273 #endif
4274 
4275 	blk = SBC2BLK(allctr, crr);
4276 
4277 	SET_SBC_BLK_HDR(blk, blk_sz);
4278 
4279 	link_carrier(&allctr->sbc_list, crr);
4280 
4281 	CHECK_1BLK_CARRIER(allctr, 1, is_mseg, crr, crr_sz, blk, blk_sz);
4282 
4283     }
4284     else {
4285 	SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC, allctr);
4286 	STAT_SYS_ALLOC_MBC_ALLOC(allctr, crr_sz);
4287 
4288 #if HAVE_ERTS_MSEG
4289     mbc_final_touch:
4290 #endif
4291         set_new_allctr_abandon_limit(allctr);
4292 
4293 	blk = MBC_TO_FIRST_BLK(allctr, crr);
4294 
4295 	blk_sz = UNIT_FLOOR(crr_sz - MBC_HEADER_SIZE(allctr));
4296 
4297 	SET_MBC_FBLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_LAST_BLK, crr);
4298 
4299 	if (flags & CFLG_MAIN_CARRIER) {
4300 	    ASSERT(!allctr->main_carrier);
4301 	    allctr->main_carrier = crr;
4302 	}
4303 
4304 	cpool_init_carrier_data(allctr, crr);
4305 
4306 	link_carrier(&allctr->mbc_list, crr);
4307 
4308 	CHECK_1BLK_CARRIER(allctr, 0, is_mseg, crr, crr_sz, blk, blk_sz);
4309 	if (allctr->creating_mbc)
4310 	    (*allctr->creating_mbc)(allctr, crr);
4311 
4312     }
4313 
4314 #ifdef USE_LTTNG_VM_TRACEPOINTS
4315     if (LTTNG_ENABLED(carrier_create)) {
4316         lttng_decl_carrier_stats(mbc_stats);
4317         lttng_decl_carrier_stats(sbc_stats);
4318         LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->mbcs), mbc_stats);
4319         LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->sbcs), sbc_stats);
4320         LTTNG5(carrier_create,
4321                 ERTS_ALC_A2AD(allctr->alloc_no),
4322                 allctr->ix,
4323                 crr_sz,
4324                 mbc_stats,
4325                 sbc_stats);
4326     }
4327 #endif
4328 
4329     DEBUG_SAVE_ALIGNMENT(crr);
4330     return blk;
4331 }
4332 
4333 static Block_t *
4334 resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
4335 {
4336     Block_t *new_blk;
4337     Carrier_t *new_crr, *old_crr;
4338     UWord create_flags;
4339     Uint old_crr_sz, old_blk_sz, new_blk_sz, new_crr_sz;
4340     Uint new_bcrr_sz;
4341 
4342     if (flags & CFLG_MBC) {
4343 	ASSERT(0);
4344 	return NULL;
4345     }
4346 
4347     ASSERT(flags & CFLG_SBC);
4348     create_flags = flags|CFLG_SBC;
4349 
4350     HARD_CHECK_BLK_CARRIER(allctr, old_blk);
4351 
4352     old_blk_sz = SBC_BLK_SZ(old_blk);
4353     old_crr = BLK_TO_SBC(old_blk);
4354     old_crr_sz = CARRIER_SZ(old_crr);
4355     ASSERT(IS_SB_CARRIER(old_crr));
4356     ASSERT(IS_SBC_BLK(old_blk));
4357 
4358     new_blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
4359 
4360 #if HAVE_ERTS_MSEG
4361 
4362     if (IS_MSEG_CARRIER(old_crr)) {
4363 	STAT_MSEG_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
4364 
4365 	if (!(flags & CFLG_FORCE_SYS_ALLOC)) {
4366 
4367 	    new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
4368 	    new_crr_sz = ERTS_SACRR_UNIT_CEILING(new_crr_sz);
4369 	    new_crr = (Carrier_t *) allctr->mseg_realloc(allctr,
4370 						      old_crr,
4371 						      old_crr_sz,
4372 						      &new_crr_sz);
4373 	    if (new_crr) {
4374 		SET_CARRIER_SZ(new_crr, new_crr_sz);
4375 		new_blk = SBC2BLK(allctr, new_crr);
4376 		SET_SBC_BLK_SZ(new_blk, new_blk_sz);
4377 		STAT_MSEG_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz);
4378 		relink_carrier(&allctr->sbc_list, new_crr);
4379 		CHECK_1BLK_CARRIER(allctr, 1, 1, new_crr, new_crr_sz,
4380 				   new_blk, new_blk_sz);
4381 		DEBUG_SAVE_ALIGNMENT(new_crr);
4382 		return new_blk;
4383 	    }
4384 	    create_flags |= CFLG_FORCE_SYS_ALLOC; /* since mseg_realloc()
4385 						     failed */
4386 	}
4387 
4388 	new_blk = create_carrier(allctr, umem_sz, create_flags);
4389 	if (new_blk) {
4390 	    sys_memcpy((void *) BLK2UMEM(new_blk),
4391 		       (void *) BLK2UMEM(old_blk),
4392 		       MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ);
4393 	    unlink_carrier(&allctr->sbc_list, old_crr);
4394             allctr->mseg_dealloc(allctr, old_crr, old_crr_sz, ERTS_MSEG_FLG_NONE);
4395 	}
4396 	else {
4397 	    /* Old carrier unchanged; restore stat */
4398 	    STAT_MSEG_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz);
4399 	}
4400 
4401 	return new_blk;
4402     }
4403     else {
4404 	if (!(flags & CFLG_FORCE_MSEG)) {
4405 #endif /* #if HAVE_ERTS_MSEG */
4406 	    new_bcrr_sz = new_blk_sz + SBC_HEADER_SIZE;
4407 	    new_crr_sz = (flags & CFLG_FORCE_SIZE
4408 			  ? UNIT_CEILING(new_bcrr_sz)
4409 			  : SYS_ALLOC_CARRIER_CEILING(new_bcrr_sz));
4410 
4411 	    new_crr = (Carrier_t *) allctr->sys_realloc(allctr,
4412 						     (void *) old_crr,
4413 						     &new_crr_sz,
4414 						     old_crr_sz,
4415 						     0);
4416 	    if (new_crr) {
4417 	    sys_realloc_success:
4418 		SET_CARRIER_SZ(new_crr, new_crr_sz);
4419 		new_blk = SBC2BLK(allctr, new_crr);
4420 		SET_SBC_BLK_SZ(new_blk, new_blk_sz);
4421 		STAT_SYS_ALLOC_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
4422 		STAT_SYS_ALLOC_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz);
4423 		relink_carrier(&allctr->sbc_list, new_crr);
4424 		CHECK_1BLK_CARRIER(allctr, 1, 0, new_crr, new_crr_sz,
4425 				   new_blk, new_blk_sz);
4426 		DEBUG_SAVE_ALIGNMENT(new_crr);
4427 		return new_blk;
4428 	    }
4429 	    else if (new_crr_sz > UNIT_CEILING(new_bcrr_sz)) {
4430 		new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
4431 		new_crr_sz = UNIT_CEILING(new_crr_sz);
4432 		new_crr = (Carrier_t *) allctr->sys_realloc(allctr,
4433 							 (void *) old_crr,
4434 							 &new_crr_sz,
4435 							 old_crr_sz,
4436 							 0);
4437 		if (new_crr)
4438 		    goto sys_realloc_success;
4439 	    }
4440 
4441 #if !HAVE_ERTS_MSEG
4442 	    return NULL;
4443 #else
4444 	    create_flags |= CFLG_FORCE_MSEG; /* Since sys_realloc() failed */
4445 	}
4446 
4447 	STAT_SYS_ALLOC_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
4448 
4449 	new_blk = create_carrier(allctr, umem_sz, create_flags);
4450 	if (new_blk) {
4451 	    sys_memcpy((void *) BLK2UMEM(new_blk),
4452 		       (void *) BLK2UMEM(old_blk),
4453 		       MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ);
4454 	    unlink_carrier(&allctr->sbc_list, old_crr);
4455 	    allctr->sys_dealloc(allctr, old_crr, CARRIER_SZ(old_crr), 0);
4456 	}
4457 	else {
4458 	    /* Old carrier unchanged; restore... */
4459 	    STAT_SYS_ALLOC_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz);
4460 	}
4461 	return new_blk;
4462     }
4463 #endif
4464 }
4465 
4466 static void
4467 dealloc_carrier(Allctr_t *allctr, Carrier_t *crr, int superaligned)
4468 {
4469 #if HAVE_ERTS_MSEG
4470     if (IS_MSEG_CARRIER(crr))
4471 	allctr->mseg_dealloc(allctr, crr, CARRIER_SZ(crr),
4472 			  (superaligned
4473 			   ? ERTS_MSEG_FLG_2POW
4474 			   : ERTS_MSEG_FLG_NONE));
4475     else
4476 #endif
4477 	allctr->sys_dealloc(allctr, crr, CARRIER_SZ(crr), superaligned);
4478 }
4479 
4480 static void
4481 destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
4482 {
4483     Uint crr_sz;
4484     Carrier_t *crr;
4485 
4486     if (IS_SBC_BLK(blk)) {
4487 	Uint blk_sz = SBC_BLK_SZ(blk);
4488 	crr = BLK_TO_SBC(blk);
4489 	crr_sz = CARRIER_SZ(crr);
4490 
4491 	ASSERT(IS_LAST_BLK(blk));
4492 
4493 	HARD_CHECK_BLK_CARRIER(allctr, blk);
4494 
4495 #if HAVE_ERTS_MSEG
4496 	if (IS_MSEG_CARRIER(crr)) {
4497 	    STAT_MSEG_SBC_FREE(allctr, crr_sz, blk_sz);
4498 	}
4499 	else
4500 #endif
4501 	    STAT_SYS_ALLOC_SBC_FREE(allctr, crr_sz, blk_sz);
4502 
4503 	unlink_carrier(&allctr->sbc_list, crr);
4504 
4505 	dealloc_carrier(allctr, crr, 0);
4506     }
4507     else {
4508 	ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));
4509 	crr = FIRST_BLK_TO_MBC(allctr, blk);
4510 
4511 #ifdef DEBUG
4512 	if (!allctr->stopped) {
4513 	    ASSERT(IS_LAST_BLK(blk));
4514 
4515 #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
4516 	    (*allctr->link_free_block)(allctr, blk);
4517 	    HARD_CHECK_BLK_CARRIER(allctr, blk);
4518 	    (*allctr->unlink_free_block)(allctr, blk);
4519 #endif
4520 	}
4521 #endif
4522 
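	/*
	 * If the carrier was found busy in the pool, clear its BUSY and
	 * IN_POOL flags and take it out of the pool ourselves before
	 * scheduling its deallocation.
	 */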
4523 	if (busy_pcrr_pp && *busy_pcrr_pp) {
4524             erts_aint_t iallctr = erts_atomic_read_nob(&crr->allctr);
4525 	    ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr);
4526             ERTS_ALC_CPOOL_ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
4527                                   == (((erts_aint_t) allctr)
4528                                       | ERTS_CRR_ALCTR_FLG_IN_POOL
4529                                       | ERTS_CRR_ALCTR_FLG_BUSY));
4530             ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
4531 
4532             *busy_pcrr_pp = NULL;
4533 	    erts_atomic_set_nob(&crr->allctr,
4534                                 (iallctr & ~(ERTS_CRR_ALCTR_FLG_IN_POOL |
4535                                              ERTS_CRR_ALCTR_FLG_BUSY)));
4536 	    cpool_delete(allctr, allctr, crr);
4537 	}
4538 	else
4539 	{
4540 	    unlink_carrier(&allctr->mbc_list, crr);
4541             STAT_MBC_FREE(allctr, crr);
4542             if (allctr->remove_mbc)
4543                 allctr->remove_mbc(allctr, crr);
4544 	}
4545 
4546 #ifdef USE_LTTNG_VM_TRACEPOINTS
4547         if (LTTNG_ENABLED(carrier_destroy)) {
4548             lttng_decl_carrier_stats(mbc_stats);
4549             lttng_decl_carrier_stats(sbc_stats);
4550             LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->mbcs), mbc_stats);
4551             LTTNG_CARRIER_STATS_TO_LTTNG_STATS(&(allctr->sbcs), sbc_stats);
4552             LTTNG5(carrier_destroy,
4553                 ERTS_ALC_A2AD(allctr->alloc_no),
4554                 allctr->ix,
4555                 CARRIER_SZ(crr),
4556                 mbc_stats,
4557                 sbc_stats);
4558         }
4559 #endif
4560 
4561 	schedule_dealloc_carrier(allctr, crr);
4562     }
4563 }
4564 
4565 
4566 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
4567  * Info stuff                                                              *
4568 \*                                                                         */
4569 
4570 static struct {
4571     Eterm versions;
4572 
4573     Eterm options;
4574     Eterm e;
4575     Eterm t;
4576     Eterm ramv;
4577     Eterm atags;
4578 #if HAVE_ERTS_MSEG
4579     Eterm asbcst;
4580     Eterm rsbcst;
4581 #endif
4582     Eterm rsbcmt;
4583     Eterm rmbcmt;
4584     Eterm mmbcs;
4585     Eterm msbclt;
4586 #if HAVE_ERTS_MSEG
4587     Eterm mmsbc;
4588     Eterm mmmbc;
4589 #endif
4590     Eterm lmbcs;
4591     Eterm smbcs;
4592     Eterm mbcgs;
4593     Eterm acul;
4594     Eterm acnl;
4595     Eterm acfml;
4596     Eterm cp;
4597 
4598 #if HAVE_ERTS_MSEG
4599     Eterm mmc;
4600 #endif
4601     Eterm ycs;
4602     Eterm sac;
4603 
4604     Eterm fix_types;
4605 
4606     Eterm mbcs;
4607     Eterm mbcs_pool;
4608     Eterm fetch;
4609     Eterm fail_pooled;
4610     Eterm fail_shared;
4611     Eterm fail_pend_dealloc;
4612     Eterm fail;
4613     Eterm skip_size;
4614     Eterm skip_busy;
4615     Eterm skip_not_pooled;
4616     Eterm skip_homecoming;
4617     Eterm skip_race;
4618     Eterm entrance_removed;
4619     Eterm sbcs;
4620 
4621     Eterm sys_alloc_carriers_size;
4622 #if HAVE_ERTS_MSEG
4623     Eterm mseg_alloc_carriers_size;
4624 #endif
4625     Eterm carriers_size;
4626     Eterm sys_alloc_carriers;
4627 #if HAVE_ERTS_MSEG
4628     Eterm mseg_alloc_carriers;
4629 #endif
4630     Eterm carriers;
4631     Eterm blocks_size;
4632     Eterm blocks;
4633 
4634     Eterm foreign_blocks;
4635 
4636     Eterm calls;
4637     Eterm sys_alloc;
4638     Eterm sys_free;
4639     Eterm sys_realloc;
4640 #if HAVE_ERTS_MSEG
4641     Eterm mseg_alloc;
4642     Eterm mseg_dealloc;
4643     Eterm mseg_realloc;
4644 #endif
4645 
4646     Eterm At_sign;
4647 
4648 #ifdef DEBUG
4649     Eterm end_of_atoms;
4650 #endif
4651 } am;
4652 
4653 static char *allocator_char_str[ERTS_ALC_A_MAX + 1];
4654 static Eterm allocator_char_atom[ERTS_ALC_A_MAX + 1];
4655 static Eterm alloc_type_atoms[ERTS_ALC_N_MAX + 1];
4656 
4657 static ERTS_INLINE void atom_init(Eterm *atom, char *name)
4658 {
4659     *atom = am_atom_put(name, sys_strlen(name));
4660 }
4661 #define AM_INIT(AM) atom_init(&am.AM, #AM)
4662 
4663 static erts_mtx_t init_atoms_mtx;
4664 
4665 static void
4666 init_atoms(Allctr_t *allctr)
4667 {
4668     erts_mtx_lock(&init_atoms_mtx);
4669 
4670     if (!atoms_initialized) {
4671 	int ix;
4672 #ifdef DEBUG
4673 	Eterm *atom;
4674 
4675 	for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
4676 	    *atom = THE_NON_VALUE;
4677 	}
4678 #endif
4679 
4680 	AM_INIT(versions);
4681 
4682 	AM_INIT(options);
4683 	AM_INIT(e);
4684 	AM_INIT(t);
4685 	AM_INIT(ramv);
4686 	AM_INIT(atags);
4687 #if HAVE_ERTS_MSEG
4688 	AM_INIT(asbcst);
4689 	AM_INIT(rsbcst);
4690 #endif
4691 	AM_INIT(rsbcmt);
4692 	AM_INIT(rmbcmt);
4693 	AM_INIT(mmbcs);
4694 	AM_INIT(msbclt);
4695 #if HAVE_ERTS_MSEG
4696 	AM_INIT(mmsbc);
4697 	AM_INIT(mmmbc);
4698 #endif
4699 	AM_INIT(lmbcs);
4700 	AM_INIT(smbcs);
4701 	AM_INIT(mbcgs);
4702 	AM_INIT(acul);
4703         AM_INIT(acnl);
4704         AM_INIT(acfml);
4705         AM_INIT(cp);
4706 
4707 #if HAVE_ERTS_MSEG
4708 	AM_INIT(mmc);
4709 #endif
4710 	AM_INIT(ycs);
4711 	AM_INIT(sac);
4712 
4713 	AM_INIT(fix_types);
4714 
4715 	AM_INIT(mbcs);
4716 	AM_INIT(mbcs_pool);
4717 	AM_INIT(fetch);
4718         AM_INIT(fail_pooled);
4719         AM_INIT(fail_shared);
4720         AM_INIT(fail_pend_dealloc);
4721         AM_INIT(fail);
4722         AM_INIT(skip_size);
4723         AM_INIT(skip_busy);
4724         AM_INIT(skip_not_pooled);
4725         AM_INIT(skip_homecoming);
4726         AM_INIT(skip_race);
4727         AM_INIT(entrance_removed);
4728 	AM_INIT(sbcs);
4729 
4730 	AM_INIT(sys_alloc_carriers_size);
4731 #if HAVE_ERTS_MSEG
4732 	AM_INIT(mseg_alloc_carriers_size);
4733 #endif
4734 	AM_INIT(carriers_size);
4735 	AM_INIT(sys_alloc_carriers);
4736 #if HAVE_ERTS_MSEG
4737 	AM_INIT(mseg_alloc_carriers);
4738 #endif
4739 	AM_INIT(carriers);
4740 	AM_INIT(blocks_size);
4741 	AM_INIT(blocks);
4742 	AM_INIT(foreign_blocks);
4743 
4744 	AM_INIT(calls);
4745 	AM_INIT(sys_alloc);
4746 	AM_INIT(sys_free);
4747 	AM_INIT(sys_realloc);
4748 #if HAVE_ERTS_MSEG
4749 	AM_INIT(mseg_alloc);
4750 	AM_INIT(mseg_dealloc);
4751 	AM_INIT(mseg_realloc);
4752 #endif
4753 
4754         am.At_sign = am_atom_put("@", 1);
4755 
4756 #ifdef DEBUG
4757 	for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
4758 	    ASSERT(*atom != THE_NON_VALUE);
4759 	}
4760 #endif
4761 
4762         for (ix = ERTS_ALC_A_MIN; ix <= ERTS_ALC_A_MAX; ix++) {
4763             char *cp_str = allocator_char_str[ix];
4764             Eterm cp_atom = am_atom_put(cp_str, sys_strlen(cp_str));
4765             allocator_char_atom[ix] = cp_atom;
4766         }
4767 
4768         for (ix = ERTS_ALC_N_MIN; ix <= ERTS_ALC_N_MAX; ix++) {
4769             const char *name = ERTS_ALC_N2TD(ix);
4770             size_t len = sys_strlen(name);
4771 
4772             alloc_type_atoms[ix] = am_atom_put(name, len);
4773         }
4774     }
4775 
4776     if (allctr && !allctr->atoms_initialized) {
4777 
4778 	make_name_atoms(allctr);
4779 
4780 	(*allctr->init_atoms)();
4781 
4782     	allctr->atoms_initialized = 1;
4783     }
4784 
4785     atoms_initialized = 1;
4786     erts_mtx_unlock(&init_atoms_mtx);
4787 
4788 }
4789 
4790 static ERTS_INLINE void
4791 ensure_atoms_initialized(Allctr_t *allctr)
4792 {
4793     if (!allctr || !allctr->atoms_initialized)
4794 	init_atoms(allctr);
4795 }
4796 
4797 #define bld_uint	erts_bld_uint
4798 #define bld_cons	erts_bld_cons
4799 #define bld_tuple	erts_bld_tuple
4800 #define bld_string	erts_bld_string
4801 
4802 /*
4803  * bld_unstable_uint() (instead of bld_uint()) is used when values may
4804  * change between the size check and the actual build. This is because
4805  * a value that would fit a small when the size check is done may need
4806  * to be built as a big when the actual build is performed. The caller
4807  * is required to HRelease after the build.
4808  *
4809  * Note, bld_unstable_uint() should have been called bld_unstable_uword()
4810  * but we do not want to rename it...
4811  */
4812 static ERTS_INLINE Eterm
4813 bld_unstable_uint(Uint **hpp, Uint *szp, UWord ui)
4814 {
4815     Eterm res = THE_NON_VALUE;
4816     if (szp)
4817 	*szp += BIG_UWORD_HEAP_SIZE(~((UWord) 0));
4818     if (hpp) {
4819 	if (IS_USMALL(0, ui))
4820 	    res = make_small(ui);
4821 	else {
4822 	    res = uword_to_big(ui, *hpp);
4823 	    *hpp += BIG_UWORD_HEAP_SIZE(ui);
4824 	}
4825     }
4826     return res;
4827 }
4828 
4829 static ERTS_INLINE void
4830 add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
4831 {
4832     *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 2, el1, el2), *lp);
4833 }
4834 
4835 static ERTS_INLINE void
4836 add_3tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2, Eterm el3)
4837 {
4838     *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 3, el1, el2, el3), *lp);
4839 }
4840 
4841 static ERTS_INLINE void
4842 add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
4843 	 Eterm el1, Eterm el2, Eterm el3, Eterm el4)
4844 {
4845     *lp =
4846 	bld_cons(hpp, szp, bld_tuple(hpp, szp, 4, el1, el2, el3, el4), *lp);
4847 }
4848 
4849 static ERTS_INLINE void
4850 add_fix_types(Allctr_t *allctr, int internal, Uint **hpp, Uint *szp,
4851 	      Eterm *lp, Eterm fix)
4852 {
4853     if (allctr->fix) {
4854 	if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
4855 	    add_2tup(hpp, szp, lp, am.fix_types, fix);
4856 	else if (internal)
4857 	    add_3tup(hpp, szp, lp,
4858 		     am.fix_types,
4859 		     erts_bld_uword(hpp, szp, ~((UWord) 0)),
4860 		     fix);
4861     }
4862 }
4863 
4864 static Eterm
4865 sz_info_fix(Allctr_t *allctr,
4866 	    int internal,
4867 	    fmtfn_t *print_to_p,
4868 	    void *print_to_arg,
4869 	    Uint **hpp,
4870 	    Uint *szp)
4871 {
4872     Eterm res;
4873     int ix;
4874 
4875     ASSERT(allctr->fix);
4876 
4877     res = NIL;
4878 
4879     if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
4880 
4881 	if (internal) {
4882 	    for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) {
4883 		ErtsAlcFixList_t *fix = &allctr->fix[ix];
4884 		UWord alloced = fix->type_size * fix->u.cpool.allocated;
4885 		UWord used = fix->type_size * fix->u.cpool.used;
4886 
4887 		if (print_to_p) {
4888 		    fmtfn_t to = *print_to_p;
4889 		    void *arg = print_to_arg;
4890 		    erts_print(to,
4891 			       arg,
4892 			       "fix type internal: %s %bpu %bpu\n",
4893 			       (char *) ERTS_ALC_T2TD(fix->type),
4894 			       alloced,
4895 			       used);
4896 		}
4897 
4898 		if (hpp || szp) {
4899 		    add_3tup(hpp, szp, &res,
4900 			     alloc_type_atoms[ERTS_ALC_T2N(fix->type)],
4901 			     bld_unstable_uint(hpp, szp, alloced),
4902 			     bld_unstable_uint(hpp, szp, used));
4903 		}
4904 	    }
4905 	}
4906     }
4907     else {
4908 
4909 	for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) {
4910 	    ErtsAlcFixList_t *fix = &allctr->fix[ix];
4911 	    UWord alloced = fix->type_size * fix->u.nocpool.allocated;
4912 	    UWord used = fix->type_size*fix->u.nocpool.used;
4913 
4914 	    if (print_to_p) {
4915 		fmtfn_t to = *print_to_p;
4916 		void *arg = print_to_arg;
4917 		erts_print(to,
4918 			   arg,
4919 			   "fix type: %s %bpu %bpu\n",
4920 			   (char *) ERTS_ALC_T2TD(fix->type),
4921 			   alloced,
4922 			   used);
4923 	    }
4924 
4925 	    if (hpp || szp) {
4926 		add_3tup(hpp, szp, &res,
4927 			 alloc_type_atoms[ERTS_ALC_T2N(fix->type)],
4928 			 bld_unstable_uint(hpp, szp, alloced),
4929 			 bld_unstable_uint(hpp, szp, used));
4930 	    }
4931 	}
4932     }
4933     return res;
4934 }
4935 
4936 static Eterm
4937 sz_info_carriers(Allctr_t *allctr,
4938 		 CarriersStats_t *cs,
4939 		 char *prefix,
4940 		 fmtfn_t *print_to_p,
4941 		 void *print_to_arg,
4942 		 Uint **hpp,
4943 		 Uint *szp)
4944 {
4945     Eterm res = THE_NON_VALUE;
4946     UWord curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
4947 
4948     if (print_to_p) {
4949 	fmtfn_t to = *print_to_p;
4950 	void *arg = print_to_arg;
4951 	erts_print(to,
4952 		   arg,
4953 		   "%sblocks size: %bpu %bpu %bpu\n",
4954 		   prefix,
4955 		   cs->blocks.curr.size,
4956 		   cs->blocks.max.size,
4957 		   cs->blocks.max_ever.size);
4958 	erts_print(to,
4959 		   arg,
4960 		   "%scarriers size: %bpu %bpu %bpu\n",
4961 		   prefix,
4962 		   curr_size,
4963 		   cs->max.size,
4964 		   cs->max_ever.size);
4965     }
4966 
4967     if (hpp || szp) {
4968 	res = NIL;
4969 	add_4tup(hpp, szp, &res,
4970 		 am.carriers_size,
4971 		 bld_unstable_uint(hpp, szp, curr_size),
4972 		 bld_unstable_uint(hpp, szp, cs->max.size),
4973 		 bld_unstable_uint(hpp, szp, cs->max_ever.size));
4974 	add_4tup(hpp, szp, &res,
4975 		 am.blocks_size,
4976 		 bld_unstable_uint(hpp, szp, cs->blocks.curr.size),
4977 		 bld_unstable_uint(hpp, szp, cs->blocks.max.size),
4978 		 bld_unstable_uint(hpp, szp, cs->blocks.max_ever.size));
4979     }
4980 
4981     return res;
4982 }
4983 
4984 
4985 static Eterm
info_cpool(Allctr_t * allctr,int sz_only,char * prefix,fmtfn_t * print_to_p,void * print_to_arg,Uint ** hpp,Uint * szp)4986 info_cpool(Allctr_t *allctr,
4987 	   int sz_only,
4988 	   char *prefix,
4989 	   fmtfn_t *print_to_p,
4990 	   void *print_to_arg,
4991 	   Uint **hpp,
4992 	   Uint *szp)
4993 {
4994     Eterm res = THE_NON_VALUE;
4995     UWord noc, csz, nob, bsz;
4996 
4997     noc = csz = nob = bsz = ~0;
4998     if (print_to_p || hpp) {
4999 	if (sz_only)
5000 	    cpool_read_stat(allctr, allctr->alloc_no, NULL, &csz, NULL, &bsz);
5001 	else
5002 	    cpool_read_stat(allctr, allctr->alloc_no, &noc, &csz, &nob, &bsz);
5003     }
5004 
5005     if (print_to_p) {
5006 	fmtfn_t to = *print_to_p;
5007 	void *arg = print_to_arg;
5008 	if (!sz_only)
5009 	    erts_print(to, arg, "%sblocks: %bpu\n", prefix, nob);
5010 	erts_print(to, arg, "%sblocks size: %bpu\n", prefix, bsz);
5011 	if (!sz_only)
5012 	    erts_print(to, arg, "%scarriers: %bpu\n", prefix, noc);
5013 	erts_print(to, arg, "%scarriers size: %bpu\n", prefix, csz);
5014     }
5015 
5016     if (hpp || szp) {
5017         Eterm foreign_blocks;
5018         int i;
5019 
5020         foreign_blocks = NIL;
5021 	res = NIL;
5022 
5023       if (!sz_only) {
5024         add_3tup(hpp, szp, &res, am.fail_pooled,
5025                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_pooled)),
5026                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_pooled)));
5027 
5028         add_3tup(hpp, szp, &res, am.fail_shared,
5029                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_shared)),
5030                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_shared)));
5031 
5032         add_3tup(hpp, szp, &res, am.fail_pend_dealloc,
5033                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_pend_dealloc)),
5034                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_pend_dealloc)));
5035 
5036         add_3tup(hpp, szp, &res, am.fail,
5037                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail)),
5038                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail)));
5039 
5040         add_3tup(hpp, szp, &res, am.fetch,
5041                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fetch)),
5042                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fetch)));
5043 
5044         add_3tup(hpp, szp, &res, am.skip_size,
5045                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_size)),
5046                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_size)));
5047 
5048         add_3tup(hpp, szp, &res, am.skip_busy,
5049                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_busy)),
5050                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_busy)));
5051 
5052         add_3tup(hpp, szp, &res, am.skip_not_pooled,
5053                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_not_pooled)),
5054                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_not_pooled)));
5055 
5056         add_3tup(hpp, szp, &res, am.skip_homecoming,
5057                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_homecoming)),
5058                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_homecoming)));
5059 
5060         add_3tup(hpp, szp, &res, am.skip_race,
5061                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_race)),
5062                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_race)));
5063 
5064         add_3tup(hpp, szp, &res, am.entrance_removed,
5065                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.entrance_removed)),
5066                  bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.entrance_removed)));
5067        }
5068 
5069 	add_2tup(hpp, szp, &res,
5070 		 am.carriers_size,
5071 		 bld_unstable_uint(hpp, szp, csz));
5072 
5073         if (!sz_only) {
5074             add_2tup(hpp, szp, &res,
5075                      am.carriers,
5076                      bld_unstable_uint(hpp, szp, noc));
5077         }
5078 
5079 	add_2tup(hpp, szp, &res,
5080 		 am.blocks_size,
5081 		 bld_unstable_uint(hpp, szp, bsz));
5082 
5083 	if (!sz_only) {
5084 	    add_2tup(hpp, szp, &res,
5085 		     am.blocks,
5086 		     bld_unstable_uint(hpp, szp, nob));
5087         }
5088 
5089         for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
5090             const char *name_str;
5091             Eterm name, info;
5092 
5093             if (i == allctr->alloc_no) {
5094                 continue;
5095             }
5096 
5097             cpool_read_stat(allctr, i, NULL, NULL, &nob, &bsz);
5098 
5099             if (bsz == 0 && (nob == 0 || sz_only)) {
5100                 continue;
5101             }
5102 
5103             name_str = ERTS_ALC_A2AD(i);
5104             info = NIL;
5105 
5106             add_2tup(hpp, szp, &info,
5107                      am.blocks_size,
5108                      bld_unstable_uint(hpp, szp, bsz));
5109 
5110             if (!sz_only) {
5111                 add_2tup(hpp, szp, &info,
5112                      am.blocks,
5113                      bld_unstable_uint(hpp, szp, nob));
5114             }
5115 
5116             name = am_atom_put(name_str, sys_strlen(name_str));
5117 
5118             add_2tup(hpp, szp, &foreign_blocks, name, info);
5119         }
5120 
5121         add_2tup(hpp, szp, &res, am.foreign_blocks, foreign_blocks);
5122     }
5123 
5124     return res;
5125 }
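
/*
 * Note (editorial): the event counters above are reported as "giga pairs"
 * to stay within word range: a counter CC is emitted as two unsigned
 * values, ERTS_ALC_CC_GIGA_VAL(CC) and ERTS_ALC_CC_VAL(CC). For example,
 * a fetch count of 3500000123 is reported as the pair {3, 500000123}.
 */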


static Eterm
info_carriers(Allctr_t *allctr,
              CarriersStats_t *cs,
              char *prefix,
              fmtfn_t *print_to_p,
              void *print_to_arg,
              Uint **hpp,
              Uint *szp)
{
    Eterm res = THE_NON_VALUE;
    UWord curr_no, curr_size;

    curr_no = cs->curr.norm.mseg.no + cs->curr.norm.sys_alloc.no;
    curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;

    if (print_to_p) {
        fmtfn_t to = *print_to_p;
        void *arg = print_to_arg;
        erts_print(to,
                   arg,
                   "%sblocks: %bpu %bpu %bpu\n",
                   prefix,
                   cs->blocks.curr.no,
                   cs->blocks.max.no,
                   cs->blocks.max_ever.no);
        erts_print(to,
                   arg,
                   "%sblocks size: %bpu %bpu %bpu\n",
                   prefix,
                   cs->blocks.curr.size,
                   cs->blocks.max.size,
                   cs->blocks.max_ever.size);
        erts_print(to,
                   arg,
                   "%scarriers: %bpu %bpu %bpu\n",
                   prefix,
                   curr_no,
                   cs->max.no,
                   cs->max_ever.no);
#if HAVE_ERTS_MSEG
        erts_print(to,
                   arg,
                   "%smseg carriers: %bpu\n",
                   prefix,
                   cs->curr.norm.mseg.no);
#endif
        erts_print(to,
                   arg,
                   "%ssys_alloc carriers: %bpu\n",
                   prefix,
                   cs->curr.norm.sys_alloc.no);
        erts_print(to,
                   arg,
                   "%scarriers size: %bpu %bpu %bpu\n",
                   prefix,
                   curr_size,
                   cs->max.size,
                   cs->max_ever.size);
#if HAVE_ERTS_MSEG
        erts_print(to,
                   arg,
                   "%smseg carriers size: %bpu\n",
                   prefix,
                   cs->curr.norm.mseg.size);
#endif
        erts_print(to,
                   arg,
                   "%ssys_alloc carriers size: %bpu\n",
                   prefix,
                   cs->curr.norm.sys_alloc.size);
    }

    if (hpp || szp) {
        res = NIL;
        add_2tup(hpp, szp, &res,
                 am.sys_alloc_carriers_size,
                 bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.size));
#if HAVE_ERTS_MSEG
        add_2tup(hpp, szp, &res,
                 am.mseg_alloc_carriers_size,
                 bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.size));
#endif
        add_4tup(hpp, szp, &res,
                 am.carriers_size,
                 bld_unstable_uint(hpp, szp, curr_size),
                 bld_unstable_uint(hpp, szp, cs->max.size),
                 bld_unstable_uint(hpp, szp, cs->max_ever.size));
        add_2tup(hpp, szp, &res,
                 am.sys_alloc_carriers,
                 bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.no));
#if HAVE_ERTS_MSEG
        add_2tup(hpp, szp, &res,
                 am.mseg_alloc_carriers,
                 bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.no));
#endif
        add_4tup(hpp, szp, &res,
                 am.carriers,
                 bld_unstable_uint(hpp, szp, curr_no),
                 bld_unstable_uint(hpp, szp, cs->max.no),
                 bld_unstable_uint(hpp, szp, cs->max_ever.no));
        add_4tup(hpp, szp, &res,
                 am.blocks_size,
                 bld_unstable_uint(hpp, szp, cs->blocks.curr.size),
                 bld_unstable_uint(hpp, szp, cs->blocks.max.size),
                 bld_unstable_uint(hpp, szp, cs->blocks.max_ever.size));
        add_4tup(hpp, szp, &res,
                 am.blocks,
                 bld_unstable_uint(hpp, szp, cs->blocks.curr.no),
                 bld_unstable_uint(hpp, szp, cs->blocks.max.no),
                 bld_unstable_uint(hpp, szp, cs->blocks.max_ever.no));
    }

    return res;
}

static void
make_name_atoms(Allctr_t *allctr)
{
    char alloc[] = "alloc";
    char realloc[] = "realloc";
    char free[] = "free";
    char buf[MAX_ATOM_CHARACTERS];
    size_t prefix_len = sys_strlen(allctr->name_prefix);

    /* "realloc" is the longest suffix appended below; the prefix plus
     * that suffix must fit within buf. */
    if (prefix_len > MAX_ATOM_CHARACTERS - (sizeof(realloc) - 1))
        erts_exit(ERTS_ERROR_EXIT, "Too long allocator name: %salloc\n",
                  allctr->name_prefix);

    sys_memcpy((void *) buf, (void *) allctr->name_prefix, prefix_len);

    sys_memcpy((void *) &buf[prefix_len], (void *) alloc, sizeof(alloc) - 1);
    allctr->name.alloc = am_atom_put(buf, prefix_len + sizeof(alloc) - 1);

    sys_memcpy((void *) &buf[prefix_len], (void *) realloc, sizeof(realloc) - 1);
    allctr->name.realloc = am_atom_put(buf, prefix_len + sizeof(realloc) - 1);

    sys_memcpy((void *) &buf[prefix_len], (void *) free, sizeof(free) - 1);
    allctr->name.free = am_atom_put(buf, prefix_len + sizeof(free) - 1);
}
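
/*
 * Example (illustrative): with name_prefix "sl_", the atoms created above
 * would be 'sl_alloc', 'sl_realloc' and 'sl_free'. They are later used by
 * info_calls() to tag this allocator's own call counters.
 */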

static Eterm
info_calls(Allctr_t *allctr,
           fmtfn_t *print_to_p,
           void *print_to_arg,
           Uint **hpp,
           Uint *szp)
{
    Eterm res = THE_NON_VALUE;

    if (print_to_p) {

#define PRINT_CC_4(TO, TOA, NAME, CC)					\
        erts_print(TO, TOA, "%s calls: %b64u\n", NAME, CC)

#define PRINT_CC_5(TO, TOA, PRFX, NAME, CC)				\
        erts_print(TO, TOA, "%s%s calls: %b64u\n", PRFX, NAME, CC)

        char *prefix = allctr->name_prefix;
        fmtfn_t to = *print_to_p;
        void *arg = print_to_arg;

        PRINT_CC_5(to, arg, prefix, "alloc",        allctr->calls.this_alloc);
        PRINT_CC_5(to, arg, prefix, "free",         allctr->calls.this_free);
        PRINT_CC_5(to, arg, prefix, "realloc",      allctr->calls.this_realloc);

#if HAVE_ERTS_MSEG
        PRINT_CC_4(to, arg,         "mseg_alloc",   allctr->calls.mseg_alloc);
        PRINT_CC_4(to, arg,         "mseg_dealloc", allctr->calls.mseg_dealloc);
        PRINT_CC_4(to, arg,         "mseg_realloc", allctr->calls.mseg_realloc);
#endif

        PRINT_CC_4(to, arg,         "sys_alloc",    allctr->calls.sys_alloc);
        PRINT_CC_4(to, arg,         "sys_free",     allctr->calls.sys_free);
        PRINT_CC_4(to, arg,         "sys_realloc",  allctr->calls.sys_realloc);

#undef PRINT_CC_4
#undef PRINT_CC_5
    }

    if (hpp || szp) {

        ASSERT(allctr->name.alloc   != THE_NON_VALUE);
        ASSERT(allctr->name.realloc != THE_NON_VALUE);
        ASSERT(allctr->name.free    != THE_NON_VALUE);

        res = NIL;

        add_3tup(hpp, szp, &res,
                 am.sys_realloc,
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_realloc)),
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_realloc)));
        add_3tup(hpp, szp, &res,
                 am.sys_free,
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_free)),
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_free)));
        add_3tup(hpp, szp, &res,
                 am.sys_alloc,
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.sys_alloc)),
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.sys_alloc)));
#if HAVE_ERTS_MSEG
        add_3tup(hpp, szp, &res,
                 am.mseg_realloc,
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_realloc)),
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_realloc)));
        add_3tup(hpp, szp, &res,
                 am.mseg_dealloc,
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_dealloc)),
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_dealloc)));
        add_3tup(hpp, szp, &res,
                 am.mseg_alloc,
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.mseg_alloc)),
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.mseg_alloc)));
#endif
        add_3tup(hpp, szp, &res,
                 allctr->name.realloc,
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_realloc)),
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_realloc)));
        add_3tup(hpp, szp, &res,
                 allctr->name.free,
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_free)),
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_free)));
        add_3tup(hpp, szp, &res,
                 allctr->name.alloc,
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->calls.this_alloc)),
                 bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->calls.this_alloc)));
    }

    return res;
}

static Eterm
info_options(Allctr_t *allctr,
             fmtfn_t *print_to_p,
             void *print_to_arg,
             Uint **hpp,
             Uint *szp)
{
    Eterm res = THE_NON_VALUE;
    UWord acul, acnl, acfml;
    char *cp_str;
    Eterm cp_atom;

    if (!allctr) {
        if (print_to_p)
            erts_print(*print_to_p, print_to_arg, "option e: false\n");
        if (hpp || szp) {
            res = NIL;
            add_2tup(hpp, szp, &res, am.e, am_false);
        }
        return res;
    }

    acul = allctr->cpool.util_limit;
    acnl = allctr->cpool.in_pool_limit;
    acfml = allctr->cpool.fblk_min_limit;
    ASSERT(allctr->cpool.carrier_pool <= ERTS_ALC_A_MAX);
    if (allctr->cpool.carrier_pool < ERTS_ALC_A_MIN) {
        cp_str = "undefined";
        cp_atom = am_undefined;
    }
    else if (allctr->cpool.carrier_pool == ERTS_ALC_COMMON_CPOOL_IX) {
        cp_str = "@";
        cp_atom = am.At_sign;
    }
    else {
        cp_str = allocator_char_str[allctr->cpool.carrier_pool];
        cp_atom = allocator_char_atom[allctr->cpool.carrier_pool];
    }

    if (print_to_p) {
        char topt[21]; /* Enough for any 64-bit integer */
        if (allctr->t)
            erts_snprintf(&topt[0], sizeof(topt), "%d", allctr->t);
        else
            erts_snprintf(&topt[0], sizeof(topt), "false");
        /*
         * Do not use '%T' in the format string here. You'll
         * likely get into lock order violations...
         */
        erts_print(*print_to_p,
                   print_to_arg,
                   "option e: true\n"
                   "option t: %s\n"
                   "option ramv: %s\n"
                   "option atags: %s\n"
                   "option sbct: %beu\n"
#if HAVE_ERTS_MSEG
                   "option asbcst: %bpu\n"
                   "option rsbcst: %bpu\n"
#endif
                   "option rsbcmt: %beu\n"
                   "option rmbcmt: %beu\n"
                   "option mmbcs: %beu\n"
#if HAVE_ERTS_MSEG
                   "option mmsbc: %beu\n"
                   "option mmmbc: %beu\n"
#endif
                   "option lmbcs: %beu\n"
                   "option smbcs: %beu\n"
                   "option mbcgs: %beu\n"
                   "option acul: %bpu\n"
                   "option acnl: %bpu\n"
                   "option acfml: %bpu\n"
                   "option cp: %s\n",
                   topt,
                   allctr->ramv ? "true" : "false",
                   allctr->atags ? "true" : "false",
                   allctr->sbc_threshold,
#if HAVE_ERTS_MSEG
                   allctr->mseg_opt.abs_shrink_th,
                   allctr->mseg_opt.rel_shrink_th,
#endif
                   allctr->sbc_move_threshold,
                   allctr->mbc_move_threshold,
                   allctr->main_carrier_size,
#if HAVE_ERTS_MSEG
                   allctr->max_mseg_sbcs,
                   allctr->max_mseg_mbcs,
#endif
                   allctr->largest_mbc_size,
                   allctr->smallest_mbc_size,
                   allctr->mbc_growth_stages,
                   acul,
                   acnl,
                   acfml,
                   cp_str);
    }

    res = (*allctr->info_options)(allctr, "option ", print_to_p, print_to_arg,
                                  hpp, szp);

    if (hpp || szp) {
        add_2tup(hpp, szp, &res, am.cp, cp_atom);
        add_2tup(hpp, szp, &res,
                 am.acfml,
                 bld_uint(hpp, szp, acfml));
        add_2tup(hpp, szp, &res,
                 am.acnl,
                 bld_uint(hpp, szp, acnl));
        add_2tup(hpp, szp, &res,
                 am.acul,
                 bld_uint(hpp, szp, acul));
        add_2tup(hpp, szp, &res,
                 am.mbcgs,
                 bld_uint(hpp, szp, allctr->mbc_growth_stages));
        add_2tup(hpp, szp, &res,
                 am.smbcs,
                 bld_uint(hpp, szp, allctr->smallest_mbc_size));
        add_2tup(hpp, szp, &res,
                 am.lmbcs,
                 bld_uint(hpp, szp, allctr->largest_mbc_size));
#if HAVE_ERTS_MSEG
        add_2tup(hpp, szp, &res,
                 am.mmsbc,
                 bld_uint(hpp, szp, allctr->max_mseg_sbcs));
        add_2tup(hpp, szp, &res,
                 am.mmmbc,
                 bld_uint(hpp, szp, allctr->max_mseg_mbcs));
#endif
        add_2tup(hpp, szp, &res,
                 am.mmbcs,
                 bld_uint(hpp, szp, allctr->main_carrier_size));
        add_2tup(hpp, szp, &res,
                 am.rmbcmt,
                 bld_uint(hpp, szp, allctr->mbc_move_threshold));
        add_2tup(hpp, szp, &res,
                 am.rsbcmt,
                 bld_uint(hpp, szp, allctr->sbc_move_threshold));
#if HAVE_ERTS_MSEG
        add_2tup(hpp, szp, &res,
                 am.rsbcst,
                 bld_uint(hpp, szp, allctr->mseg_opt.rel_shrink_th));
        add_2tup(hpp, szp, &res,
                 am.asbcst,
                 bld_uint(hpp, szp, allctr->mseg_opt.abs_shrink_th));
#endif
        add_2tup(hpp, szp, &res,
                 am_sbct,
                 bld_uint(hpp, szp, allctr->sbc_threshold));
        add_2tup(hpp, szp, &res, am.ramv, allctr->ramv ? am_true : am_false);
        add_2tup(hpp, szp, &res, am.atags, allctr->atags ? am_true : am_false);
        add_2tup(hpp, szp, &res, am.t, (allctr->t ? am_true : am_false));
        add_2tup(hpp, szp, &res, am.e, am_true);
    }

    return res;
}


static ERTS_INLINE void
update_max_ever_values(CarriersStats_t *cs)
{
    if (cs->max_ever.no < cs->max.no)
        cs->max_ever.no = cs->max.no;
    if (cs->max_ever.size < cs->max.size)
        cs->max_ever.size = cs->max.size;
    if (cs->blocks.max_ever.no < cs->blocks.max.no)
        cs->blocks.max_ever.no = cs->blocks.max.no;
    if (cs->blocks.max_ever.size < cs->blocks.max.size)
        cs->blocks.max_ever.size = cs->blocks.max.size;
}

static ERTS_INLINE void
reset_max_values(CarriersStats_t *cs)
{
    cs->max.no = cs->curr.norm.mseg.no + cs->curr.norm.sys_alloc.no;
    cs->max.size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
    cs->blocks.max.no = cs->blocks.curr.no;
    cs->blocks.max.size = cs->blocks.curr.size;
}

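/*
 * Note (editorial): "max" values track the peak within the current max
 * period, while "max_ever" values are lifetime peaks. Before reporting,
 * update_max_ever_values() folds the period peaks into the lifetime ones;
 * when a caller requests begin_max_period, reset_max_values() restarts
 * the period from the current footprint.
 */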

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 * Exported functions                                                      *
\*                                                                         */

Eterm
erts_alcu_au_info_options(fmtfn_t *print_to_p, void *print_to_arg,
                          Uint **hpp, Uint *szp)
{
    Eterm res = THE_NON_VALUE;

    if (print_to_p) {

        erts_print(*print_to_p,
                   print_to_arg,
#if HAVE_ERTS_MSEG
                   "option mmc: %beu\n"
#endif
                   "option ycs: %beu\n"
                   "option sac: %s\n",
#if HAVE_ERTS_MSEG
                   max_mseg_carriers,
#endif
                   sys_alloc_carrier_size,
                   allow_sys_alloc_carriers ? "true" : "false");
    }

    if (hpp || szp) {
        res = NIL;
        ensure_atoms_initialized(NULL);
        add_2tup(hpp, szp, &res,
                 am.sac,
                 allow_sys_alloc_carriers ? am_true : am_false);
        add_2tup(hpp, szp, &res,
                 am.ycs,
                 bld_uint(hpp, szp, sys_alloc_carrier_size));
#if HAVE_ERTS_MSEG
        add_2tup(hpp, szp, &res,
                 am.mmc,
                 bld_uint(hpp, szp, max_mseg_carriers));
#endif
    }

    return res;
}


Eterm
erts_alcu_info_options(Allctr_t *allctr,
                       fmtfn_t *print_to_p,
                       void *print_to_arg,
                       Uint **hpp,
                       Uint *szp)
{
    Eterm res;

    if (hpp || szp)
        ensure_atoms_initialized(allctr);

    if (allctr->thread_safe) {
        erts_allctr_wrapper_pre_lock();
        erts_mtx_lock(&allctr->mutex);
    }
    res = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
    if (allctr->thread_safe) {
        erts_mtx_unlock(&allctr->mutex);
        erts_allctr_wrapper_pre_unlock();
    }
    return res;
}

/* ----------------------------------------------------------------------- */

Eterm
erts_alcu_sz_info(Allctr_t *allctr,
                  int internal,
                  int begin_max_period,
                  fmtfn_t *print_to_p,
                  void *print_to_arg,
                  Uint **hpp,
                  Uint *szp)
{
    Eterm res, mbcs, sbcs, fix = THE_NON_VALUE;
    Eterm mbcs_pool;

    res = THE_NON_VALUE;

    if (!allctr) {
        if (print_to_p)
            erts_print(*print_to_p, print_to_arg, "false\n");
        if (szp)
            *szp = 0;
        return am_false;
    }

    if (hpp || szp)
        ensure_atoms_initialized(allctr);

    if (allctr->thread_safe) {
        erts_allctr_wrapper_pre_lock();
        erts_mtx_lock(&allctr->mutex);
    }

    ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);

    /* Update those sbc values that are not continuously maintained */
    allctr->sbcs.blocks.curr.no
        = allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
    allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;

    update_max_ever_values(&allctr->mbcs);
    update_max_ever_values(&allctr->sbcs);

    if (allctr->fix)
        fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
    mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
                            print_to_arg, hpp, szp);
    if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
        mbcs_pool = info_cpool(allctr, 1, "mbcs_pool ", print_to_p,
                               print_to_arg, hpp, szp);
    else
        mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
    sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
                            print_to_arg, hpp, szp);

    if (hpp || szp) {
        res = NIL;
        add_2tup(hpp, szp, &res, am.sbcs, sbcs);
        if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
            add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
        add_2tup(hpp, szp, &res, am.mbcs, mbcs);
        add_fix_types(allctr, internal, hpp, szp, &res, fix);
    }

    if (begin_max_period) {
        reset_max_values(&allctr->mbcs);
        reset_max_values(&allctr->sbcs);
    }

    if (allctr->thread_safe) {
        erts_mtx_unlock(&allctr->mutex);
        erts_allctr_wrapper_pre_unlock();
    }

    return res;
}


Eterm
erts_alcu_info(Allctr_t *allctr,
               int internal,
               int begin_max_period,
               fmtfn_t *print_to_p,
               void *print_to_arg,
               Uint **hpp,
               Uint *szp)
{
    Eterm res, sett, mbcs, sbcs, calls, fix = THE_NON_VALUE;
    Eterm mbcs_pool;

    res = THE_NON_VALUE;

    if (!allctr) {
        if (print_to_p)
            erts_print(*print_to_p, print_to_arg, "false\n");
        if (szp)
            *szp = 0;
        return am_false;
    }

    if (hpp || szp)
        ensure_atoms_initialized(allctr);

    if (allctr->thread_safe) {
        erts_allctr_wrapper_pre_lock();
        erts_mtx_lock(&allctr->mutex);
    }

    ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);

    /* Update those sbc values that are not continuously maintained */
    allctr->sbcs.blocks.curr.no
        = allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
    allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;

    update_max_ever_values(&allctr->mbcs);
    update_max_ever_values(&allctr->sbcs);

    if (print_to_p) {
        erts_print(*print_to_p,
                   print_to_arg,
                   "versions: %s %s\n",
                   allctr->vsn_str,
                   ERTS_ALCU_VSN_STR);
    }

    sett = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
    if (allctr->fix)
        fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
    mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
                         print_to_arg, hpp, szp);
    if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
        mbcs_pool = info_cpool(allctr, 0, "mbcs_pool ", print_to_p,
                               print_to_arg, hpp, szp);
    else
        mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
    sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
                         print_to_arg, hpp, szp);
    calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp);

    if (hpp || szp) {
        res = NIL;

        add_2tup(hpp, szp, &res, am.calls, calls);
        add_2tup(hpp, szp, &res, am.sbcs, sbcs);
        if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
            add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
        add_2tup(hpp, szp, &res, am.mbcs, mbcs);
        add_fix_types(allctr, internal, hpp, szp, &res, fix);
        add_2tup(hpp, szp, &res, am.options, sett);
        add_3tup(hpp, szp, &res,
                 am.versions,
                 bld_string(hpp, szp, allctr->vsn_str),
                 bld_string(hpp, szp, ERTS_ALCU_VSN_STR));
    }

    if (begin_max_period) {
        reset_max_values(&allctr->mbcs);
        reset_max_values(&allctr->sbcs);
    }

    if (allctr->thread_safe) {
        erts_mtx_unlock(&allctr->mutex);
        erts_allctr_wrapper_pre_unlock();
    }

    return res;
}
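
/*
 * Usage note (hedged, editorial): these exported info functions feed the
 * allocator statistics exposed to Erlang code; as far as we know,
 * erlang:system_info({allocator, Name}) is ultimately served by
 * erts_alcu_info() for alloc_util allocators. The hpp/szp pair drives the
 * usual two-pass term construction: a first pass with only szp computes
 * the heap space needed, and a second pass with hpp builds the term.
 */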

void
erts_alcu_foreign_size(Allctr_t *allctr, ErtsAlcType_t alloc_no, AllctrSize_t *size)
{
    if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
        UWord csz, bsz;
        cpool_read_stat(allctr, alloc_no, NULL, &csz, NULL, &bsz);
        size->carriers = csz;
        size->blocks = bsz;
    } else {
        size->carriers = 0;
        size->blocks = 0;
    }
}

void
erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size,
                       ErtsAlcUFixInfo_t *fi, int fisz)
{
    if (allctr->thread_safe)
        erts_mtx_lock(&allctr->mutex);

    size->carriers = allctr->mbcs.curr.norm.mseg.size;
    size->carriers += allctr->mbcs.curr.norm.sys_alloc.size;
    size->carriers += allctr->sbcs.curr.norm.mseg.size;
    size->carriers += allctr->sbcs.curr.norm.sys_alloc.size;

    size->blocks = allctr->mbcs.blocks.curr.size;
    size->blocks += allctr->sbcs.blocks.curr.size;

    if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
        UWord csz, bsz;
        cpool_read_stat(allctr, allctr->alloc_no, NULL, &csz, NULL, &bsz);
        size->blocks += bsz;
        size->carriers += csz;
    }

    if (fi) {
        int ix;
        for (ix = 0; ix < fisz; ix++) {
            if (allctr->fix) {
                if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
                    fi[ix].allocated += (allctr->fix[ix].type_size
                                         * allctr->fix[ix].u.cpool.allocated);
                    fi[ix].used += (allctr->fix[ix].type_size
                                    * allctr->fix[ix].u.cpool.used);
                }
                else {
                    fi[ix].allocated += (allctr->fix[ix].type_size
                                         * allctr->fix[ix].u.nocpool.allocated);
                    fi[ix].used += (allctr->fix[ix].type_size
                                    * allctr->fix[ix].u.nocpool.used);
                }
            }
        }
    }

    if (allctr->thread_safe)
        erts_mtx_unlock(&allctr->mutex);
}

/* ----------------------------------------------------------------------- */

static ERTS_INLINE void *
do_erts_alcu_alloc(ErtsAlcType_t type, Allctr_t *allctr, Uint size)
{
    void *res;

    ASSERT(initialized);

    ASSERT(allctr);

    ERTS_LC_ASSERT(!allctr->thread_safe
                   || erts_lc_mtx_is_locked(&allctr->mutex));

    ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);

    /* Reject sizes that can't fit into the header word. */
    if (size > ~BLK_FLG_MASK) {
        return NULL;
    }

#if ALLOC_ZERO_EQ_NULL
    if (!size)
        return NULL;
#endif

    INC_CC(allctr->calls.this_alloc);

    if (allctr->fix) {
        if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
            return fix_cpool_alloc(allctr, type, size);
        else
            return fix_nocpool_alloc(allctr, type, size);
    }

    if (size >= allctr->sbc_threshold) {
        Block_t *blk;
        blk = create_carrier(allctr, size, CFLG_SBC);
        res = blk ? BLK2UMEM(blk) : NULL;
    }
    else
        res = mbc_alloc(allctr, size);

    return res;
}
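
/*
 * Dispatch note (editorial): in do_erts_alcu_alloc() above, requests of
 * sbc_threshold bytes or more get a dedicated singleblock carrier (SBC),
 * smaller requests are placed inside a multiblock carrier (MBC), and
 * fixed-size types short-circuit into the fix_* caches first. In sketch
 * form:
 *
 *     if (size >= allctr->sbc_threshold)
 *         blk = create_carrier(allctr, size, CFLG_SBC); // one block/carrier
 *     else
 *         res = mbc_alloc(allctr, size);                // block in an MBC
 */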

void *erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
    Allctr_t *allctr = (Allctr_t *) extra;
    void *res;

    ASSERT(!"This is not thread safe");

    res = do_erts_alcu_alloc(type, allctr, size);

    if (allctr->atags && res) {
        set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
    }

    DEBUG_CHECK_ALIGNMENT(res);

    return res;
}


void *
erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
{
    Allctr_t *allctr = (Allctr_t *) extra;
    alcu_atag_t tag = 0;
    void *res;

    if (allctr->atags) {
        tag = determine_alloc_tag(allctr, type);
    }

    erts_mtx_lock(&allctr->mutex);

    res = do_erts_alcu_alloc(type, allctr, size);

    if (allctr->atags && res) {
        set_alloc_tag(allctr, res, tag);
    }

    erts_mtx_unlock(&allctr->mutex);

    DEBUG_CHECK_ALIGNMENT(res);

    return res;
}


void *
erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
{
    ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
    int ix;
    alcu_atag_t tag = 0;
    Allctr_t *allctr;
    void *res;

    ix = ERTS_ALC_GET_THR_IX();

    ASSERT(0 <= ix && ix < tspec->size);

    allctr = tspec->allctr[ix];

    if (allctr->atags) {
        tag = determine_alloc_tag(allctr, type);
    }

    if (allctr->thread_safe)
        erts_mtx_lock(&allctr->mutex);

    res = do_erts_alcu_alloc(type, allctr, size);

    if (allctr->atags && res) {
        set_alloc_tag(allctr, res, tag);
    }

    if (allctr->thread_safe)
        erts_mtx_unlock(&allctr->mutex);

    DEBUG_CHECK_ALIGNMENT(res);

    return res;
}

void *
erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
{
    Allctr_t *pref_allctr;
    alcu_atag_t tag = 0;
    void *res;

    pref_allctr = get_pref_allctr(extra);

    if (pref_allctr->atags) {
        tag = determine_alloc_tag(pref_allctr, type);
    }

    if (pref_allctr->thread_safe)
        erts_mtx_lock(&pref_allctr->mutex);

    ASSERT(pref_allctr->dd.use);
    ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);

    ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr);

    res = do_erts_alcu_alloc(type, pref_allctr, size);

    if (!res && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
        /* Cleaned up a bit more; try one more time... */
        res = do_erts_alcu_alloc(type, pref_allctr, size);
    }

    if (pref_allctr->atags && res) {
        set_alloc_tag(pref_allctr, res, tag);
    }

    if (pref_allctr->thread_safe)
        erts_mtx_unlock(&pref_allctr->mutex);

    DEBUG_CHECK_ALIGNMENT(res);

    return res;
}
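
/*
 * Note (editorial): the thread-preferred variant first processes part of
 * its delayed-dealloc (dd) queue via ERTS_ALCU_HANDLE_DD_IN_OP(). If the
 * allocation still fails, it drains once more and retries, since handling
 * queued deallocations may have released enough memory for the request.
 */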


/* ------------------------------------------------------------------------- */

static ERTS_INLINE void
do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p,
                  Carrier_t **busy_pcrr_pp)
{
    ASSERT(initialized);

    ASSERT(allctr);

    ERTS_LC_ASSERT(!allctr->thread_safe
                   || erts_lc_mtx_is_locked(&allctr->mutex));

    ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);

    if (p) {
        INC_CC(allctr->calls.this_free);

        if (ERTS_ALC_IS_FIX_TYPE(type)) {
            if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
                fix_cpool_free(allctr, type, 0, p, busy_pcrr_pp);
            else
                fix_nocpool_free(allctr, type, p);
        }
        else {
            Block_t *blk = UMEM2BLK(p);
            if (IS_SBC_BLK(blk))
                destroy_carrier(allctr, blk, NULL);
            else
                mbc_free(allctr, type, p, busy_pcrr_pp);
        }
    }
}

void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
{
    Allctr_t *allctr = (Allctr_t *) extra;
    do_erts_alcu_free(type, allctr, p, NULL);
}


void
erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
{
    Allctr_t *allctr = (Allctr_t *) extra;
    erts_mtx_lock(&allctr->mutex);
    do_erts_alcu_free(type, allctr, p, NULL);
    erts_mtx_unlock(&allctr->mutex);
}


void
erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p)
{
    ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
    int ix;
    Allctr_t *allctr;

    ix = ERTS_ALC_GET_THR_IX();

    ASSERT(0 <= ix && ix < tspec->size);

    allctr = tspec->allctr[ix];

    if (allctr->thread_safe)
        erts_mtx_lock(&allctr->mutex);

    do_erts_alcu_free(type, allctr, p, NULL);

    if (allctr->thread_safe)
        erts_mtx_unlock(&allctr->mutex);
}

void
erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
{
    if (p) {
        Carrier_t *busy_pcrr_p;
        Allctr_t *pref_allctr, *used_allctr;

        pref_allctr = get_pref_allctr(extra);
        used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_IF_USED,
                                      p, NULL, &busy_pcrr_p);
        if (pref_allctr != used_allctr) {
            enqueue_dealloc_other_instance(type,
                                           used_allctr,
                                           p,
                                           (used_allctr->dd.ix
                                            - pref_allctr->dd.ix));
        }
        else {
            ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
            do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p);
            clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
            if (pref_allctr->thread_safe)
                erts_mtx_unlock(&pref_allctr->mutex);
        }
    }
}
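
/*
 * Note (editorial): when a thread-preferred free targets a block owned by
 * another allocator instance, it is not freed locally; it is enqueued on
 * the owning instance's delayed-dealloc queue through
 * enqueue_dealloc_other_instance(), and the owner applies it later during
 * its own operations.
 */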


/* ------------------------------------------------------------------------- */

static ERTS_INLINE void *
do_erts_alcu_realloc(ErtsAlcType_t type,
                     Allctr_t *allctr,
                     void *p,
                     Uint size,
                     Uint32 alcu_flgs,
                     Carrier_t **busy_pcrr_pp)
{
    Block_t *blk;
    void *res;

    ASSERT(initialized);

    ASSERT(allctr);

    ERTS_LC_ASSERT(!allctr->thread_safe
                   || erts_lc_mtx_is_locked(&allctr->mutex));

    ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);

    if (!p) {
        res = do_erts_alcu_alloc(type, allctr, size);
        INC_CC(allctr->calls.this_realloc);
        DEC_CC(allctr->calls.this_alloc);
        return res;
    }

    /* Reject sizes that can't fit into the header word. */
    if (size > ~BLK_FLG_MASK) {
        return NULL;
    }

#if ALLOC_ZERO_EQ_NULL
    if (!size) {
        ASSERT(p);
        do_erts_alcu_free(type, allctr, p, busy_pcrr_pp);
        INC_CC(allctr->calls.this_realloc);
        DEC_CC(allctr->calls.this_free);
        return NULL;
    }
#endif

    INC_CC(allctr->calls.this_realloc);

    blk = UMEM2BLK(p);

    if (size < allctr->sbc_threshold) {
        if (IS_MBC_BLK(blk))
            res = mbc_realloc(allctr, type, p, size, alcu_flgs, busy_pcrr_pp);
        else {
            Uint used_sz = SBC_HEADER_SIZE + ABLK_HDR_SZ + size;
            Uint crr_sz;
            Uint diff_sz_val;
            Uint crr_sz_val;

#if HAVE_ERTS_MSEG
            if (IS_SYS_ALLOC_CARRIER(BLK_TO_SBC(blk)))
#endif
                crr_sz = SYS_ALLOC_CARRIER_CEILING(used_sz);
#if HAVE_ERTS_MSEG
            else
                crr_sz = ERTS_SACRR_UNIT_CEILING(used_sz);
#endif
            diff_sz_val = crr_sz - used_sz;
            if (diff_sz_val < (~((Uint) 0) / 100))
                crr_sz_val = crr_sz;
            else {
                /* div both by 128 */
                crr_sz_val = crr_sz >> 7;
                /* A sys_alloc carrier could potentially be
                   smaller than 128 bytes (but not likely) */
                if (crr_sz_val == 0)
                    goto do_carrier_resize;
                diff_sz_val >>= 7;
            }

            if (100*diff_sz_val < allctr->sbc_move_threshold*crr_sz_val)
                /* Data won't be copied into a new carrier... */
                goto do_carrier_resize;
            else if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
                return NULL;

            res = mbc_alloc(allctr, size);
            if (res) {
                sys_memcpy((void*) res,
                           (void*) p,
                           MIN(SBC_BLK_SZ(blk) - ABLK_HDR_SZ, size));
                destroy_carrier(allctr, blk, NULL);
            }
        }
    }
    else {
        Block_t *new_blk;
        if (IS_SBC_BLK(blk)) {
        do_carrier_resize:
            new_blk = resize_carrier(allctr, blk, size, CFLG_SBC);
            res = new_blk ? BLK2UMEM(new_blk) : NULL;
        }
        else if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
            return NULL;
        else {
            new_blk = create_carrier(allctr, size, CFLG_SBC);
            if (new_blk) {
                res = BLK2UMEM(new_blk);
                sys_memcpy((void *) res,
                           (void *) p,
                           MIN(MBC_ABLK_SZ(blk) - ABLK_HDR_SZ, size));
                mbc_free(allctr, type, p, busy_pcrr_pp);
            }
            else
                res = NULL;
        }
    }

    return res;
}
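
/*
 * Worked example (editorial) for the sbc_move_threshold test above: say a
 * shrunk SBC would use used_sz = 900 units of a crr_sz = 1024 unit
 * carrier, so diff_sz_val = 124. With sbc_move_threshold (rsbcmt) = 80,
 * the test compares 100*124 = 12400 against 80*1024 = 81920; since
 * 12400 < 81920, the carrier is resized in place rather than copying the
 * data into a freshly allocated MBC block.
 */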

void *
erts_alcu_realloc(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
    Allctr_t *allctr = (Allctr_t *) extra;
    void *res;

    res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);

    DEBUG_CHECK_ALIGNMENT(res);

    if (allctr->atags && res) {
        set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
    }

    return res;
}

void *
erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
    Allctr_t *allctr = (Allctr_t *) extra;
    void *res;

    res = do_erts_alcu_alloc(type, allctr, size);
    if (!res)
        res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
    else {
        Block_t *blk;
        size_t cpy_size;

        blk = UMEM2BLK(p);
        cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ;
        if (cpy_size > size)
            cpy_size = size;
        sys_memcpy(res, p, cpy_size);
        do_erts_alcu_free(type, allctr, p, NULL);
    }

    DEBUG_CHECK_ALIGNMENT(res);

    if (allctr->atags && res) {
        set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
    }

    return res;
}


void *
erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
{
    Allctr_t *allctr = (Allctr_t *) extra;
    alcu_atag_t tag = 0;
    void *res;

    if (allctr->atags) {
        tag = determine_alloc_tag(allctr, type);
    }

    erts_mtx_lock(&allctr->mutex);

    res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);

    if (allctr->atags && res) {
        set_alloc_tag(allctr, res, tag);
    }

    erts_mtx_unlock(&allctr->mutex);

    DEBUG_CHECK_ALIGNMENT(res);

    return res;
}

void *
erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
    Allctr_t *allctr = (Allctr_t *) extra;
    alcu_atag_t tag = 0;
    void *res;

    if (allctr->atags) {
        tag = determine_alloc_tag(allctr, type);
    }

    erts_mtx_lock(&allctr->mutex);
    res = do_erts_alcu_alloc(type, allctr, size);
    if (!res)
        res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
    else {
        Block_t *blk;
        size_t cpy_size;

        blk = UMEM2BLK(p);
        cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ;
        if (cpy_size > size)
            cpy_size = size;
        sys_memcpy(res, p, cpy_size);
        do_erts_alcu_free(type, allctr, p, NULL);
    }

    if (allctr->atags && res) {
        set_alloc_tag(allctr, res, tag);
    }

    erts_mtx_unlock(&allctr->mutex);

    DEBUG_CHECK_ALIGNMENT(res);

    return res;
}


void *
erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
                           void *ptr, Uint size)
{
    ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
    int ix;
    alcu_atag_t tag = 0;
    Allctr_t *allctr;
    void *res;

    ix = ERTS_ALC_GET_THR_IX();

    ASSERT(0 <= ix && ix < tspec->size);

    allctr = tspec->allctr[ix];

    if (allctr->atags) {
        tag = determine_alloc_tag(allctr, type);
    }

    if (allctr->thread_safe)
        erts_mtx_lock(&allctr->mutex);

    res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);

    if (allctr->atags && res) {
        set_alloc_tag(allctr, res, tag);
    }

    if (allctr->thread_safe)
        erts_mtx_unlock(&allctr->mutex);

    DEBUG_CHECK_ALIGNMENT(res);

    return res;
}

void *
erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
                              void *ptr, Uint size)
{
    ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
    int ix;
    alcu_atag_t tag = 0;
    Allctr_t *allctr;
    void *res;

    ix = ERTS_ALC_GET_THR_IX();

    ASSERT(0 <= ix && ix < tspec->size);

    allctr = tspec->allctr[ix];

    if (allctr->atags) {
        tag = determine_alloc_tag(allctr, type);
    }

    if (allctr->thread_safe)
        erts_mtx_lock(&allctr->mutex);

    res = do_erts_alcu_alloc(type, allctr, size);
    if (!res) {
        res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
    }
    else {
        Block_t *blk;
        size_t cpy_size;

        blk = UMEM2BLK(ptr);
        cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ;
        if (cpy_size > size)
            cpy_size = size;
        sys_memcpy(res, ptr, cpy_size);
        do_erts_alcu_free(type, allctr, ptr, NULL);
    }

    if (allctr->atags && res) {
        set_alloc_tag(allctr, res, tag);
    }

    if (allctr->thread_safe)
        erts_mtx_unlock(&allctr->mutex);

    DEBUG_CHECK_ALIGNMENT(res);

    return res;
}

static ERTS_INLINE void *
realloc_thr_pref(ErtsAlcType_t type, Allctr_t *pref_allctr, void *p, Uint size,
                 int force_move)
{
    void *res;
    Allctr_t *used_allctr;
    UWord old_user_size;
    Carrier_t *busy_pcrr_p;
    alcu_atag_t tag = 0;
    int retried;

    if (pref_allctr->atags) {
        tag = determine_alloc_tag(pref_allctr, type);
    }

    if (pref_allctr->thread_safe)
        erts_mtx_lock(&pref_allctr->mutex);

    ASSERT(pref_allctr->dd.use);
    ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
    retried = 0;
restart:

    used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_NO,
                                  p, &old_user_size, &busy_pcrr_p);

    ASSERT(used_allctr && pref_allctr);

    if (!force_move && used_allctr == pref_allctr) {
        ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
        res = do_erts_alcu_realloc(type,
                                   used_allctr,
                                   p,
                                   size,
                                   0,
                                   &busy_pcrr_p);
        clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
        if (!res && !retried && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
            /* Cleaned up a bit more; try one more time... */
            retried = 1;
            goto restart;
        }

        if (pref_allctr->atags && res) {
            set_alloc_tag(pref_allctr, res, tag);
        }

        if (pref_allctr->thread_safe)
            erts_mtx_unlock(&pref_allctr->mutex);
    }
    else {
        res = do_erts_alcu_alloc(type, pref_allctr, size);
        if (!res)
            goto unlock_ts_return;
        else {
            if (pref_allctr->atags) {
                set_alloc_tag(pref_allctr, res, tag);
            }

            DEBUG_CHECK_ALIGNMENT(res);

            if (used_allctr != pref_allctr) {
                if (pref_allctr->thread_safe)
                    erts_mtx_unlock(&pref_allctr->mutex);

                sys_memcpy(res, p, MIN(size, old_user_size));

                enqueue_dealloc_other_instance(type,
                                               used_allctr,
                                               p,
                                               (used_allctr->dd.ix
                                                - pref_allctr->dd.ix));
            }
            else {

                sys_memcpy(res, p, MIN(size, old_user_size));

                do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p);
                ASSERT(pref_allctr == used_allctr);
                clear_busy_pool_carrier(used_allctr, busy_pcrr_p);

            unlock_ts_return:
                if (pref_allctr->thread_safe)
                    erts_mtx_unlock(&pref_allctr->mutex);
            }
        }
    }

    DEBUG_CHECK_ALIGNMENT(res);

    return res;
}

void *
erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
    if (p) {
        Allctr_t *pref_allctr = get_pref_allctr(extra);

        return realloc_thr_pref(type, pref_allctr, p, size, 0);
    }

    return erts_alcu_alloc_thr_pref(type, extra, size);
}

void *
erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
                              void *p, Uint size)
{
    if (p) {
        Allctr_t *pref_allctr = get_pref_allctr(extra);

        return realloc_thr_pref(type, pref_allctr, p, size, 1);
    }

    return erts_alcu_alloc_thr_pref(type, extra, size);
}


static Uint adjust_sbct(Allctr_t* allctr, Uint sbct)
{
#ifndef ARCH_64
    if (sbct > 0) {
        Uint max_mbc_block_sz = UNIT_CEILING(sbct - 1 + ABLK_HDR_SZ);
        if (max_mbc_block_sz + UNIT_FLOOR(allctr->min_block_size - 1) > MBC_ABLK_SZ_MASK
            || max_mbc_block_sz < sbct) { /* wrap around */
            /*
             * By limiting sbc_threshold to (hard limit - min_block_size)
             * we avoid having to split off free "residue blocks"
             * smaller than min_block_size.
             */
            max_mbc_block_sz = MBC_ABLK_SZ_MASK - UNIT_FLOOR(allctr->min_block_size - 1);
            sbct = max_mbc_block_sz - ABLK_HDR_SZ + 1;
        }
    }
#endif
    return sbct;
}
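
/*
 * Example (editorial, non-64-bit builds only): if a caller sets sbct so
 * large that the corresponding MBC block size no longer fits in
 * MBC_ABLK_SZ_MASK together with a minimum-size residue block, the clamp
 * above rewrites it to
 *
 *     MBC_ABLK_SZ_MASK - UNIT_FLOOR(min_block_size - 1) - ABLK_HDR_SZ + 1
 *
 * so that splitting a block never leaves a free fragment smaller than
 * min_block_size.
 */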

int erts_alcu_try_set_dyn_param(Allctr_t* allctr, Eterm param, Uint value)
{
    const Uint MIN_DYN_SBCT = 4000;  /* a lame catastrophe prevention */

    if (param == am_sbct && value >= MIN_DYN_SBCT) {
        allctr->sbc_threshold = adjust_sbct(allctr, value);
        return 1;
    }
    return 0;
}

/* ------------------------------------------------------------------------- */

int
erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
{
    /* erts_alcu_start assumes that allctr has been zeroed */
    int i;

    if (((UWord)allctr & ERTS_CRR_ALCTR_FLG_MASK) != 0) {
        erts_exit(ERTS_ABORT_EXIT, "%s:%d:erts_alcu_start: Alignment error\n",
                  __FILE__, __LINE__);
    }

    /* The various fields packed into the header word must not overlap */
    ERTS_CT_ASSERT(!(MBC_ABLK_OFFSET_MASK & MBC_ABLK_SZ_MASK));
    ERTS_CT_ASSERT(!(MBC_ABLK_OFFSET_MASK & BLK_FLG_MASK));
    ERTS_CT_ASSERT(!(MBC_ABLK_SZ_MASK & BLK_FLG_MASK));
    ERTS_CT_ASSERT(!(MBC_FBLK_SZ_MASK & BLK_FLG_MASK));
    ERTS_CT_ASSERT(!(SBC_BLK_SZ_MASK & BLK_FLG_MASK));
    ERTS_CT_ASSERT(!(CRR_SZ_MASK & CRR_FLG_MASK));

    if (!initialized)
        goto error;

#if HAVE_ERTS_MSEG
    sys_memcpy((void *) &allctr->mseg_opt,
               (void *) &erts_mseg_default_opt,
               sizeof(ErtsMsegOpt_t));
    if (init->tspec || init->tpref)
        allctr->mseg_opt.sched_spec = 1;
#endif /* HAVE_ERTS_MSEG */

    allctr->name_prefix = init->name_prefix;
    if (!allctr->name_prefix)
        goto error;

    allctr->ix          = init->ix;
    allctr->alloc_no    = init->alloc_no;
    allctr->alloc_strat = init->alloc_strat;

    ASSERT(allctr->alloc_no >= ERTS_ALC_A_MIN &&
           allctr->alloc_no <= ERTS_ALC_A_MAX);

    if (allctr->alloc_no < ERTS_ALC_A_MIN
        || ERTS_ALC_A_MAX < allctr->alloc_no)
        allctr->alloc_no = ERTS_ALC_A_INVALID;

    if (!allctr->vsn_str)
        goto error;

    allctr->name.alloc   = THE_NON_VALUE;
    allctr->name.realloc = THE_NON_VALUE;
    allctr->name.free    = THE_NON_VALUE;

    if (init->tspec)
        allctr->t = init->tspec;
    else if (init->tpref)
        allctr->t = init->tpref;
    else
        allctr->t = 0;

    allctr->ramv              = init->ramv;
    allctr->atags             = init->atags;
    allctr->main_carrier_size = init->mmbcs;

#if HAVE_ERTS_MSEG
    allctr->mseg_opt.abs_shrink_th = init->asbcst;
    allctr->mseg_opt.rel_shrink_th = init->rsbcst;
#endif
    allctr->sbc_move_threshold = init->rsbcmt;
    allctr->mbc_move_threshold = init->rmbcmt;
#if HAVE_ERTS_MSEG
    allctr->max_mseg_sbcs = init->mmsbc;
# if ERTS_SUPER_ALIGNED_MSEG_ONLY
    allctr->max_mseg_mbcs = ~(Uint)0;
# else
    allctr->max_mseg_mbcs = init->mmmbc;
# endif
#endif

    allctr->largest_mbc_size = MAX(init->lmbcs, init->smbcs);
#ifndef ARCH_64
    if (allctr->largest_mbc_size > MBC_SZ_MAX_LIMIT) {
        allctr->largest_mbc_size = MBC_SZ_MAX_LIMIT;
    }
#endif
    allctr->smallest_mbc_size = init->smbcs;
    allctr->mbc_growth_stages = MAX(1, init->mbcgs);

    if (allctr->min_block_size < ABLK_HDR_SZ)
        goto error;
    allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
                                          + sizeof(FreeBlkFtr_t));
    if (init->tpref) {
        Uint sz = ABLK_HDR_SZ;
        sz += sizeof(ErtsAllctrDDBlock_t);
        sz = UNIT_CEILING(sz);
        if (sz > allctr->min_block_size)
            allctr->min_block_size = sz;
    }

    allctr->cpool.pooled_tree = NULL;
    allctr->cpool.dc_list.first = NULL;
    allctr->cpool.dc_list.last = NULL;
    allctr->cpool.abandon_limit = 0;
    allctr->cpool.disable_abandon = 0;
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
        erts_atomic_init_nob(&allctr->cpool.stat.blocks_size[i], 0);
        erts_atomic_init_nob(&allctr->cpool.stat.no_blocks[i], 0);
    }
    erts_atomic_init_nob(&allctr->cpool.stat.carriers_size, 0);
    erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0);
    if (!init->ts && init->acul && init->acnl && init->cp >= 0) {
        ASSERT(allctr->add_mbc);
        ASSERT(allctr->remove_mbc);
        ASSERT(allctr->largest_fblk_in_mbc);
        ASSERT(allctr->first_fblk_in_mbc);
        ASSERT(allctr->next_fblk_in_mbc);

        allctr->cpool.util_limit = init->acul;
        allctr->cpool.in_pool_limit = init->acnl;
        allctr->cpool.fblk_min_limit = init->acfml;
        allctr->cpool.carrier_pool = init->cp;

        if (allctr->alloc_strat == ERTS_ALC_S_FIRSTFIT) {
            allctr->cpool.sentinel = &firstfit_carrier_pools[init->cp].sentinel;
        }
        else if (allctr->alloc_no != ERTS_ALC_A_TEST) {
            ERTS_INTERNAL_ERROR("Impossible carrier migration config.");
        }
    }
    else {
        allctr->cpool.util_limit = 0;
        allctr->cpool.in_pool_limit = 0;
        allctr->cpool.fblk_min_limit = 0;
        allctr->cpool.carrier_pool = -1;
    }

    /* The invasive tests don't really care whether the pool is enabled or not,
     * so we need to set this unconditionally for this allocator type. */
    if (allctr->alloc_no == ERTS_ALC_A_TEST) {
        allctr->cpool.sentinel =
            &firstfit_carrier_pools[ERTS_ALC_TEST_CPOOL_IX].sentinel;
    }

    allctr->sbc_threshold = adjust_sbct(allctr, init->sbct);

#if HAVE_ERTS_MSEG
    if (allctr->mseg_opt.abs_shrink_th > ~((UWord) 0) / 100)
        allctr->mseg_opt.abs_shrink_th = ~((UWord) 0) / 100;
#endif

    if (init->ts) {
        allctr->thread_safe = 1;

        erts_mtx_init(&allctr->mutex, "alcu_allocator",
                      make_small(allctr->alloc_no),
                      ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);

#ifdef DEBUG
        allctr->debug.saved_tid = 0;
#endif
    }

    if (!allctr->get_free_block
        || !allctr->link_free_block
        || !allctr->unlink_free_block
        || !allctr->info_options)
        goto error;

    if (!allctr->get_next_mbc_size)
        allctr->get_next_mbc_size = get_next_mbc_size;

    if (allctr->mbc_header_size < sizeof(Carrier_t))
        goto error;
    allctr->dd.use = 0;
    if (init->tpref) {
        allctr->dd.use = 1;
        init_dd_queue(&allctr->dd.q);
        allctr->dd.ix = init->ix;
    }
    allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
                                            + ABLK_HDR_SZ)
                               - ABLK_HDR_SZ);

    if (init->sys_alloc) {
        ASSERT(init->sys_realloc && init->sys_dealloc);
        allctr->sys_alloc   = init->sys_alloc;
        allctr->sys_realloc = init->sys_realloc;
        allctr->sys_dealloc = init->sys_dealloc;
    }
    else {
        ASSERT(!init->sys_realloc && !init->sys_dealloc);
        allctr->sys_alloc   = &erts_alcu_sys_alloc;
        allctr->sys_realloc = &erts_alcu_sys_realloc;
        allctr->sys_dealloc = &erts_alcu_sys_dealloc;
    }

    allctr->try_set_dyn_param = &erts_alcu_try_set_dyn_param;

#if HAVE_ERTS_MSEG
6786     if (init->mseg_alloc) {
6787         ASSERT(init->mseg_realloc && init->mseg_dealloc);
6788         allctr->mseg_alloc   = init->mseg_alloc;
6789         allctr->mseg_realloc = init->mseg_realloc;
6790         allctr->mseg_dealloc = init->mseg_dealloc;
6791         allctr->mseg_mmapper = init->mseg_mmapper;
6792     }
6793     else {
6794         ASSERT(!init->mseg_realloc && !init->mseg_dealloc);
6795         allctr->mseg_alloc   = &erts_alcu_mseg_alloc;
6796         allctr->mseg_realloc = &erts_alcu_mseg_realloc;
6797         allctr->mseg_dealloc = &erts_alcu_mseg_dealloc;
6798     }
6799 
6800     /* If a custom carrier alloc function is specified, make sure it's used */
6801     if (init->mseg_alloc && !init->sys_alloc) {
6802         allctr->crr_set_flgs = CFLG_FORCE_MSEG;
6803         allctr->crr_clr_flgs = CFLG_FORCE_SYS_ALLOC;
6804     }
6805     else if (!init->mseg_alloc && init->sys_alloc) {
6806         allctr->crr_set_flgs = CFLG_FORCE_SYS_ALLOC;
6807         allctr->crr_clr_flgs = CFLG_FORCE_MSEG;
6808     }
6809 #endif
6810 
6811     if (allctr->main_carrier_size) {
6812 	Block_t *blk;
6813 
6814 	blk = create_carrier(allctr,
6815 			     allctr->main_carrier_size,
6816                              (ERTS_SUPER_ALIGNED_MSEG_ONLY
6817                               ? CFLG_FORCE_MSEG : CFLG_FORCE_SYS_ALLOC)
6818                              | CFLG_MBC
6819 			     | CFLG_FORCE_SIZE
6820 			     | CFLG_NO_CPOOL
6821 			     | CFLG_MAIN_CARRIER);
6822 	if (!blk) {
6823 	  if (allctr->thread_safe)
6824 	    erts_mtx_destroy(&allctr->mutex);
6825 	  erts_exit(ERTS_ABORT_EXIT,
6826 	    "Failed to create main carrier for %salloc\n",
6827 	    init->name_prefix);
6828 	}
6829 
6830 	(*allctr->link_free_block)(allctr, blk);
6831 
6832 	HARD_CHECK_BLK_CARRIER(allctr, blk);
6833 
6834     }
6835 
6836     if (init->fix) {
6837 	int i;
6838 	allctr->fix = init->fix;
6839 	allctr->fix_shrink_scheduled = 0;
6840 	for (i = 0; i < ERTS_ALC_NO_FIXED_SIZES; i++) {
6841 	    allctr->fix[i].type_size = init->fix_type_size[i];
6842 	    allctr->fix[i].type = ERTS_ALC_N2T(i + ERTS_ALC_N_MIN_A_FIXED_SIZE);
6843 	    allctr->fix[i].list_size = 0;
6844 	    allctr->fix[i].list = NULL;
6845 	    if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
6846 		allctr->fix[i].u.cpool.min_list_size = 0;
6847 		allctr->fix[i].u.cpool.shrink_list = 0;
6848 		allctr->fix[i].u.cpool.allocated = 0;
6849 		allctr->fix[i].u.cpool.used = 0;
6850 	    }
6851 	    else {
6852 		allctr->fix[i].u.nocpool.max_used = 0;
6853 		allctr->fix[i].u.nocpool.limit = 0;
6854 		allctr->fix[i].u.nocpool.allocated = 0;
6855 		allctr->fix[i].u.nocpool.used = 0;
6856 	    }
6857 	}
6858     }
6859 
6860     return 1;
6861 
6862  error:
6863 
6864     if (allctr->thread_safe)
6865 	erts_mtx_destroy(&allctr->mutex);
6866 
6867     return 0;
6868 
6869 }
6870 
6871 /* ------------------------------------------------------------------------- */
6872 
6873 void
6874 erts_alcu_stop(Allctr_t *allctr)
6875 {
6876     allctr->stopped = 1;
6877 
6878     while (allctr->sbc_list.first)
6879 	destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first), NULL);
6880     while (allctr->mbc_list.first)
6881 	destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first), NULL);
6882 
6883     if (allctr->thread_safe)
6884 	erts_mtx_destroy(&allctr->mutex);
6885 
6886 }
6887 
6888 /* ------------------------------------------------------------------------- */
6889 
6890 void
6891 erts_alcu_init(AlcUInit_t *init)
6892 {
6893     int i;
6894     ErtsAlcCPoolData_t *sentinel;
6895 
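    /* Point each pool sentinel at itself, so that every carrier pool
     * starts out as an empty circular list. */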
6896     for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
6897         sentinel = &firstfit_carrier_pools[i].sentinel;
6898         erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
6899         erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
6900     }
6901     sentinel = &firstfit_carrier_pools[ERTS_ALC_A_INVALID].sentinel;
6902     erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
6903     erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
6904 
6905     ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
6906 #if HAVE_ERTS_MSEG
6907     ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ);
6908     max_mseg_carriers = init->mmc;
6909     sys_alloc_carrier_size = ERTS_SACRR_UNIT_CEILING(init->ycs);
6910 #else /* #if HAVE_ERTS_MSEG */
6911     sys_alloc_carrier_size = ((init->ycs + 4095) / 4096) * 4096;
6912 #endif
6913     allow_sys_alloc_carriers = init->sac;
6914 
6915     sys_page_size = erts_sys_get_page_size();
6916 
6917 #ifdef DEBUG
6918     carrier_alignment = sizeof(Unit_t);
6919 #endif
6920 
6921 #ifdef DEBUG
6922     for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++)
6923         allocator_char_str[i] = NULL;
6924 #endif
6925     allocator_char_str[ERTS_ALC_A_SYSTEM] = "Y";
6926     allocator_char_str[ERTS_ALC_A_TEMPORARY] = "T";
6927     allocator_char_str[ERTS_ALC_A_SHORT_LIVED] = "S";
6928     allocator_char_str[ERTS_ALC_A_STANDARD] = "D";
6929     allocator_char_str[ERTS_ALC_A_LONG_LIVED] = "L";
6930     allocator_char_str[ERTS_ALC_A_EHEAP] = "H";
6931     allocator_char_str[ERTS_ALC_A_ETS] = "E";
6932     allocator_char_str[ERTS_ALC_A_FIXED_SIZE] = "F";
6933     allocator_char_str[ERTS_ALC_A_LITERAL] = "I";
6934 #ifdef ERTS_ALC_A_EXEC
6935     allocator_char_str[ERTS_ALC_A_EXEC] = "X";
6936 #endif
6937     allocator_char_str[ERTS_ALC_A_BINARY] = "B";
6938     allocator_char_str[ERTS_ALC_A_DRIVER] = "R";
6939     allocator_char_str[ERTS_ALC_A_TEST] = "Z";
6940 #ifdef DEBUG
6941     for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++)
6942         ASSERT(allocator_char_str[i]);
6943 #endif
6944 
6945     erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms", NIL,
6946         ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
6947 
6948     atoms_initialized = 0;
6949     initialized = 1;
6950 }
6951 
6952 /* ------------------------------------------------------------------------- */
6953 
6954 /* Allocation histograms and carrier information are gathered by walking
6955  * through all carriers associated with each allocator instance. This is done
6956  * as aux_yield_work on the scheduler that owns each instance.
6957  *
6958  * Yielding is implemented by temporarily inserting a "dummy carrier" at the
6959  * last position. It's permanently "busy" so it won't get picked up by anyone
6960  * else while in the carrier pool, and it is never made visible to the
6961  * allocator's callback module, so we can't accidentally allocate on it.
6962  *
6963  * Plain malloc/free is used to guarantee we won't allocate with the allocator
6964  * we're scanning. */
6965 
6966 /* Yield between carriers once this many blocks have been processed. Note that
6967  * a single carrier scan may exceed this figure. */
6968 #ifndef DEBUG
6969     #define BLOCKSCAN_REDUCTIONS (8000)
6970 #else
6971     #define BLOCKSCAN_REDUCTIONS (400)
6972 #endif
6973 
6974 /* Abort a single carrier scan after this many blocks to prevent really large
6975  * MBCs from blocking forever. */
6976 #define BLOCKSCAN_BAILOUT_THRESHOLD (16000)
6977 
6978 typedef struct alcu_blockscan {
6979     /* A per-scheduler list used when multiple scans have been queued. The
6980      * current scanner will always run until completion/abort before moving on
6981      * to the next. */
6982     struct alcu_blockscan *scanner_queue;
6983 
6984     Allctr_t *allocator;
6985     Process *process;
6986 
6987     int (*current_op)(struct alcu_blockscan *scanner);
6988     int (*next_op)(struct alcu_blockscan *scanner);
6989     int reductions;
6990 
6991     ErtsAlcCPoolData_t *cpool_cursor;
6992     CarrierList_t *current_clist;
6993     Carrier_t *clist_cursor;
6994     Carrier_t dummy_carrier;
6995 
6996     /* Called if the process that started this job dies before we're done. */
6997     void (*abort)(void *user_data);
6998 
6999     /* Called on each carrier. The callback must return the number of blocks
7000      * scanned to yield properly between carriers.
7001      *
7002      * Note that it's not possible to "yield back" into a carrier. */
7003     int (*scan)(Allctr_t *, void *user_data, Carrier_t *);
7004 
7005     /* Called when all carriers have been scanned. The callback may return
7006      * non-zero to yield. */
7007     int (*finish)(void *user_data);
7008 
7009     void *user_data;
7010 } blockscan_t;
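/* A minimal sketch of a scanner built on this interface, assuming the
 * hypothetical names free_count_t/free_count_scan (they are not part of this
 * file). The 'common' member must come first, since the driver frees the
 * blockscan_t pointer itself when done. The scan callback tallies free blocks
 * in multi-block carriers and returns the number of blocks visited, which is
 * what lets the driver yield between carriers. Kept in '#if 0' as
 * illustration only. */
#if 0
typedef struct {
    blockscan_t common;   /* must be first; freed via the blockscan_t* */
    UWord free_blocks;
} free_count_t;

static int free_count_scan(Allctr_t *allocator,
                           void *user_data,
                           Carrier_t *carrier)
{
    free_count_t *state = (free_count_t*)user_data;
    int blocks_scanned = 1;
    Block_t *block;

    if (IS_SB_CARRIER(carrier)) {
        /* Single-block carriers hold exactly one allocated block. */
        return blocks_scanned;
    }

    block = MBC_TO_FIRST_BLK(allocator, carrier);

    while (1) {
        if (IS_FREE_BLK(block)) {
            state->free_blocks++;
        }

        if (IS_LAST_BLK(block) ||
            blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) {
            break;
        }

        block = NXT_BLK(block);
        blocks_scanned++;
    }

    return blocks_scanned;
}
#endif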
7011 
7012 static Carrier_t *blockscan_restore_clist_cursor(blockscan_t *state)
7013 {
7014     Carrier_t *cursor = state->clist_cursor;
7015 
7016     ASSERT(state->clist_cursor == (state->current_clist)->first ||
7017            state->clist_cursor == &state->dummy_carrier);
7018 
7019     if (cursor == &state->dummy_carrier) {
7020         cursor = cursor->next;
7021 
7022         unlink_carrier(state->current_clist, state->clist_cursor);
7023     }
7024 
7025     return cursor;
7026 }
7027 
7028 static void blockscan_save_clist_cursor(blockscan_t *state, Carrier_t *after)
7029 {
7030     ASSERT(state->clist_cursor == (state->current_clist)->first ||
7031            state->clist_cursor == &state->dummy_carrier);
7032 
7033     state->clist_cursor = &state->dummy_carrier;
7034 
7035     (state->clist_cursor)->next = after->next;
7036     (state->clist_cursor)->prev = after;
7037 
7038     relink_carrier(state->current_clist, state->clist_cursor);
7039 }
7040 
7041 static int blockscan_clist_yielding(blockscan_t *state)
7042 {
7043     Carrier_t *cursor = blockscan_restore_clist_cursor(state);
7044 
7045     if (ERTS_PROC_IS_EXITING(state->process)) {
7046         return 0;
7047     }
7048 
7049     while (cursor) {
7050         /* Skip dummy carriers inserted by another (concurrent) block scan.
7051          * This can happen when scanning thread-safe allocators from multiple
7052          * schedulers. */
7053         if (CARRIER_SZ(cursor) > 0) {
7054             int blocks_scanned = state->scan(state->allocator,
7055                                              state->user_data,
7056                                              cursor);
7057 
7058             state->reductions -= blocks_scanned;
7059 
7060             if (state->reductions <= 0) {
7061                 blockscan_save_clist_cursor(state, cursor);
7062                 return 1;
7063             }
7064         }
7065 
7066         cursor = cursor->next;
7067     }
7068 
7069     return 0;
7070 }
7071 
7072 static ErtsAlcCPoolData_t *blockscan_restore_cpool_cursor(blockscan_t *state)
7073 {
7074     ErtsAlcCPoolData_t *cursor;
7075 
7076     cursor = cpool_aint2cpd(cpool_read(&(state->cpool_cursor)->next));
7077 
7078     if (state->cpool_cursor == &state->dummy_carrier.cpool) {
7079         cpool_delete(state->allocator, state->allocator, &state->dummy_carrier);
7080     }
7081 
7082     return cursor;
7083 }
7084 
7085 static void blockscan_save_cpool_cursor(blockscan_t *state,
7086                                         ErtsAlcCPoolData_t *after)
7087 {
7088     ErtsAlcCPoolData_t *dummy_carrier, *prev_carrier, *next_carrier;
7089 
7090     dummy_carrier = &state->dummy_carrier.cpool;
7091 
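    /* Mod-mark 'after->next' and the matching 'prev' pointer so that no
     * concurrent pool operation can touch them, splice the dummy carrier
     * in between, and then release the marks with the updated pointers. */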
7092     next_carrier = cpool_aint2cpd(cpool_mod_mark(&after->next));
7093     prev_carrier = cpool_aint2cpd(cpool_mod_mark(&next_carrier->prev));
7094 
7095     cpool_init(&dummy_carrier->next, (erts_aint_t)next_carrier);
7096     cpool_init(&dummy_carrier->prev, (erts_aint_t)prev_carrier);
7097 
7098     cpool_set_mod_marked(&prev_carrier->next,
7099                          (erts_aint_t)dummy_carrier,
7100                          (erts_aint_t)next_carrier);
7101     cpool_set_mod_marked(&next_carrier->prev,
7102                          (erts_aint_t)dummy_carrier,
7103                          (erts_aint_t)prev_carrier);
7104 
7105     state->cpool_cursor = dummy_carrier;
7106 }
7107 
7108 static int blockscan_cpool_yielding(blockscan_t *state)
7109 {
7110     ErtsAlcCPoolData_t *sentinel, *cursor;
7111 
7112     sentinel = (state->allocator)->cpool.sentinel;
7113     cursor = blockscan_restore_cpool_cursor(state);
7114 
7115     if (ERTS_PROC_IS_EXITING(state->process)) {
7116         return 0;
7117     }
7118 
7119     while (cursor != sentinel) {
7120         Carrier_t *carrier;
7121         erts_aint_t exp;
7122 
7123         /* When a deallocation happens on a pooled carrier it will be routed to
7124          * its owner, so the only way to be sure that it isn't modified while
7125          * scanning is to skip all carriers that aren't ours. The deallocations
7126          * deferred to us will get handled when we're done. */
7127         while (cursor->orig_allctr != state->allocator) {
7128             cursor = cpool_aint2cpd(cpool_read(&cursor->next));
7129 
7130             if (cursor == sentinel) {
7131                 return 0;
7132             }
7133         }
7134 
7135         carrier = ErtsContainerStruct(cursor, Carrier_t, cpool);
7136         exp = erts_atomic_read_rb(&carrier->allctr);
7137 
7138         if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
7139             ASSERT(state->allocator == (Allctr_t*)(exp & ~ERTS_CRR_ALCTR_FLG_MASK));
7140             ASSERT(!(exp & ERTS_CRR_ALCTR_FLG_BUSY));
7141 
7142             if (erts_atomic_cmpxchg_acqb(&carrier->allctr,
7143                                          exp | ERTS_CRR_ALCTR_FLG_BUSY,
7144                                          exp) == exp) {
7145                 /* Skip dummy carriers inserted by another (concurrent) block
7146                  * scan. This can happen when scanning thread-safe allocators
7147                  * from multiple schedulers. */
7148                 if (CARRIER_SZ(carrier) > 0) {
7149                     int blocks_scanned = state->scan(state->allocator,
7150                                                      state->user_data,
7151                                                      carrier);
7152 
7153                     state->reductions -= blocks_scanned;
7154 
7155                     if (state->reductions <= 0) {
7156                         blockscan_save_cpool_cursor(state, cursor);
7157                         erts_atomic_set_relb(&carrier->allctr, exp);
7158 
7159                         return 1;
7160                     }
7161                 }
7162 
7163                 erts_atomic_set_relb(&carrier->allctr, exp);
7164             }
7165         }
7166 
7167         cursor = cpool_aint2cpd(cpool_read(&cursor->next));
7168     }
7169 
7170     return 0;
7171 }
7172 
7173 /* ------------------------------------------------------------------------- */
7174 
7175 static int blockscan_finish(blockscan_t *state)
7176 {
7177     if (ERTS_PROC_IS_EXITING(state->process)) {
7178         state->abort(state->user_data);
7179         return 0;
7180     }
7181 
7182     state->current_op = blockscan_finish;
7183 
7184     return state->finish(state->user_data);
7185 }
7186 
7187 static void blockscan_lock_helper(blockscan_t *state) {
7188     if ((state->allocator)->thread_safe) {
7189         /* Locked scans have to be as short as possible. */
7190         state->reductions = 1;
7191 
7192         erts_mtx_lock(&(state->allocator)->mutex);
7193     } else {
7194         state->reductions = BLOCKSCAN_REDUCTIONS;
7195     }
7196 }
7197 
7198 static void blockscan_unlock_helper(blockscan_t *state) {
7199     if ((state->allocator)->thread_safe) {
7200         erts_mtx_unlock(&(state->allocator)->mutex);
7201     }
7202 }
7203 
7204 static int blockscan_sweep_sbcs(blockscan_t *state)
7205 {
7206     blockscan_lock_helper(state);
7207 
7208     if (state->current_op != blockscan_sweep_sbcs) {
7209         SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_SBC, state->allocator);
7210         state->current_clist = &(state->allocator)->sbc_list;
7211         state->clist_cursor = (state->current_clist)->first;
7212     }
7213 
7214     state->current_op = blockscan_sweep_sbcs;
7215     state->next_op = blockscan_finish;
7216 
7217     if (blockscan_clist_yielding(state)) {
7218         state->next_op = state->current_op;
7219     }
7220 
7221     blockscan_unlock_helper(state);
7222 
7223     return 1;
7224 }
7225 
7226 static int blockscan_sweep_mbcs(blockscan_t *state)
7227 {
7228     blockscan_lock_helper(state);
7229 
7230     if (state->current_op != blockscan_sweep_mbcs) {
7231         SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
7232         state->current_clist = &(state->allocator)->mbc_list;
7233         state->clist_cursor = (state->current_clist)->first;
7234     }
7235 
7236     state->current_op = blockscan_sweep_mbcs;
7237     state->next_op = blockscan_sweep_sbcs;
7238 
7239     if (blockscan_clist_yielding(state)) {
7240         state->next_op = state->current_op;
7241     }
7242 
7243     blockscan_unlock_helper(state);
7244 
7245     return 1;
7246 }
7247 
7248 static int blockscan_sweep_cpool(blockscan_t *state)
7249 {
7250     blockscan_lock_helper(state);
7251 
7252     if (state->current_op != blockscan_sweep_cpool) {
7253         SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
7254         state->cpool_cursor = (state->allocator)->cpool.sentinel;
7255     }
7256 
7257     state->current_op = blockscan_sweep_cpool;
7258     state->next_op = blockscan_sweep_mbcs;
7259 
7260     if (blockscan_cpool_yielding(state)) {
7261         state->next_op = state->current_op;
7262     }
7263 
7264     blockscan_unlock_helper(state);
7265 
7266     return 1;
7267 }
7268 
7269 static int blockscan_get_specific_allocator(int allocator_num,
7270                                             int sched_id,
7271                                             Allctr_t **out)
7272 {
7273     ErtsAllocatorInfo_t *ai;
7274     Allctr_t *allocator;
7275 
7276     ASSERT(allocator_num >= ERTS_ALC_A_MIN &&
7277            allocator_num <= ERTS_ALC_A_MAX);
7278     ASSERT(sched_id >= 0 && sched_id <= erts_no_schedulers);
7279 
7280     ai = &erts_allctrs_info[allocator_num];
7281 
7282     if (!ai->enabled || !ai->alloc_util) {
7283         return 0;
7284     }
7285 
7286     if (!ai->thr_spec) {
7287         if (sched_id != 0) {
7288             /* Only thread-specific allocators can be scanned on a specific
7289              * scheduler. */
7290             return 0;
7291         }
7292 
7293         allocator = (Allctr_t*)ai->extra;
7294         ASSERT(allocator->thread_safe);
7295     } else {
7296         ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t*)ai->extra;
7297 
7298         ASSERT(sched_id < tspec->size);
7299 
7300         allocator = tspec->allctr[sched_id];
7301     }
7302 
7303     *out = allocator;
7304 
7305     return 1;
7306 }
7307 
7308 static void blockscan_sched_trampoline(void *arg)
7309 {
7310     ErtsAlcuBlockscanYieldData *yield;
7311     ErtsSchedulerData *esdp;
7312     blockscan_t *scanner;
7313 
7314     esdp = erts_get_scheduler_data();
7315     scanner = (blockscan_t*)arg;
7316 
7317     yield = ERTS_SCHED_AUX_YIELD_DATA(esdp, alcu_blockscan);
7318 
7319     ASSERT((yield->last == NULL) == (yield->current == NULL));
7320 
7321     if (yield->last != NULL) {
7322         blockscan_t *prev_scanner = yield->last;
7323 
7324         ASSERT(prev_scanner->scanner_queue == NULL);
7325 
7326         prev_scanner->scanner_queue = scanner;
7327     } else {
7328         yield->current = scanner;
7329     }
7330 
7331     scanner->scanner_queue = NULL;
7332     yield->last = scanner;
7333 
7334     erts_notify_new_aux_yield_work(esdp);
7335 }
7336 
7337 static void blockscan_dispatch(blockscan_t *scanner, Process *owner,
7338                                Allctr_t *allocator, int sched_id)
7339 {
7340     ASSERT(erts_get_scheduler_id() != 0);
7341 
7342     if (sched_id == 0) {
7343         /* Global instances are always handled on the current scheduler. */
7344         sched_id = ERTS_ALC_GET_THR_IX();
7345         ASSERT(allocator->thread_safe);
7346     }
7347 
7348     scanner->allocator = allocator;
7349     scanner->process = owner;
7350 
7351     erts_proc_inc_refc(scanner->process);
7352 
7353     cpool_init_carrier_data(scanner->allocator, &scanner->dummy_carrier);
7354     erts_atomic_init_nob(&(scanner->dummy_carrier).allctr,
7355                          (erts_aint_t)allocator | ERTS_CRR_ALCTR_FLG_BUSY);
7356 
7357     if (ERTS_ALC_IS_CPOOL_ENABLED(scanner->allocator)) {
7358         scanner->next_op = blockscan_sweep_cpool;
7359     } else {
7360         scanner->next_op = blockscan_sweep_mbcs;
7361     }
7362 
7363     /* Aux yield jobs can only be set up while running on the scheduler that
7364      * services them, so we move there before continuing.
7365      *
7366      * We can't drive the scan itself through this since the scheduler will
7367      * always finish *all* misc aux work in one go which makes it impossible to
7368      * yield. */
7369     erts_schedule_misc_aux_work(sched_id, blockscan_sched_trampoline, scanner);
7370 }
7371 
7372 int erts_handle_yielded_alcu_blockscan(ErtsSchedulerData *esdp,
7373                                        ErtsAlcuBlockscanYieldData *yield)
7374 {
7375     blockscan_t *scanner = yield->current;
7376 
7377     (void)esdp;
7378 
7379     ASSERT((yield->last == NULL) == (yield->current == NULL));
7380 
7381     if (scanner) {
7382         if (scanner->next_op(scanner)) {
7383             return 1;
7384         }
7385 
7386         ASSERT(ERTS_PROC_IS_EXITING(scanner->process) ||
7387                scanner->current_op == blockscan_finish);
7388 
7389         yield->current = scanner->scanner_queue;
7390 
7391         if (yield->current == NULL) {
7392             ASSERT(scanner == yield->last);
7393             yield->last = NULL;
7394         }
7395 
7396         erts_proc_dec_refc(scanner->process);
7397 
7398         /* Plain free is intentional. */
7399         free(scanner);
7400 
7401         return yield->current != NULL;
7402     }
7403 
7404     return 0;
7405 }
7406 
7407 void erts_alcu_sched_spec_data_init(ErtsSchedulerData *esdp)
7408 {
7409     ErtsAlcuBlockscanYieldData *yield;
7410 
7411     yield = ERTS_SCHED_AUX_YIELD_DATA(esdp, alcu_blockscan);
7412 
7413     yield->current = NULL;
7414     yield->last = NULL;
7415 }
7416 
7417 /* ------------------------------------------------------------------------- */
7418 
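/* floor(log2(v)) for v > 0: the shift cascade smears the highest set bit into
 * all lower positions (v becomes 2^(k+1) - 1), (v - (v >> 1)) then isolates
 * 2^k, and multiplying by the de Bruijn-style constant maps each power of two
 * to a unique 6-bit index into the lookup table. */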
7419 static ERTS_INLINE int u64_log2(Uint64 v)
7420 {
7421     static const int log2_tab64[64] = {
7422         63,  0, 58,  1, 59, 47, 53,  2,
7423         60, 39, 48, 27, 54, 33, 42,  3,
7424         61, 51, 37, 40, 49, 18, 28, 20,
7425         55, 30, 34, 11, 43, 14, 22,  4,
7426         62, 57, 46, 52, 38, 26, 32, 41,
7427         50, 36, 17, 19, 29, 10, 13, 21,
7428         56, 45, 25, 31, 35, 16,  9, 12,
7429         44, 24, 15,  8, 23,  7,  6,  5};
7430 
7431     v |= v >> 1;
7432     v |= v >> 2;
7433     v |= v >> 4;
7434     v |= v >> 8;
7435     v |= v >> 16;
7436     v |= v >> 32;
7437 
7438     return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
7439 }
7440 
7441 /* ------------------------------------------------------------------------- */
7442 
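/* Histogram nodes are over-allocated so that 'histogram[1]' actually holds
 * hist_slot_count entries (a pre-C99 "flexible array member"). */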
7443 typedef struct hist_tree__ {
7444     struct hist_tree__ *parent;
7445     struct hist_tree__ *left;
7446     struct hist_tree__ *right;
7447 
7448     int is_red;
7449 
7450     alcu_atag_t tag;
7451     UWord histogram[1];
7452 } hist_tree_t;
7453 
7454 #define ERTS_RBT_PREFIX hist_tree
7455 #define ERTS_RBT_T hist_tree_t
7456 #define ERTS_RBT_KEY_T UWord
7457 #define ERTS_RBT_FLAGS_T int
7458 #define ERTS_RBT_INIT_EMPTY_TNODE(T) ((void)0)
7459 #define ERTS_RBT_IS_RED(T) ((T)->is_red)
7460 #define ERTS_RBT_SET_RED(T) ((T)->is_red = 1)
7461 #define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T))
7462 #define ERTS_RBT_SET_BLACK(T) ((T)->is_red = 0)
7463 #define ERTS_RBT_GET_FLAGS(T) ((T)->is_red)
7464 #define ERTS_RBT_SET_FLAGS(T, F) ((T)->is_red = F)
7465 #define ERTS_RBT_GET_PARENT(T) ((T)->parent)
7466 #define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = P)
7467 #define ERTS_RBT_GET_RIGHT(T) ((T)->right)
7468 #define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
7469 #define ERTS_RBT_GET_LEFT(T) ((T)->left)
7470 #define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
7471 #define ERTS_RBT_GET_KEY(T) ((T)->tag)
7472 #define ERTS_RBT_IS_LT(KX, KY) (KX < KY)
7473 #define ERTS_RBT_IS_EQ(KX, KY) (KX == KY)
7474 #define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
7475 #define ERTS_RBT_WANT_FOREACH_DESTROY
7476 #define ERTS_RBT_WANT_INSERT
7477 #define ERTS_RBT_WANT_LOOKUP
7478 #define ERTS_RBT_UNDEF
7479 
7480 #include "erl_rbtree.h"
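/* The include above instantiates a red-black tree keyed on the allocation
 * tag, generating the hist_tree_rbt_insert(), hist_tree_rbt_lookup() and
 * hist_tree_rbt_foreach_destroy{,_yielding}() functions used below. */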
7481 
7482 typedef struct {
7483     blockscan_t common;
7484 
7485     ErtsIRefStorage iref;
7486     Process *process;
7487 
7488     hist_tree_rbt_yield_state_t hist_tree_yield;
7489     hist_tree_t *hist_tree;
7490     UWord hist_count;
7491 
7492     UWord hist_slot_start;
7493     int hist_slot_count;
7494 
7495     UWord unscanned_size;
7496 
7497     ErtsHeapFactory msg_factory;
7498     int building_result;
7499     Eterm result_list;
7500 } gather_ahist_t;
7501 
7502 static void gather_ahist_update(gather_ahist_t *state, UWord tag, UWord size)
7503 {
7504     hist_tree_t *hist_node;
7505     UWord size_interval;
7506     int hist_slot;
7507 
7508     hist_node = hist_tree_rbt_lookup(state->hist_tree, tag);
7509 
7510     if (hist_node == NULL) {
7511         /* Plain calloc is intentional. */
7512         hist_node = (hist_tree_t*)calloc(1, sizeof(hist_tree_t) +
7513                                             (state->hist_slot_count - 1) *
7514                                             sizeof(hist_node->histogram[0]));
7515         hist_node->tag = tag;
7516 
7517         hist_tree_rbt_insert(&state->hist_tree, hist_node);
7518         state->hist_count++;
7519     }
7520 
7521     size_interval = (size / state->hist_slot_start);
7522     size_interval = u64_log2(size_interval + 1);
7523 
7524     hist_slot = MIN(size_interval, state->hist_slot_count - 1);
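    /* Example, assuming hist_slot_start = 512: a 300-byte block lands in
     * slot 0 (300/512 = 0, log2(0+1) = 0), a 512-byte block in slot 1, and
     * a 2048-byte block in slot 2 (2048/512 = 4, log2(5) = 2); anything
     * larger is clamped into the last slot. */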
7525 
7526     hist_node->histogram[hist_slot]++;
7527 }
7528 
7529 static int gather_ahist_scan(Allctr_t *allocator,
7530                              void *user_data,
7531                              Carrier_t *carrier)
7532 {
7533     gather_ahist_t *state;
7534     int blocks_scanned;
7535     Block_t *block;
7536 
7537     state = (gather_ahist_t*)user_data;
7538     blocks_scanned = 1;
7539 
7540     if (IS_SB_CARRIER(carrier)) {
7541         alcu_atag_t tag;
7542 
7543         block = SBC2BLK(allocator, carrier);
7544 
7545         if (BLK_HAS_ATAG(block)) {
7546             tag = GET_BLK_ATAG(block);
7547 
7548             ASSERT(DBG_IS_VALID_ATAG(tag));
7549 
7550             gather_ahist_update(state, tag, SBC_BLK_SZ(block));
7551         }
7552     } else {
7553         UWord scanned_bytes = MBC_HEADER_SIZE(allocator);
7554 
7555         ASSERT(IS_MB_CARRIER(carrier));
7556 
7557         block = MBC_TO_FIRST_BLK(allocator, carrier);
7558 
7559         while (1) {
7560             UWord block_size = MBC_BLK_SZ(block);
7561 
7562             if (IS_ALLOCED_BLK(block) && BLK_HAS_ATAG(block)) {
7563                 alcu_atag_t tag = GET_BLK_ATAG(block);
7564 
7565                 ASSERT(DBG_IS_VALID_ATAG(tag));
7566 
7567                 gather_ahist_update(state, tag, block_size);
7568             }
7569 
7570             scanned_bytes += block_size;
7571 
7572             if (blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) {
7573                 state->unscanned_size += CARRIER_SZ(carrier) - scanned_bytes;
7574                 break;
7575             } else if (IS_LAST_BLK(block)) {
7576                 break;
7577             }
7578 
7579             block = NXT_BLK(block);
7580             blocks_scanned++;
7581         }
7582     }
7583 
7584     return blocks_scanned;
7585 }
7586 
7587 static int gather_ahist_append_result(hist_tree_t *node, void *arg, Sint reds)
7588 {
7589     gather_ahist_t *state = (gather_ahist_t*)arg;
7590 
7591     Eterm histogram_tuple, tag_tuple;
7592 
7593     Eterm *hp;
7594     int ix;
7595 
7596     ASSERT(state->building_result);
7597 
7598     hp = erts_produce_heap(&state->msg_factory, 7 + state->hist_slot_count, 0);
7599 
7600     hp[0] = make_arityval(state->hist_slot_count);
7601 
7602     for (ix = 0; ix < state->hist_slot_count; ix++) {
7603         hp[1 + ix] = make_small(node->histogram[ix]);
7604     }
7605 
7606     histogram_tuple = make_tuple(hp);
7607     hp += 1 + state->hist_slot_count;
7608 
7609     hp[0] = make_arityval(3);
7610     hp[1] = ATAG_ID(node->tag);
7611     hp[2] = alloc_type_atoms[ATAG_TYPE(node->tag)];
7612     hp[3] = histogram_tuple;
7613 
7614     tag_tuple = make_tuple(hp);
7615     hp += 4;
7616 
7617     state->result_list = CONS(hp, tag_tuple, state->result_list);
7618 
7619     /* Plain free is intentional. */
7620     free(node);
7621     return 1;
7622 }
7623 
7624 static void gather_ahist_send(gather_ahist_t *state)
7625 {
7626     Eterm result_tuple, unscanned_size, task_ref;
7627 
7628     Uint term_size;
7629     Eterm *hp;
7630 
7631     ASSERT((state->result_list == NIL) ^ (state->hist_count > 0));
7632     ASSERT(state->building_result);
7633 
7634     term_size = 4 + erts_iref_storage_heap_size(&state->iref);
7635     term_size += IS_USMALL(0, state->unscanned_size) ? 0 : BIG_UINT_HEAP_SIZE;
7636 
7637     hp = erts_produce_heap(&state->msg_factory, term_size, 0);
7638 
7639     task_ref = erts_iref_storage_make_ref(&state->iref, &hp,
7640         &(state->msg_factory.message)->hfrag.off_heap, 0);
7641 
7642     unscanned_size = bld_unstable_uint(&hp, NULL, state->unscanned_size);
7643 
7644     hp[0] = make_arityval(3);
7645     hp[1] = task_ref;
7646     hp[2] = unscanned_size;
7647     hp[3] = state->result_list;
7648 
7649     result_tuple = make_tuple(hp);
7650 
7651     erts_factory_trim_and_close(&state->msg_factory, &result_tuple, 1);
7652 
7653     erts_queue_message(state->process, 0, state->msg_factory.message,
7654                        result_tuple, am_system);
7655 }
7656 
7657 static int gather_ahist_finish(void *arg)
7658 {
7659     gather_ahist_t *state = (gather_ahist_t*)arg;
7660 
7661     if (!state->building_result) {
7662         ErtsMessage *message;
7663         Uint minimum_size;
7664         Eterm *hp;
7665 
7666         /* {Ref, UnscannedSize, [{TagId, Type, {Histogram}} | Rest]} */
7667         minimum_size = 4 + erts_iref_storage_heap_size(&state->iref) +
7668                        state->hist_count * (7 + state->hist_slot_count);
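        /* The per-tag factor breaks down as 1 + hist_slot_count words for
         * the histogram tuple, 4 for the three-element tag tuple, and 2 for
         * its list cell; the factory can grow the message should this
         * minimum turn out to be short. */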
7669 
7670         message = erts_alloc_message(minimum_size, &hp);
7671         erts_factory_selfcontained_message_init(&state->msg_factory,
7672                                                 message, hp);
7673 
7674         ERTS_RBT_YIELD_STAT_INIT(&state->hist_tree_yield);
7675 
7676         state->result_list = NIL;
7677         state->building_result = 1;
7678     }
7679 
7680     if (!hist_tree_rbt_foreach_destroy_yielding(&state->hist_tree,
7681                                                 &gather_ahist_append_result,
7682                                                 state,
7683                                                 &state->hist_tree_yield,
7684                                                 BLOCKSCAN_REDUCTIONS)) {
7685         return 1;
7686     }
7687 
7688     gather_ahist_send(state);
7689 
7690     return 0;
7691 }
7692 
7693 static int gather_ahist_destroy_result(hist_tree_t *node, void *arg, Sint reds)
7694 {
7695     (void)arg;
7696     free(node);
7697     return 1;
7698 }
7699 
7700 static void gather_ahist_abort(void *arg)
7701 {
7702     gather_ahist_t *state = (gather_ahist_t*)arg;
7703 
7704     if (state->building_result) {
7705         erts_factory_undo(&state->msg_factory);
7706     }
7707 
7708     hist_tree_rbt_foreach_destroy(&state->hist_tree,
7709                                   &gather_ahist_destroy_result,
7710                                   NULL);
7711 }
7712 
7713 int erts_alcu_gather_alloc_histograms(Process *p, int allocator_num,
7714                                       int sched_id, int hist_width,
7715                                       UWord hist_start, Eterm ref)
7716 {
7717     gather_ahist_t *gather_state;
7718     blockscan_t *scanner;
7719     Allctr_t *allocator;
7720 
7721     ASSERT(is_internal_ref(ref));
7722 
7723     if (!blockscan_get_specific_allocator(allocator_num,
7724                                           sched_id,
7725                                           &allocator)) {
7726         return 0;
7727     }
7728 
7729     ensure_atoms_initialized(allocator);
7730 
7731     /* Plain calloc is intentional. */
7732     gather_state = (gather_ahist_t*)calloc(1, sizeof(gather_ahist_t));
7733     scanner = &gather_state->common;
7734 
7735     scanner->abort = gather_ahist_abort;
7736     scanner->scan = gather_ahist_scan;
7737     scanner->finish = gather_ahist_finish;
7738     scanner->user_data = gather_state;
7739 
7740     erts_iref_storage_save(&gather_state->iref, ref);
7741     gather_state->hist_slot_start = hist_start;
7742     gather_state->hist_slot_count = hist_width;
7743     gather_state->process = p;
7744 
7745     blockscan_dispatch(scanner, p, allocator, sched_id);
7746 
7747     return 1;
7748 }
7749 
7750 /* ------------------------------------------------------------------------- */
7751 
7752 typedef struct chist_node__ {
7753     struct chist_node__ *next;
7754 
7755     UWord carrier_size;
7756     UWord unscanned_size;
7757     UWord allocated_size;
7758 
7759     /* BLOCKSCAN_BAILOUT_THRESHOLD guarantees we won't overflow this or the
7760      * counters in the free block histogram. */
7761     int allocated_count;
7762     int flags;
7763 
7764     int histogram[1];
7765 } chist_node_t;
7766 
7767 typedef struct {
7768     blockscan_t common;
7769 
7770     ErtsIRefStorage iref;
7771     Process *process;
7772 
7773     Eterm allocator_desc;
7774 
7775     chist_node_t *info_list;
7776     UWord info_count;
7777 
7778     UWord hist_slot_start;
7779     int hist_slot_count;
7780 
7781     ErtsHeapFactory msg_factory;
7782     int building_result;
7783     Eterm result_list;
7784 } gather_cinfo_t;
7785 
7786 static int gather_cinfo_scan(Allctr_t *allocator,
7787                              void *user_data,
7788                              Carrier_t *carrier)
7789 {
7790     gather_cinfo_t *state;
7791     chist_node_t *node;
7792     int blocks_scanned;
7793     Block_t *block;
7794 
7795     state = (gather_cinfo_t*)user_data;
7796     node = calloc(1, sizeof(chist_node_t) +
7797                      (state->hist_slot_count - 1) *
7798                      sizeof(node->histogram[0]));
7799     blocks_scanned = 1;
7800 
7801     /* ERTS_CRR_ALCTR_FLG_BUSY is ignored since we've set it ourselves and it
7802      * would be misleading to include it. */
7803     node->flags = erts_atomic_read_rb(&carrier->allctr) &
7804                   (ERTS_CRR_ALCTR_FLG_MASK & ~ERTS_CRR_ALCTR_FLG_BUSY);
7805     node->carrier_size = CARRIER_SZ(carrier);
7806 
7807     if (IS_SB_CARRIER(carrier)) {
7808         UWord block_size;
7809 
7810         block = SBC2BLK(allocator, carrier);
7811         block_size = SBC_BLK_SZ(block);
7812 
7813         node->allocated_size = block_size;
7814         node->allocated_count = 1;
7815     } else {
7816         UWord scanned_bytes = MBC_HEADER_SIZE(allocator);
7817 
7818         block = MBC_TO_FIRST_BLK(allocator, carrier);
7819 
7820         while (1) {
7821             UWord block_size = MBC_BLK_SZ(block);
7822 
7823             scanned_bytes += block_size;
7824 
7825             if (IS_ALLOCED_BLK(block)) {
7826                 node->allocated_size += block_size;
7827                 node->allocated_count++;
7828             } else {
7829                 UWord size_interval;
7830                 int hist_slot;
7831 
7832                 size_interval = (block_size / state->hist_slot_start);
7833                 size_interval = u64_log2(size_interval + 1);
7834 
7835                 hist_slot = MIN(size_interval, state->hist_slot_count - 1);
7836 
7837                 node->histogram[hist_slot]++;
7838             }
7839 
7840             if (blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) {
7841                 node->unscanned_size += CARRIER_SZ(carrier) - scanned_bytes;
7842                 break;
7843             } else if (IS_LAST_BLK(block)) {
7844                 break;
7845             }
7846 
7847             block = NXT_BLK(block);
7848             blocks_scanned++;
7849         }
7850     }
7851 
7852     node->next = state->info_list;
7853     state->info_list = node;
7854     state->info_count++;
7855 
7856     return blocks_scanned;
7857 }
7858 
7859 static void gather_cinfo_append_result(gather_cinfo_t *state,
7860                                        chist_node_t *info)
7861 {
7862     Eterm carrier_size, unscanned_size, allocated_size;
7863     Eterm histogram_tuple, carrier_tuple;
7864 
7865     Uint term_size;
7866     Eterm *hp;
7867     int ix;
7868 
7869     ASSERT(state->building_result);
7870 
7871     term_size = 11 + state->hist_slot_count;
7872     term_size += IS_USMALL(0, info->carrier_size) ? 0 : BIG_UINT_HEAP_SIZE;
7873     term_size += IS_USMALL(0, info->unscanned_size) ? 0 : BIG_UINT_HEAP_SIZE;
7874     term_size += IS_USMALL(0, info->allocated_size) ? 0 : BIG_UINT_HEAP_SIZE;
7875 
7876     hp = erts_produce_heap(&state->msg_factory, term_size, 0);
7877 
7878     hp[0] = make_arityval(state->hist_slot_count);
7879 
7880     for (ix = 0; ix < state->hist_slot_count; ix++) {
7881         hp[1 + ix] = make_small(info->histogram[ix]);
7882     }
7883 
7884     histogram_tuple = make_tuple(hp);
7885     hp += 1 + state->hist_slot_count;
7886 
7887     carrier_size = bld_unstable_uint(&hp, NULL, info->carrier_size);
7888     unscanned_size = bld_unstable_uint(&hp, NULL, info->unscanned_size);
7889     allocated_size = bld_unstable_uint(&hp, NULL, info->allocated_size);
7890 
7891     hp[0] = make_arityval(7);
7892     hp[1] = state->allocator_desc;
7893     hp[2] = carrier_size;
7894     hp[3] = unscanned_size;
7895     hp[4] = allocated_size;
7896     hp[5] = make_small(info->allocated_count);
7897     hp[6] = (info->flags & ERTS_CRR_ALCTR_FLG_IN_POOL) ? am_true : am_false;
7898     hp[7] = histogram_tuple;
7899 
7900     carrier_tuple = make_tuple(hp);
7901     hp += 8;
7902 
7903     state->result_list = CONS(hp, carrier_tuple, state->result_list);
7904 
7905     free(info);
7906 }
7907 
7908 static void gather_cinfo_send(gather_cinfo_t *state)
7909 {
7910     Eterm result_tuple, task_ref;
7911 
7912     int term_size;
7913     Eterm *hp;
7914 
7915     ASSERT((state->result_list == NIL) ^ (state->info_count > 0));
7916     ASSERT(state->building_result);
7917 
7918     term_size = 3 + erts_iref_storage_heap_size(&state->iref);
7919     hp = erts_produce_heap(&state->msg_factory, term_size, 0);
7920 
7921     task_ref = erts_iref_storage_make_ref(&state->iref, &hp,
7922         &(state->msg_factory.message)->hfrag.off_heap, 0);
7923 
7924     hp[0] = make_arityval(2);
7925     hp[1] = task_ref;
7926     hp[2] = state->result_list;
7927 
7928     result_tuple = make_tuple(hp);
7929 
7930     erts_factory_trim_and_close(&state->msg_factory, &result_tuple, 1);
7931 
7932     erts_queue_message(state->process, 0, state->msg_factory.message,
7933                        result_tuple, am_system);
7934 }
7935 
7936 static int gather_cinfo_finish(void *arg)
7937 {
7938     gather_cinfo_t *state = (gather_cinfo_t*)arg;
7939     int reductions = BLOCKSCAN_REDUCTIONS;
7940 
7941     if (!state->building_result) {
7942         ErtsMessage *message;
7943         Uint minimum_size;
7944         Eterm *hp;
7945 
7946         /* {Ref, [{AllocatorDesc, CarrierSize, UnscannedSize, AllocatedSize,
7947          *         AllocatedCount, InPool, {FreeBlockHistogram}} | Rest]} */
7948         minimum_size = 3 + erts_iref_storage_heap_size(&state->iref) +
7949                        state->info_count * (11 + state->hist_slot_count);
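        /* Per-carrier factor: 1 + hist_slot_count words for the histogram
         * tuple, 8 for the seven-element carrier tuple, and 2 for its list
         * cell. Sizes that don't fit in a small are accounted for when each
         * entry is built. */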
7950 
7951         message = erts_alloc_message(minimum_size, &hp);
7952         erts_factory_selfcontained_message_init(&state->msg_factory,
7953                                                 message, hp);
7954 
7955         state->result_list = NIL;
7956         state->building_result = 1;
7957     }
7958 
7959     while (state->info_list) {
7960         chist_node_t *current = state->info_list;
7961         state->info_list = current->next;
7962 
7963         gather_cinfo_append_result(state, current);
7964 
7965         if (reductions-- <= 0) {
7966             return 1;
7967         }
7968     }
7969 
7970     gather_cinfo_send(state);
7971 
7972     return 0;
7973 }
7974 
7975 static void gather_cinfo_abort(void *arg)
7976 {
7977     gather_cinfo_t *state = (gather_cinfo_t*)arg;
7978 
7979     if (state->building_result) {
7980         erts_factory_undo(&state->msg_factory);
7981     }
7982 
7983     while (state->info_list) {
7984         chist_node_t *current = state->info_list;
7985         state->info_list = current->next;
7986 
7987         free(current);
7988     }
7989 }
7990 
7991 int erts_alcu_gather_carrier_info(struct process *p, int allocator_num,
7992                                   int sched_id, int hist_width,
7993                                   UWord hist_start, Eterm ref)
7994 {
7995     gather_cinfo_t *gather_state;
7996     blockscan_t *scanner;
7997 
7998     const char *allocator_desc;
7999     Allctr_t *allocator;
8000 
8001     ASSERT(is_internal_ref(ref));
8002 
8003     if (!blockscan_get_specific_allocator(allocator_num,
8004                                           sched_id,
8005                                           &allocator)) {
8006         return 0;
8007     }
8008 
8009     allocator_desc = ERTS_ALC_A2AD(allocator_num);
8010 
8011     /* Plain calloc is intentional. */
8012     gather_state = (gather_cinfo_t*)calloc(1, sizeof(gather_cinfo_t));
8013     scanner = &gather_state->common;
8014 
8015     scanner->abort = gather_cinfo_abort;
8016     scanner->scan = gather_cinfo_scan;
8017     scanner->finish = gather_cinfo_finish;
8018     scanner->user_data = gather_state;
8019 
8020     gather_state->allocator_desc = erts_atom_put((byte *)allocator_desc,
8021                                                  sys_strlen(allocator_desc),
8022                                                  ERTS_ATOM_ENC_LATIN1, 1);
8023     erts_iref_storage_save(&gather_state->iref, ref);
8024     gather_state->hist_slot_start = hist_start * 2;
8025     gather_state->hist_slot_count = hist_width;
8026     gather_state->process = p;
8027 
8028     blockscan_dispatch(scanner, p, allocator, sched_id);
8029 
8030     return 1;
8031 }
8032 
8033 
8034 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
8035  * NOTE:  erts_alcu_test() is only supposed to be used for testing.          *
8036  *                                                                           *
8037  * Keep alloc_SUITE_data/allocator_test.h updated if changes are made        *
8038  * to erts_alcu_test()                                                       *
8039 \*                                                                           */
8040 
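/* A minimal usage sketch (illustration only, hence '#if 0'); 'allctr' is a
 * hypothetical allocator under test, and the op codes refer to the switch
 * below: */
#if 0
    void *p = erts_alcu_alloc(ERTS_ALC_T_UNDEF, allctr, 256);
    Block_t *blk = (Block_t *) erts_alcu_test(0x005, (UWord) p, 0); /* UMEM2BLK */
    UWord blk_sz = erts_alcu_test(0x000, (UWord) blk, 0);           /* BLK_SZ */
    UWord is_sbc = erts_alcu_test(0x008, (UWord) blk, 0);           /* IS_SBC_BLK */
#endif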
8041 UWord
8042 erts_alcu_test(UWord op, UWord a1, UWord a2)
8043 {
8044     switch (op) {
8045     case 0x000:	return (UWord) BLK_SZ((Block_t *) a1);
8046     case 0x001:	return (UWord) BLK_UMEM_SZ((Block_t *) a1);
8047     case 0x002:	return (UWord) IS_PREV_BLK_FREE((Block_t *) a1);
8048     case 0x003:	return (UWord) IS_FREE_BLK((Block_t *) a1);
8049     case 0x004:	return (UWord) IS_LAST_BLK((Block_t *) a1);
8050     case 0x005:	return (UWord) UMEM2BLK((void *) a1);
8051     case 0x006:	return (UWord) BLK2UMEM((Block_t *) a1);
8052     case 0x007:	return (UWord) IS_SB_CARRIER((Carrier_t *) a1);
8053     case 0x008:	return (UWord) IS_SBC_BLK((Block_t *) a1);
8054     case 0x009:	return (UWord) IS_MB_CARRIER((Carrier_t *) a1);
8055     case 0x00a:	return (UWord) IS_MSEG_CARRIER((Carrier_t *) a1);
8056     case 0x00b:	return (UWord) CARRIER_SZ((Carrier_t *) a1);
8057     case 0x00c:	return (UWord) SBC2BLK((Allctr_t *) a1,
8058 					       (Carrier_t *) a2);
8059     case 0x00d:	return (UWord) BLK_TO_SBC((Block_t *) a2);
8060     case 0x00e:	return (UWord) MBC_TO_FIRST_BLK((Allctr_t *) a1,
8061 						(Carrier_t *) a2);
8062     case 0x00f:	return (UWord) FIRST_BLK_TO_MBC((Allctr_t *) a1,
8063 						(Block_t *) a2);
8064     case 0x010:	return (UWord) ((Allctr_t *) a1)->mbc_list.first;
8065     case 0x011:	return (UWord) ((Allctr_t *) a1)->mbc_list.last;
8066     case 0x012:	return (UWord) ((Allctr_t *) a1)->sbc_list.first;
8067     case 0x013:	return (UWord) ((Allctr_t *) a1)->sbc_list.last;
8068     case 0x014:	return (UWord) ((Carrier_t *) a1)->next;
8069     case 0x015:	return (UWord) ((Carrier_t *) a1)->prev;
8070     case 0x016:	return (UWord) ABLK_HDR_SZ;
8071     case 0x017:	return (UWord) ((Allctr_t *) a1)->min_block_size;
8072     case 0x018:	return (UWord) NXT_BLK((Block_t *) a1);
8073     case 0x019:	return (UWord) PREV_BLK((Block_t *) a1);
8074     case 0x01a: return (UWord) IS_MBC_FIRST_BLK((Allctr_t*)a1, (Block_t *) a2);
8075     case 0x01b: return (UWord) sizeof(Unit_t);
8076     case 0x01c: return (UWord) BLK_TO_MBC((Block_t*) a1);
8077     case 0x01d: ((Allctr_t*) a1)->add_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
8078     case 0x01e: ((Allctr_t*) a1)->remove_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
8079     case 0x01f: return (UWord) sizeof(ErtsAlcCrrPool_t);
8080     case 0x020:
8081 	SET_CARRIER_HDR((Carrier_t *) a2, 0, SCH_SYS_ALLOC|SCH_MBC, (Allctr_t *) a1);
8082 	cpool_init_carrier_data((Allctr_t *) a1, (Carrier_t *) a2);
8083 	return (UWord) a2;
8084     case 0x021:
8085 	cpool_insert((Allctr_t *) a1, (Carrier_t *) a2);
8086 	return (UWord) a2;
8087     case 0x022:
8088 	cpool_delete((Allctr_t *) a1, (Allctr_t *) a1, (Carrier_t *) a2);
8089 	return (UWord) a2;
8090     case 0x023: return (UWord) cpool_is_empty((Allctr_t *) a1);
8091     case 0x024: return (UWord) cpool_dbg_is_in_pool((Allctr_t *) a1, (Carrier_t *) a2);
8092     case 0x025: /* UMEM2BLK_TEST */
8093 #ifdef DEBUG
8094 # ifdef HARD_DEBUG
8095 	return (UWord)UMEM2BLK(a1-3*sizeof(UWord));
8096 # else
8097 	return (UWord)UMEM2BLK(a1-2*sizeof(UWord));
8098 # endif
8099 #else
8100 	return (UWord)UMEM2BLK(a1);
8101 #endif
8102 
8103     default:	ASSERT(0); return ~((UWord) 0);
8104     }
8105     return 0;
8106 }
8107 
8108 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
8109  * Debug functions                                                           *
8110 \*                                                                           */
8111 
8112 void
8113 erts_alcu_assert_failed(char* expr, char* file, int line, char *func)
8114 {
8115     fflush(stdout);
8116     fprintf(stderr, "%s:%d:%s(): Assertion failed: %s\n",
8117 	    file, line, func, expr);
8118     fflush(stderr);
8119 #if defined(__WIN__) || defined(__WIN32__)
8120     DebugBreak();
8121 #else
8122     abort();
8123 #endif
8124 }
8125 
8126 void
8127 erts_alcu_verify_unused(Allctr_t *allctr)
8128 {
8129     UWord no;
8130 
8131     no = allctr->sbcs.curr.norm.mseg.no;
8132     no += allctr->sbcs.curr.norm.sys_alloc.no;
8133     no += allctr->mbcs.blocks.curr.no;
8134 
8135     if (no) {
8136 	UWord sz = allctr->sbcs.blocks.curr.size;
8137 	sz += allctr->mbcs.blocks.curr.size;
8138 	erts_exit(ERTS_ABORT_EXIT,
8139 		 "%salloc() used when expected to be unused!\n"
8140 		 "Total number of blocks allocated: %bpu\n"
8141 		 "Total number of bytes allocated: %bpu\n",
8142 		 allctr->name_prefix, no, sz);
8143     }
8144 }
8145 
8146 void
8147 erts_alcu_verify_unused_ts(Allctr_t *allctr)
8148 {
8149     erts_mtx_lock(&allctr->mutex);
8150     erts_alcu_verify_unused(allctr);
8151     erts_mtx_unlock(&allctr->mutex);
8152 }
8153 
8154 
8155 #ifdef DEBUG
8156 int is_sbc_blk(Block_t* blk)
8157 {
8158     return IS_SBC_BLK(blk);
8159 }
8160 #endif
8161 
8162 #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
8163 
8164 static void
8165 check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
8166 {
8167     Carrier_t *crr;
8168     CarrierList_t *cl;
8169 
8170     if (IS_SBC_BLK(iblk)) {
8171 	Carrier_t *sbc = BLK_TO_SBC(iblk);
8172 
8173 	ASSERT(SBC2BLK(allctr, sbc) == iblk);
8174 	ASSERT(CARRIER_SZ(sbc) - SBC_HEADER_SIZE >= SBC_BLK_SZ(iblk));
8175 	crr = sbc;
8176 	cl = &allctr->sbc_list;
8177     }
8178     else {
8179 	Block_t *prev_blk = NULL;
8180 	Block_t *blk;
8181 	char *carrier_end;
8182 	Uint is_free_blk;
8183 	Uint tot_blk_sz;
8184 	Uint blk_sz;
8185 	int has_wrapped_around = 0;
8186 
8187 	blk = iblk;
8188 	tot_blk_sz = 0;
8189 	crr = BLK_TO_MBC(blk);
8190 	ASSERT(IS_MB_CARRIER(crr));
8191 
8192 	/* Step around the carrier one whole lap starting at 'iblk'
8193 	 */
8194 	while (1) {
8195 	    ASSERT(IS_MBC_BLK(blk));
8196 	    ASSERT(BLK_TO_MBC(blk) == crr);
8197 
8198 	    if (prev_blk) {
8199 		ASSERT(NXT_BLK(prev_blk) == blk);
8200 		if (IS_FREE_BLK(prev_blk)) {
8201 		    ASSERT(IS_PREV_BLK_FREE(blk));
8202 		    ASSERT(prev_blk == PREV_BLK(blk));
8203 		}
8204 		else {
8205 		    ASSERT(IS_PREV_BLK_ALLOCED(blk));
8206 		}
8207 	    }
8208 
8209 	    if (has_wrapped_around) {
8210 		ASSERT(((Block_t *) crr) < blk);
8211 		if (blk == iblk)
8212 		    break;
8213 		ASSERT(blk < iblk);
8214 	    }
8215 	    else
8216 		ASSERT(blk >= iblk);
8217 
8218 	    blk_sz = MBC_BLK_SZ(blk);
8219 
8220 	    ASSERT(blk_sz % sizeof(Unit_t) == 0);
8221 	    ASSERT(blk_sz >= allctr->min_block_size);
8222 
8223 	    tot_blk_sz += blk_sz;
8224 
8225 	    is_free_blk = (int) IS_FREE_BLK(blk);
8226 	    ASSERT(!is_free_blk
8227 		   || IS_LAST_BLK(blk)
8228 		   || PREV_BLK_SZ(((char *) blk)+blk_sz) == blk_sz);
8229 
8230 	    if (allctr->check_block)
8231 		(*allctr->check_block)(allctr, blk, (int) is_free_blk);
8232 
8233 	    if (IS_LAST_BLK(blk)) {
8234 		carrier_end = ((char *) NXT_BLK(blk));
8235 		has_wrapped_around = 1;
8236 		prev_blk = NULL;
8237 		blk = MBC_TO_FIRST_BLK(allctr, crr);
8238 		ASSERT(IS_MBC_FIRST_BLK(allctr,blk));
8239 	    }
8240 	    else {
8241 		prev_blk = blk;
8242 		blk = NXT_BLK(blk);
8243 	    }
8244 	}
8245 
8246 	ASSERT((((char *) crr)
8247 		+ MBC_HEADER_SIZE(allctr)
8248 		+ tot_blk_sz) == carrier_end);
8249 	ASSERT(((char *) crr) + CARRIER_SZ(crr) - sizeof(Unit_t) <= carrier_end
8250 	       && carrier_end <= ((char *) crr) + CARRIER_SZ(crr));
8251 
8252 	if (allctr->check_mbc)
8253 	    (*allctr->check_mbc)(allctr, crr);
8254 
8255 #if HAVE_ERTS_MSEG
8256 	if (IS_MSEG_CARRIER(crr)) {
8257 	    ASSERT(CARRIER_SZ(crr) % ERTS_SACRR_UNIT_SZ == 0);
8258 	}
8259 #endif
8260 	cl = &allctr->mbc_list;
8261     }
8262 
8263 #ifdef DEBUG
8264     if (cl->first == crr) {
8265 	ASSERT(!crr->prev);
8266     }
8267     else {
8268 	ASSERT(crr->prev);
8269 	ASSERT(crr->prev->next == crr);
8270     }
8271     if (cl->last == crr) {
8272 	ASSERT(!crr->next);
8273     }
8274     else {
8275 	ASSERT(crr->next);
8276 	ASSERT(crr->next->prev == crr);
8277     }
8278 #endif
8279 }
8280 
8281 #endif /* ERTS_ALLOC_UTIL_HARD_DEBUG */
8282 
8283 #ifdef ERTS_ENABLE_LOCK_COUNT
8284 
8285 static void lcnt_enable_allocator_lock_count(Allctr_t *allocator, int enable) {
8286     if(!allocator->thread_safe) {
8287         return;
8288     }
8289 
8290     if(enable) {
8291         erts_lcnt_install_new_lock_info(&allocator->mutex.lcnt,
8292             "alcu_allocator", make_small(allocator->alloc_no),
8293             ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
8294     } else {
8295         erts_lcnt_uninstall(&allocator->mutex.lcnt);
8296     }
8297 }
8298 
8299 static void lcnt_update_thread_spec_locks(ErtsAllocatorThrSpec_t *tspec, int enable) {
8300     if(tspec->enabled) {
8301         int i;
8302 
8303         for(i = 0; i < tspec->size; i++) {
8304             lcnt_enable_allocator_lock_count(tspec->allctr[i], enable);
8305         }
8306     }
8307 }
8308 
8309 void erts_lcnt_update_allocator_locks(int enable) {
8310     int i;
8311 
8312     for(i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { /* inclusive, as in the other allocator loops */
8313         ErtsAllocatorInfo_t *ai = &erts_allctrs_info[i];
8314 
8315         if(ai->enabled && ai->alloc_util) {
8316             if(ai->thr_spec) {
8317                 lcnt_update_thread_spec_locks((ErtsAllocatorThrSpec_t*)ai->extra, enable);
8318             } else {
8319                 lcnt_enable_allocator_lock_count((Allctr_t*)ai->extra, enable);
8320             }
8321         }
8322     }
8323 }
8324 #endif /* ERTS_ENABLE_LOCK_COUNT */
8325