#ifndef JEMALLOC_INTERNAL_BIN_H
#define JEMALLOC_INTERNAL_BIN_H

#include "jemalloc/internal/bin_stats.h"
#include "jemalloc/internal/bin_types.h"
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"

/*
 * A bin contains a set of extents that are currently being used for slab
 * allocations.
 */

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each slab has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
	/* Size of regions in a slab for this bin's size class. */
	size_t			reg_size;

	/* Total size of a slab for this bin's size class. */
	size_t			slab_size;

	/* Total number of regions in a slab for this bin's size class. */
	uint32_t		nregs;

	/* Number of sharded bins in each arena for this size class. */
	uint32_t		n_shards;

	/*
	 * Metadata used to manipulate bitmaps for slabs associated with this
	 * bin.
	 */
	bitmap_info_t		bitmap_info;
};

extern bin_info_t bin_infos[SC_NBINS];

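/*
 * Illustrative sketch (not part of the API): regions are laid out
 * contiguously within a slab, so region i nominally lives at
 * slab_base + i * reg_size.  The helper below is a hypothetical example;
 * extent_addr_get() is assumed to return the slab extent's base address.
 *
 *	static inline void *
 *	bin_region_at(const extent_t *slab, const bin_info_t *info,
 *	    uint32_t regind) {
 *		assert(regind < info->nregs);
 *		return (void *)((uintptr_t)extent_addr_get(slab) +
 *		    (uintptr_t)regind * info->reg_size);
 *	}
 */
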
typedef struct bin_s bin_t;
struct bin_s {
	/* All operations on bin_t fields require lock ownership. */
	malloc_mutex_t		lock;

	/*
	 * Current slab being used to service allocations of this bin's size
	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
	 * slabcur is reassigned, the previous slab must be deallocated or
	 * inserted into slabs_{nonfull,full}.
	 */
	extent_t		*slabcur;

	/*
	 * Heap of non-full slabs.  This heap ensures that new allocations
	 * come from the non-full slab that is oldest/lowest in memory.
	 */
	extent_heap_t		slabs_nonfull;

	/* List used to track full slabs. */
	extent_list_t		slabs_full;

	/* Bin statistics. */
	bin_stats_t	stats;
};

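/*
 * Illustrative sketch (not part of the API): with bin->lock held, the slab
 * refill logic roughly follows the shape below.  slab_is_full() is
 * hypothetical; extent_list_append() and extent_heap_remove_first() are
 * assumed to exist in the extent machinery included above.
 *
 *	if (bin->slabcur == NULL || slab_is_full(bin->slabcur)) {
 *		// Park the exhausted slab, then refill slabcur from the
 *		// oldest/lowest non-full slab (or allocate a fresh slab).
 *		if (bin->slabcur != NULL) {
 *			extent_list_append(&bin->slabs_full, bin->slabcur);
 *		}
 *		bin->slabcur = extent_heap_remove_first(&bin->slabs_nonfull);
 *	}
 *	// Carve the next free region out of bin->slabcur via its bitmap.
 */
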
/* A set of sharded bins of the same size class. */
typedef struct bins_s bins_t;
struct bins_s {
	/* Sharded bins.  Dynamically sized. */
	bin_t *bin_shards;
};

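/*
 * Illustrative sketch (not part of the API): bin_shards holds
 * bin_infos[binind].n_shards bins for the given size class, so a caller
 * picks one shard (e.g. a per-thread value reduced modulo n_shards) before
 * taking that shard's lock.  bin_shard_pick() and thread_seed are
 * hypothetical names for this example.
 *
 *	static inline bin_t *
 *	bin_shard_pick(bins_t *bins, unsigned binind, unsigned thread_seed) {
 *		unsigned shard = thread_seed % bin_infos[binind].n_shards;
 *		return &bins->bin_shards[shard];
 *	}
 */
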
void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
    size_t end_size, size_t nshards);
void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);

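/*
 * Illustrative boot-time usage (a sketch, not the actual boot path): default
 * shard counts are established first, optionally overridden for a range of
 * size classes, and then baked into bin_infos.  The concrete sizes below are
 * made up for the example; bin_update_shard_size() is assumed to return true
 * on invalid input.
 *
 *	unsigned shards[SC_NBINS];
 *	bin_shard_sizes_boot(shards);
 *	// Example only: use 4 shards for size classes up to 128 bytes.
 *	if (bin_update_shard_size(shards, 1, 128, 4)) {
 *		// Reject the malformed setting.
 *	}
 *	bin_boot(sc_data, shards);
 */
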
/* Initializes a bin to empty.  Returns true on error. */
bool bin_init(bin_t *bin);

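/*
 * Illustrative usage (a sketch): following the convention above, a true
 * return means initialization failed and the bin must not be used.
 *
 *	bin_t bin;
 *	if (bin_init(&bin)) {
 *		// Propagate the error; the bin is unusable.
 *	}
 */
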
/* Forking. */
void bin_prefork(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);

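/*
 * Illustrative ordering (a sketch of the usual prefork/postfork protocol,
 * not the arena's actual fork code): bin_prefork() is called for every bin
 * before fork(2) so the child never inherits a bin lock held by another
 * thread; the matching postfork hook runs on each side afterwards.
 *
 *	bin_prefork(tsdn, bin);
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		bin_postfork_child(tsdn, bin);
 *	} else {
 *		bin_postfork_parent(tsdn, bin);
 *	}
 */
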
/* Stats. */
static inline void
bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
	malloc_mutex_lock(tsdn, &bin->lock);
	malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
	dst_bin_stats->nmalloc += bin->stats.nmalloc;
	dst_bin_stats->ndalloc += bin->stats.ndalloc;
	dst_bin_stats->nrequests += bin->stats.nrequests;
	dst_bin_stats->curregs += bin->stats.curregs;
	dst_bin_stats->nfills += bin->stats.nfills;
	dst_bin_stats->nflushes += bin->stats.nflushes;
	dst_bin_stats->nslabs += bin->stats.nslabs;
	dst_bin_stats->reslabs += bin->stats.reslabs;
	dst_bin_stats->curslabs += bin->stats.curslabs;
	dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs;
	malloc_mutex_unlock(tsdn, &bin->lock);
}
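
/*
 * Illustrative usage (a sketch): merging the stats of every shard of one
 * size class into a zero-initialized aggregate.  The bins/binind variables
 * are hypothetical; only bin_stats_merge() above is assumed.
 *
 *	bin_stats_t agg = {0};
 *	for (unsigned i = 0; i < bin_infos[binind].n_shards; i++) {
 *		bin_stats_merge(tsdn, &agg, &bins->bin_shards[i]);
 *	}
 */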

#endif /* JEMALLOC_INTERNAL_BIN_H */