1 #ifndef LAS_MEMORY_HPP_
2 #define LAS_MEMORY_HPP_
3 
4 #include <stddef.h>                    // for size_t
5 #include <list>                        // for list
6 #include <map>                         // for map
7 #include <set>                         // for set
8 #include <stack>                       // for stack
9 #include "macros.h"                    // for ATTR_ASSUME_ALIGNED
10 #include "las-config.h"                // for BUCKET_REGION, MEMSET_MIN
11 #include "lock_guarded_container.hpp"  // for lock_guarded_container
12 
13 
14 /* This structure is shared by threads that have the same memory binding.
 * It is in charge of providing memory-bound zones, e.g. for buckets, or
16  * bucket regions.
17  *
18  * alloc()/free() calls are not meant to be fast, here.
19  */
20 
21 class las_memory_accessor {
22     lock_guarded_container<std::map<size_t, std::stack<void *>>> frequent_regions_pool;
23     std::list<void*> large_pages_for_pool;
24 
25     /* large memory chunks follow the same logic as in utils/memory.c,
26      * but we reimplement it here so as to stick to one memory binding
27      * only.
28      *
29      * A large memory area may be returned as follows, in decreasing
30      * priority order:
31      *
32      *  - if support is available, via mmap(...,MAP_HUGETLB). If it
33      *  succeeds, we get a memory are in multiples of 2G, and call that
34      *  an "mmapped region".
35      *
36      *  - if support is available, via malloc_aligned() +
37      *  madvise(MADV_HUGEPAGE). If it succeeds, we call that a "malloced
38      *  region"
39      *
40      *  - otherwise, via malloc_aligned(), and it is still called a
41      *  "default malloced region".
42      *
43      * How the requested size is rounded up depends on the mode.
44      */
45     lock_guarded_container<std::set<void*>> was_mmapped; // used on free()
46 
47     void touch(void *, size_t);
48     public:
49 
bucket_region_size()50     static inline size_t bucket_region_size() {
51         /* round to next multiple of 128 */
52         return (((BUCKET_REGION + MEMSET_MIN) - 1) | 127) + 1;
53     }
54     void * alloc_frequent_size(size_t);
55     void free_frequent_size(void *, size_t);
alloc_bucket_region()56     inline unsigned char * alloc_bucket_region() { return (unsigned char *) alloc_frequent_size(bucket_region_size()); }
free_bucket_region(unsigned char * p)57     inline void free_bucket_region(unsigned char * p) { free_frequent_size((void *) p, bucket_region_size()); }
58 
59     void * physical_alloc(size_t, bool = false) ATTR_ASSUME_ALIGNED(256);
60     void physical_free(void*, size_t);
61 
62     las_memory_accessor() = default;
63     las_memory_accessor(las_memory_accessor const &) = delete;
64     las_memory_accessor(las_memory_accessor&&) = default;
65     ~las_memory_accessor();
66 };
67 
68 #endif	/* LAS_MEMORY_HPP_ */
69