/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef _MORKZONE_
#define _MORKZONE_ 1

#ifndef _MORK_
#  include "mork.h"
#endif

#ifndef _MORKNODE_
#  include "morkNode.h"
#endif

#ifndef _MORKDEQUE_
#  include "morkDeque.h"
#endif

// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789

/*| CONFIG_DEBUG: do paranoid debug checks if defined.
|*/
#ifdef MORK_DEBUG
#  define morkZone_CONFIG_DEBUG 1 /* debug paranoid if defined */
#endif /*MORK_DEBUG*/

/*| CONFIG_STATS: keep volume and usage statistics.
|*/
#define morkZone_CONFIG_VOL_STATS 1 /* count space used by zone instance */

/*| CONFIG_ARENA: if this is defined, then the morkZone class will alloc big
**| blocks from the zone's heap, and suballocate from these.  If undefined,
**| then morkZone will just pass all calls through to the zone's heap.
|*/
#ifdef MORK_ENABLE_ZONE_ARENAS
#  define morkZone_CONFIG_ARENA 1 /* be arena, if defined; otherwise no-op */
#endif /*MORK_ENABLE_ZONE_ARENAS*/

/*| CONFIG_ALIGN_8: if this is defined, then the morkZone class will give
**| blocks 8 byte alignment instead of only 4 byte alignment.
|*/
#ifdef MORK_CONFIG_ALIGN_8
#  define morkZone_CONFIG_ALIGN_8 1 /* ifdef: align to 8 bytes, otherwise 4 */
#endif /*MORK_CONFIG_ALIGN_8*/

/*| CONFIG_PTR_SIZE_4: if this is defined, then the morkZone class will
**| assume sizeof(void*) == 4, so a tag slot for padding is needed.
|*/
#ifdef MORK_CONFIG_PTR_SIZE_4
#  define morkZone_CONFIG_PTR_SIZE_4 1 /* ifdef: sizeof(void*) == 4 */
#endif /*MORK_CONFIG_PTR_SIZE_4*/

/*| morkZone_USE_TAG_SLOT: if this is defined, then define slot mRun_Tag
**| in order to achieve eight byte alignment after the mRun_Next slot.
|*/
#if defined(morkZone_CONFIG_ALIGN_8) && defined(morkZone_CONFIG_PTR_SIZE_4)
#  define morkRun_USE_TAG_SLOT 1  /* need mRun_Tag slot inside morkRun */
#  define morkHunk_USE_TAG_SLOT 1 /* need mHunk_Tag slot inside morkHunk */
#endif
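// Illustrative note (not part of the original header): when both configs
// above are defined, the tag slots pad the morkRun and morkHunk structures
// defined below to a multiple of eight bytes, so memory placed directly
// after either header keeps 8 byte alignment.  A build could check that
// padding with assertions along these lines:
//
//   static_assert(sizeof(morkRun) % 8 == 0, "run header breaks alignment");
//   static_assert(sizeof(morkHunk) % 8 == 0, "hunk header breaks alignment");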
// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789

#define morkRun_kTag ((mork_u4)0x6D52754E) /* ascii 'mRuN' */

/*| morkRun: structure used by morkZone for sized blocks
|*/
class morkRun {
 protected:  // member variable slots
#ifdef morkRun_USE_TAG_SLOT
  mork_u4 mRun_Tag;  // force 8 byte alignment after mRun_Next
#endif /* morkRun_USE_TAG_SLOT */

  morkRun* mRun_Next;

 public:  // pointer interpretation of mRun_Next (when inside a list):
  morkRun* RunNext() const { return mRun_Next; }
  void RunSetNext(morkRun* ioNext) { mRun_Next = ioNext; }

 public:  // size interpretation of mRun_Next (when not inside a list):
  mork_size RunSize() const { return (mork_size)((mork_ip)mRun_Next); }
  void RunSetSize(mork_size inSize) { mRun_Next = (morkRun*)((mork_ip)inSize); }

 public:  // maintenance and testing of optional tag magic signature slot:
#ifdef morkRun_USE_TAG_SLOT
  void RunInitTag() { mRun_Tag = morkRun_kTag; }
  mork_bool RunGoodTag() { return (mRun_Tag == morkRun_kTag); }
#endif /* morkRun_USE_TAG_SLOT */

 public:  // conversion back and forth to inline block following run instance:
  void* RunAsBlock() { return (((mork_u1*)this) + sizeof(morkRun)); }

  static morkRun* BlockAsRun(void* ioBlock) {
    return (morkRun*)(((mork_u1*)ioBlock) - sizeof(morkRun));
  }

 public:  // typing & errors
  static void BadRunTagError(morkEnv* ev);
  static void RunSizeAlignError(morkEnv* ev);
};

// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789

/*| morkOldRun: more space to record size when run is put into old free list
|*/
class morkOldRun : public morkRun {
 protected:  // need another size field when mRun_Next is used for linkage:
  mdb_size mOldRun_Size;

 public:  // size getter/setter
  mork_size OldSize() const { return mOldRun_Size; }
  void OldSetSize(mork_size inSize) { mOldRun_Size = inSize; }
};

// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
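// Illustrative sketch (assumed caller code, not part of the original
// header): a zone hands out the bytes just after a run header, and later
// recovers the header from the caller's pointer to read the size back.
//
//   morkRun* run = /* run header placed at the front of an allocation */;
//   run->RunSetSize(32);                         // record body size
//   void* body = run->RunAsBlock();              // pointer given to caller
//   morkRun* again = morkRun::BlockAsRun(body);  // recover the header
//   mork_size size = again->RunSize();           // reads back 32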
#define morkHunk_kTag ((mork_u4)0x68556E4B) /* ascii 'hUnK' */

/*| morkHunk: structure used by morkZone for heap allocations.
|*/
class morkHunk {
 protected:  // member variable slots
#ifdef morkHunk_USE_TAG_SLOT
  mork_u4 mHunk_Tag;  // force 8 byte alignment after mHunk_Next
#endif /* morkHunk_USE_TAG_SLOT */

  morkHunk* mHunk_Next;

  morkRun mHunk_Run;

 public:  // setters
  void HunkSetNext(morkHunk* ioNext) { mHunk_Next = ioNext; }

 public:  // getters
  morkHunk* HunkNext() const { return mHunk_Next; }

  morkRun* HunkRun() { return &mHunk_Run; }

 public:  // maintenance and testing of optional tag magic signature slot:
#ifdef morkHunk_USE_TAG_SLOT
  void HunkInitTag() { mHunk_Tag = morkHunk_kTag; }
  mork_bool HunkGoodTag() { return (mHunk_Tag == morkHunk_kTag); }
#endif /* morkHunk_USE_TAG_SLOT */

 public:  // typing & errors
  static void BadHunkTagWarning(morkEnv* ev);
};

// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789

/*| kNewHunkSize: the default size for a hunk, assuming we must allocate
**| a new one whenever the free hunk list does not already have one.  Note
**| this number should not be changed without also considering suitable
**| changes in the related kMaxHunkWaste and kMinHunkSize constants.
|*/
#define morkZone_kNewHunkSize ((mork_size)(64 * 1024)) /* 64K per hunk */

/*| kMaxFreeVolume: some number of bytes of free space in the free hunk list
**| over which we no longer want to add more free hunks to the list, for fear
**| of accumulating too much unused, fragmented free space.  This should be a
**| small multiple of kNewHunkSize, say about two to four times as great, to
**| allow for no more free hunk space than fits in a handful of new hunks.
**| This strategy will let us usefully accumulate "some" free space in the
**| free hunk list, but without accumulating "too much" free space that way.
|*/
#define morkZone_kMaxFreeVolume (morkZone_kNewHunkSize * 3)

/*| kMaxHunkWaste: if a current request is larger than this, and we cannot
**| satisfy the request with the current hunk, then we just allocate the
**| block from the heap without changing the current hunk.  Basically this
**| number represents the largest amount of memory we are willing to waste,
**| since a block request barely less than this can cause the current hunk
**| to be retired (with any unused space wasted) as well as get a new hunk.
|*/
#define morkZone_kMaxHunkWaste ((mork_size)4096) /* 1/16 kNewHunkSize */

/*| kRound*: the algorithm for rounding up allocation sizes for caching
**| in free lists works as follows.  We add kRoundAdd to any size
**| requested, and then bitwise AND with kRoundMask, and this will give us
**| the smallest multiple of kRoundSize that is at least as large as the
**| requested size.  Then if we right-shift this number by kRoundBits, we
**| will have the index into the mZone_FreeRuns array which will hold any
**| cached runs of that size.  So 4 bits of shift gives us a granularity
**| of 16 bytes, so that free lists will hold successive runs that are
**| 16 bytes greater than the next smaller run size.  If we have 256 free
**| lists of nonzero sized runs altogether, then the largest run that can
**| be cached is 4096, or 4K (since 4096 == 16 * 256).  A larger run that
**| gets freed will go into the free hunk list (or back to the heap).
|*/
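// A worked instance of the rounding described above (illustrative only,
// using the constants defined just below): for a request of 30 bytes,
//
//   (30 + morkZone_kRoundAdd) & morkZone_kRoundMask  // (30 + 15) & ~15
//     == 32                                          // next multiple of 16
//   32 >> morkZone_kRoundBits                        // == 2, the list index
//
// so a freed 30 byte run is cached in mZone_FreeRuns[ 2 ], the list of
// runs offering 32 writable bytes.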
#define morkZone_kRoundBits 4 /* bits to round-up size for free lists */
#define morkZone_kRoundSize (1 << morkZone_kRoundBits)
#define morkZone_kRoundAdd ((1 << morkZone_kRoundBits) - 1)
#define morkZone_kRoundMask (~((mork_ip)morkZone_kRoundAdd))

#define morkZone_kBuckets 256 /* number of distinct free lists */

/*| kMaxCachedRun: the largest run that will be stored inside a free
**| list of old zapped runs.  A run larger than this cannot be put in
**| a free list, and must be allocated from the heap at need, and put
**| into the free hunk list when discarded.
|*/
#define morkZone_kMaxCachedRun (morkZone_kBuckets * morkZone_kRoundSize)

#define morkDerived_kZone /*i*/ 0x5A6E /* ascii 'Zn' */

/*| morkZone: a pooled memory allocator like an NSPR arena.  The term 'zone'
**| is roughly synonymous with 'heap'.  I avoid calling this class a "heap"
**| to avoid any confusion with nsIMdbHeap, and I avoid calling this class
**| an arena to avoid confusion with NSPR usage.
|*/
class morkZone : public morkNode, public nsIMdbHeap {
  // public: // slots inherited from morkNode (meant to inform only)
  // nsIMdbHeap* mNode_Heap;

  // mork_base mNode_Base;       // must equal morkBase_kNode
  // mork_derived mNode_Derived; // depends on specific node subclass

  // mork_access mNode_Access;   // kOpen, kClosing, kShut, or kDead
  // mork_usage mNode_Usage;     // kHeap, kStack, kMember, kGlobal, kNone
  // mork_able mNode_Mutable;    // can this node be modified?
  // mork_load mNode_Load;       // is this node clean or dirty?

  // mork_uses mNode_Uses;       // refcount for strong refs
  // mork_refs mNode_Refs;       // refcount for strong refs + weak refs

 public:  // state is public because the entire Mork system is private
  nsIMdbHeap* mZone_Heap;  // strong ref to heap allocating all space

  mork_size mZone_HeapVolume;   // total bytes allocated from heap
  mork_size mZone_BlockVolume;  // total bytes in all zone blocks
  mork_size mZone_RunVolume;    // total bytes in all zone runs
  mork_size mZone_ChipVolume;   // total bytes in all zone chips

  mork_size mZone_FreeOldRunVolume;  // total bytes in all free old runs

  mork_count mZone_HunkCount;        // total number of used hunks
  mork_count mZone_FreeOldRunCount;  // total free old runs

  morkHunk* mZone_HunkList;       // linked list of all used hunks
  morkRun* mZone_FreeOldRunList;  // linked list of free old runs

  // note mZone_At is a byte pointer for single byte address arithmetic:
  mork_u1* mZone_At;       // current position in most recent hunk
  mork_size mZone_AtSize;  // number of bytes remaining in this hunk

  // kBuckets+1 so indexes zero through kBuckets are all okay to use:

  morkRun* mZone_FreeRuns[morkZone_kBuckets + 1];
  // Each piece of memory stored in list mZone_FreeRuns[ i ] has an
  // allocation size equal to sizeof(morkRun) + (i * kRoundSize), so
  // that callers can be given a piece of memory with (i * kRoundSize)
  // bytes of writeable space while reserving the first sizeof(morkRun)
  // bytes to keep track of size information for later re-use.  Note
  // that mZone_FreeRuns[ 0 ] is unused because no run will be zero
  // bytes in size (and morkZone plans to complain about zero sizes).
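  // Illustrative sketch of the hunk policy described by the kMaxHunkWaste
  // comment earlier (pseudocode only, not a method declared here):
  //
  //   if (inSize <= mZone_AtSize)                // fits in current hunk?
  //     carve the run from mZone_At and advance mZone_At;
  //   else if (inSize > morkZone_kMaxHunkWaste)  // too big to retire for?
  //     allocate the run directly from mZone_Heap;
  //   else                                       // waste the remainder:
  //     retire the current hunk and start a fresh one;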
 protected:  // zone utilities
  mork_size zone_grow_at(morkEnv* ev, mork_size inNeededSize);

  void* zone_new_chip(morkEnv* ev, mdb_size inSize);         // alloc
  morkHunk* zone_new_hunk(morkEnv* ev, mdb_size inRunSize);  // alloc

  // { ===== begin nsIMdbHeap methods =====
 public:
  NS_IMETHOD Alloc(nsIMdbEnv* ev,    // allocate a piece of memory
                   mdb_size inSize,  // requested size of new memory block
                   void** outBlock) override;  // memory block of inSize bytes, or nil

  NS_IMETHOD Free(nsIMdbEnv* ev,  // free block allocated earlier by Alloc()
                  void* inBlock) override;

  virtual size_t GetUsedSize() override { return mZone_Heap->GetUsedSize(); }
  // } ===== end nsIMdbHeap methods =====

  // { ===== begin morkNode interface =====
 public:  // morkNode virtual methods
  virtual void CloseMorkNode(morkEnv* ev) override;  // CloseZone() only if open
  virtual ~morkZone();  // assert that CloseZone() executed earlier

 public:  // morkZone construction & destruction
  morkZone(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioNodeHeap,
           nsIMdbHeap* ioZoneHeap);

  void CloseZone(morkEnv* ev);  // called by CloseMorkNode()

 public:  // dynamic type identification
  mork_bool IsZone() const {
    return IsNode() && mNode_Derived == morkDerived_kZone;
  }
  // } ===== end morkNode methods =====

  // { ===== begin morkZone methods =====
 public:  // chips do not know how big they are...
  void* ZoneNewChip(morkEnv* ev, mdb_size inSize);  // alloc

 public:  // ...but runs do indeed know how big they are
  void* ZoneNewRun(morkEnv* ev, mdb_size inSize);                    // alloc
  void ZoneZapRun(morkEnv* ev, void* ioRunBody);                     // free
  void* ZoneGrowRun(morkEnv* ev, void* ioRunBody, mdb_size inSize);  // realloc

  // } ===== end morkZone methods =====

 public:  // typing & errors
  static void NonZoneTypeError(morkEnv* ev);
  static void NilZoneHeapError(morkEnv* ev);
  static void BadZoneTagError(morkEnv* ev);
};

// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789

#endif /* _MORKZONE_ */
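// Illustrative usage sketch (assumed caller code, not part of this header,
// with a morkZone* zone and morkEnv* ev in scope): chips are minimal
// one-way allocations with no per-block bookkeeping, while runs remember
// their size and so can be zapped or grown later.
//
//   void* chip = zone->ZoneNewChip(ev, 24);  // no header; reclaimed only
//                                            // when the zone goes away
//   void* run = zone->ZoneNewRun(ev, 30);    // size rounded up per kRound*
//   run = zone->ZoneGrowRun(ev, run, 64);    // realloc-style resize
//   zone->ZoneZapRun(ev, run);               // back onto a free list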