1 #include <Core/Core.h>
2 
3 #define LTIMING(x) // RTIMING(x)
4 
5 namespace Upp {
6 
7 #ifdef UPP_HEAP
8 
9 #include "HeapImp.h"
10 
11 #define LLOG(x) //  LOG((void *)this << ' ' << x)
12 
inline void Heap::Page::Format(int k)
{ // prepare this 4KB page to serve blocks of klass k: no active blocks, all blocks chained on freelist
	DbgFreeFill(Begin(), End() - Begin());
	klass = k;
	active = 0;
	int sz = Ksz(k); // block size (bytes) of klass k
	byte *ptr = End() - sz;
	byte *b = Begin();
	FreeLink *l = NULL;
	while(ptr >= b) { // chain blocks back-to-front so the freelist head is the lowest-address block
		((FreeLink *)ptr)->next = l;
		l = (FreeLink *)ptr;
		ptr -= sz;
	}
	freelist = l;
}
29 
Heap::Page *Heap::WorkPage(int k) // get a new workpage with empty blocks
{ // tries, in order: thread-local hot empty page (same klass, then, after reclaiming
  // remotely freed blocks, any klass reformatted), then the global 'aux' storage under
  // mutex (work pages, empty pages of same klass, empty pages of any klass), and
  // finally a brand new page from the huge allocator
	LLOG("AllocK - next work not available " << k << " empty: " << (void *)empty[k]);
	Page *page = empty[k]; // hot empty page of the same klass
	empty[k] = NULL;
	if(!page) { // try to reacquire pages freed remotely
		LLOG("AllocK - trying FreeRemote");
		SmallFreeRemote();
		if(work[k]->freelist) { // partially free page found
			LLOG("AllocK - work available after FreeRemote " << k);
			return work[k];
		}
		page = empty[k]; // hot empty page (SmallFreeRemote may have produced one)
		empty[k] = NULL;
	}
	if(!page)
		for(int i = 0; i < NKLASS; i++) // Try hot empty page of different klass
			if(empty[i]) {
				LLOG("AllocK - free page available for reformatting " << k);
				page = empty[i];
				empty[i] = NULL;
				page->Format(k); // reformat the page for the required klass
				break;
			}
	if(!page) { // Attempt to find page in global storage of free pages
		Mutex::Lock __(mutex);
		aux.SmallFreeRemoteRaw();
		if(aux.work[k]->next != aux.work[k]) { // Try page of the same klass first
			page = aux.work[k]->next;
			page->Unlink();
			LLOG("AllocK - adopting aux page " << k << " page: " << (void *)page << ", free " << (void *)page->freelist);
		}
		if(!page && aux.empty[k]) { // Try free page of the same klass (no need to format it)
			page = aux.empty[k];
			aux.empty[k] = page->next;
			free_4KB--; // page leaves the global 4KB reserve
			ASSERT(free_4KB < max_free_spages);
			LLOG("AllocK - empty aux page available of the same format " << k << " page: " << (void *)page << ", free " << (void *)page->freelist);
		}
		if(!page)
			for(int i = 0; i < NKLASS; i++) // Finally try to find free page of different klass
				if(aux.empty[i]) {
					page = aux.empty[i];
					aux.empty[i] = page->next;
					free_4KB--;
					page->Format(k);
					ASSERT(free_4KB < max_free_spages);
					LLOG("AllocK - empty aux page available for reformatting " << k << " page: " << (void *)page << ", free " << (void *)page->freelist);
					break;
				}
		if(!page) { // No free memory was found, ask huge for the new page
			page = (Page *)HugeAlloc(1);
			LLOG("AllocK - allocated new system page " << (void *)page << " " << k);
			page->Format(k);
		}
		page->heap = this; // adopt the page into this thread's heap
	}
	page->Link(work[k]);
	ASSERT(page->klass == k);
	return page;
}
91 
void *Heap::AllocK(int k)
{ // allocate a single block of klass k; loops until some work page yields a free block
	LLOG("AllocK " << k);
	if(!initialized)
		Init(); // lazy per-thread heap initialization
	Page *page = work[k]->next;
	for(;;) {
		ASSERT(page->klass == k);
		FreeLink *p = page->freelist;
		if(p) { // pop the first free block of this page
			LLOG("AllocK allocating from " << (void *)page << " " << (void *)p);
			page->freelist = p->next;
			++page->active;
			return p;
		}
		LLOG("AllocK - page exhausted " << k << " page: " << (void *)page);
		if(page->next != page) { // more pages in the list: retire exhausted page to 'full'
			LLOG("Moving " << (void *)page << " to full");
			page->Unlink();
			page->Link(full[k]);
			page = work[k]->next;
		}
		if(page->next == page) // self-linked: only the list head remains, acquire a fresh page
			page = WorkPage(k);
	}
}
118 
119 force_inline
Allok(int k)120 void *Heap::Allok(int k)
121 { // Try to alloc from the front-cache first
122 	LTIMING("Allok");
123 	FreeLink *ptr = cache[k];
124 	if(ptr) {
125 		cachen[k]++;
126 		cache[k] = ptr->next;
127 		return DbgFreeCheckK(ptr, k);
128 	}
129 	return DbgFreeCheckK(AllocK(k), k);
130 }
131 
132 force_inline
AllocSz(size_t & sz)133 void *Heap::AllocSz(size_t& sz)
134 {
135 	LTIMING("Alloc");
136 	LLOG("Alloc " << asString(sz));
137 	Stat(sz);
138 	if(sz <= 384) {
139 		if(sz == 0)
140 			sz = 1;
141 		int k = ((int)sz - 1) >> 5;
142 		sz = (k + 1) << 5;
143 		return Allok(k);
144 	}
145 	if(sz <= 992) {
146 		if(sz <= 448) {
147 			sz = 448;
148 			return Allok(12);
149 		}
150 		if(sz <= 576) {
151 			sz = 576;
152 			return Allok(13);
153 		}
154 		if(sz <= 672) {
155 			sz = 672;
156 			return Allok(14);
157 		}
158 		if(sz <= 800) {
159 			sz = 800;
160 			return Allok(15);
161 		}
162 		sz = 992;
163 		return Allok(16);
164 	}
165 	return LAlloc(sz);
166 }
167 
force_inline
void Heap::FreeK(void *ptr, Page *page, int k)
{ // return block ptr to its page's freelist, migrating the page between full/work/empty as needed
	if(page->freelist) { // page already has free blocks (it is on the work list)
		LLOG("Free next in work page " << k);
		((FreeLink *)ptr)->next = page->freelist;
	}
	else { // page was full: move it back to the work list
		LLOG("Free full to work " << k << " heap: " << (void *)page->heap);
		page->Unlink();
		page->Link(work[k]);
		((FreeLink *)ptr)->next = NULL;
	}
	page->freelist = (FreeLink *)ptr;
	if(--page->active == 0) { // there are no more allocated blocks in this page
		LLOG("Free page is empty " << (void *)page);
		page->Unlink();
		if(this == &aux) { // aux heap keeps no hot page; move straight to global reserve/huge
			LLOG("...is aux " << asString(free_4KB));
			Mutex::Lock __(mutex);
			Free4KB(k, page);
		}
		else {
			if(empty[k]) { // Keep one hot empty page per klass in thread, put rest to 'aux' global storage
				Mutex::Lock __(mutex);
				Free4KB(k, empty[k]); // Free current hot page to reserve/huge
			}
			empty[k] = page; // this empty page is now hot
		}
	}
}
199 
Free4KB(int k,Page * page)200 void Heap::Free4KB(int k, Page *page)
201 { // put empty 4KB to aux reserve or back to huge blocks if the reserve is full
202 	LLOG("Global Free4KB " << k << " " << (void *)empty);
203 	if(free_4KB < max_free_spages) { // only keep max_free_spages, release if more
204 		page->heap = &aux;
205 		page->next = aux.empty[k];
206 		aux.empty[k] = page;
207 		free_4KB++;
208 		LLOG("Reserve 4KB " << asString(free_4KB));
209 	}
210 	else {
211 		aux.HugeFree(page);
212 		LLOG("HugeFree 4KB " << asString(free_4KB));
213 	}
214 }
215 
force_inline
void Heap::Free(void *ptr, Page *page, int k)
{ // small-block free: route to the owning heap, the front cache, or the page freelist
	LTIMING("Small Free");
	LLOG("Small free page: " << (void *)page << ", k: " << k << ", ksz: " << Ksz(k));
	ASSERT((4096 - ((uintptr_t)ptr & (uintptr_t)4095)) % Ksz(k) == 0); // ptr must be block-aligned within its 4KB page
	if(page->heap != this) { // freeing block allocated in different thread
		RemoteFree(ptr, Ksz(k)); // add to originating heap's list of free pages to be properly freed later
		return;
	}
	DbgFreeFillK(ptr, k);
	if(cachen[k]) { // front cache has room: push the block and defer the real free
		cachen[k]--;
		FreeLink *l = (FreeLink *)ptr;
		l->next = cache[k];
		cache[k] = l;
		return;
	}
	FreeK(ptr, page, k);
}
236 
237 force_inline
Free(void * ptr)238 void Heap::Free(void *ptr)
239 {
240 	if(!ptr) return;
241 	LLOG("Free " << ptr);
242 	if(IsSmall(ptr)) {
243 		Page *page = GetPage(ptr);
244 		Free(ptr, page, page->klass);
245 	}
246 	else
247 		LFree(ptr);
248 }
249 
GetBlockSize(void * ptr)250 size_t Heap::GetBlockSize(void *ptr)
251 {
252 	if(!ptr) return 0;
253 	LLOG("GetBlockSize " << ptr);
254 	if(IsSmall(ptr)) {
255 		Page *page = GetPage(ptr);
256 		int k = page->klass;
257 		return Ksz(k);
258 	}
259 	return LGetBlockSize(ptr);
260 }
261 
void Heap::SmallFreeDirect(void *ptr)
{ // does not need to check for target heap or small vs large
	LLOG("Free Direct " << ptr);
	Page *page = GetPage(ptr);
	ASSERT(page->heap == this); // caller guarantees the block belongs to this heap
	int k = page->klass;
	LLOG("Small free page: " << (void *)page << ", k: " << k << ", ksz: " << Ksz(k));
	ASSERT((4096 - ((uintptr_t)ptr & (uintptr_t)4095)) % Ksz(k) == 0); // block must be aligned within its page
	DbgFreeFillK(ptr, k);
	FreeK(ptr, page, k); // note: bypasses the front cache on purpose
}
273 
bool Heap::FreeSmallEmpty(int size4KB, int count)
{ // attempt to release small 4KB pages to gain count4KB space or count of releases
  // returns true as soon as HugeFree reports >= size4KB free or 'count' pages were released;
  // false when the aux reserve is exhausted without reaching either goal
	bool released;
	do {
		released = false;
		for(int i = 0; i < NKLASS; i++)
			if(aux.empty[i]) { // release one reserved empty page per klass per sweep
				Page *q = aux.empty[i];
				aux.empty[i] = q->next;
				free_4KB--;
				ASSERT(free_4KB < max_free_spages);
				if(aux.HugeFree(q) >= size4KB || --count <= 0) // HugeFree is really static, aux needed just to compile
					return true;
				released = true;
			}
	}
	while(released); // keep sweeping while any klass still yielded a page
	return false;
}
293 
force_inline
void *Heap::Alloc32()
{ // fixed-size fast path: allocate one 32-byte block
	Stat(32);
	return Allok(KLASS_32);
}
300 
force_inline
void Heap::Free(void *ptr, int k)
{ // free a small block whose klass k is already known to the caller
	Free(ptr, GetPage(ptr), k);
}
306 
force_inline
void Heap::Free32(void *ptr)
{ // fixed-size fast path: free one 32-byte block
	Free(ptr, KLASS_32);
}
312 
force_inline
void *Heap::Alloc48()
{ // fixed-size fast path: allocate one 48-byte block
	Stat(48);
	return Allok(KLASS_48);
}
319 
force_inline
void Heap::Free48(void *ptr)
{ // fixed-size fast path: free one 48-byte block
	Free(ptr, KLASS_48);
}
325 
force_inline
Heap *ThreadHeap()
{ // return the calling thread's heap, creating its storage on first use
	thread_local Heap *heap_tls;
	Heap *heap = heap_tls;
	if(!heap) { // we definitely need a lock here because some Shutdown can be in progress
		Mutex::Lock __(Heap::mutex);
		// raw zero-initialized storage; no constructor runs here — the heap
		// initializes itself lazily on first allocation (AllocK checks 'initialized')
		thread_local byte sHeap[sizeof(Heap)];
		heap_tls = heap = (Heap *)sHeap;
	}
	return heap;
}
338 
MemoryFreek__(int klass,void * ptr)339 void MemoryFreek__(int klass, void *ptr)
340 {
341 	ThreadHeap()->Free((void *)ptr, klass);
342 }
343 
void *MemoryAllok__(int klass)
{ // allocate one block of the given klass from the calling thread's heap
	return ThreadHeap()->Allok(klass);
}
348 
349 #if defined(HEAPDBG)
350 
void *MemoryAllocSz_(size_t& sz)
{ // HEAPDBG allocation entry; sz is rounded up to the size actually served
	return ThreadHeap()->AllocSz(sz);
}
355 
void  MemoryFree_(void *ptr)
{ // HEAPDBG free entry
	ThreadHeap()->Free(ptr);
}
360 
bool  MemoryTryRealloc_(void *ptr, size_t& size)
{ // HEAPDBG in-place realloc attempt; returns true on success with size updated
	return ThreadHeap()->TryRealloc(ptr, size);
}
365 
size_t GetMemoryBlockSize_(void *ptr)
{ // HEAPDBG query of the served size of a block
	return ThreadHeap()->GetBlockSize(ptr);
}
370 
371 #else
372 
373 
374 #ifdef flagHEAPLOG
375 
#undef AllocSz

StaticMutex sHeapLogLock; // serializes writes to the heap log file

// Opened once during static initialization and deliberately never closed:
// the log must stay available for the entire process lifetime.
static FILE *sLog = fopen(GetExeDirFile("heap.log"), "w");
381 
LogFree(void * ptr)382 void LogFree(void *ptr)
383 {
384 	if(sLog) {
385 		Mutex::Lock __(sHeapLogLock);
386 		fprintf(sLog, "-%x %p\n", (int)Thread::GetCurrentId(), ptr);
387 	}
388 }
389 
LogAlloc(void * ptr,size_t sz)390 void *LogAlloc(void *ptr, size_t sz)
391 {
392 	if(sLog) {
393 		Mutex::Lock __(sHeapLogLock);
394 		fprintf(sLog, "%x %zx %p\n", (int)Thread::GetCurrentId(), sz, ptr);
395 	}
396 	return ptr;
397 }
398 
399 #else
400 
inline void LogFree(void *ptr) {} // no-op stub when heap logging is disabled
402 
inline void *LogAlloc(void *ptr, size_t sz) { return ptr; } // pass-through stub when heap logging is disabled
404 
405 #endif
406 
void *MemoryAlloc(size_t sz)
{ // public allocation entry point
  // NOTE(review): the 'sz' handed to LogAlloc may be the requested or the rounded
  // size — argument evaluation order relative to AllocSz's update of sz is unspecified
	LTIMING("MemoryAlloc");
	return LogAlloc(ThreadHeap()->AllocSz(sz), sz);
}
412 
void *MemoryAllocSz(size_t& sz)
{ // public allocation entry point; reports the rounded-up served size back through sz
	LTIMING("MemoryAllocSz");
	return LogAlloc(ThreadHeap()->AllocSz(sz), sz);
}
418 
void  MemoryFree(void *ptr)
{ // public free entry point (NULL-safe — Heap::Free checks for it)
	LTIMING("MemoryFree");
	LogFree(ptr);
	ThreadHeap()->Free(ptr);
}
425 
size_t GetMemoryBlockSize(void *ptr)
{ // served size of the block at ptr (0 for NULL)
	return ThreadHeap()->GetBlockSize(ptr);
}
430 
bool MemoryTryRealloc__(void *ptr, size_t& size)
{ // attempt in-place resize; returns true on success with size updated
	return ThreadHeap()->TryRealloc(ptr, size);
}
435 
void *MemoryAlloc32()
{ // fixed-size public entry: allocate a 32-byte block
	LTIMING("MemoryAlloc32");
	return LogAlloc(ThreadHeap()->Alloc32(), 32);
}
441 
void  MemoryFree32(void *ptr)
{ // fixed-size public entry: free a 32-byte block
	LTIMING("MemoryFree32");
	LogFree(ptr);
	ThreadHeap()->Free32(ptr);
}
448 
void *MemoryAlloc48()
{ // fixed-size public entry: allocate a 48-byte block
	LTIMING("MemoryAlloc48");
	return LogAlloc(ThreadHeap()->Alloc48(), 48);
}
454 
void  MemoryFree48(void *ptr)
{ // fixed-size public entry: free a 48-byte block
	LTIMING("MemoryFree48");
	LogFree(ptr);
	ThreadHeap()->Free48(ptr);
}
461 
462 #endif
463 
void MemoryFreeThread()
{ // release the calling thread's heap (intended for thread shutdown)
	ThreadHeap()->Shutdown();
}
468 
void MemoryCheck()
{ // run a consistency check on the calling thread's heap
	ThreadHeap()->Check();
}
473 
MemoryProfile::MemoryProfile()
{ // populate this profile with statistics from the calling thread's heap
	ThreadHeap()->Make(*this);
}
478 
void MemoryDumpLarge()
{ // diagnostic dump of the large-block allocator state
	ThreadHeap()->DumpLarge();
}
483 
void MemoryDumpHuge()
{ // diagnostic dump of the huge-block allocator state
	ThreadHeap()->DumpHuge();
}
488 
489 #endif
490 
491 }
492