1 // This is a version of dlmalloc.c ported to Rust. You can find the original
2 // source at ftp://g.oswego.edu/pub/misc/malloc.c
3 //
4 // The original source was written by Doug Lea and released to the public domain
5
6 use core::cmp;
7 use core::mem;
8 use core::ptr;
9
10 use Allocator;
11
12 pub struct Dlmalloc<A> {
13 smallmap: u32,
14 treemap: u32,
15 smallbins: [*mut Chunk; (NSMALLBINS + 1) * 2],
16 treebins: [*mut TreeChunk; NTREEBINS],
17 dvsize: usize,
18 topsize: usize,
19 dv: *mut Chunk,
20 top: *mut Chunk,
21 footprint: usize,
22 max_footprint: usize,
23 seg: Segment,
24 trim_check: usize,
25 least_addr: *mut u8,
26 release_checks: usize,
27 system_allocator: A,
28 }
29 unsafe impl<A: Send> Send for Dlmalloc<A> {}
30
31 // TODO: document this
32 const NSMALLBINS: usize = 32;
33 const NTREEBINS: usize = 32;
34 const SMALLBIN_SHIFT: usize = 3;
35 const TREEBIN_SHIFT: usize = 8;
36
37 // TODO: runtime configurable? documentation?
38 const DEFAULT_GRANULARITY: usize = 64 * 1024;
39 const DEFAULT_TRIM_THRESHOLD: usize = 2 * 1024 * 1024;
40 const MAX_RELEASE_CHECK_RATE: usize = 4095;
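// Rough meaning of the constants above, following upstream dlmalloc: each small
// bin holds free chunks of exactly one size (multiples of 1 << SMALLBIN_SHIFT,
// i.e. 8 bytes), while requests of at least 1 << TREEBIN_SHIFT (256) bytes are
// served from the tree bins. DEFAULT_GRANULARITY is the unit in which memory is
// requested from the system allocator, DEFAULT_TRIM_THRESHOLD is the top-chunk
// size above which `sys_trim` tries to hand memory back to the system, and
// MAX_RELEASE_CHECK_RATE bounds how many frees happen between scans for
// releasable segments.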
41
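// Layout note (as in upstream dlmalloc): `head` stores the chunk's size with the
// PINUSE/CINUSE flag bits (defined near the bottom of this file) packed into its
// low bits, `prev_foot` stores the previous chunk's size while that chunk is
// free (or the mmap offset for mmapped chunks), and `prev`/`next` are only
// meaningful while a chunk sits in a free list.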
42 #[repr(C)]
43 struct Chunk {
44 prev_foot: usize,
45 head: usize,
46 prev: *mut Chunk,
47 next: *mut Chunk,
48 }
49
50 #[repr(C)]
51 struct TreeChunk {
52 chunk: Chunk,
53 child: [*mut TreeChunk; 2],
54 parent: *mut TreeChunk,
55 index: u32,
56 }
57
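// A `Segment` describes one contiguous region obtained from the system
// allocator; segments form a singly linked list through `next`, with the most
// recent record stored inline in `Dlmalloc::seg`.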
58 #[repr(C)]
59 #[derive(Clone, Copy)]
60 struct Segment {
61 base: *mut u8,
62 size: usize,
63 next: *mut Segment,
64 flags: u32,
65 }
66
67 fn align_up(a: usize, alignment: usize) -> usize {
68 debug_assert!(alignment.is_power_of_two());
69 (a + (alignment - 1)) & !(alignment - 1)
70 }
71
72 fn left_bits(x: u32) -> u32 {
73 (x << 1) | (!(x << 1)).wrapping_add(1)
74 }
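// For example, left_bits(0b1_0000) == 0xFFFF_FFE0: every bit strictly above the
// lowest set bit of the input. Combined with a bin bitmap this selects all
// occupied bins larger than a given index.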
75
76 fn least_bit(x: u32) -> u32 {
77 x & (!x + 1)
78 }
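// For example, least_bit(0b0110_1000) == 0b0000_1000: it isolates the lowest
// set bit, i.e. the smallest occupied bin in a bitmap.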
79
80 fn leftshift_for_tree_index(x: u32) -> u32 {
81 let x = x as usize;
82 if x == NTREEBINS - 1 {
83 0
84 } else {
85 (mem::size_of::<usize>() * 8 - 1 - ((x >> 1) + TREEBIN_SHIFT - 2)) as u32
86 }
87 }
88
89 impl<A> Dlmalloc<A> {
90 pub const fn new(system_allocator: A) -> Dlmalloc<A> {
91 Dlmalloc {
92 smallmap: 0,
93 treemap: 0,
94 smallbins: [0 as *mut _; (NSMALLBINS + 1) * 2],
95 treebins: [0 as *mut _; NTREEBINS],
96 dvsize: 0,
97 topsize: 0,
98 dv: 0 as *mut _,
99 top: 0 as *mut _,
100 footprint: 0,
101 max_footprint: 0,
102 seg: Segment {
103 base: 0 as *mut _,
104 size: 0,
105 next: 0 as *mut _,
106 flags: 0,
107 },
108 trim_check: 0,
109 least_addr: 0 as *mut _,
110 release_checks: 0,
111 system_allocator,
112 }
113 }
114 }
115
116 impl<A: Allocator> Dlmalloc<A> {
117 // TODO: can we get rid of this?
118 pub fn malloc_alignment(&self) -> usize {
119 mem::size_of::<usize>() * 2
120 }
121
122 // TODO: dox
123 fn chunk_overhead(&self) -> usize {
124 mem::size_of::<usize>()
125 }
126
127 fn mmap_chunk_overhead(&self) -> usize {
128 2 * mem::size_of::<usize>()
129 }
130
131 // TODO: dox
132 fn min_large_size(&self) -> usize {
133 1 << TREEBIN_SHIFT
134 }
135
136 // TODO: dox
137 fn max_small_size(&self) -> usize {
138 self.min_large_size() - 1
139 }
140
141 // TODO: dox
142 fn max_small_request(&self) -> usize {
143 self.max_small_size() - (self.malloc_alignment() - 1) - self.chunk_overhead()
144 }
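// For illustration, on a 64-bit target max_small_request() works out to
// 255 - 15 - 8 = 232 bytes; padded requests above that are served from the
// tree bins or directly from the system allocator.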
145
146 // TODO: dox
147 fn min_chunk_size(&self) -> usize {
148 align_up(mem::size_of::<Chunk>(), self.malloc_alignment())
149 }
150
151 // TODO: dox
152 fn min_request(&self) -> usize {
153 self.min_chunk_size() - self.chunk_overhead() - 1
154 }
155
156 // TODO: dox
157 fn max_request(&self) -> usize {
158 // min_sys_alloc_space: the largest `X` such that
159 // pad_request(X - 1) -- minus 1, because requests of exactly
160 // `max_request` will not be honored
161 // + self.top_foot_size()
162 // + self.malloc_alignment()
163 // + DEFAULT_GRANULARITY
164 // ==
165 // usize::MAX
166 let min_sys_alloc_space =
167 ((!0 - (DEFAULT_GRANULARITY + self.top_foot_size() + self.malloc_alignment()) + 1)
168 & !self.malloc_alignment())
169 - self.chunk_overhead()
170 + 1;
171
172 cmp::min((!self.min_chunk_size() + 1) << 2, min_sys_alloc_space)
173 }
174
175 fn pad_request(&self, amt: usize) -> usize {
176 align_up(amt + self.chunk_overhead(), self.malloc_alignment())
177 }
178
179 fn small_index(&self, size: usize) -> u32 {
180 (size >> SMALLBIN_SHIFT) as u32
181 }
182
183 fn small_index2size(&self, idx: u32) -> usize {
184 (idx as usize) << SMALLBIN_SHIFT
185 }
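// For example, a padded chunk size of 24 bytes maps to small bin 3
// (small_index(24) == 3), and small_index2size(3) == 24 recovers that bin's
// exact chunk size.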
186
187 fn is_small(&self, s: usize) -> bool {
188 s >> SMALLBIN_SHIFT < NSMALLBINS
189 }
190
191 fn is_aligned(&self, a: usize) -> bool {
192 a & (self.malloc_alignment() - 1) == 0
193 }
194
195 fn align_offset(&self, addr: *mut u8) -> usize {
196 self.align_offset_usize(addr as usize)
197 }
198
199 fn align_offset_usize(&self, addr: usize) -> usize {
200 align_up(addr, self.malloc_alignment()) - (addr as usize)
201 }
202
203 fn top_foot_size(&self) -> usize {
204 self.align_offset_usize(Chunk::mem_offset() as usize)
205 + self.pad_request(mem::size_of::<Segment>())
206 + self.min_chunk_size()
207 }
208
209 fn mmap_foot_pad(&self) -> usize {
210 4 * mem::size_of::<usize>()
211 }
212
213 fn align_as_chunk(&self, ptr: *mut u8) -> *mut Chunk {
214 unsafe {
215 let chunk = Chunk::to_mem(ptr as *mut Chunk);
216 ptr.offset(self.align_offset(chunk) as isize) as *mut Chunk
217 }
218 }
219
220 fn request2size(&self, req: usize) -> usize {
221 if req < self.min_request() {
222 self.min_chunk_size()
223 } else {
224 self.pad_request(req)
225 }
226 }
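// Worked example of request2size on a 64-bit target: min_chunk_size() is 32,
// so request2size(1) == 32, while request2size(33) == align_up(33 + 8, 16) == 48.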
227
228 unsafe fn overhead_for(&self, p: *mut Chunk) -> usize {
229 if Chunk::mmapped(p) {
230 self.mmap_chunk_overhead()
231 } else {
232 self.chunk_overhead()
233 }
234 }
235
236 pub unsafe fn calloc_must_clear(&self, ptr: *mut u8) -> bool {
237 !self.system_allocator.allocates_zeros() || !Chunk::mmapped(Chunk::from_mem(ptr))
238 }
239
240 pub unsafe fn malloc(&mut self, size: usize) -> *mut u8 {
241 self.check_malloc_state();
242
243 let nb;
244 if size <= self.max_small_request() {
245 nb = self.request2size(size);
246 let mut idx = self.small_index(nb);
247 let smallbits = self.smallmap >> idx;
248
249 // Check the bin for `idx` (the lowest bit), but also check the next
250 // bin up; either one can satisfy our request if it has a free chunk.
251 if smallbits & 0b11 != 0 {
252 // If the lowest bit, our `idx`, is unset then bump up the
253 // index as we'll be using the next bucket up.
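// For example, with smallmap == 0b10_0000 and idx == 4, smallbits is 0b10:
// bin 4 is empty but bin 5 has a chunk, so the line below bumps idx to 5.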
254 idx += !smallbits & 1;
255
256 let b = self.smallbin_at(idx);
257 let p = (*b).prev;
258 self.unlink_first_small_chunk(b, p, idx);
259 let smallsize = self.small_index2size(idx);
260 Chunk::set_inuse_and_pinuse(p, smallsize);
261 let ret = Chunk::to_mem(p);
262 self.check_malloced_chunk(ret, nb);
263 return ret;
264 }
265
266 if nb > self.dvsize {
267 // If there's some other bin with some memory, then we just use
268 // the next smallest bin
269 if smallbits != 0 {
270 let leftbits = (smallbits << idx) & left_bits(1 << idx);
271 let leastbit = least_bit(leftbits);
272 let i = leastbit.trailing_zeros();
273 let b = self.smallbin_at(i);
274 let p = (*b).prev;
275 debug_assert_eq!(Chunk::size(p), self.small_index2size(i));
276 self.unlink_first_small_chunk(b, p, i);
277 let smallsize = self.small_index2size(i);
278 let rsize = smallsize - nb;
279 if mem::size_of::<usize>() != 4 && rsize < self.min_chunk_size() {
280 Chunk::set_inuse_and_pinuse(p, smallsize);
281 } else {
282 Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
283 let r = Chunk::plus_offset(p, nb);
284 Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
285 self.replace_dv(r, rsize);
286 }
287 let ret = Chunk::to_mem(p);
288 self.check_malloced_chunk(ret, nb);
289 return ret;
290 } else if self.treemap != 0 {
291 let mem = self.tmalloc_small(nb);
292 if !mem.is_null() {
293 self.check_malloced_chunk(mem, nb);
294 self.check_malloc_state();
295 return mem;
296 }
297 }
298 }
299 } else if size >= self.max_request() {
300 // TODO: translate this to unsupported
301 return ptr::null_mut();
302 } else {
303 nb = self.pad_request(size);
304 if self.treemap != 0 {
305 let mem = self.tmalloc_large(nb);
306 if !mem.is_null() {
307 self.check_malloced_chunk(mem, nb);
308 self.check_malloc_state();
309 return mem;
310 }
311 }
312 }
313
314 // use the `dv` node if we can, splitting it if necessary or otherwise
315 // exhausting the entire chunk
316 if nb <= self.dvsize {
317 let rsize = self.dvsize - nb;
318 let p = self.dv;
319 if rsize >= self.min_chunk_size() {
320 self.dv = Chunk::plus_offset(p, nb);
321 self.dvsize = rsize;
322 let r = self.dv;
323 Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
324 Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
325 } else {
326 let dvs = self.dvsize;
327 self.dvsize = 0;
328 self.dv = ptr::null_mut();
329 Chunk::set_inuse_and_pinuse(p, dvs);
330 }
331 let ret = Chunk::to_mem(p);
332 self.check_malloced_chunk(ret, nb);
333 self.check_malloc_state();
334 return ret;
335 }
336
337 // Split the top node if we can
338 if nb < self.topsize {
339 self.topsize -= nb;
340 let rsize = self.topsize;
341 let p = self.top;
342 self.top = Chunk::plus_offset(p, nb);
343 let r = self.top;
344 (*r).head = rsize | PINUSE;
345 Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
346 self.check_top_chunk(self.top);
347 let ret = Chunk::to_mem(p);
348 self.check_malloced_chunk(ret, nb);
349 self.check_malloc_state();
350 return ret;
351 }
352
353 self.sys_alloc(nb)
354 }
355
356 /// Allocates more memory from the system allocator to satisfy a request of `size` bytes.
357 unsafe fn sys_alloc(&mut self, size: usize) -> *mut u8 {
358 self.check_malloc_state();
359 // keep in sync with max_request
360 let asize = align_up(
361 size + self.top_foot_size() + self.malloc_alignment(),
362 DEFAULT_GRANULARITY,
363 );
364
365 let (tbase, tsize, flags) = self.system_allocator.alloc(asize);
366 if tbase.is_null() {
367 return tbase;
368 }
369
370 self.footprint += tsize;
371 self.max_footprint = cmp::max(self.max_footprint, self.footprint);
372
373 if self.top.is_null() {
374 if self.least_addr.is_null() || tbase < self.least_addr {
375 self.least_addr = tbase;
376 }
377 self.seg.base = tbase;
378 self.seg.size = tsize;
379 self.seg.flags = flags;
380 self.release_checks = MAX_RELEASE_CHECK_RATE;
381 self.init_bins();
382 let tsize = tsize - self.top_foot_size();
383 self.init_top(tbase as *mut Chunk, tsize);
384 // let mn = Chunk::next(Chunk::from_mem(self as *mut _ as *mut u8));
385 // let top_foot_size = self.top_foot_size();
386 // self.init_top(mn, tbase as usize + tsize - mn as usize - top_foot_size);
387 } else {
388 let mut sp = &mut self.seg as *mut Segment;
389 while !sp.is_null() && tbase != Segment::top(sp) {
390 sp = (*sp).next;
391 }
392 if !sp.is_null()
393 && !Segment::is_extern(sp)
394 && Segment::sys_flags(sp) == flags
395 && Segment::holds(sp, self.top as *mut u8)
396 {
397 (*sp).size += tsize;
398 let ptr = self.top;
399 let size = self.topsize + tsize;
400 self.init_top(ptr, size);
401 } else {
402 self.least_addr = cmp::min(tbase, self.least_addr);
403 let mut sp = &mut self.seg as *mut Segment;
404 while !sp.is_null() && (*sp).base != tbase.offset(tsize as isize) {
405 sp = (*sp).next;
406 }
407 if !sp.is_null() && !Segment::is_extern(sp) && Segment::sys_flags(sp) == flags {
408 let oldbase = (*sp).base;
409 (*sp).base = tbase;
410 (*sp).size += tsize;
411 return self.prepend_alloc(tbase, oldbase, size);
412 } else {
413 self.add_segment(tbase, tsize, flags);
414 }
415 }
416 }
417
418 if size < self.topsize {
419 self.topsize -= size;
420 let rsize = self.topsize;
421 let p = self.top;
422 self.top = Chunk::plus_offset(p, size);
423 let r = self.top;
424 (*r).head = rsize | PINUSE;
425 Chunk::set_size_and_pinuse_of_inuse_chunk(p, size);
426 let ret = Chunk::to_mem(p);
427 self.check_top_chunk(self.top);
428 self.check_malloced_chunk(ret, size);
429 self.check_malloc_state();
430 return ret;
431 }
432
433 return ptr::null_mut();
434 }
435
436 pub unsafe fn realloc(&mut self, oldmem: *mut u8, bytes: usize) -> *mut u8 {
437 if bytes >= self.max_request() {
438 return ptr::null_mut();
439 }
440 let nb = self.request2size(bytes);
441 let oldp = Chunk::from_mem(oldmem);
442 let newp = self.try_realloc_chunk(oldp, nb, true);
443 if !newp.is_null() {
444 self.check_inuse_chunk(newp);
445 return Chunk::to_mem(newp);
446 }
447 let ptr = self.malloc(bytes);
448 if !ptr.is_null() {
449 let oc = Chunk::size(oldp) - self.overhead_for(oldp);
450 ptr::copy_nonoverlapping(oldmem, ptr, cmp::min(oc, bytes));
451 self.free(oldmem);
452 }
453 return ptr;
454 }
455
456 unsafe fn try_realloc_chunk(&mut self, p: *mut Chunk, nb: usize, can_move: bool) -> *mut Chunk {
457 let oldsize = Chunk::size(p);
458 let next = Chunk::plus_offset(p, oldsize);
459
460 if Chunk::mmapped(p) {
461 self.mmap_resize(p, nb, can_move)
462 } else if oldsize >= nb {
463 let rsize = oldsize - nb;
464 if rsize >= self.min_chunk_size() {
465 let r = Chunk::plus_offset(p, nb);
466 Chunk::set_inuse(p, nb);
467 Chunk::set_inuse(r, rsize);
468 self.dispose_chunk(r, rsize);
469 }
470 p
471 } else if next == self.top {
472 // extend into top
473 if oldsize + self.topsize <= nb {
474 return ptr::null_mut();
475 }
476 let newsize = oldsize + self.topsize;
477 let newtopsize = newsize - nb;
478 let newtop = Chunk::plus_offset(p, nb);
479 Chunk::set_inuse(p, nb);
480 (*newtop).head = newtopsize | PINUSE;
481 self.top = newtop;
482 self.topsize = newtopsize;
483 p
484 } else if next == self.dv {
485 // extend into dv
486 let dvs = self.dvsize;
487 if oldsize + dvs < nb {
488 return ptr::null_mut();
489 }
490 let dsize = oldsize + dvs - nb;
491 if dsize >= self.min_chunk_size() {
492 let r = Chunk::plus_offset(p, nb);
493 let n = Chunk::plus_offset(r, dsize);
494 Chunk::set_inuse(p, nb);
495 Chunk::set_size_and_pinuse_of_free_chunk(r, dsize);
496 Chunk::clear_pinuse(n);
497 self.dvsize = dsize;
498 self.dv = r;
499 } else {
500 // exhaust dv
501 let newsize = oldsize + dvs;
502 Chunk::set_inuse(p, newsize);
503 self.dvsize = 0;
504 self.dv = ptr::null_mut();
505 }
506 return p;
507 } else if !Chunk::cinuse(next) {
508 // extend into the next free chunk
509 let nextsize = Chunk::size(next);
510 if oldsize + nextsize < nb {
511 return ptr::null_mut();
512 }
513 let rsize = oldsize + nextsize - nb;
514 self.unlink_chunk(next, nextsize);
515 if rsize < self.min_chunk_size() {
516 let newsize = oldsize + nextsize;
517 Chunk::set_inuse(p, newsize);
518 } else {
519 let r = Chunk::plus_offset(p, nb);
520 Chunk::set_inuse(p, nb);
521 Chunk::set_inuse(r, rsize);
522 self.dispose_chunk(r, rsize);
523 }
524 p
525 } else {
526 ptr::null_mut()
527 }
528 }
529
530 unsafe fn mmap_resize(&mut self, oldp: *mut Chunk, nb: usize, can_move: bool) -> *mut Chunk {
531 let oldsize = Chunk::size(oldp);
532 // Can't shrink mmap regions below a small size
533 if self.is_small(nb) {
534 return ptr::null_mut();
535 }
536
537 // Keep the old chunk if it's big enough but not too big
538 if oldsize >= nb + mem::size_of::<usize>() && (oldsize - nb) <= (DEFAULT_GRANULARITY << 1) {
539 return oldp;
540 }
541
542 let offset = (*oldp).prev_foot;
543 let oldmmsize = oldsize + offset + self.mmap_foot_pad();
544 let newmmsize =
545 self.mmap_align(nb + 6 * mem::size_of::<usize>() + self.malloc_alignment() - 1);
546 let ptr = self.system_allocator.remap(
547 (oldp as *mut u8).offset(-(offset as isize)),
548 oldmmsize,
549 newmmsize,
550 can_move,
551 );
552 if ptr.is_null() {
553 return ptr::null_mut();
554 }
555 let newp = ptr.offset(offset as isize) as *mut Chunk;
556 let psize = newmmsize - offset - self.mmap_foot_pad();
557 (*newp).head = psize;
558 (*Chunk::plus_offset(newp, psize)).head = Chunk::fencepost_head();
559 (*Chunk::plus_offset(newp, psize + mem::size_of::<usize>())).head = 0;
560 self.least_addr = cmp::min(ptr, self.least_addr);
561 self.footprint = self.footprint + newmmsize - oldmmsize;
562 self.max_footprint = cmp::max(self.max_footprint, self.footprint);
563 self.check_mmapped_chunk(newp);
564 return newp;
565 }
566
567 fn mmap_align(&self, a: usize) -> usize {
568 align_up(a, self.system_allocator.page_size())
569 }
570
571 // Only call this with power-of-two alignment and alignment >
572 // `self.malloc_alignment()`
573 pub unsafe fn memalign(&mut self, mut alignment: usize, bytes: usize) -> *mut u8 {
574 if alignment < self.min_chunk_size() {
575 alignment = self.min_chunk_size();
576 }
577 if bytes >= self.max_request() - alignment {
578 return ptr::null_mut();
579 }
580 let nb = self.request2size(bytes);
581 let req = nb + alignment + self.min_chunk_size() - self.chunk_overhead();
582 let mem = self.malloc(req);
583 if mem.is_null() {
584 return mem;
585 }
586 let mut p = Chunk::from_mem(mem);
587 if mem as usize & (alignment - 1) != 0 {
588 // Here we find an aligned spot inside the chunk. Since we need to
589 // give back leading space in a chunk of at least `min_chunk_size`,
590 // if the first calculation places us at a spot with less than
591 // `min_chunk_size` of leading space we can move to the next aligned
592 // spot. We've allocated enough total room that this is always possible.
593 let br =
594 Chunk::from_mem(((mem as usize + alignment - 1) & (!alignment + 1)) as *mut u8);
595 let pos = if (br as usize - p as usize) > self.min_chunk_size() {
596 br as *mut u8
597 } else {
598 (br as *mut u8).offset(alignment as isize)
599 };
600 let newp = pos as *mut Chunk;
601 let leadsize = pos as usize - p as usize;
602 let newsize = Chunk::size(p) - leadsize;
603
604 // for mmapped chunks just adjust the offset
605 if Chunk::mmapped(p) {
606 (*newp).prev_foot = (*p).prev_foot + leadsize;
607 (*newp).head = newsize;
608 } else {
609 // give back the leader, use the rest
610 Chunk::set_inuse(newp, newsize);
611 Chunk::set_inuse(p, leadsize);
612 self.dispose_chunk(p, leadsize);
613 }
614 p = newp;
615 }
616
617 // give back spare room at the end
618 if !Chunk::mmapped(p) {
619 let size = Chunk::size(p);
620 if size > nb + self.min_chunk_size() {
621 let remainder_size = size - nb;
622 let remainder = Chunk::plus_offset(p, nb);
623 Chunk::set_inuse(p, nb);
624 Chunk::set_inuse(remainder, remainder_size);
625 self.dispose_chunk(remainder, remainder_size);
626 }
627 }
628
629 let mem = Chunk::to_mem(p);
630 debug_assert!(Chunk::size(p) >= nb);
631 debug_assert_eq!(align_up(mem as usize, alignment), mem as usize);
632 self.check_inuse_chunk(p);
633 return mem;
634 }
635
636 // consolidate and bin a chunk, differs from exported versions of free
637 // mainly in that the chunk need not be marked as inuse
638 unsafe fn dispose_chunk(&mut self, mut p: *mut Chunk, mut psize: usize) {
639 let next = Chunk::plus_offset(p, psize);
640 if !Chunk::pinuse(p) {
641 let prevsize = (*p).prev_foot;
642 if Chunk::mmapped(p) {
643 psize += prevsize + self.mmap_foot_pad();
644 if self
645 .system_allocator
646 .free((p as *mut u8).offset(-(prevsize as isize)), psize)
647 {
648 self.footprint -= psize;
649 }
650 return;
651 }
652 let prev = Chunk::minus_offset(p, prevsize);
653 psize += prevsize;
654 p = prev;
655 if p != self.dv {
656 self.unlink_chunk(p, prevsize);
657 } else if (*next).head & INUSE == INUSE {
658 self.dvsize = psize;
659 Chunk::set_free_with_pinuse(p, psize, next);
660 return;
661 }
662 }
663
664 if !Chunk::cinuse(next) {
665 // consolidate forward
666 if next == self.top {
667 self.topsize += psize;
668 let tsize = self.topsize;
669 self.top = p;
670 (*p).head = tsize | PINUSE;
671 if p == self.dv {
672 self.dv = ptr::null_mut();
673 self.dvsize = 0;
674 }
675 return;
676 } else if next == self.dv {
677 self.dvsize += psize;
678 let dsize = self.dvsize;
679 self.dv = p;
680 Chunk::set_size_and_pinuse_of_free_chunk(p, dsize);
681 return;
682 } else {
683 let nsize = Chunk::size(next);
684 psize += nsize;
685 self.unlink_chunk(next, nsize);
686 Chunk::set_size_and_pinuse_of_free_chunk(p, psize);
687 if p == self.dv {
688 self.dvsize = psize;
689 return;
690 }
691 }
692 } else {
693 Chunk::set_free_with_pinuse(p, psize, next);
694 }
695 self.insert_chunk(p, psize);
696 }
697
698 unsafe fn init_top(&mut self, ptr: *mut Chunk, size: usize) {
699 let offset = self.align_offset(Chunk::to_mem(ptr));
700 let p = Chunk::plus_offset(ptr, offset);
701 let size = size - offset;
702
703 self.top = p;
704 self.topsize = size;
705 (*p).head = size | PINUSE;
706 (*Chunk::plus_offset(p, size)).head = self.top_foot_size();
707 self.trim_check = DEFAULT_TRIM_THRESHOLD;
708 }
709
710 unsafe fn init_bins(&mut self) {
711 for i in 0..NSMALLBINS as u32 {
712 let bin = self.smallbin_at(i);
713 (*bin).next = bin;
714 (*bin).prev = bin;
715 }
716 }
717
718 unsafe fn prepend_alloc(&mut self, newbase: *mut u8, oldbase: *mut u8, size: usize) -> *mut u8 {
719 let p = self.align_as_chunk(newbase);
720 let mut oldfirst = self.align_as_chunk(oldbase);
721 let psize = oldfirst as usize - p as usize;
722 let q = Chunk::plus_offset(p, size);
723 let mut qsize = psize - size;
724 Chunk::set_size_and_pinuse_of_inuse_chunk(p, size);
725
726 debug_assert!(oldfirst > q);
727 debug_assert!(Chunk::pinuse(oldfirst));
728 debug_assert!(qsize >= self.min_chunk_size());
729
730 // consolidate the remainder with the first chunk of the old base
731 if oldfirst == self.top {
732 self.topsize += qsize;
733 let tsize = self.topsize;
734 self.top = q;
735 (*q).head = tsize | PINUSE;
736 self.check_top_chunk(q);
737 } else if oldfirst == self.dv {
738 self.dvsize += qsize;
739 let dsize = self.dvsize;
740 self.dv = q;
741 Chunk::set_size_and_pinuse_of_free_chunk(q, dsize);
742 } else {
743 if !Chunk::inuse(oldfirst) {
744 let nsize = Chunk::size(oldfirst);
745 self.unlink_chunk(oldfirst, nsize);
746 oldfirst = Chunk::plus_offset(oldfirst, nsize);
747 qsize += nsize;
748 }
749 Chunk::set_free_with_pinuse(q, qsize, oldfirst);
750 self.insert_chunk(q, qsize);
751 self.check_free_chunk(q);
752 }
753
754 let ret = Chunk::to_mem(p);
755 self.check_malloced_chunk(ret, size);
756 self.check_malloc_state();
757 return ret;
758 }
759
760 // add a segment to hold a new noncontiguous region
761 unsafe fn add_segment(&mut self, tbase: *mut u8, tsize: usize, flags: u32) {
762 // TODO: what in the world is this function doing
763
764 // Determine locations and sizes of segment, fenceposts, and the old top
765 let old_top = self.top as *mut u8;
766 let oldsp = self.segment_holding(old_top);
767 let old_end = Segment::top(oldsp);
768 let ssize = self.pad_request(mem::size_of::<Segment>());
769 let offset = ssize + mem::size_of::<usize>() * 4 + self.malloc_alignment() - 1;
770 let rawsp = old_end.offset(-(offset as isize));
771 let offset = self.align_offset(Chunk::to_mem(rawsp as *mut Chunk));
772 let asp = rawsp.offset(offset as isize);
773 let csp = if asp < old_top.offset(self.min_chunk_size() as isize) {
774 old_top
775 } else {
776 asp
777 };
778 let sp = csp as *mut Chunk;
779 let ss = Chunk::to_mem(sp) as *mut Segment;
780 let tnext = Chunk::plus_offset(sp, ssize);
781 let mut p = tnext;
782 let mut nfences = 0;
783
784 // reset the top to our new space
785 let size = tsize - self.top_foot_size();
786 self.init_top(tbase as *mut Chunk, size);
787
788 // set up our segment record
789 debug_assert!(self.is_aligned(ss as usize));
790 Chunk::set_size_and_pinuse_of_inuse_chunk(sp, ssize);
791 *ss = self.seg; // push our current record
792 self.seg.base = tbase;
793 self.seg.size = tsize;
794 self.seg.flags = flags;
795 self.seg.next = ss;
796
797 // insert trailing fences
798 loop {
799 let nextp = Chunk::plus_offset(p, mem::size_of::<usize>());
800 (*p).head = Chunk::fencepost_head();
801 nfences += 1;
802 if (&(*nextp).head as *const usize as *mut u8) < old_end {
803 p = nextp;
804 } else {
805 break;
806 }
807 }
808 debug_assert!(nfences >= 2);
809
810 // insert the rest of the old top into a bin as an ordinary free chunk
811 if csp != old_top {
812 let q = old_top as *mut Chunk;
813 let psize = csp as usize - old_top as usize;
814 let tn = Chunk::plus_offset(q, psize);
815 Chunk::set_free_with_pinuse(q, psize, tn);
816 self.insert_chunk(q, psize);
817 }
818
819 self.check_top_chunk(self.top);
820 self.check_malloc_state();
821 }
822
823 unsafe fn segment_holding(&self, ptr: *mut u8) -> *mut Segment {
824 let mut sp = &self.seg as *const Segment as *mut Segment;
825 while !sp.is_null() {
826 if (*sp).base <= ptr && ptr < Segment::top(sp) {
827 return sp;
828 }
829 sp = (*sp).next;
830 }
831 ptr::null_mut()
832 }
833
834 unsafe fn tmalloc_small(&mut self, size: usize) -> *mut u8 {
835 let leastbit = least_bit(self.treemap);
836 let i = leastbit.trailing_zeros();
837 let mut v = *self.treebin_at(i);
838 let mut t = v;
839 let mut rsize = Chunk::size(TreeChunk::chunk(t)) - size;
840
841 loop {
842 t = TreeChunk::leftmost_child(t);
843 if t.is_null() {
844 break;
845 }
846 let trem = Chunk::size(TreeChunk::chunk(t)) - size;
847 if trem < rsize {
848 rsize = trem;
849 v = t;
850 }
851 }
852
853 let vc = TreeChunk::chunk(v);
854 let r = Chunk::plus_offset(vc, size) as *mut TreeChunk;
855 debug_assert_eq!(Chunk::size(vc), rsize + size);
856 self.unlink_large_chunk(v);
857 if rsize < self.min_chunk_size() {
858 Chunk::set_inuse_and_pinuse(vc, rsize + size);
859 } else {
860 let rc = TreeChunk::chunk(r);
861 Chunk::set_size_and_pinuse_of_inuse_chunk(vc, size);
862 Chunk::set_size_and_pinuse_of_free_chunk(rc, rsize);
863 self.replace_dv(rc, rsize);
864 }
865 Chunk::to_mem(vc)
866 }
867
868 unsafe fn tmalloc_large(&mut self, size: usize) -> *mut u8 {
869 let mut v = ptr::null_mut();
870 let mut rsize = !size + 1;
871 let idx = self.compute_tree_index(size);
872 let mut t = *self.treebin_at(idx);
873 if !t.is_null() {
874 // Traverse the tree for this bin looking for a node with size
875 // equal to the `size` above.
876 let mut sizebits = size << leftshift_for_tree_index(idx);
877 // Keep track of the deepest untaken right subtree
878 let mut rst = ptr::null_mut();
879 loop {
880 let csize = Chunk::size(TreeChunk::chunk(t));
881 if csize >= size && csize - size < rsize {
882 v = t;
883 rsize = csize - size;
884 if rsize == 0 {
885 break;
886 }
887 }
888 let rt = (*t).child[1];
889 t = (*t).child[(sizebits >> (mem::size_of::<usize>() * 8 - 1)) & 1];
890 if !rt.is_null() && rt != t {
891 rst = rt;
892 }
893 if t.is_null() {
894 // Reset `t` to the least subtree holding sizes greater than
895 // the `size` above, breaking out
896 t = rst;
897 break;
898 }
899 sizebits <<= 1;
900 }
901 }
902
903 // Set t to the root of the next non-empty treebin
904 if t.is_null() && v.is_null() {
905 let leftbits = left_bits(1 << idx) & self.treemap;
906 if leftbits != 0 {
907 let leastbit = least_bit(leftbits);
908 let i = leastbit.trailing_zeros();
909 t = *self.treebin_at(i);
910 }
911 }
912
913 // Find the smallest of this tree or subtree
914 while !t.is_null() {
915 let csize = Chunk::size(TreeChunk::chunk(t));
916 if csize >= size && csize - size < rsize {
917 rsize = csize - size;
918 v = t;
919 }
920 t = TreeChunk::leftmost_child(t);
921 }
922
923 // If dv is a better fit, then return null so malloc will use it
924 if v.is_null() || (self.dvsize >= size && !(rsize < self.dvsize - size)) {
925 return ptr::null_mut();
926 }
927
928 let vc = TreeChunk::chunk(v);
929 let r = Chunk::plus_offset(vc, size);
930 debug_assert_eq!(Chunk::size(vc), rsize + size);
931 self.unlink_large_chunk(v);
932 if rsize < self.min_chunk_size() {
933 Chunk::set_inuse_and_pinuse(vc, rsize + size);
934 } else {
935 Chunk::set_size_and_pinuse_of_inuse_chunk(vc, size);
936 Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
937 self.insert_chunk(r, rsize);
938 }
939 Chunk::to_mem(vc)
940 }
941
942 unsafe fn smallbin_at(&mut self, idx: u32) -> *mut Chunk {
943 debug_assert!(((idx * 2) as usize) < self.smallbins.len());
944 &mut *self.smallbins.get_unchecked_mut((idx as usize) * 2) as *mut *mut Chunk as *mut Chunk
945 }
946
947 unsafe fn treebin_at(&mut self, idx: u32) -> *mut *mut TreeChunk {
948 debug_assert!((idx as usize) < self.treebins.len());
949 &mut *self.treebins.get_unchecked_mut(idx as usize)
950 }
951
952 fn compute_tree_index(&self, size: usize) -> u32 {
953 let x = size >> TREEBIN_SHIFT;
954 if x == 0 {
955 0
956 } else if x > 0xffff {
957 NTREEBINS as u32 - 1
958 } else {
959 let k = mem::size_of_val(&x) * 8 - 1 - (x.leading_zeros() as usize);
960 ((k << 1) + (size >> (k + TREEBIN_SHIFT - 1) & 1)) as u32
961 }
962 }
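// For example, compute_tree_index(768) == 3: 768 >> TREEBIN_SHIFT == 3, whose
// highest set bit is bit 1, giving (1 << 1) + 1 == 3; consistently,
// min_size_for_tree_index(3) further below works out to 512 | 256 == 768.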
963
964 unsafe fn unlink_first_small_chunk(&mut self, head: *mut Chunk, next: *mut Chunk, idx: u32) {
965 let ptr = (*next).prev;
966 debug_assert!(next != head);
967 debug_assert!(next != ptr);
968 debug_assert_eq!(Chunk::size(next), self.small_index2size(idx));
969 if head == ptr {
970 self.clear_smallmap(idx);
971 } else {
972 (*ptr).next = head;
973 (*head).prev = ptr;
974 }
975 }
976
977 unsafe fn replace_dv(&mut self, chunk: *mut Chunk, size: usize) {
978 let dvs = self.dvsize;
979 debug_assert!(self.is_small(dvs));
980 if dvs != 0 {
981 let dv = self.dv;
982 self.insert_small_chunk(dv, dvs);
983 }
984 self.dvsize = size;
985 self.dv = chunk;
986 }
987
988 unsafe fn insert_chunk(&mut self, chunk: *mut Chunk, size: usize) {
989 if self.is_small(size) {
990 self.insert_small_chunk(chunk, size);
991 } else {
992 self.insert_large_chunk(chunk as *mut TreeChunk, size);
993 }
994 }
995
996 unsafe fn insert_small_chunk(&mut self, chunk: *mut Chunk, size: usize) {
997 let idx = self.small_index(size);
998 let head = self.smallbin_at(idx);
999 let mut f = head;
1000 debug_assert!(size >= self.min_chunk_size());
1001 if !self.smallmap_is_marked(idx) {
1002 self.mark_smallmap(idx);
1003 } else {
1004 f = (*head).prev;
1005 }
1006
1007 (*head).prev = chunk;
1008 (*f).next = chunk;
1009 (*chunk).prev = f;
1010 (*chunk).next = head;
1011 }
1012
1013 unsafe fn insert_large_chunk(&mut self, chunk: *mut TreeChunk, size: usize) {
1014 let idx = self.compute_tree_index(size);
1015 let h = self.treebin_at(idx);
1016 (*chunk).index = idx;
1017 (*chunk).child[0] = ptr::null_mut();
1018 (*chunk).child[1] = ptr::null_mut();
1019 let chunkc = TreeChunk::chunk(chunk);
1020 if !self.treemap_is_marked(idx) {
1021 self.mark_treemap(idx);
1022 *h = chunk;
1023 (*chunk).parent = h as *mut TreeChunk; // TODO: dubious?
1024 (*chunkc).next = chunkc;
1025 (*chunkc).prev = chunkc;
1026 } else {
1027 let mut t = *h;
1028 let mut k = size << leftshift_for_tree_index(idx);
1029 loop {
1030 if Chunk::size(TreeChunk::chunk(t)) != size {
1031 let c = &mut (*t).child[(k >> mem::size_of::<usize>() * 8 - 1) & 1];
1032 k <<= 1;
1033 if !c.is_null() {
1034 t = *c;
1035 } else {
1036 *c = chunk;
1037 (*chunk).parent = t;
1038 (*chunkc).next = chunkc;
1039 (*chunkc).prev = chunkc;
1040 break;
1041 }
1042 } else {
1043 let tc = TreeChunk::chunk(t);
1044 let f = (*tc).prev;
1045 (*f).next = chunkc;
1046 (*tc).prev = chunkc;
1047 (*chunkc).prev = f;
1048 (*chunkc).next = tc;
1049 (*chunk).parent = ptr::null_mut();
1050 break;
1051 }
1052 }
1053 }
1054 }
1055
1056 unsafe fn smallmap_is_marked(&self, idx: u32) -> bool {
1057 self.smallmap & (1 << idx) != 0
1058 }
1059
1060 unsafe fn mark_smallmap(&mut self, idx: u32) {
1061 self.smallmap |= 1 << idx;
1062 }
1063
1064 unsafe fn clear_smallmap(&mut self, idx: u32) {
1065 self.smallmap &= !(1 << idx);
1066 }
1067
1068 unsafe fn treemap_is_marked(&self, idx: u32) -> bool {
1069 self.treemap & (1 << idx) != 0
1070 }
1071
1072 unsafe fn mark_treemap(&mut self, idx: u32) {
1073 self.treemap |= 1 << idx;
1074 }
1075
1076 unsafe fn clear_treemap(&mut self, idx: u32) {
1077 self.treemap &= !(1 << idx);
1078 }
1079
1080 unsafe fn unlink_chunk(&mut self, chunk: *mut Chunk, size: usize) {
1081 if self.is_small(size) {
1082 self.unlink_small_chunk(chunk, size)
1083 } else {
1084 self.unlink_large_chunk(chunk as *mut TreeChunk);
1085 }
1086 }
1087
1088 unsafe fn unlink_small_chunk(&mut self, chunk: *mut Chunk, size: usize) {
1089 let f = (*chunk).prev;
1090 let b = (*chunk).next;
1091 let idx = self.small_index(size);
1092 debug_assert!(chunk != b);
1093 debug_assert!(chunk != f);
1094 debug_assert_eq!(Chunk::size(chunk), self.small_index2size(idx));
1095 if b == f {
1096 self.clear_smallmap(idx);
1097 } else {
1098 (*f).next = b;
1099 (*b).prev = f;
1100 }
1101 }
1102
1103 unsafe fn unlink_large_chunk(&mut self, chunk: *mut TreeChunk) {
1104 let xp = (*chunk).parent;
1105 let mut r;
1106 if TreeChunk::next(chunk) != chunk {
1107 let f = TreeChunk::prev(chunk);
1108 r = TreeChunk::next(chunk);
1109 (*f).chunk.next = TreeChunk::chunk(r);
1110 (*r).chunk.prev = TreeChunk::chunk(f);
1111 } else {
1112 let mut rp = &mut (*chunk).child[1];
1113 if rp.is_null() {
1114 rp = &mut (*chunk).child[0];
1115 }
1116 r = *rp;
1117 if !rp.is_null() {
1118 loop {
1119 let mut cp = &mut (**rp).child[1];
1120 if cp.is_null() {
1121 cp = &mut (**rp).child[0];
1122 }
1123 if cp.is_null() {
1124 break;
1125 }
1126 rp = cp;
1127 }
1128 r = *rp;
1129 *rp = ptr::null_mut();
1130 }
1131 }
1132
1133 if xp.is_null() {
1134 return;
1135 }
1136
1137 let h = self.treebin_at((*chunk).index);
1138 if chunk == *h {
1139 *h = r;
1140 if r.is_null() {
1141 self.clear_treemap((*chunk).index);
1142 }
1143 } else {
1144 if (*xp).child[0] == chunk {
1145 (*xp).child[0] = r;
1146 } else {
1147 (*xp).child[1] = r;
1148 }
1149 }
1150
1151 if !r.is_null() {
1152 (*r).parent = xp;
1153 let c0 = (*chunk).child[0];
1154 if !c0.is_null() {
1155 (*r).child[0] = c0;
1156 (*c0).parent = r;
1157 }
1158 let c1 = (*chunk).child[1];
1159 if !c1.is_null() {
1160 (*r).child[1] = c1;
1161 (*c1).parent = r;
1162 }
1163 }
1164 }
1165
1166 pub unsafe fn free(&mut self, mem: *mut u8) {
1167 self.check_malloc_state();
1168
1169 let mut p = Chunk::from_mem(mem);
1170 let mut psize = Chunk::size(p);
1171 let next = Chunk::plus_offset(p, psize);
1172 if !Chunk::pinuse(p) {
1173 let prevsize = (*p).prev_foot;
1174
1175 if Chunk::mmapped(p) {
1176 psize += prevsize + self.mmap_foot_pad();
1177 if self
1178 .system_allocator
1179 .free((p as *mut u8).offset(-(prevsize as isize)), psize)
1180 {
1181 self.footprint -= psize;
1182 }
1183 return;
1184 }
1185
1186 let prev = Chunk::minus_offset(p, prevsize);
1187 psize += prevsize;
1188 p = prev;
1189 if p != self.dv {
1190 self.unlink_chunk(p, prevsize);
1191 } else if (*next).head & INUSE == INUSE {
1192 self.dvsize = psize;
1193 Chunk::set_free_with_pinuse(p, psize, next);
1194 return;
1195 }
1196 }
1197
1198 // Consolidate forward if we can
1199 if !Chunk::cinuse(next) {
1200 if next == self.top {
1201 self.topsize += psize;
1202 let tsize = self.topsize;
1203 self.top = p;
1204 (*p).head = tsize | PINUSE;
1205 if p == self.dv {
1206 self.dv = ptr::null_mut();
1207 self.dvsize = 0;
1208 }
1209 if self.should_trim(tsize) {
1210 self.sys_trim(0);
1211 }
1212 return;
1213 } else if next == self.dv {
1214 self.dvsize += psize;
1215 let dsize = self.dvsize;
1216 self.dv = p;
1217 Chunk::set_size_and_pinuse_of_free_chunk(p, dsize);
1218 return;
1219 } else {
1220 let nsize = Chunk::size(next);
1221 psize += nsize;
1222 self.unlink_chunk(next, nsize);
1223 Chunk::set_size_and_pinuse_of_free_chunk(p, psize);
1224 if p == self.dv {
1225 self.dvsize = psize;
1226 return;
1227 }
1228 }
1229 } else {
1230 Chunk::set_free_with_pinuse(p, psize, next);
1231 }
1232
1233 if self.is_small(psize) {
1234 self.insert_small_chunk(p, psize);
1235 self.check_free_chunk(p);
1236 } else {
1237 self.insert_large_chunk(p as *mut TreeChunk, psize);
1238 self.check_free_chunk(p);
1239 self.release_checks -= 1;
1240 if self.release_checks == 0 {
1241 self.release_unused_segments();
1242 }
1243 }
1244 }
1245
1246 fn should_trim(&self, size: usize) -> bool {
1247 size > self.trim_check
1248 }
1249
1250 unsafe fn sys_trim(&mut self, mut pad: usize) -> bool {
1251 let mut released = 0;
1252 if pad < self.max_request() && !self.top.is_null() {
1253 pad += self.top_foot_size();
1254 if self.topsize > pad {
1255 let unit = DEFAULT_GRANULARITY;
1256 let extra = ((self.topsize - pad + unit - 1) / unit - 1) * unit;
1257 let sp = self.segment_holding(self.top as *mut u8);
1258 debug_assert!(!sp.is_null());
1259
1260 if !Segment::is_extern(sp) {
1261 if Segment::can_release_part(&self.system_allocator, sp) {
1262 if (*sp).size >= extra && !self.has_segment_link(sp) {
1263 let newsize = (*sp).size - extra;
1264 if self
1265 .system_allocator
1266 .free_part((*sp).base, (*sp).size, newsize)
1267 {
1268 released = extra;
1269 }
1270 }
1271 }
1272 }
1273
1274 if released != 0 {
1275 (*sp).size -= released;
1276 self.footprint -= released;
1277 let top = self.top;
1278 let topsize = self.topsize - released;
1279 self.init_top(top, topsize);
1280 self.check_top_chunk(self.top);
1281 }
1282 }
1283
1284 released += self.release_unused_segments();
1285
1286 if released == 0 && self.topsize > self.trim_check {
1287 self.trim_check = usize::max_value();
1288 }
1289 }
1290
1291 released != 0
1292 }
1293
1294 unsafe fn has_segment_link(&self, ptr: *mut Segment) -> bool {
1295 let mut sp = &self.seg as *const Segment as *mut Segment;
1296 while !sp.is_null() {
1297 if Segment::holds(ptr, sp as *mut u8) {
1298 return true;
1299 }
1300 sp = (*sp).next;
1301 }
1302 false
1303 }
1304
1305 /// Unmap and unlink any mapped segments that don't contain used chunks
1306 unsafe fn release_unused_segments(&mut self) -> usize {
1307 let mut released = 0;
1308 let mut nsegs = 0;
1309 let mut pred = &mut self.seg as *mut Segment;
1310 let mut sp = (*pred).next;
1311 while !sp.is_null() {
1312 let base = (*sp).base;
1313 let size = (*sp).size;
1314 let next = (*sp).next;
1315 nsegs += 1;
1316
1317 if Segment::can_release_part(&self.system_allocator, sp) && !Segment::is_extern(sp) {
1318 let p = self.align_as_chunk(base);
1319 let psize = Chunk::size(p);
1320 // We can unmap if the first chunk holds the entire segment and
1321 // isn't pinned.
1322 let chunk_top = (p as *mut u8).offset(psize as isize);
1323 let top = base.offset((size - self.top_foot_size()) as isize);
1324 if !Chunk::inuse(p) && chunk_top >= top {
1325 let tp = p as *mut TreeChunk;
1326 debug_assert!(Segment::holds(sp, sp as *mut u8));
1327 if p == self.dv {
1328 self.dv = ptr::null_mut();
1329 self.dvsize = 0;
1330 } else {
1331 self.unlink_large_chunk(tp);
1332 }
1333 if self.system_allocator.free(base, size) {
1334 released += size;
1335 self.footprint -= size;
1336 // unlink our obsolete record
1337 sp = pred;
1338 (*sp).next = next;
1339 } else {
1340 // back out if we can't unmap
1341 self.insert_large_chunk(tp, psize);
1342 }
1343 }
1344 }
1345 pred = sp;
1346 sp = next;
1347 }
1348 self.release_checks = if nsegs > MAX_RELEASE_CHECK_RATE {
1349 nsegs
1350 } else {
1351 MAX_RELEASE_CHECK_RATE
1352 };
1353 return released;
1354 }
1355
1356 // Sanity checks
1357
1358 unsafe fn check_any_chunk(&self, p: *mut Chunk) {
1359 if !cfg!(debug_assertions) {
1360 return;
1361 }
1362 debug_assert!(
1363 self.is_aligned(Chunk::to_mem(p) as usize) || (*p).head == Chunk::fencepost_head()
1364 );
1365 debug_assert!(p as *mut u8 >= self.least_addr);
1366 }
1367
1368 unsafe fn check_top_chunk(&self, p: *mut Chunk) {
1369 if !cfg!(debug_assertions) {
1370 return;
1371 }
1372 let sp = self.segment_holding(p as *mut u8);
1373 let sz = (*p).head & !INUSE;
1374 debug_assert!(!sp.is_null());
1375 debug_assert!(
1376 self.is_aligned(Chunk::to_mem(p) as usize) || (*p).head == Chunk::fencepost_head()
1377 );
1378 debug_assert!(p as *mut u8 >= self.least_addr);
1379 debug_assert_eq!(sz, self.topsize);
1380 debug_assert!(sz > 0);
1381 debug_assert_eq!(
1382 sz,
1383 (*sp).base as usize + (*sp).size - p as usize - self.top_foot_size()
1384 );
1385 debug_assert!(Chunk::pinuse(p));
1386 debug_assert!(!Chunk::pinuse(Chunk::plus_offset(p, sz)));
1387 }
1388
1389 unsafe fn check_malloced_chunk(&self, mem: *mut u8, s: usize) {
1390 if !cfg!(debug_assertions) {
1391 return;
1392 }
1393 if mem.is_null() {
1394 return;
1395 }
1396 let p = Chunk::from_mem(mem);
1397 let sz = (*p).head & !INUSE;
1398 self.check_inuse_chunk(p);
1399 debug_assert_eq!(align_up(sz, self.malloc_alignment()), sz);
1400 debug_assert!(sz >= self.min_chunk_size());
1401 debug_assert!(sz >= s);
1402 debug_assert!(Chunk::mmapped(p) || sz < (s + self.min_chunk_size()));
1403 }
1404
1405 unsafe fn check_inuse_chunk(&self, p: *mut Chunk) {
1406 self.check_any_chunk(p);
1407 debug_assert!(Chunk::inuse(p));
1408 debug_assert!(Chunk::pinuse(Chunk::next(p)));
1409 debug_assert!(Chunk::mmapped(p) || Chunk::pinuse(p) || Chunk::next(Chunk::prev(p)) == p);
1410 if Chunk::mmapped(p) {
1411 self.check_mmapped_chunk(p);
1412 }
1413 }
1414
1415 unsafe fn check_mmapped_chunk(&self, p: *mut Chunk) {
1416 if !cfg!(debug_assertions) {
1417 return;
1418 }
1419 let sz = Chunk::size(p);
1420 let len = sz + (*p).prev_foot + self.mmap_foot_pad();
1421 debug_assert!(Chunk::mmapped(p));
1422 debug_assert!(
1423 self.is_aligned(Chunk::to_mem(p) as usize) || (*p).head == Chunk::fencepost_head()
1424 );
1425 debug_assert!(p as *mut u8 >= self.least_addr);
1426 debug_assert!(!self.is_small(sz));
1427 debug_assert_eq!(align_up(len, self.system_allocator.page_size()), len);
1428 debug_assert_eq!((*Chunk::plus_offset(p, sz)).head, Chunk::fencepost_head());
1429 debug_assert_eq!(
1430 (*Chunk::plus_offset(p, sz + mem::size_of::<usize>())).head,
1431 0
1432 );
1433 }
1434
1435 unsafe fn check_free_chunk(&self, p: *mut Chunk) {
1436 if !cfg!(debug_assertions) {
1437 return;
1438 }
1439 let sz = Chunk::size(p);
1440 let next = Chunk::plus_offset(p, sz);
1441 self.check_any_chunk(p);
1442 debug_assert!(!Chunk::inuse(p));
1443 debug_assert!(!Chunk::pinuse(Chunk::next(p)));
1444 debug_assert!(!Chunk::mmapped(p));
1445 if p != self.dv && p != self.top {
1446 if sz >= self.min_chunk_size() {
1447 debug_assert_eq!(align_up(sz, self.malloc_alignment()), sz);
1448 debug_assert!(self.is_aligned(Chunk::to_mem(p) as usize));
1449 debug_assert_eq!((*next).prev_foot, sz);
1450 debug_assert!(Chunk::pinuse(p));
1451 debug_assert!(next == self.top || Chunk::inuse(next));
1452 debug_assert_eq!((*(*p).next).prev, p);
1453 debug_assert_eq!((*(*p).prev).next, p);
1454 } else {
1455 debug_assert_eq!(sz, mem::size_of::<usize>());
1456 }
1457 }
1458 }
1459
1460 unsafe fn check_malloc_state(&mut self) {
1461 if !cfg!(debug_assertions) {
1462 return;
1463 }
1464 for i in 0..NSMALLBINS {
1465 self.check_smallbin(i as u32);
1466 }
1467 for i in 0..NTREEBINS {
1468 self.check_treebin(i as u32);
1469 }
1470 if self.dvsize != 0 {
1471 self.check_any_chunk(self.dv);
1472 debug_assert_eq!(self.dvsize, Chunk::size(self.dv));
1473 debug_assert!(self.dvsize >= self.min_chunk_size());
1474 let dv = self.dv;
1475 debug_assert!(!self.bin_find(dv));
1476 }
1477 if !self.top.is_null() {
1478 self.check_top_chunk(self.top);
1479 debug_assert!(self.topsize > 0);
1480 let top = self.top;
1481 debug_assert!(!self.bin_find(top));
1482 }
1483 let total = self.traverse_and_check();
1484 debug_assert!(total <= self.footprint);
1485 debug_assert!(self.footprint <= self.max_footprint);
1486 }
1487
1488 unsafe fn check_smallbin(&mut self, idx: u32) {
1489 if !cfg!(debug_assertions) {
1490 return;
1491 }
1492 let b = self.smallbin_at(idx);
1493 let mut p = (*b).next;
1494 let empty = self.smallmap & (1 << idx) == 0;
1495 if p == b {
1496 debug_assert!(empty)
1497 }
1498 if !empty {
1499 while p != b {
1500 let size = Chunk::size(p);
1501 self.check_free_chunk(p);
1502 debug_assert_eq!(self.small_index(size), idx);
1503 debug_assert!((*p).next == b || Chunk::size((*p).next) == Chunk::size(p));
1504 let q = Chunk::next(p);
1505 if (*q).head != Chunk::fencepost_head() {
1506 self.check_inuse_chunk(q);
1507 }
1508 p = (*p).next;
1509 }
1510 }
1511 }
1512
1513 unsafe fn check_treebin(&mut self, idx: u32) {
1514 if !cfg!(debug_assertions) {
1515 return;
1516 }
1517 let tb = self.treebin_at(idx);
1518 let t = *tb;
1519 let empty = self.treemap & (1 << idx) == 0;
1520 if t.is_null() {
1521 debug_assert!(empty);
1522 }
1523 if !empty {
1524 self.check_tree(t);
1525 }
1526 }
1527
1528 unsafe fn check_tree(&mut self, t: *mut TreeChunk) {
1529 if !cfg!(debug_assertions) {
1530 return;
1531 }
1532 let tc = TreeChunk::chunk(t);
1533 let tindex = (*t).index;
1534 let tsize = Chunk::size(tc);
1535 let idx = self.compute_tree_index(tsize);
1536 debug_assert_eq!(tindex, idx);
1537 debug_assert!(tsize >= self.min_large_size());
1538 debug_assert!(tsize >= self.min_size_for_tree_index(idx));
1539 debug_assert!(idx == NTREEBINS as u32 - 1 || tsize < self.min_size_for_tree_index(idx + 1));
1540
1541 let mut u = t;
1542 let mut head = ptr::null_mut::<TreeChunk>();
1543 loop {
1544 let uc = TreeChunk::chunk(u);
1545 self.check_any_chunk(uc);
1546 debug_assert_eq!((*u).index, tindex);
1547 debug_assert_eq!(Chunk::size(uc), tsize);
1548 debug_assert!(!Chunk::inuse(uc));
1549 debug_assert!(!Chunk::pinuse(Chunk::next(uc)));
1550 debug_assert_eq!((*(*uc).next).prev, uc);
1551 debug_assert_eq!((*(*uc).prev).next, uc);
1552 let left = (*u).child[0];
1553 let right = (*u).child[1];
1554 if (*u).parent.is_null() {
1555 debug_assert!(left.is_null());
1556 debug_assert!(right.is_null());
1557 } else {
1558 debug_assert!(head.is_null());
1559 head = u;
1560 debug_assert!((*u).parent != u);
1561 debug_assert!(
1562 (*(*u).parent).child[0] == u
1563 || (*(*u).parent).child[1] == u
1564 || *((*u).parent as *mut *mut TreeChunk) == u
1565 );
1566 if !left.is_null() {
1567 debug_assert_eq!((*left).parent, u);
1568 debug_assert!(left != u);
1569 self.check_tree(left);
1570 }
1571 if !right.is_null() {
1572 debug_assert_eq!((*right).parent, u);
1573 debug_assert!(right != u);
1574 self.check_tree(right);
1575 }
1576 if !left.is_null() && !right.is_null() {
1577 debug_assert!(
1578 Chunk::size(TreeChunk::chunk(left)) < Chunk::size(TreeChunk::chunk(right))
1579 );
1580 }
1581 }
1582
1583 u = TreeChunk::prev(u);
1584 if u == t {
1585 break;
1586 }
1587 }
1588 debug_assert!(!head.is_null());
1589 }
1590
1591 fn min_size_for_tree_index(&self, idx: u32) -> usize {
1592 let idx = idx as usize;
1593 (1 << ((idx >> 1) + TREEBIN_SHIFT)) | ((idx & 1) << ((idx >> 1) + TREEBIN_SHIFT - 1))
1594 }
1595
1596 unsafe fn bin_find(&mut self, chunk: *mut Chunk) -> bool {
1597 let size = Chunk::size(chunk);
1598 if self.is_small(size) {
1599 let sidx = self.small_index(size);
1600 let b = self.smallbin_at(sidx);
1601 if !self.smallmap_is_marked(sidx) {
1602 return false;
1603 }
1604 let mut p = b;
1605 loop {
1606 if p == chunk {
1607 return true;
1608 }
1609 p = (*p).prev;
1610 if p == b {
1611 return false;
1612 }
1613 }
1614 } else {
1615 let tidx = self.compute_tree_index(size);
1616 if !self.treemap_is_marked(tidx) {
1617 return false;
1618 }
1619 let mut t = *self.treebin_at(tidx);
1620 let mut sizebits = size << leftshift_for_tree_index(tidx);
1621 while !t.is_null() && Chunk::size(TreeChunk::chunk(t)) != size {
1622 t = (*t).child[(sizebits >> (mem::size_of::<usize>() * 8 - 1)) & 1];
1623 sizebits <<= 1;
1624 }
1625 if t.is_null() {
1626 return false;
1627 }
1628 let mut u = t;
1629 let chunk = chunk as *mut TreeChunk;
1630 loop {
1631 if u == chunk {
1632 return true;
1633 }
1634 u = TreeChunk::prev(u);
1635 if u == t {
1636 return false;
1637 }
1638 }
1639 }
1640 }
1641
1642 unsafe fn traverse_and_check(&self) -> usize {
1643 0
1644 }
1645 }
1646
1647 const PINUSE: usize = 1 << 0;
1648 const CINUSE: usize = 1 << 1;
1649 const FLAG4: usize = 1 << 2;
1650 const INUSE: usize = PINUSE | CINUSE;
1651 const FLAG_BITS: usize = PINUSE | CINUSE | FLAG4;
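// Example encoding: a 64-byte chunk that is in use and whose predecessor is also
// in use has head == 64 | PINUSE | CINUSE == 0x43; a chunk with neither flag set
// is treated as mmapped (see Chunk::mmapped below).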
1652
1653 impl Chunk {
1654 unsafe fn fencepost_head() -> usize {
1655 INUSE | mem::size_of::<usize>()
1656 }
1657
1658 unsafe fn size(me: *mut Chunk) -> usize {
1659 (*me).head & !FLAG_BITS
1660 }
1661
1662 unsafe fn next(me: *mut Chunk) -> *mut Chunk {
1663 (me as *mut u8).offset(((*me).head & !FLAG_BITS) as isize) as *mut Chunk
1664 }
1665
1666 unsafe fn prev(me: *mut Chunk) -> *mut Chunk {
1667 (me as *mut u8).offset(-((*me).prev_foot as isize)) as *mut Chunk
1668 }
1669
1670 unsafe fn cinuse(me: *mut Chunk) -> bool {
1671 (*me).head & CINUSE != 0
1672 }
1673
1674 unsafe fn pinuse(me: *mut Chunk) -> bool {
1675 (*me).head & PINUSE != 0
1676 }
1677
1678 unsafe fn clear_pinuse(me: *mut Chunk) {
1679 (*me).head &= !PINUSE;
1680 }
1681
1682 unsafe fn inuse(me: *mut Chunk) -> bool {
1683 (*me).head & INUSE != PINUSE
1684 }
1685
1686 unsafe fn mmapped(me: *mut Chunk) -> bool {
1687 (*me).head & INUSE == 0
1688 }
1689
1690 unsafe fn set_inuse(me: *mut Chunk, size: usize) {
1691 (*me).head = ((*me).head & PINUSE) | size | CINUSE;
1692 let next = Chunk::plus_offset(me, size);
1693 (*next).head |= PINUSE;
1694 }
1695
1696 unsafe fn set_inuse_and_pinuse(me: *mut Chunk, size: usize) {
1697 (*me).head = PINUSE | size | CINUSE;
1698 let next = Chunk::plus_offset(me, size);
1699 (*next).head |= PINUSE;
1700 }
1701
1702 unsafe fn set_size_and_pinuse_of_inuse_chunk(me: *mut Chunk, size: usize) {
1703 (*me).head = size | PINUSE | CINUSE;
1704 }
1705
1706 unsafe fn set_size_and_pinuse_of_free_chunk(me: *mut Chunk, size: usize) {
1707 (*me).head = size | PINUSE;
1708 Chunk::set_foot(me, size);
1709 }
1710
1711 unsafe fn set_free_with_pinuse(p: *mut Chunk, size: usize, n: *mut Chunk) {
1712 Chunk::clear_pinuse(n);
1713 Chunk::set_size_and_pinuse_of_free_chunk(p, size);
1714 }
1715
1716 unsafe fn set_foot(me: *mut Chunk, size: usize) {
1717 let next = Chunk::plus_offset(me, size);
1718 (*next).prev_foot = size;
1719 }
1720
1721 unsafe fn plus_offset(me: *mut Chunk, offset: usize) -> *mut Chunk {
1722 (me as *mut u8).offset(offset as isize) as *mut Chunk
1723 }
1724
1725 unsafe fn minus_offset(me: *mut Chunk, offset: usize) -> *mut Chunk {
1726 (me as *mut u8).offset(-(offset as isize)) as *mut Chunk
1727 }
1728
1729 unsafe fn to_mem(me: *mut Chunk) -> *mut u8 {
1730 (me as *mut u8).offset(Chunk::mem_offset())
1731 }
1732
1733 fn mem_offset() -> isize {
1734 2 * (mem::size_of::<usize>() as isize)
1735 }
1736
1737 unsafe fn from_mem(mem: *mut u8) -> *mut Chunk {
1738 mem.offset(-2 * (mem::size_of::<usize>() as isize)) as *mut Chunk
1739 }
1740 }
1741
1742 impl TreeChunk {
1743 unsafe fn leftmost_child(me: *mut TreeChunk) -> *mut TreeChunk {
1744 let left = (*me).child[0];
1745 if left.is_null() {
1746 (*me).child[1]
1747 } else {
1748 left
1749 }
1750 }
1751
1752 unsafe fn chunk(me: *mut TreeChunk) -> *mut Chunk {
1753 &mut (*me).chunk
1754 }
1755
1756 unsafe fn next(me: *mut TreeChunk) -> *mut TreeChunk {
1757 (*TreeChunk::chunk(me)).next as *mut TreeChunk
1758 }
1759
1760 unsafe fn prev(me: *mut TreeChunk) -> *mut TreeChunk {
1761 (*TreeChunk::chunk(me)).prev as *mut TreeChunk
1762 }
1763 }
1764
1765 const EXTERN: u32 = 1 << 0;
1766
1767 impl Segment {
1768 unsafe fn is_extern(seg: *mut Segment) -> bool {
1769 (*seg).flags & EXTERN != 0
1770 }
1771
1772 unsafe fn can_release_part<A: Allocator>(system_allocator: &A, seg: *mut Segment) -> bool {
1773 system_allocator.can_release_part((*seg).flags >> 1)
1774 }
1775
1776 unsafe fn sys_flags(seg: *mut Segment) -> u32 {
1777 (*seg).flags >> 1
1778 }
1779
1780 unsafe fn holds(seg: *mut Segment, addr: *mut u8) -> bool {
1781 (*seg).base <= addr && addr < Segment::top(seg)
1782 }
1783
1784 unsafe fn top(seg: *mut Segment) -> *mut u8 {
1785 (*seg).base.offset((*seg).size as isize)
1786 }
1787 }
1788
1789 #[cfg(test)]
1790 mod tests {
1791 use super::*;
1792 use System;
1793
1794 // Prime the allocator with some allocations such that there will be free
1795 // chunks in the treemap
1796 unsafe fn setup_treemap<A: Allocator>(a: &mut Dlmalloc<A>) {
1797 let large_request_size = NSMALLBINS * (1 << SMALLBIN_SHIFT);
1798 assert!(!a.is_small(large_request_size));
1799 let large_request1 = a.malloc(large_request_size);
1800 assert_ne!(large_request1, ptr::null_mut());
1801 let large_request2 = a.malloc(large_request_size);
1802 assert_ne!(large_request2, ptr::null_mut());
1803 a.free(large_request1);
1804 assert_ne!(a.treemap, 0);
1805 }
1806
1807 #[test]
1808 // Test allocating, with a non-empty treemap, a specific size that used to
1809 // trigger an integer overflow bug
1810 fn treemap_alloc_overflow_minimal() {
1811 let mut a = Dlmalloc::new(System::new());
1812 unsafe {
1813 setup_treemap(&mut a);
1814 let min_idx31_size = (0xc000 << TREEBIN_SHIFT) - a.chunk_overhead() + 1;
1815 assert_ne!(a.malloc(min_idx31_size), ptr::null_mut());
1816 }
1817 }
1818
1819 #[test]
1820 // Test allocating the maximum request size with a non-empty treemap
1821 fn treemap_alloc_max() {
1822 let mut a = Dlmalloc::new(System::new());
1823 unsafe {
1824 setup_treemap(&mut a);
1825 let max_request_size = a.max_request() - 1;
1826 assert_eq!(a.malloc(max_request_size), ptr::null_mut());
1827 }
1828 }
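
    // A small additional smoke test (a sketch; it relies only on the public
    // `malloc`/`free`/`malloc_alignment` methods defined above): an allocation
    // should come back non-null, aligned to `malloc_alignment`, and be
    // writable until it is freed.
    #[test]
    fn malloc_free_roundtrip() {
        let mut a = Dlmalloc::new(System::new());
        unsafe {
            let align = a.malloc_alignment();
            let p = a.malloc(128);
            assert_ne!(p, ptr::null_mut());
            assert_eq!(p as usize % align, 0);
            // Touch every byte to make sure the memory is usable.
            ptr::write_bytes(p, 0xa5, 128);
            a.free(p);
        }
    }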
1829 }
1830