1 //! Rust bindings to the `jemalloc` C library.
2 //!
//! `jemalloc` is a general purpose memory allocator; its documentation
//! can be found here:
5 //!
6 //! * [API documentation][jemalloc_docs]
7 //! * [Wiki][jemalloc_wiki] (design documents, presentations, profiling, debugging, tuning, ...)
8 //!
9 //! `jemalloc` exposes both a standard and a non-standard API.
10 //!
11 //! # Standard API
12 //!
//! The standard API includes: the [`malloc`], [`calloc`], [`realloc`], and
//! [`free`] functions, which conform to ISO/IEC 9899:1990 (“ISO C90”),
//! [`posix_memalign`], which conforms to POSIX.1-2016, and
//! [`aligned_alloc`].
17 //!
//! Note that these standards leave some details as _implementation defined_.
//! These docs document this behavior for `jemalloc`, but keep in mind that
//! other standard-conforming implementations of these functions in other
//! allocators might behave slightly differently.
22 //!
23 //! # Non-Standard API
24 //!
25 //! The non-standard API includes: [`mallocx`], [`rallocx`], [`xallocx`],
26 //! [`sallocx`], [`dallocx`], [`sdallocx`], and [`nallocx`]. These functions all
27 //! have a `flags` argument that can be used to specify options. Use bitwise or
28 //! `|` to specify one or more of the following: [`MALLOCX_LG_ALIGN`],
29 //! [`MALLOCX_ALIGN`], [`MALLOCX_ZERO`], [`MALLOCX_TCACHE`],
30 //! [`MALLOCX_TCACHE_NONE`], and [`MALLOCX_ARENA`].
31 //!
32 //! # Environment variables
33 //!
34 //! The `MALLOC_CONF` environment variable affects the execution of the allocation functions.
35 //!
//! For the documentation of the [`MALLCTL` namespace visit the jemalloc
//! documentation][jemalloc_mallctl].
38 //!
39 //! [jemalloc_docs]: http://jemalloc.net/jemalloc.3.html
40 //! [jemalloc_wiki]: https://github.com/jemalloc/jemalloc/wiki
41 //! [jemalloc_mallctl]: http://jemalloc.net/jemalloc.3.html#mallctl_namespace
42 #![no_std]
43 #![allow(non_snake_case, non_camel_case_types)]
44 #![cfg_attr(
45 feature = "cargo-clippy",
46 allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)
47 )]
48 #![deny(missing_docs, broken_intra_doc_links)]
49
50 use libc::{c_char, c_int, c_uint, c_void, size_t};
// Boolean type used by the jemalloc extent-hook callbacks below; it is
// modeled as `c_int` on the Rust side. NOTE(review): this assumes the C
// `bool` crossing this FFI boundary is int-sized — confirm against the
// jemalloc headers these bindings target.
type c_bool = c_int;
52
/// Align the memory allocation to start at an address that is a
/// multiple of `1 << la`.
///
/// Use with the `flags` argument of the non-standard allocation functions
/// ([`mallocx`], [`rallocx`], ...), combined with other flags via bitwise or.
///
/// # Safety
///
/// It does not validate that `la` is within the valid range.
#[inline]
pub fn MALLOCX_LG_ALIGN(la: usize) -> c_int {
    // The flags value encodes the base-2 logarithm of the requested
    // alignment directly.
    la as c_int
}
63
/// Align the memory allocation to start at an address that is a multiple of
/// `align`, where `align` is a power of two.
///
/// Use with the `flags` argument of the non-standard allocation functions
/// ([`mallocx`], [`rallocx`], ...), combined with other flags via bitwise or.
///
/// # Safety
///
/// This function does not validate that `align` is a power of two.
#[inline]
pub fn MALLOCX_ALIGN(align: usize) -> c_int {
    // For a power of two, the number of trailing zero bits is its base-2
    // logarithm, which is what the flags word encodes (cf. MALLOCX_LG_ALIGN).
    align.trailing_zeros() as c_int
}
74
/// Initialize newly allocated memory to contain zero bytes.
///
/// In the growing reallocation case, the real size prior to reallocation
/// defines the boundary between untouched bytes and those that are initialized
/// to contain zero bytes.
///
/// If this option is not set, newly allocated memory is uninitialized.
///
/// Use with the `flags` argument of the non-standard allocation functions
/// ([`mallocx`], [`rallocx`], ...), combined with other flags via bitwise or.
pub const MALLOCX_ZERO: c_int = 0x40;
83
/// Use the thread-specific cache (_tcache_) identified by `tc`.
///
/// # Safety
///
/// `tc` must have been acquired via the `tcache.create mallctl`. This function
/// does not validate that `tc` specifies a valid identifier.
#[inline]
pub fn MALLOCX_TCACHE(tc: usize) -> c_int {
    // Bias the identifier by 2 and shift it into the upper bits of the flags
    // word. Wrapping arithmetic reproduces the two's-complement behavior of
    // the corresponding C macro (notably for the `!0` sentinel).
    let biased = tc.wrapping_add(2);
    biased.wrapping_shl(8) as c_int
}
94
95 /// Do not use a thread-specific cache (_tcache_).
96 ///
97 /// Unless `MALLOCX_TCACHE(tc)` or `MALLOCX_TCACHE_NONE` is specified, an
98 /// automatically managed _tcache_ will be used under many circumstances.
99 ///
100 /// # Safety
101 ///
102 /// This option cannot be used in the same `flags` argument as
103 /// `MALLOCX_TCACHE(tc)`.
104 // FIXME: This should just be a const.
105 #[inline]
MALLOCX_TCACHE_NONE() -> c_int106 pub fn MALLOCX_TCACHE_NONE() -> c_int {
107 MALLOCX_TCACHE(!0)
108 }
109
/// Use the arena specified by the index `a`.
///
/// This option has no effect for regions that were allocated via an arena
/// other than the one specified.
///
/// # Safety
///
/// This function does not validate that `a` specifies an arena index in the
/// valid range.
#[inline]
pub fn MALLOCX_ARENA(a: usize) -> c_int {
    // Bias the index by 1 (so that 0 means "no arena specified") and shift it
    // into the high bits of the flags word, using wrapping arithmetic to
    // mirror the C macro.
    let biased_index = (a as c_int).wrapping_add(1);
    biased_index.wrapping_shl(20)
}
123
124 extern "C" {
125 /// Allocates `size` bytes of uninitialized memory.
126 ///
127 /// It returns a pointer to the start (lowest byte address) of the allocated
128 /// space. This pointer is suitably aligned so that it may be assigned to a
129 /// pointer to any type of object and then used to access such an object in
130 /// the space allocated until the space is explicitly deallocated. Each
131 /// yielded pointer points to an object disjoint from any other object.
132 ///
133 /// If the `size` of the space requested is zero, either a null pointer is
134 /// returned, or the behavior is as if the `size` were some nonzero value,
135 /// except that the returned pointer shall not be used to access an object.
136 ///
137 /// # Errors
138 ///
139 /// If the space cannot be allocated, a null pointer is returned and `errno`
140 /// is set to `ENOMEM`.
141 #[cfg_attr(prefixed, link_name = "_rjem_malloc")]
malloc(size: size_t) -> *mut c_void142 pub fn malloc(size: size_t) -> *mut c_void;
143 /// Allocates zero-initialized space for an array of `number` objects, each
144 /// of whose size is `size`.
145 ///
146 /// The result is identical to calling [`malloc`] with an argument of
147 /// `number * size`, with the exception that the allocated memory is
148 /// explicitly initialized to _zero_ bytes.
149 ///
150 /// Note: zero-initialized memory need not be the same as the
151 /// representation of floating-point zero or a null pointer constant.
152 #[cfg_attr(prefixed, link_name = "_rjem_calloc")]
calloc(number: size_t, size: size_t) -> *mut c_void153 pub fn calloc(number: size_t, size: size_t) -> *mut c_void;
154
155 /// Allocates `size` bytes of memory at an address which is a multiple of
156 /// `alignment` and is placed in `*ptr`.
157 ///
158 /// If `size` is zero, then the value placed in `*ptr` is either null, or
159 /// the behavior is as if the `size` were some nonzero value, except that
160 /// the returned pointer shall not be used to access an object.
161 ///
162 /// # Errors
163 ///
164 /// On success, it returns zero. On error, the value of `errno` is _not_ set,
165 /// `*ptr` is not modified, and the return values can be:
166 ///
167 /// - `EINVAL`: the `alignment` argument was not a power-of-two or was not a multiple of
168 /// `mem::size_of::<*const c_void>()`.
169 /// - `ENOMEM`: there was insufficient memory to fulfill the allocation request.
170 ///
171 /// # Safety
172 ///
173 /// The behavior is _undefined_ if:
174 ///
175 /// * `ptr` is null.
176 #[cfg_attr(prefixed, link_name = "_rjem_posix_memalign")]
posix_memalign(ptr: *mut *mut c_void, alignment: size_t, size: size_t) -> c_int177 pub fn posix_memalign(ptr: *mut *mut c_void, alignment: size_t, size: size_t) -> c_int;
178
179 /// Allocates `size` bytes of memory at an address which is a multiple of
180 /// `alignment`.
181 ///
182 /// If the `size` of the space requested is zero, either a null pointer is
183 /// returned, or the behavior is as if the `size` were some nonzero value,
184 /// except that the returned pointer shall not be used to access an object.
185 ///
186 /// # Errors
187 ///
188 /// Returns null if the request fails.
189 ///
190 /// # Safety
191 ///
192 /// The behavior is _undefined_ if:
193 ///
194 /// * `alignment` is not a power-of-two
195 /// * `size` is not an integral multiple of `alignment`
196 #[cfg_attr(prefixed, link_name = "_rjem_aligned_alloc")]
aligned_alloc(alignment: size_t, size: size_t) -> *mut c_void197 pub fn aligned_alloc(alignment: size_t, size: size_t) -> *mut c_void;
198
199 /// Resizes the previously-allocated memory region referenced by `ptr` to
200 /// `size` bytes.
201 ///
202 /// Deallocates the old object pointed to by `ptr` and returns a pointer to
203 /// a new object that has the size specified by `size`. The contents of the
204 /// new object are the same as that of the old object prior to deallocation,
205 /// up to the lesser of the new and old sizes.
206 ///
207 /// The memory in the new object beyond the size of the old object is
208 /// uninitialized.
209 ///
210 /// The returned pointer to a new object may have the same value as a
211 /// pointer to the old object, but [`realloc`] may move the memory
212 /// allocation, resulting in a different return value than `ptr`.
213 ///
214 /// If `ptr` is null, [`realloc`] behaves identically to [`malloc`] for the
215 /// specified size.
216 ///
217 /// If the size of the space requested is zero, the behavior is
218 /// implementation-defined: either a null pointer is returned, or the
219 /// behavior is as if the size were some nonzero value, except that the
220 /// returned pointer shall not be used to access an object # Errors
221 ///
222 /// # Errors
223 ///
224 /// If memory for the new object cannot be allocated, the old object is not
225 /// deallocated, its value is unchanged, [`realloc`] returns null, and
226 /// `errno` is set to `ENOMEM`.
227 ///
228 /// # Safety
229 ///
230 /// The behavior is _undefined_ if:
231 ///
232 /// * `ptr` does not match a pointer previously returned by the memory
233 /// allocation functions of this crate, or
234 /// * the memory region referenced by `ptr` has been deallocated.
235 #[cfg_attr(prefixed, link_name = "_rjem_realloc")]
realloc(ptr: *mut c_void, size: size_t) -> *mut c_void236 pub fn realloc(ptr: *mut c_void, size: size_t) -> *mut c_void;
237
238 /// Deallocates previously-allocated memory region referenced by `ptr`.
239 ///
240 /// This makes the space available for future allocations.
241 ///
242 /// If `ptr` is null, no action occurs.
243 ///
244 /// # Safety
245 ///
246 /// The behavior is _undefined_ if:
247 ///
248 /// * `ptr` does not match a pointer earlier returned by the memory
249 /// allocation functions of this crate, or
250 /// * the memory region referenced by `ptr` has been deallocated.
251 #[cfg_attr(prefixed, link_name = "_rjem_free")]
free(ptr: *mut c_void)252 pub fn free(ptr: *mut c_void);
253
254 /// Allocates at least `size` bytes of memory according to `flags`.
255 ///
256 /// It returns a pointer to the start (lowest byte address) of the allocated
257 /// space. This pointer is suitably aligned so that it may be assigned to a
258 /// pointer to any type of object and then used to access such an object in
259 /// the space allocated until the space is explicitly deallocated. Each
260 /// yielded pointer points to an object disjoint from any other object.
261 ///
262 /// # Errors
263 ///
264 /// On success it returns a non-null pointer. A null pointer return value
265 /// indicates that insufficient contiguous memory was available to service
266 /// the allocation request.
267 ///
268 /// # Safety
269 ///
270 /// The behavior is _undefined_ if `size == 0`.
271 #[cfg_attr(prefixed, link_name = "_rjem_mallocx")]
mallocx(size: size_t, flags: c_int) -> *mut c_void272 pub fn mallocx(size: size_t, flags: c_int) -> *mut c_void;
273
274 /// Resizes the previously-allocated memory region referenced by `ptr` to be
275 /// at least `size` bytes.
276 ///
277 /// Deallocates the old object pointed to by `ptr` and returns a pointer to
278 /// a new object that has the size specified by `size`. The contents of the
279 /// new object are the same as that of the old object prior to deallocation,
280 /// up to the lesser of the new and old sizes.
281 ///
282 /// The the memory in the new object beyond the size of the old object is
283 /// obtained according to `flags` (it might be uninitialized).
284 ///
285 /// The returned pointer to a new object may have the same value as a
286 /// pointer to the old object, but [`rallocx`] may move the memory
287 /// allocation, resulting in a different return value than `ptr`.
288 ///
289 /// # Errors
290 ///
291 /// On success it returns a non-null pointer. A null pointer return value
292 /// indicates that insufficient contiguous memory was available to service
293 /// the allocation request. In this case, the old object is not
294 /// deallocated, and its value is unchanged.
295 ///
296 /// # Safety
297 ///
298 /// The behavior is _undefiend_ if:
299 ///
300 /// * `size == 0`, or
301 /// * `ptr` does not match a pointer earlier returned by
302 /// the memory allocation functions of this crate, or
303 /// * the memory region referenced by `ptr` has been deallocated.
304 #[cfg_attr(prefixed, link_name = "_rjem_rallocx")]
rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void305 pub fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
306
307 /// Resizes the previously-allocated memory region referenced by `ptr` _in
308 /// place_ to be at least `size` bytes, returning the real size of the
309 /// allocation.
310 ///
311 /// Deallocates the old object pointed to by `ptr` and sets `ptr` to a new
312 /// object that has the size returned; the old a new objects share the same
313 /// base address. The contents of the new object are the same as that of the
314 /// old object prior to deallocation, up to the lesser of the new and old
315 /// sizes.
316 ///
317 /// If `extra` is non-zero, an attempt is made to resize the allocation to
318 /// be at least `size + extra` bytes. Inability to allocate the `extra`
319 /// bytes will not by itself result in failure to resize.
320 ///
321 /// The memory in the new object beyond the size of the old object is
322 /// obtained according to `flags` (it might be uninitialized).
323 ///
324 /// # Errors
325 ///
326 /// If the allocation cannot be adequately grown in place up to `size`, the
327 /// size returned is smaller than `size`.
328 ///
329 /// Note:
330 ///
331 /// * the size value returned can be larger than the size requested during
332 /// allocation
333 /// * when shrinking an allocation, use the size returned to determine
334 /// whether the allocation was shrunk sufficiently or not.
335 ///
336 /// # Safety
337 ///
338 /// The behavior is _undefined_ if:
339 ///
340 /// * `size == 0`, or
341 /// * `size + extra > size_t::max_value()`, or
342 /// * `ptr` does not match a pointer earlier returned by the memory
343 /// allocation functions of this crate, or
344 /// * the memory region referenced by `ptr` has been deallocated.
345 #[cfg_attr(prefixed, link_name = "_rjem_xallocx")]
xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t346 pub fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;
347
348 /// Returns the real size of the previously-allocated memory region
349 /// referenced by `ptr`.
350 ///
351 /// The value may be larger than the size requested on allocation.
352 ///
353 /// # Safety
354 ///
355 /// The behavior is _undefined_ if:
356 ///
357 /// * `ptr` does not match a pointer earlier returned by the memory
358 /// allocation functions of this crate, or
359 /// * the memory region referenced by `ptr` has been deallocated.
360 #[cfg_attr(prefixed, link_name = "_rjem_sallocx")]
sallocx(ptr: *const c_void, flags: c_int) -> size_t361 pub fn sallocx(ptr: *const c_void, flags: c_int) -> size_t;
362
363 /// Deallocates previously-allocated memory region referenced by `ptr`.
364 ///
365 /// This makes the space available for future allocations.
366 ///
367 /// # Safety
368 ///
369 /// The behavior is _undefined_ if:
370 ///
371 /// * `ptr` does not match a pointer earlier returned by the memory
372 /// allocation functions of this crate, or
373 /// * `ptr` is null, or
374 /// * the memory region referenced by `ptr` has been deallocated.
375 #[cfg_attr(prefixed, link_name = "_rjem_dallocx")]
dallocx(ptr: *mut c_void, flags: c_int)376 pub fn dallocx(ptr: *mut c_void, flags: c_int);
377
378 /// Deallocates previously-allocated memory region referenced by `ptr` with
379 /// `size` hint.
380 ///
381 /// This makes the space available for future allocations.
382 ///
383 /// # Safety
384 ///
385 /// The behavior is _undefined_ if:
386 ///
387 /// * `size` is not in range `[req_size, alloc_size]`, where `req_size` is
388 /// the size requested when performing the allocation, and `alloc_size` is
389 /// the allocation size returned by [`nallocx`], [`sallocx`], or
390 /// [`xallocx`],
391 /// * `ptr` does not match a pointer earlier returned by the memory
392 /// allocation functions of this crate, or
393 /// * `ptr` is null, or
394 /// * the memory region referenced by `ptr` has been deallocated.
395 #[cfg_attr(prefixed, link_name = "_rjem_sdallocx")]
sdallocx(ptr: *mut c_void, size: size_t, flags: c_int)396 pub fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);
397
398 /// Returns the real size of the allocation that would result from a
399 /// [`mallocx`] function call with the same arguments.
400 ///
401 /// # Errors
402 ///
403 /// If the inputs exceed the maximum supported size class and/or alignment
404 /// it returns zero.
405 ///
406 /// # Safety
407 ///
408 /// The behavior is _undefined_ if `size == 0`.
409 #[cfg_attr(prefixed, link_name = "_rjem_nallocx")]
nallocx(size: size_t, flags: c_int) -> size_t410 pub fn nallocx(size: size_t, flags: c_int) -> size_t;
411
412 /// Returns the real size of the previously-allocated memory region
413 /// referenced by `ptr`.
414 ///
415 /// The value may be larger than the size requested on allocation.
416 ///
417 /// Although the excess bytes can be overwritten by the application without
418 /// ill effects, this is not good programming practice: the number of excess
419 /// bytes in an allocation depends on the underlying implementation.
420 ///
421 /// The main use of this function is for debugging and introspection.
422 ///
423 /// # Errors
424 ///
425 /// If `ptr` is null, 0 is returned.
426 ///
427 /// # Safety
428 ///
429 /// The behavior is _undefined_ if:
430 ///
431 /// * `ptr` does not match a pointer earlier returned by the memory
432 /// allocation functions of this crate, or
433 /// * the memory region referenced by `ptr` has been deallocated.
434 #[cfg_attr(prefixed, link_name = "_rjem_malloc_usable_size")]
malloc_usable_size(ptr: *const c_void) -> size_t435 pub fn malloc_usable_size(ptr: *const c_void) -> size_t;
436
437 /// General interface for introspecting the memory allocator, as well as
438 /// setting modifiable parameters and triggering actions.
439 ///
440 /// The period-separated name argument specifies a location in a
441 /// tree-structured namespace ([see jemalloc's `MALLCTL`
442 /// documentation][jemalloc_mallctl]).
443 ///
444 /// To read a value, pass a pointer via `oldp` to adequate space to contain
445 /// the value, and a pointer to its length via `oldlenp``; otherwise pass
446 /// null and null. Similarly, to write a value, pass a pointer to the value
447 /// via `newp`, and its length via `newlen`; otherwise pass null and 0.
448 ///
449 /// # Errors
450 ///
451 /// Returns `0` on success, otherwise returns:
452 ///
453 /// * `EINVAL`: if `newp` is not null, and `newlen` is too large or too
454 /// small. Alternatively, `*oldlenp` is too large or too small; in this case
455 /// as much data as possible are read despite the error.
456 ///
457 /// * `ENOENT`: `name` or mib specifies an unknown/invalid value.
458 ///
459 /// * `EPERM`: Attempt to read or write void value, or attempt to write read-only value.
460 ///
461 /// * `EAGAIN`: A memory allocation failure occurred.
462 ///
463 /// * `EFAULT`: An interface with side effects failed in some way not
464 /// directly related to `mallctl` read/write processing.
465 ///
466 /// [jemalloc_mallctl]: http://jemalloc.net/jemalloc.3.html#mallctl_namespace
467 #[cfg_attr(prefixed, link_name = "_rjem_mallctl")]
mallctl( name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t, newp: *mut c_void, newlen: size_t, ) -> c_int468 pub fn mallctl(
469 name: *const c_char,
470 oldp: *mut c_void,
471 oldlenp: *mut size_t,
472 newp: *mut c_void,
473 newlen: size_t,
474 ) -> c_int;
475 /// Translates a name to a “Management Information Base” (MIB) that can be
476 /// passed repeatedly to [`mallctlbymib`].
477 ///
478 /// This avoids repeated name lookups for applications that repeatedly query
479 /// the same portion of the namespace.
480 ///
481 /// On success, `mibp` contains an array of `*miblenp` integers, where
482 /// `*miblenp` is the lesser of the number of components in name and the
483 /// input value of `*miblenp`. Thus it is possible to pass a `*miblenp` that is
484 /// smaller than the number of period-separated name components, which
485 /// results in a partial MIB that can be used as the basis for constructing
486 /// a complete MIB. For name components that are integers (e.g. the 2 in
487 /// arenas.bin.2.size), the corresponding MIB component will always be that
488 /// integer.
489 #[cfg_attr(prefixed, link_name = "_rjem_mallctlnametomib")]
mallctlnametomib(name: *const c_char, mibp: *mut size_t, miblenp: *mut size_t) -> c_int490 pub fn mallctlnametomib(name: *const c_char, mibp: *mut size_t, miblenp: *mut size_t) -> c_int;
491
492 /// Like [`mallctl`] but taking a `mib` as input instead of a name.
493 #[cfg_attr(prefixed, link_name = "_rjem_mallctlbymib")]
mallctlbymib( mib: *const size_t, miblen: size_t, oldp: *mut c_void, oldpenp: *mut size_t, newp: *mut c_void, newlen: size_t, ) -> c_int494 pub fn mallctlbymib(
495 mib: *const size_t,
496 miblen: size_t,
497 oldp: *mut c_void,
498 oldpenp: *mut size_t,
499 newp: *mut c_void,
500 newlen: size_t,
501 ) -> c_int;
502
503 /// Writes summary statistics via the `write_cb` callback function pointer
504 /// and `cbopaque` data passed to `write_cb`, or [`malloc_message`] if `write_cb`
505 /// is null.
506 ///
507 /// The statistics are presented in human-readable form unless “J”
508 /// is specified as a character within the opts string, in which case the
509 /// statistics are presented in JSON format.
510 ///
511 /// This function can be called repeatedly.
512 ///
513 /// General information that never changes during execution can be omitted
514 /// by specifying `g` as a character within the opts string.
515 ///
516 /// Note that [`malloc_message`] uses the `mallctl*` functions internally,
517 /// so inconsistent statistics can be reported if multiple threads use these
518 /// functions simultaneously.
519 ///
520 /// If the Cargo feature `stats` is enabled, `m`, `d`, and `a` can be
521 /// specified to omit merged arena, destroyed merged arena, and per arena
522 /// statistics, respectively; `b` and `l` can be specified to omit per size
523 /// class statistics for bins and large objects, respectively; `x` can be
524 /// specified to omit all mutex statistics. Unrecognized characters are
525 /// silently ignored.
526 ///
527 /// Note that thread caching may prevent some statistics from being
528 /// completely up to date, since extra locking would be required to merge
529 /// counters that track thread cache operations.
530 #[cfg_attr(prefixed, link_name = "_rjem_malloc_stats_print")]
malloc_stats_print( write_cb: Option<unsafe extern "C" fn(*mut c_void, *const c_char)>, cbopaque: *mut c_void, opts: *const c_char, )531 pub fn malloc_stats_print(
532 write_cb: Option<unsafe extern "C" fn(*mut c_void, *const c_char)>,
533 cbopaque: *mut c_void,
534 opts: *const c_char,
535 );
536
537 /// Allows overriding the function which emits the text strings forming the
538 /// errors and warnings if for some reason the `STDERR_FILENO` file descriptor
539 /// is not suitable for this.
540 ///
541 /// [`malloc_message`] takes the `cbopaque` pointer argument that is null,
542 /// unless overridden by the arguments in a call to [`malloc_stats_print`],
543 /// followed by a string pointer.
544 ///
545 /// Please note that doing anything which tries to allocate memory in this
546 /// function is likely to result in a crash or deadlock.
547 #[cfg_attr(prefixed, link_name = "_rjem_malloc_message")]
548 pub static mut malloc_message:
549 Option<unsafe extern "C" fn(cbopaque: *mut c_void, s: *const c_char)>;
550
551 /// Compile-time string of configuration options.
552 ///
553 /// Once, when the first call is made to one of the memory allocation
554 /// routines, the allocator initializes its internals based in part on
555 /// various options that can be specified at compile- or run-time.
556 ///
557 /// The string specified via `--with-malloc-conf`, the string pointed to by
558 /// the global variable `malloc_conf`, the “name” of the file referenced by
559 /// the symbolic link named `/etc/malloc.conf`, and the value of the
560 /// environment variable `MALLOC_CONF`, will be interpreted, in that order,
561 /// from left to right as options. Note that `malloc_conf` may be read
562 /// before `main()` is entered, so the declaration of `malloc_conf` should
563 /// specify an initializer that contains the final value to be read by
564 /// `jemalloc`.
565 ///
566 /// `--with-malloc-conf` and `malloc_conf` are compile-time mechanisms, whereas
567 /// `/etc/malloc.conf` and `MALLOC_CONF` can be safely set any time prior to
568 /// program invocation.
569 ///
570 /// An options string is a comma-separated list of `option:value` pairs.
571 /// There is one key corresponding to each `opt.* mallctl` (see the `MALLCTL
572 /// NAMESPACE` section for options documentation). For example,
573 /// `abort:true,narenas:1` sets the `opt.abort` and `opt.narenas` options.
574 /// Some options have boolean values (`true`/`false`), others have integer
575 /// values (base `8`, `10`, or `16`, depending on prefix), and yet others
576 /// have raw string values.
577 #[cfg_attr(prefixed, link_name = "_rjem_malloc_conf")]
578 pub static malloc_conf: Option<&'static c_char>;
579 }
580
/// Extent lifetime management functions.
///
/// Alias of [`extent_hooks_s`], the table of extent-hook function pointers
/// that can be installed per arena.
pub type extent_hooks_t = extent_hooks_s;
583
// note: there are two structs here, one is used when compiling the crate normally,
// and the other one is behind the `--cfg jemallocator_docs` flag and used only
// when generating docs.
//
// For the docs we want to use type aliases here, but `ctest` does not see
// through them when generating the code to verify the FFI bindings, and it
// needs to be able to tell that these are `fn` types so that `Option<fn>` gets
// lowered to C function pointers.
592
// ABI-sensitive mirror of jemalloc's `extent_hooks_t`: field order and raw
// `fn` pointer types must not change (see the note above this struct).
#[repr(C)]
#[cfg(not(jemallocator_docs))]
#[derive(Copy, Clone, Default)]
#[doc(hidden)]
#[allow(missing_docs)]
pub struct extent_hooks_s {
    /// Extent allocation hook; see [`extent_alloc_t`].
    pub alloc: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            *mut c_bool,
            *mut c_bool,
            c_uint,
        ) -> *mut c_void,
    >,
    /// Extent deallocation hook; see [`extent_dalloc_t`].
    pub dalloc: Option<
        unsafe extern "C" fn(*mut extent_hooks_t, *mut c_void, size_t, c_bool, c_uint) -> c_bool,
    >,
    /// Extent destruction hook; see [`extent_destroy_t`].
    pub destroy:
        Option<unsafe extern "C" fn(*mut extent_hooks_t, *mut c_void, size_t, c_bool, c_uint)>,
    /// Extent commit hook; see [`extent_commit_t`].
    pub commit: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    /// Extent decommit hook.
    pub decommit: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    /// Lazy purge hook.
    pub purge_lazy: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    /// Forced purge hook.
    pub purge_forced: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    /// Extent split hook.
    pub split: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_bool,
            c_uint,
        ) -> c_bool,
    >,
    /// Extent merge hook.
    pub merge: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            *mut c_void,
            size_t,
            c_bool,
            c_uint,
        ) -> c_bool,
    >,
}
678
/// Extent lifetime management functions.
///
/// The extent_hooks_t structure comprises function pointers which are described
/// individually below. `jemalloc` uses these functions to manage extent lifetime,
/// which starts off with allocation of mapped committed memory, in the simplest
/// case followed by deallocation. However, there are performance and platform
/// reasons to retain extents for later reuse. Cleanup attempts cascade from
/// deallocation to decommit to forced purging to lazy purging, which gives the
/// extent management functions opportunities to reject the most permanent
/// cleanup operations in favor of less permanent (and often less costly)
/// operations. All operations except allocation can be universally opted out of
/// by setting the hook pointers to `NULL`, or selectively opted out of by
/// returning failure. Note that once the extent hook is set, the structure is
/// accessed directly by the associated arenas, so it must remain valid for the
/// entire lifetime of the arenas.
#[repr(C)]
#[cfg(jemallocator_docs)]
#[derive(Copy, Clone, Default)]
pub struct extent_hooks_s {
    /// Extent allocation hook.
    #[allow(missing_docs)]
    pub alloc: Option<extent_alloc_t>,
    /// Extent deallocation hook.
    #[allow(missing_docs)]
    pub dalloc: Option<extent_dalloc_t>,
    /// Extent destruction hook.
    #[allow(missing_docs)]
    pub destroy: Option<extent_destroy_t>,
    /// Extent commit hook.
    #[allow(missing_docs)]
    pub commit: Option<extent_commit_t>,
    /// Extent decommit hook.
    #[allow(missing_docs)]
    pub decommit: Option<extent_decommit_t>,
    /// Lazy purge hook.
    #[allow(missing_docs)]
    pub purge_lazy: Option<extent_purge_t>,
    /// Forced purge hook.
    #[allow(missing_docs)]
    pub purge_forced: Option<extent_purge_t>,
    /// Extent split hook.
    #[allow(missing_docs)]
    pub split: Option<extent_split_t>,
    /// Extent merge hook.
    #[allow(missing_docs)]
    pub merge: Option<extent_merge_t>,
}
717
/// Extent allocation function.
///
/// On success returns a pointer to `size` bytes of mapped memory on behalf of
/// arena `arena_ind` such that the extent's base address is a multiple of
/// `alignment`, as well as setting `*zero` to indicate whether the extent is
/// zeroed and `*commit` to indicate whether the extent is committed.
///
/// Zeroing is mandatory if `*zero` is `true` upon function entry. Committing is mandatory if
/// `*commit` is `true` upon function entry. If `new_addr` is not null, the returned
/// pointer must be `new_addr` on success or null on error.
///
/// Committed memory may be committed in absolute terms as on a system that does
/// not overcommit, or in implicit terms as on a system that overcommits and
/// satisfies physical memory needs on demand via soft page faults. Note that
/// replacing the default extent allocation function makes the arena's
/// `arena.<i>.dss` setting irrelevant.
///
/// # Errors
///
/// On error the function returns null and leaves `*zero` and `*commit` unmodified.
///
/// # Safety
///
/// The behavior is _undefined_ if:
///
/// * the `size` parameter is not a multiple of the page size
/// * the `alignment` parameter is not a power of two at least as large as the page size
pub type extent_alloc_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    new_addr: *mut c_void,
    size: size_t,
    alignment: size_t,
    zero: *mut c_bool,
    commit: *mut c_bool,
    arena_ind: c_uint,
) -> *mut c_void;
754
/// Extent deallocation function.
///
/// Deallocates an extent at given `addr` and `size` with `committed`/decommitted
/// memory as indicated, on behalf of arena `arena_ind`, returning `false` upon
/// success.
///
/// If the function returns `true`, this indicates opt-out from deallocation;
/// the virtual memory mapping associated with the extent remains mapped, in the
/// same commit state, and available for future use, in which case it will be
/// automatically retained for later reuse.
pub type extent_dalloc_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    committed: c_bool,
    arena_ind: c_uint,
) -> c_bool;
772
/// Extent destruction function.
///
/// Unconditionally destroys an extent at given `addr` and `size` with
/// `committed`/decommitted memory as indicated, on behalf of arena `arena_ind`.
///
/// This function may be called to destroy retained extents during arena
/// destruction (see `arena.<i>.destroy`).
pub type extent_destroy_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    committed: c_bool,
    arena_ind: c_uint,
);
787
/// Extent commit function.
///
/// Commits zeroed physical memory to back pages within an extent at given
/// `addr` and `size` at `offset` bytes, extending for `length` on behalf of
/// arena `arena_ind`, returning `false` upon success.
///
/// Committed memory may be committed in absolute terms as on a system that does
/// not overcommit, or in implicit terms as on a system that overcommits and
/// satisfies physical memory needs on demand via soft page faults.
///
/// If the function returns `true`, this indicates insufficient physical memory
/// to satisfy the request.
pub type extent_commit_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    offset: size_t,
    length: size_t,
    arena_ind: c_uint,
) -> c_bool;
807
/// Extent decommit function.
///
/// Decommits any physical memory that is backing pages within an extent at
/// given `addr` and `size` at `offset` bytes, extending for `length` on behalf
/// of arena `arena_ind`, returning `false` upon success, in which case the
/// pages will be committed via the extent commit function before being reused.
///
/// If the function returns `true`, this indicates opt-out from decommit; the
/// memory remains committed and available for future use, in which case it will
/// be automatically retained for later reuse.
pub type extent_decommit_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    offset: size_t,
    length: size_t,
    arena_ind: c_uint,
) -> c_bool;
826
/// Extent purge function.
///
/// Discards physical pages within the virtual memory mapping associated with an
/// extent at given `addr` and `size` at `offset` bytes, extending for `length`
/// on behalf of arena `arena_ind`.
///
/// A lazy extent purge function (e.g. implemented via `madvise(...MADV_FREE)`)
/// can delay purging indefinitely and leave the pages within the purged virtual
/// memory range in an indeterminate state, whereas a forced extent purge
/// function immediately purges, and the pages within the virtual memory range
/// will be zero-filled the next time they are accessed. If the function returns
/// `true`, this indicates failure to purge.
pub type extent_purge_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    offset: size_t,
    length: size_t,
    arena_ind: c_uint,
) -> c_bool;
847
/// Extent split function.
///
/// Optionally (the hook may decline) splits an extent at given `addr` and
/// `size` into two adjacent extents, the first of `size_a` bytes, and the
/// second of `size_b` bytes, operating on `committed`/decommitted memory as
/// indicated, on behalf of arena `arena_ind`, returning `false` upon success.
///
/// If the function returns `true`, this indicates that the extent remains
/// unsplit and therefore should continue to be operated on as a whole.
pub type extent_split_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    size_a: size_t,
    size_b: size_t,
    committed: c_bool,
    arena_ind: c_uint,
) -> c_bool;
866
/// Extent merge function.
///
/// Optionally (the hook may decline) merges adjacent extents, at given `addr_a`
/// and `size_a` with given `addr_b` and `size_b` into one contiguous extent,
/// operating on `committed`/decommitted memory as indicated, on behalf of arena
/// `arena_ind`, returning `false` upon success.
///
/// If the function returns `true`, this indicates that the extents remain
/// distinct mappings and therefore should continue to be operated on
/// independently.
pub type extent_merge_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr_a: *mut c_void,
    size_a: size_t,
    addr_b: *mut c_void,
    size_b: size_t,
    committed: c_bool,
    arena_ind: c_uint,
) -> c_bool;
886
// These symbols are used by jemalloc on android but the really old android
// we're building on doesn't have them defined, so just make sure the symbols
// are available.
/// No-op stub for `pthread_atfork` on old Android targets that lack the real
/// libc symbol.
///
/// Registers nothing and always returns `0` (success); jemalloc only needs
/// the symbol to resolve at link time. The arguments (fork handlers in the
/// real API) are ignored.
#[no_mangle]
#[cfg(target_os = "android")]
#[doc(hidden)]
pub extern "C" fn pthread_atfork(
    _prefork: *mut u8,
    _postfork_parent: *mut u8,
    _postfork_child: *mut u8,
) -> i32 {
    // Success per POSIX convention; the handlers are intentionally dropped.
    0
}
900