1 //! Low-level abstraction for allocating and managing zero-filled pages
2 //! of memory.
3
4 use more_asserts::assert_le;
5 use more_asserts::assert_lt;
6 use std::io;
7 use std::ptr;
8 use std::slice;
9
10 /// Round `size` up to the nearest multiple of `page_size`.
/// Round `size` up to the nearest multiple of `page_size`.
///
/// `page_size` must be a power of two (true of all native page sizes);
/// the bit-masking trick below is only correct under that assumption.
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
    // Guard the power-of-two precondition in debug builds; release behavior
    // is unchanged.
    debug_assert!(page_size.is_power_of_two());
    (size + (page_size - 1)) & !(page_size - 1)
}
14
15 /// A simple struct consisting of a page-aligned pointer to page-aligned
16 /// and initially-zeroed memory and a length.
17 #[derive(Debug)]
/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
#[derive(Debug)]
pub struct Mmap {
    // Note that this is stored as a `usize` instead of a `*const` or `*mut`
    // pointer to allow this structure to be natively `Send` and `Sync` without
    // `unsafe impl`. This type is sendable across threads and shareable since
    // the coordination all happens at the OS layer.
    //
    // Base address of the mapping. Never null: even the empty mapping uses
    // `Vec`'s non-null dangling pointer (see `Mmap::new`).
    ptr: usize,
    // Total reserved length of the mapping in bytes. Note that this is the
    // full reserved size, not just the accessible (committed) portion; see
    // `accessible_reserved`, which sets it to `mapping_size`.
    len: usize,
}
26
27 impl Mmap {
28 /// Construct a new empty instance of `Mmap`.
new() -> Self29 pub fn new() -> Self {
30 // Rust's slices require non-null pointers, even when empty. `Vec`
31 // contains code to create a non-null dangling pointer value when
32 // constructed empty, so we reuse that here.
33 let empty = Vec::<u8>::new();
34 Self {
35 ptr: empty.as_ptr() as usize,
36 len: 0,
37 }
38 }
39
40 /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
with_at_least(size: usize) -> Result<Self, String>41 pub fn with_at_least(size: usize) -> Result<Self, String> {
42 let page_size = region::page::size();
43 let rounded_size = round_up_to_page_size(size, page_size);
44 Self::accessible_reserved(rounded_size, rounded_size)
45 }
46
47 /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
48 /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
49 /// must be native page-size multiples.
50 #[cfg(not(target_os = "windows"))]
accessible_reserved( accessible_size: usize, mapping_size: usize, ) -> Result<Self, String>51 pub fn accessible_reserved(
52 accessible_size: usize,
53 mapping_size: usize,
54 ) -> Result<Self, String> {
55 let page_size = region::page::size();
56 assert_le!(accessible_size, mapping_size);
57 assert_eq!(mapping_size & (page_size - 1), 0);
58 assert_eq!(accessible_size & (page_size - 1), 0);
59
60 // Mmap may return EINVAL if the size is zero, so just
61 // special-case that.
62 if mapping_size == 0 {
63 return Ok(Self::new());
64 }
65
66 Ok(if accessible_size == mapping_size {
67 // Allocate a single read-write region at once.
68 let ptr = unsafe {
69 libc::mmap(
70 ptr::null_mut(),
71 mapping_size,
72 libc::PROT_READ | libc::PROT_WRITE,
73 libc::MAP_PRIVATE | libc::MAP_ANON,
74 -1,
75 0,
76 )
77 };
78 if ptr as isize == -1_isize {
79 return Err(io::Error::last_os_error().to_string());
80 }
81
82 Self {
83 ptr: ptr as usize,
84 len: mapping_size,
85 }
86 } else {
87 // Reserve the mapping size.
88 let ptr = unsafe {
89 libc::mmap(
90 ptr::null_mut(),
91 mapping_size,
92 libc::PROT_NONE,
93 libc::MAP_PRIVATE | libc::MAP_ANON,
94 -1,
95 0,
96 )
97 };
98 if ptr as isize == -1_isize {
99 return Err(io::Error::last_os_error().to_string());
100 }
101
102 let mut result = Self {
103 ptr: ptr as usize,
104 len: mapping_size,
105 };
106
107 if accessible_size != 0 {
108 // Commit the accessible size.
109 result.make_accessible(0, accessible_size)?;
110 }
111
112 result
113 })
114 }
115
116 /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
117 /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
118 /// must be native page-size multiples.
119 #[cfg(target_os = "windows")]
accessible_reserved( accessible_size: usize, mapping_size: usize, ) -> Result<Self, String>120 pub fn accessible_reserved(
121 accessible_size: usize,
122 mapping_size: usize,
123 ) -> Result<Self, String> {
124 use winapi::um::memoryapi::VirtualAlloc;
125 use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE};
126
127 let page_size = region::page::size();
128 assert_le!(accessible_size, mapping_size);
129 assert_eq!(mapping_size & (page_size - 1), 0);
130 assert_eq!(accessible_size & (page_size - 1), 0);
131
132 Ok(if accessible_size == mapping_size {
133 // Allocate a single read-write region at once.
134 let ptr = unsafe {
135 VirtualAlloc(
136 ptr::null_mut(),
137 mapping_size,
138 MEM_RESERVE | MEM_COMMIT,
139 PAGE_READWRITE,
140 )
141 };
142 if ptr.is_null() {
143 return Err(io::Error::last_os_error().to_string());
144 }
145
146 Self {
147 ptr: ptr as usize,
148 len: mapping_size,
149 }
150 } else {
151 // Reserve the mapping size.
152 let ptr =
153 unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
154 if ptr.is_null() {
155 return Err(io::Error::last_os_error().to_string());
156 }
157
158 let mut result = Self {
159 ptr: ptr as usize,
160 len: mapping_size,
161 };
162
163 if accessible_size != 0 {
164 // Commit the accessible size.
165 result.make_accessible(0, accessible_size)?;
166 }
167
168 result
169 })
170 }
171
172 /// Make the memory starting at `start` and extending for `len` bytes accessible.
173 /// `start` and `len` must be native page-size multiples and describe a range within
174 /// `self`'s reserved memory.
175 #[cfg(not(target_os = "windows"))]
make_accessible(&mut self, start: usize, len: usize) -> Result<(), String>176 pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
177 let page_size = region::page::size();
178 assert_eq!(start & (page_size - 1), 0);
179 assert_eq!(len & (page_size - 1), 0);
180 assert_lt!(len, self.len);
181 assert_lt!(start, self.len - len);
182
183 // Commit the accessible size.
184 let ptr = self.ptr as *const u8;
185 unsafe { region::protect(ptr.add(start), len, region::Protection::READ_WRITE) }
186 .map_err(|e| e.to_string())
187 }
188
189 /// Make the memory starting at `start` and extending for `len` bytes accessible.
190 /// `start` and `len` must be native page-size multiples and describe a range within
191 /// `self`'s reserved memory.
192 #[cfg(target_os = "windows")]
make_accessible(&mut self, start: usize, len: usize) -> Result<(), String>193 pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
194 use winapi::ctypes::c_void;
195 use winapi::um::memoryapi::VirtualAlloc;
196 use winapi::um::winnt::{MEM_COMMIT, PAGE_READWRITE};
197 let page_size = region::page::size();
198 assert_eq!(start & (page_size - 1), 0);
199 assert_eq!(len & (page_size - 1), 0);
200 assert_lt!(len, self.len);
201 assert_lt!(start, self.len - len);
202
203 // Commit the accessible size.
204 let ptr = self.ptr as *const u8;
205 if unsafe {
206 VirtualAlloc(
207 ptr.add(start) as *mut c_void,
208 len,
209 MEM_COMMIT,
210 PAGE_READWRITE,
211 )
212 }
213 .is_null()
214 {
215 return Err(io::Error::last_os_error().to_string());
216 }
217
218 Ok(())
219 }
220
221 /// Return the allocated memory as a slice of u8.
as_slice(&self) -> &[u8]222 pub fn as_slice(&self) -> &[u8] {
223 unsafe { slice::from_raw_parts(self.ptr as *const u8, self.len) }
224 }
225
226 /// Return the allocated memory as a mutable slice of u8.
as_mut_slice(&mut self) -> &mut [u8]227 pub fn as_mut_slice(&mut self) -> &mut [u8] {
228 unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, self.len) }
229 }
230
231 /// Return the allocated memory as a pointer to u8.
as_ptr(&self) -> *const u8232 pub fn as_ptr(&self) -> *const u8 {
233 self.ptr as *const u8
234 }
235
236 /// Return the allocated memory as a mutable pointer to u8.
as_mut_ptr(&mut self) -> *mut u8237 pub fn as_mut_ptr(&mut self) -> *mut u8 {
238 self.ptr as *mut u8
239 }
240
241 /// Return the length of the allocated memory.
len(&self) -> usize242 pub fn len(&self) -> usize {
243 self.len
244 }
245
246 /// Return whether any memory has been allocated.
is_empty(&self) -> bool247 pub fn is_empty(&self) -> bool {
248 self.len() == 0
249 }
250 }
251
impl Drop for Mmap {
    /// Release the mapping via `munmap` on non-Windows platforms.
    #[cfg(not(target_os = "windows"))]
    fn drop(&mut self) {
        // `len == 0` means the empty `Mmap::new()` case, whose pointer is a
        // dangling `Vec` pointer that was never mapped — nothing to unmap.
        if self.len != 0 {
            // SAFETY: `ptr`/`len` describe a mapping created by `mmap` in
            // `accessible_reserved`, and ownership is unique, so unmapping
            // the full range here is sound.
            let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.len) };
            // munmap failure here would indicate a corrupted mapping or a
            // bookkeeping bug, so treat it as fatal.
            assert_eq!(r, 0, "munmap failed: {}", io::Error::last_os_error());
        }
    }

    /// Release the mapping via `VirtualFree` on Windows.
    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        // As above: a zero-length `Mmap` was never allocated with
        // `VirtualAlloc`, so there is nothing to free.
        if self.len != 0 {
            use winapi::ctypes::c_void;
            use winapi::um::memoryapi::VirtualFree;
            use winapi::um::winnt::MEM_RELEASE;
            // SAFETY: `ptr` is the base address returned by `VirtualAlloc`.
            // With MEM_RELEASE the size argument must be 0 — the whole
            // reservation is released at once.
            let r = unsafe { VirtualFree(self.ptr as *mut c_void, 0, MEM_RELEASE) };
            // VirtualFree returns nonzero on success.
            assert_ne!(r, 0);
        }
    }
}
272
_assert()273 fn _assert() {
274 fn _assert_send_sync<T: Send + Sync>() {}
275 _assert_send_sync::<Mmap>();
276 }
277
#[cfg(test)]
mod tests {
    use super::*;

    /// Exercise `round_up_to_page_size` at the boundaries (zero, just above a
    /// multiple, exact multiple) and with a non-4096 page size, since some
    /// platforms (e.g. certain AArch64 configurations) use 64 KiB pages.
    #[test]
    fn test_round_up_to_page_size() {
        assert_eq!(round_up_to_page_size(0, 4096), 0);
        assert_eq!(round_up_to_page_size(1, 4096), 4096);
        assert_eq!(round_up_to_page_size(4096, 4096), 4096);
        assert_eq!(round_up_to_page_size(4097, 4096), 8192);
        // Non-default page sizes.
        assert_eq!(round_up_to_page_size(0, 65536), 0);
        assert_eq!(round_up_to_page_size(1, 65536), 65536);
        assert_eq!(round_up_to_page_size(65536, 65536), 65536);
        assert_eq!(round_up_to_page_size(65537, 65536), 131072);
    }
}
290