use crate::alloc::{instance_heap_offset, AddrLocation, Alloc, AllocStrategy, Limits, Slot};
use crate::embed_ctx::CtxMap;
use crate::error::Error;
use crate::instance::{new_instance_handle, Instance, InstanceHandle, InstanceInternal};
use crate::module::Module;
use crate::region::{Region, RegionCreate, RegionInternal};
use crate::sysdeps::host_page_size;
use crate::WASM_PAGE_SIZE;
use crate::{lucet_bail, lucet_ensure, lucet_format_err};
use libc::c_void;
use nix::poll;
use nix::sys::mman::{madvise, mmap, munmap, MapFlags, MmapAdvise, ProtFlags};
use std::os::unix::io::{AsRawFd, RawFd};
use std::ptr;
use std::sync::{Arc, Mutex, Weak};
use std::thread::{self, JoinHandle};
use userfaultfd::{IoctlFlags, Uffd, UffdBuilder};

/// A [`Region`](trait.Region.html) backed by `mmap` and managed by `userfaultfd`.
///
/// Much like [`MmapRegion`](struct.MmapRegion.html), `UffdRegion` lays out virtual memory in a
/// contiguous block. See [`MmapRegion`](struct.MmapRegion.html) for details of the memory layout.
///
/// The difference is that `UffdRegion` is lazy. Only the minimum required physical memory is set up
/// to back the virtual memory before an `Instance` begins running. The stack and the heap are both
/// lazily allocated at runtime.
///
/// That lazy allocation is handled by the [`userfaultfd`][userfaultfd] system, using extensions
/// available in Linux version 4.11 or newer. The entire `Region` is registered with a `userfaultfd`
/// handle. When a page fault occurs because the guest touched lazy memory, the guest thread is
/// paused and a message is sent over the `userfaultfd` handle.
///
/// That message is picked up by a separate thread whose job is to handle page faults. How a fault
/// is handled depends on where it occurred. If it occurred in the stack, we simply zero out the
/// page. If it occurred in the heap, handling depends on whether the page should contain data
/// defined in the WebAssembly module: if the page should be blank, we again zero it out; if it
/// should contain data, we copy that data into the page. In either case, we finish by reawakening
/// the guest thread.
///
/// If the fault occurs in a guard page, we do nothing, and reawaken the thread without allocating
/// the backing physical memory. This causes the guest thread to raise a SIGBUS, which is treated as
/// a fatal error by the Lucet signal handler.
///
/// [userfaultfd]: http://man7.org/linux/man-pages/man2/userfaultfd.2.html
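///
/// # Example
///
/// A minimal creation sketch (not compiled as a doc-test; the import paths and the guest module
/// path `guest.so` are assumptions for illustration):
///
/// ```ignore
/// use lucet_runtime::{DlModule, Limits, Region};
/// use lucet_runtime_internals::region::uffd::{UffdRegion, WasmPageSizedUffdStrategy};
///
/// // Reserve virtual address space for up to 8 instances, all sharing the default limits.
/// let region = UffdRegion::create(8, &Limits::default(), WasmPageSizedUffdStrategy {})?;
///
/// // Instances created from the region get their stack and heap backed lazily, on first touch.
/// let module = DlModule::load("guest.so")?;
/// let mut inst = region.new_instance(module)?;
/// inst.run("main", &[])?;
/// ```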
pub struct UffdRegion {
    uffd: Arc<Uffd>,
    start: *mut c_void,
    limits: Limits,
    freelist: Mutex<Vec<Slot>>,
    instance_capacity: usize,
    handler: Option<JoinHandle<Result<(), Error>>>,
    handler_pipe: RawFd,
}

// the start pointer prevents these from auto-deriving
unsafe impl Send for UffdRegion {}
unsafe impl Sync for UffdRegion {}

fn uffd_handler(
    uffd_strategy: impl UffdStrategy,
    uffd: Arc<Uffd>,
    start: *mut c_void,
    instance_capacity: usize,
    handler_pipe: RawFd,
    limits: Limits,
) -> Result<(), Error> {
    use userfaultfd::Event;

    let mut pollfds = [
        poll::PollFd::new(uffd.as_raw_fd(), poll::PollFlags::POLLIN),
        poll::PollFd::new(handler_pipe, poll::PollFlags::POLLIN),
    ];

    loop {
        let poll_res = poll::poll(&mut pollfds, 500)?;
        let uffd_pfd = pollfds[0];
        let pipe_pfd = pollfds[1];

        if poll_res == 0 {
            // we set a timeout on the poll in case the main thread panics, so the handler doesn't
            // run forever; just run the loop again
            continue;
        }

        // reading anything from the handler pipe kills this thread
        if let Some(ev) = pipe_pfd.revents() {
            lucet_ensure!(!ev.contains(poll::PollFlags::POLLERR), "pipe event error");
            if ev.contains(poll::PollFlags::POLLIN) {
                break;
            }
        }

        if let Some(ev) = uffd_pfd.revents() {
            lucet_ensure!(
                !ev.contains(poll::PollFlags::POLLERR) && ev.contains(poll::PollFlags::POLLIN),
                "unexpected uffd event flags: {:?}",
                ev
            );
        }

        // eprintln!("handling a fault on fd {}", uffd.as_raw_fd());

        match uffd.read_event() {
            Err(e) => lucet_bail!("error reading event from uffd: {}", e),
            Ok(None) => lucet_bail!("uffd had POLLIN set, but could not be read"),
            Ok(Some(Event::Pagefault {
                addr: fault_addr, ..
            })) => {
                // eprintln!("fd {} fault address: {:p}", uffd.as_raw_fd(), fault_addr);
                let fault_addr = fault_addr as usize;
                let fault_page = fault_addr - (fault_addr % host_page_size());
                let instance_size = limits.total_memory_size();

                let in_region = fault_addr >= start as usize
                    && fault_addr < start as usize + instance_size * instance_capacity;
                lucet_ensure!(in_region, "fault is not within the uffd region");

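                // Locate the base of the faulting instance's slot by rounding the fault's offset
                // within the region down to a multiple of the per-instance size. For example
                // (hypothetical numbers), with instance_size = 0x20_0000 and fault_offs =
                // 0x20_10a0, fault_base = 0x20_0000, i.e. the fault lies in the second slot.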
                let fault_offs = fault_addr - start as usize;
                let fault_base = fault_offs - (fault_offs % instance_size);
                let inst_base = start as usize + fault_base;

                // NB: we are blatantly lying to the compiler here! the lifetime is *not* actually
                // static, but for the purposes of reaching in to read the sparse page data and the
                // heap layout, we can treat it as such. The important property to maintain is that
                // the *real* region lifetime (`'r`) lives at least as long as this handler thread,
                // which can be shown by examining the `drop` method of `UffdRegion`.

                let inst: &mut Instance = unsafe {
                    (inst_base as *mut Instance)
                        .as_mut()
                        .ok_or(lucet_format_err!("instance pointer is null"))?
                };
                if !inst.valid_magic() {
                    eprintln!(
                        "instance magic incorrect, fault address {:p}",
                        fault_addr as *mut c_void
                    );
                    lucet_bail!("instance magic incorrect");
                }

                let alloc = inst.alloc();
                let loc = alloc.addr_location(fault_addr as *const c_void);
                match loc {
                    AddrLocation::InaccessibleHeap | AddrLocation::StackGuard => {
                        // eprintln!("fault in heap guard!");
                        // page fault occurred out of bounds; trigger a fault by waking the faulting
                        // thread without copying or zeroing
                        uffd.wake(fault_page as *mut c_void, host_page_size())
                            .map_err(|e| Error::InternalError(e.into()))?;
                    }
                    AddrLocation::SigStackGuard | AddrLocation::Unknown => {
                        tracing::error!("UFFD pagefault at fatal location: {:?}", loc);
                        uffd.wake(fault_page as *mut c_void, host_page_size())
                            .map_err(|e| Error::InternalError(e.into()))?;
                    }
                    AddrLocation::Globals | AddrLocation::SigStack => {
                        tracing::error!("UFFD pagefault at unexpected location: {:?}", loc);
                        uffd.wake(fault_page as *mut c_void, host_page_size())
                            .map_err(|e| Error::InternalError(e.into()))?;
                    }
                    AddrLocation::Stack => {
                        uffd_strategy.stack_fault(&uffd, fault_page as *mut c_void)?
                    }
                    AddrLocation::Heap => uffd_strategy.heap_fault(
                        &uffd,
                        inst.module(),
                        alloc,
                        fault_page as *mut c_void,
                    )?,
                }
            }
            Ok(Some(ev)) => panic!("unexpected uffd event: {:?}", ev),
        }
    }

    Ok(())
}

impl Region for UffdRegion {
    fn free_slots(&self) -> usize {
        self.freelist.lock().unwrap().len()
    }

    fn used_slots(&self) -> usize {
        self.capacity() - self.free_slots()
    }

    fn capacity(&self) -> usize {
        self.instance_capacity
    }
}

impl RegionInternal for UffdRegion {
    fn new_instance_with(
        &self,
        module: Arc<dyn Module>,
        embed_ctx: CtxMap,
        heap_memory_size_limit: usize,
        mut alloc_strategy: AllocStrategy,
    ) -> Result<InstanceHandle, Error> {
        let limits = self.get_limits();
        module.validate_runtime_spec(&limits, heap_memory_size_limit)?;

        // Use the supplied alloc_strategy to get the next available slot
        // for this new instance.
        let slot;
        {
            let mut free_slot_vector = self.freelist.lock().unwrap();
            let slot_index = alloc_strategy.next(free_slot_vector.len(), self.capacity())?;
            slot = free_slot_vector.swap_remove(slot_index);
        }

        assert_eq!(
            slot.heap as usize % host_page_size(),
            0,
            "heap must be page-aligned"
        );

        for (ptr, len) in [
            // zero the globals
            (slot.globals, limits.globals_size),
            // zero the sigstack
            (slot.sigstack, limits.signal_stack_size),
        ]
        .iter()
        {
            // globals_size = 0 is valid, but the ioctl fails if you pass it 0
            if *len > 0 {
                // eprintln!("zeroing {:p}[{:x}]", *ptr, len);
                unsafe {
                    self.uffd
                        .zeropage(*ptr, *len, true)
                        .expect("uffd.zeropage succeeds");
                }
            }
        }

        let inst_ptr = slot.start as *mut Instance;

        // upgrade the slot's weak region pointer so the region can't get dropped while the instance
        // exists
        let region = slot
            .region
            .upgrade()
            // if this precondition isn't met, something is deeply wrong as some other region's slot
            // ended up in our freelist
            .expect("backing region of slot (`self`) exists");

        let alloc = Alloc {
            heap_accessible_size: module
                .heap_spec()
                .map(|h| h.initial_size as usize)
                .unwrap_or(0),
            heap_inaccessible_size: slot.limits.heap_address_space_size,
            heap_memory_size_limit,
            slot: Some(slot),
            region,
        };

        let inst = new_instance_handle(inst_ptr, module, alloc, embed_ctx)?;

        Ok(inst)
    }

    fn drop_alloc(&self, alloc: &mut Alloc) {
        let slot = alloc
            .slot
            .take()
            .expect("alloc didn't have a slot during drop; dropped twice?");

        if slot.heap as usize % host_page_size() != 0 {
            panic!("heap is not page-aligned");
        }

        // set dontneed for everything past the `Instance` page
        let ptr = (slot.start as usize + instance_heap_offset()) as *mut c_void;
        let len = slot.limits.total_memory_size() - instance_heap_offset();
        // eprintln!("setting none {:p}[{:x}]", ptr, len);
        unsafe {
            madvise(ptr, len, MmapAdvise::MADV_DONTNEED).expect("madvise succeeds during drop");
        }

        self.freelist.lock().unwrap().push(slot);
    }

    fn expand_heap(&self, _slot: &Slot, _start: u32, _len: u32) -> Result<(), Error> {
        // the actual work of heap expansion for UFFD is done in the worker thread; we just need the
        // `Alloc` to validate the new limits and update the metadata
        Ok(())
    }

    fn reset_heap(&self, alloc: &mut Alloc, module: &dyn Module) -> Result<(), Error> {
        // zero the heap, if any of it is currently accessible
        if alloc.heap_accessible_size > 0 {
            unsafe {
                madvise(
                    alloc.slot().heap,
                    alloc.heap_accessible_size,
                    MmapAdvise::MADV_DONTNEED,
                )?;
            }
        }

        // reset the heap to the initial size
        let initial_size = module
            .heap_spec()
            .map(|h| h.initial_size as usize)
            .unwrap_or(0);
        alloc.heap_accessible_size = initial_size;
        alloc.heap_inaccessible_size = alloc.slot().limits.heap_address_space_size - initial_size;
        Ok(())
    }

    fn get_limits(&self) -> &Limits {
        &self.limits
    }

    fn as_dyn_internal(&self) -> &dyn RegionInternal {
        self
    }
}

impl RegionCreate for UffdRegion {
    const TYPE_NAME: &'static str = "UffdRegion";

    fn create(instance_capacity: usize, limits: &Limits) -> Result<Arc<Self>, Error> {
        UffdRegion::create(instance_capacity, limits, WasmPageSizedUffdStrategy {})
    }
}

impl Drop for UffdRegion {
    fn drop(&mut self) {
        // eprintln!("UffdRegion::drop()");
        // write to the pipe to notify the handler to exit
        if let Err(e) = nix::unistd::write(self.handler_pipe, b"macht nichts") {
            // this probably means the handler errored out; note it but don't panic
            eprintln!("couldn't write to handler shutdown pipe: {}", e);
        };

        // eprintln!("joining");
        // wait for the handler to exit
        let res = self
            .handler
            .take()
            .expect("region has a join handle")
            .join()
            .expect("join on uffd handler");

        // close the send end of the pipe; the handler closes the other end
        nix::unistd::close(self.handler_pipe).expect("close handler exit pipe");

        let total_region_size = self.instance_capacity * self.limits.total_memory_size();
        unsafe {
            munmap(self.start, total_region_size).expect("unmapping region");
        }

        if let Err(e) = res {
            panic!("uffd handler thread failed: {}", e);
        }
    }
}

impl UffdRegion {
    /// Create a new `UffdRegion` that can support a given number of instances, each subject to the
    /// same runtime limits.
    ///
    /// The region is returned in an `Arc`, because any instances created from it carry a reference
    /// back to the region.
    ///
    /// This also creates and starts a separate thread that is responsible for handling page faults
    /// that occur within the memory region.
    pub fn create(
        instance_capacity: usize,
        limits: &Limits,
        strategy: impl UffdStrategy,
    ) -> Result<Arc<Self>, Error> {
        if instance_capacity == 0 {
            return Err(Error::InvalidArgument(
                "region must be able to hold at least one instance",
            ));
        }
        limits.validate()?;

        let uffd = Arc::new(
            UffdBuilder::new()
                .close_on_exec(true)
                .non_blocking(true)
                .create()
                .map_err(|e| Error::InternalError(e.into()))?,
        );

        // map the chunk of virtual memory for all of the slots
        let total_region_size =
            if let Some(sz) = instance_capacity.checked_mul(limits.total_memory_size()) {
                sz
            } else {
                return Err(Error::InvalidArgument("requested region size too large"));
            };
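        // reserve address space only: with MAP_NORESERVE, no swap is reserved up front, and
        // physical pages are supplied on demand by the fault handler thread or by the zeropage
        // ioctls below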
        let start = unsafe {
            mmap(
                ptr::null_mut(),
                total_region_size,
                ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
                MapFlags::MAP_ANONYMOUS | MapFlags::MAP_PRIVATE | MapFlags::MAP_NORESERVE,
                0,
                0,
            )?
        };

        // register the memory region with uffd and verify the required ioctls are supported
        let ioctls = uffd
            .register(start, total_region_size)
            .map_err(|e| Error::InternalError(e.into()))?;
        if !ioctls.contains(IoctlFlags::WAKE | IoctlFlags::COPY | IoctlFlags::ZEROPAGE) {
            panic!("required uffd ioctls not supported; found: {:?}", ioctls);
        }

        let (handler_pipe_recv, handler_pipe) = nix::unistd::pipe()?;

        let handler_uffd = uffd.clone();
        // morally equivalent to `unsafe impl Send`
        let handler_start = start as usize;
        let handler_limits = limits.clone();
        let handler = thread::Builder::new()
            .name("uffd region handler".into())
            .spawn(move || {
                let res = uffd_handler(
                    strategy,
                    handler_uffd.clone(),
                    handler_start as *mut c_void,
                    instance_capacity,
                    handler_pipe_recv,
                    handler_limits,
                );
                // clean up the shutdown pipe before terminating
                if let Err(e) = nix::unistd::close(handler_pipe_recv) {
                    // note but don't return an error just for the pipe
                    eprintln!("error closing handler_pipe_recv: {}", e);
                }
                if res.is_err() {
                    // We can't currently recover from something going wrong in the handler thread,
                    // so we unregister the region and wake all faulting threads so that they crash
                    // rather than hanging. This is in lieu of bringing down the other threads with
                    // `panic!`
                    handler_uffd
                        .unregister(handler_start as *mut c_void, total_region_size)
                        .unwrap_or_else(|e| {
                            eprintln!("error while unregistering in error case: {}", e)
                        });
                    handler_uffd
                        .wake(handler_start as *mut c_void, total_region_size)
                        .unwrap_or_else(|e| eprintln!("error while waking in error case: {}", e));
                }
                res
            })
            .expect("error spawning uffd region handler");

        let region = Arc::new(UffdRegion {
            uffd,
            start,
            limits: limits.clone(),
            freelist: Mutex::new(Vec::with_capacity(instance_capacity)),
            instance_capacity,
            handler: Some(handler),
            handler_pipe,
        });

        {
            let mut freelist = region.freelist.lock().unwrap();
            for i in 0..instance_capacity {
                freelist.push(UffdRegion::create_slot(&region, i)?);
            }
        }

        Ok(region)
    }

    fn create_slot(region: &Arc<UffdRegion>, index: usize) -> Result<Slot, Error> {
        // get the memory from the offset into the overall region
        let start =
            (region.start as usize + (index * region.limits.total_memory_size())) as *mut c_void;
        // lay out the other sections in memory
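        // (mirroring `MmapRegion`: heap address space, stack guard page, stack, globals, sigstack
        // guard page, sigstack; only the `Instance` page is made resident here, the rest is backed
        // lazily by the fault handler thread)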
        let heap = start as usize + instance_heap_offset();
        let stack = heap + region.limits.heap_address_space_size + host_page_size();
        let globals = stack + region.limits.stack_size;
        let sigstack = globals + region.limits.globals_size + host_page_size();

        // turn on the `Instance` page
        // eprintln!("zeroing {:p}[{:x}]", start, host_page_size());
        unsafe {
            region
                .uffd
                .zeropage(start, host_page_size(), true)
                .map_err(|e| Error::InternalError(e.into()))?;
        }

        Ok(Slot {
            start,
            heap: heap as *mut c_void,
            stack: stack as *mut c_void,
            globals: globals as *mut c_void,
            sigstack: sigstack as *mut c_void,
            limits: region.limits.clone(),
            region: Arc::downgrade(region) as Weak<dyn RegionInternal>,
        })
    }
}

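/// How the fault handler thread backs faulting stack and heap pages with physical memory.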
pub trait UffdStrategy: Send + Sync + 'static {
    fn stack_fault(&self, uffd: &Uffd, fault_page: *mut c_void) -> Result<(), Error>;
    fn heap_fault(
        &self,
        uffd: &Uffd,
        module: &dyn Module,
        alloc: &Alloc,
        fault_page: *mut c_void,
    ) -> Result<(), Error>;
}

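/// A strategy that backs faulting memory one host page (typically 4 KiB) at a time, copying or
/// zeroing exactly the page that faulted.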
pub struct HostPageSizedUffdStrategy;

impl UffdStrategy for HostPageSizedUffdStrategy {
    fn stack_fault(&self, uffd: &Uffd, fault_page: *mut c_void) -> Result<(), Error> {
        unsafe {
            uffd.zeropage(fault_page as *mut c_void, host_page_size(), true)
                .map_err(|e| Error::InternalError(e.into()))?;
        }
        Ok(())
    }

    fn heap_fault(
        &self,
        uffd: &Uffd,
        module: &dyn Module,
        alloc: &Alloc,
        fault_page: *mut c_void,
    ) -> Result<(), Error> {
        let pages_into_heap = (fault_page as usize - alloc.slot().heap as usize) / host_page_size();

        // page fault occurred in the heap; copy or zero
        if let Some(page) = module.get_sparse_page_data(pages_into_heap) {
            // we are in the sparse data area, with a non-empty page; copy it in
            unsafe {
                uffd.copy(
                    page.as_ptr() as *const c_void,
                    fault_page,
                    host_page_size(),
                    true,
                )
                .map_err(|e| Error::InternalError(e.into()))?;
            }
        } else {
            // we are outside the sparse data area, or the page is empty; zero it
            unsafe {
                uffd.zeropage(fault_page, host_page_size(), true)
                    .map_err(|e| Error::InternalError(e.into()))?;
            }
        }
        Ok(())
    }
}

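/// A strategy that backs an entire WebAssembly page (64 KiB) of heap per fault, filling it host
/// page by host page and waking the guest thread only once the whole wasm page is resident. Stack
/// faults are still handled one host page at a time.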
pub struct WasmPageSizedUffdStrategy;

impl UffdStrategy for WasmPageSizedUffdStrategy {
    fn stack_fault(&self, uffd: &Uffd, fault_page: *mut c_void) -> Result<(), Error> {
        unsafe {
            uffd.zeropage(fault_page as *mut c_void, host_page_size(), true)
                .map_err(|e| Error::InternalError(e.into()))?;
        }
        Ok(())
    }

    fn heap_fault(
        &self,
        uffd: &Uffd,
        module: &dyn Module,
        alloc: &Alloc,
        fault_page: *mut c_void,
    ) -> Result<(), Error> {
        let slot = alloc.slot.as_ref().unwrap();
        // Find the address of the fault relative to the heap base
        let rel_fault_addr = fault_page as usize - slot.heap as usize;
        // Find the base of the wasm page, relative to the heap start
        let rel_wasm_page_base_addr = rel_fault_addr - (rel_fault_addr % WASM_PAGE_SIZE as usize);
        // Find the absolute address of the base of the wasm page
        let wasm_page_base_addr = slot.heap as usize + rel_wasm_page_base_addr;
        // Find the number of host pages into the heap the wasm page base begins at
        let base_pages_into_heap = rel_wasm_page_base_addr / host_page_size();

        assert!(WASM_PAGE_SIZE as usize > host_page_size());
        assert_eq!(WASM_PAGE_SIZE as usize % host_page_size(), 0);

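        // with 4 KiB host pages (the typical case) and 64 KiB wasm pages, this is 16 host pages
        // per fault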
        let host_pages_per_wasm_page = WASM_PAGE_SIZE as usize / host_page_size();

        for page_num in 0..host_pages_per_wasm_page {
            let pages_into_heap = base_pages_into_heap + page_num;
            let host_page_addr = wasm_page_base_addr + (page_num * host_page_size());

            if alloc.addr_location(host_page_addr as *const c_void) != AddrLocation::Heap {
                tracing::error!("Heap ended earlier than expected.");
                break;
            }

            // page fault occurred in the heap; copy or zero
            if let Some(page) = module.get_sparse_page_data(pages_into_heap) {
                // we are in the sparse data area, with a non-empty page; copy it in
                unsafe {
                    uffd.copy(
                        page.as_ptr() as *const c_void,
                        host_page_addr as *mut c_void,
                        host_page_size(),
                        false,
                    )
                    .map_err(|e| Error::InternalError(e.into()))?;
                }
            } else {
                // we are outside the sparse data area, or the page is empty; zero it
                unsafe {
                    uffd.zeropage(host_page_addr as *mut c_void, host_page_size(), false)
                        .map_err(|e| Error::InternalError(e.into()))?;
                }
            }
        }

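        // all host pages backing the wasm page are now resident; wake the guest thread once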
        uffd.wake(fault_page as *mut c_void, host_page_size())
            .map_err(|e| Error::InternalError(e.into()))?;

        Ok(())
    }
}