/* Page fault and protection handling. */

/* -------------------------- Specification ---------------------------- */

/* "Physical" page size. When a fault occurs, an entire physical page must
 change its protections.
 This may actually be a multiple of the system_pagesize. However, for the
 purposes of memory management and GC (allocation, protection changes, etc.)
 we always treat it as a unit. A better name would be "memory management page"
 but this term is already defined in spvw_page.d. */
local /* uintL */ aint physpagesize;  /* = map_pagesize or mmap_pagesize */

/* 2^physpageshift = physpagesize */
local uintL physpageshift;
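/* Illustrative note (assumption, not from the original source): with a
   hypothetical physpagesize of 4096 bytes, init_physpageshift() below yields
   physpageshift = 12, because 1UL << 12 == 4096. Rounding an address down
   to its physical page then amounts to
     pa_address = address & -physpagesize;
   i.e. address & ~(aint)(physpagesize-1), which is the idiom used by
   handle_fault() further down. */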

/* Initialization.
 local void init_physpagesize (void); */

/* Tries to repair a page fault at a single address. */
typedef enum { handler_failed, handler_done } handle_fault_result_t;
local handle_fault_result_t handle_fault (aint address, int verbose);

#if defined(GENERATIONAL_GC) && defined(SPVW_MIXED)
/* Tries to repair a fault spanning a range of pages.
 handle_fault_range(PROT_READ,start,end) ensures an address range is readable.
 handle_fault_range(PROT_READ_WRITE,start,end) makes it writable. */
global bool handle_fault_range (int prot, aint start_address, aint end_address);
#endif

#ifdef GENERATIONAL_GC

/* Does the same as mprotect.
 Aborts if unsuccessful. */
local void xmprotect (aint addr, uintM len, int prot);

#endif

/* -------------------------- Implementation --------------------------- */

/* Initialization. */
#define init_physpageshift()                      \
  { var uintL x = physpagesize;                   \
    var uintL i = 0;                              \
    while ((x >>= 1) != 0) { i++; }               \
    if (!((1UL << i) == physpagesize)) abort();   \
    physpageshift = i;                            \
  }
#ifdef SINGLEMAP_MEMORY
  #define init_physpagesize()                   \
    physpagesize = map_pagesize;                \
    init_physpageshift();
#else
  #define init_physpagesize()                   \
    physpagesize = mmap_pagesize;               \
    init_physpageshift();
#endif

#ifdef GENERATIONAL_GC
/* implies SPVW_PURE_BLOCKS <==> SINGLEMAP_MEMORY
   or      SPVW_MIXED_BLOCKS_STAGGERED
   or      SPVW_MIXED_BLOCKS_OPPOSITE

subroutine for protection: PROT_NONE -> PROT_READ */
local int handle_read_fault (aint address, physpage_state_t* physpage)
{
  /* During GC the physpage cache contents may be abused by MORRIS_GC,
   so don't use it. */
  if (inside_gc) {
    fprint(stderr,"\n*** - " "handle_fault called at a point inside GC where it shouldn't!\n");
    OS_set_errno(0);
    return -1;
  }
  /* bring page up to date with the state of the cache: */
  {
    var uintL count = physpage->cache_size;
    if (count > 0) {
      var old_new_pointer_t* ptr = physpage->cache;
      if (mprotect((void*)address, physpagesize, PROT_READ_WRITE) < 0)
        return -1;
      dotimespL(count,count, {
        DEBUG_SPVW_ASSERT(consp(*(ptr->p))
                          ? consp(ptr->o) && is_valid_cons_address(as_oint(ptr->o))
                          : !consp(ptr->o) && is_valid_varobject_address(as_oint(ptr->o)));
        *(ptr->p) = ptr->o;
        ptr++;
      });
    }
  }
  /* superimpose page read-only: */
  if (mprotect((void*)address, physpagesize, PROT_READ) < 0)
    return -1;
  physpage->protection = PROT_READ;
  return 0;
}

/* subroutine for protection: PROT_READ -> PROT_READ_WRITE */
local int handle_readwrite_fault (aint address, physpage_state_t* physpage)
{
  /* superimpose page read-write: */
  if (mprotect((void*)address, physpagesize, PROT_READ_WRITE) < 0)
    return -1;
  physpage->protection = PROT_READ_WRITE;
  return 0;
}
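
/* Summary (added for clarity, not part of the original code): together these
   two helpers implement the per-page protection ladder that handle_fault()
   below walks through:
     PROT_NONE        first read access  -> handle_read_fault()
                      (replay the physpage cache, then mprotect to PROT_READ)
     PROT_READ        first write access -> handle_readwrite_fault()
                      (mprotect to PROT_READ_WRITE)
     PROT_READ_WRITE  no further faults expected for this page. */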

#ifdef MULTITHREAD
  #define LOCK_PAGE_CACHE(page) spinlock_acquire(&page->cache_lock)
  #define UNLOCK_PAGE_CACHE(page) spinlock_release(&page->cache_lock)
#else
  #define LOCK_PAGE_CACHE(page)
  #define UNLOCK_PAGE_CACHE(page)
#endif

/* mapped generation: the old one */
#define heap_mgen_start  heap_gen0_start
#define heap_mgen_end    heap_gen0_end

#else

#define heap_mgen_start  heap_start
#define heap_mgen_end    heap_end

#endif  /* GENERATIONAL_GC */

local handle_fault_result_t handle_fault (aint address, int verbose)
{
  var uintL heapnr;
  var object obj = as_object((oint)address << oint_addr_shift);
  var aint pa_address = address & -physpagesize; /* page aligned address */
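  /* Example (illustrative, assumed values): with physpagesize = 0x1000,
     a faulting address of 0x08012345 gives pa_address = 0x08012000,
     the start of the physical page containing the fault. */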
  #ifdef SPVW_PURE_BLOCKS
  heapnr = typecode(obj);
  #else
  #if defined(GENERATIONAL_GC)
  /* Different heaps never occupy parts of the same physpage; otherwise the
     fault handler for an object of one heap would have an effect on the
     state of the physpage of the other heap.
     In case of SPVW_MIXED_BLOCKS_STAGGERED this is guaranteed by the
     initialization of the heap limits. In case of SPVW_MIXED_BLOCKS_OPPOSITE
     this is guaranteed by spvw_allocate.d.
     This also guarantees that when SIGSEGV_FAULT_ADDRESS_ALIGNMENT > 1UL
     and the address argument is not the exact fault address, but the address
     rounded down to page alignment, the comparisons below still compute the
     correct heapnr. */
  #endif
  #if defined(SPVW_MIXED_BLOCKS_STAGGERED)
  heapnr = (address >= mem.heaps[1].heap_mgen_start ? 1 : 0);
  #else  /* SPVW_MIXED_BLOCKS_OPPOSITE */
  heapnr = (address >= mem.heaps[1].heap_start ? 1 : 0);
  #endif
  #endif
  {
    #ifdef GENERATIONAL_GC
    var Heap* heap = &mem.heaps[heapnr];
    if (!is_heap_containing_objects(heapnr))
      goto error1;
    if (!(((heap->heap_gen0_start & ~(SIGSEGV_FAULT_ADDRESS_ALIGNMENT > 1UL ? system_pagesize-1 : 0)) <= address)
          && (address < heap->heap_gen0_end)))
      goto error2;
    if (heap->physpages == NULL)
      goto error5;
    {
      var uintL pageno = (pa_address>>physpageshift)-(heap->heap_gen0_start>>physpageshift);
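      /* Note (added): both addresses are shifted before subtracting, so the
         index stays correct even if heap_gen0_start is not itself aligned to
         a physpagesize boundary; pageno indexes into heap->physpages[]. */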
      {
        var int ret;
        var physpage_state_t* physpage = &heap->physpages[pageno];
        LOCK_PAGE_CACHE(physpage);
        switch (physpage->protection) {
          case PROT_NONE:
            /* protection: PROT_NONE -> PROT_READ */
            ret=handle_read_fault(pa_address,physpage);
            UNLOCK_PAGE_CACHE(physpage);
            if (ret < 0) goto error6;
            return handler_done;
          case PROT_READ:
            /* protection: PROT_READ -> PROT_READ_WRITE */
            ret=handle_readwrite_fault(pa_address,physpage);
            UNLOCK_PAGE_CACHE(physpage);
            if (ret < 0) goto error7;
            return handler_done;
          case PROT_READ_WRITE:
            /* It is very unlikely that a page which is already
               PROT_READ_WRITE causes a segfault. Most probably we got here
               because a page that was PROT_READ or PROT_NONE was changed to
               PROT_READ_WRITE by another thread while this handler was
               waiting to run. So just report success here - the other
               thread has already done what is required. */
            UNLOCK_PAGE_CACHE(physpage);
            return handler_done;
            /* goto error8; */
          default:
            UNLOCK_PAGE_CACHE(physpage);
            goto error9;
        }
       error6:                  /* handle_read_fault() failed */
        if (verbose) {
          var int saved_errno = OS_errno;
          fprintf(stderr,"\n*** - " "handle_fault error6 ! mprotect(0x%lx,0x%lx,...) -> ", address & -physpagesize, physpagesize);
          errno_out(saved_errno);
        }
        goto error;
       error7:                  /* handle_readwrite_fault() failed */
        if (verbose) {
          var int saved_errno = OS_errno;
          fprintf(stderr,"\n*** - " "handle_fault error7 ! mprotect(0x%lx,0x%lx,%d) -> ", address & -physpagesize, physpagesize, PROT_READ_WRITE);
          errno_out(saved_errno);
        }
        goto error;
       error8:                  /* fault on a read-write page */
        if (verbose)
          fprintf(stderr,"\n*** - " "handle_fault error8 ! protection = %d", physpage->protection);
        goto error;
       error9:                  /* invalid protection value */
        if (verbose)
          fprintf(stderr,"\n*** - " "handle_fault error9 ! protection = %d", physpage->protection);
        goto error;
      }
    }
   error5:      /* fault on a read-write page with no physpages array */
    if (verbose)
      fprint(stderr,"\n*** - " "handle_fault error5 !");
    goto error;
   error1:          /* A fault was not expected on this type of heap. */
    if (verbose)
      fprint(stderr,"\n*** - " "handle_fault error1 !");
    goto error;
   error2: /* The address is outside of the used address range for this heap. */
    if (verbose)
      fprintf(stderr,"\n*** - " "handle_fault error2 ! address = 0x%lx not in [0x%lx,0x%lx) !", address, heap->heap_mgen_start, heap->heap_mgen_end);
    goto error;
    #endif
  }
 error:
  return handler_failed;
}

#if defined(GENERATIONAL_GC) && defined(SPVW_MIXED_BLOCKS)
/* System calls like read() and write(), when they operate on pages with
 insufficient permissions, don't signal SIGSEGV. Instead, they return with
 errno=EFAULT and unpredictable side effects.
 handle_fault_range(PROT_READ,start,end) makes an address range readable.
 handle_fault_range(PROT_READ_WRITE,start,end) makes an address range writable. */
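/* Usage sketch (illustrative, not from the original source): a caller that
   is about to write() directly out of the heap would pre-fault the buffer
   first, so the system call never sees EFAULT from a protected page:
     var aint buf_start = ...;            (some address inside the heap)
     var aint buf_end   = buf_start + len;
     if (!handle_fault_range(PROT_READ, buf_start, buf_end))
       ... report the error ...;
     else
       ... perform the write ...;
   The names buf_start/buf_end/len are placeholders for whatever the caller
   actually uses. */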
modexp bool handle_fault_range (int prot, aint start_address, aint end_address)
{
  if (!(start_address < end_address))
    return true;
  var Heap* heap = &mem.heaps[0]; /* varobject_heap */
  var bool did_pagein = false;
  if ((end_address <= heap->heap_mgen_start) || (heap->heap_mgen_end <= start_address))
    return true; /* nothing to do, but strange that an error occurred at all */
 #ifdef GENERATIONAL_GC
  if (heap->physpages == NULL) {
    if (did_pagein)
      return true;
    return false;
  }
  {
    var aint pa_address;
    var int ret=0;
    for (pa_address = start_address & -physpagesize;
         pa_address < end_address; pa_address += physpagesize)
      if ((heap->heap_gen0_start <= pa_address)
          && (pa_address < heap->heap_gen0_end)) {
        var uintL pageno = (pa_address>>physpageshift)
          -(heap->heap_gen0_start>>physpageshift);
        var physpage_state_t* physpage = &heap->physpages[pageno];
        LOCK_PAGE_CACHE(physpage);
        if ((physpage->protection == PROT_NONE)
            && (prot == PROT_READ || prot == PROT_READ_WRITE)) {
          /* protection: PROT_NONE -> PROT_READ */
          ret=handle_read_fault(pa_address,physpage);
        }
        if (!(physpage->protection == PROT_READ_WRITE)
            && (prot == PROT_READ_WRITE)) {
          /* protection: PROT_READ -> PROT_READ_WRITE */
          ret=handle_readwrite_fault(pa_address,physpage);
        }
        UNLOCK_PAGE_CACHE(physpage);
        if (ret < 0) return false;
      }
  }
  return true;
 #else
  return did_pagein;
 #endif
}
#endif

#ifdef GENERATIONAL_GC

local void xmprotect (aint addr, uintM len, int prot) {
  if (mprotect((void*)addr,len,prot) < 0) {
    fprintf(stderr,GETTEXTL("mprotect(0x%lx,%d,%d) failed."),addr,len,prot);
    errno_out(OS_errno);
    abort();
  }
}
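
/* Typical use (illustrative, assumed call site): after the GC has finished
   updating a page of the old generation, it can be locked down again with
     xmprotect(pa_address, physpagesize, PROT_READ);
   Any failure of the underlying mprotect() is treated as fatal and aborts. */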

#undef LOCK_PAGE_CACHE
#undef UNLOCK_PAGE_CACHE

#endif  /* GENERATIONAL_GC */
