/*
 * Copyright (c) 2012 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <string.h>

#include "native_client/src/include/build_config.h"
#include "native_client/src/include/concurrency_ops.h"
#include "native_client/src/include/nacl_platform.h"
#include "native_client/src/include/portability.h"
#include "native_client/src/shared/platform/nacl_check.h"
#include "native_client/src/shared/platform/nacl_log.h"
#include "native_client/src/shared/platform/nacl_sync.h"
#include "native_client/src/shared/platform/nacl_sync_checked.h"
#include "native_client/src/trusted/desc/nacl_desc_base.h"
#include "native_client/src/trusted/desc/nacl_desc_effector.h"
#include "native_client/src/trusted/desc/nacl_desc_effector_trusted_mem.h"
#include "native_client/src/trusted/desc/nacl_desc_imc_shm.h"
#include "native_client/src/trusted/perf_counter/nacl_perf_counter.h"
#include "native_client/src/trusted/platform_qualify/nacl_os_qualify.h"
#include "native_client/src/trusted/service_runtime/arch/sel_ldr_arch.h"
#include "native_client/src/trusted/service_runtime/include/bits/mman.h"
#include "native_client/src/trusted/service_runtime/include/sys/errno.h"
#include "native_client/src/trusted/service_runtime/nacl_app_thread.h"
#include "native_client/src/trusted/service_runtime/nacl_error_code.h"
#include "native_client/src/trusted/service_runtime/nacl_text.h"
#include "native_client/src/trusted/service_runtime/sel_ldr.h"
#include "native_client/src/trusted/service_runtime/sel_memory.h"
#include "native_client/src/trusted/service_runtime/thread_suspension.h"

#if NACL_OSX
#include "native_client/src/trusted/desc/osx/nacl_desc_imc_shm_mach.h"
#endif

/* initial size of the malloced buffer for dynamic regions */
static const int kMinDynamicRegionsAllocated = 32;

static const int kBitsPerByte = 8;

static uint8_t *BitmapAllocate(uint32_t indexes) {
  uint32_t byte_count = (indexes + kBitsPerByte - 1) / kBitsPerByte;
  uint8_t *bitmap = malloc(byte_count);
  if (bitmap != NULL) {
    memset(bitmap, 0, byte_count);
  }
  return bitmap;
}

static int BitmapIsBitSet(uint8_t *bitmap, uint32_t index) {
  return (bitmap[index / kBitsPerByte] & (1 << (index % kBitsPerByte))) != 0;
}

static void BitmapSetBit(uint8_t *bitmap, uint32_t index) {
  bitmap[index / kBitsPerByte] |= 1 << (index % kBitsPerByte);
}
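
/*
 * Example (assuming NACL_MAP_PAGESIZE is 64KiB): a 16MiB dynamic text
 * region spans 256 pages, so BitmapAllocate(256) returns a zeroed
 * 32-byte bitmap with one bit per NACL_MAP_PAGESIZE page.
 */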

#if NACL_OSX
/*
 * Helper function for NaClMakeDynamicTextShared.
 */
static struct NaClDesc *MakeImcShmMachDesc(uintptr_t size) {
  struct NaClDescImcShmMach *shm =
      (struct NaClDescImcShmMach *) malloc(sizeof(struct NaClDescImcShmMach));
  CHECK(shm);

  if (!NaClDescImcShmMachAllocCtor(shm, size, /* executable= */ 1)) {
    free(shm);
    NaClLog(4, "NaClMakeDynamicTextShared: shm alloc ctor for text failed\n");
    return NULL;
  }

  return &shm->base;
}
#endif

/*
 * Helper function for NaClMakeDynamicTextShared.
 */
static struct NaClDesc *MakeImcShmDesc(uintptr_t size) {
#if NACL_OSX
  if (NaClOSX10Dot7OrLater())
    return MakeImcShmMachDesc(size);
#endif
  struct NaClDescImcShm *shm =
      (struct NaClDescImcShm *) malloc(sizeof(struct NaClDescImcShm));
  CHECK(shm);

  if (!NaClDescImcShmAllocCtor(shm, size, /* executable= */ 1)) {
    free(shm);
    NaClLog(4, "NaClMakeDynamicTextShared: shm alloc ctor for text failed\n");
    return NULL;
  }

  return &shm->base;
}

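/*
 * Sets up the dynamic code (text) region.  On success the region
 * occupies [NaClRoundAllocPage(end of static text), rodata_start) in
 * user address space and is backed by a shared memory descriptor, so
 * the service runtime can map a separate writable view of the same
 * pages while the untrusted view stays read+execute.
 */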
NaClErrorCode NaClMakeDynamicTextShared(struct NaClApp *nap) {
  uintptr_t                   dynamic_text_size;
  uintptr_t                   shm_vaddr_base;
  int                         mmap_protections;
  uintptr_t                   mmap_ret;

  uintptr_t                   shm_upper_bound;
  uintptr_t                   text_sysaddr;
  struct NaClDesc *           shm;

  shm_vaddr_base = NaClEndOfStaticText(nap);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);
  shm_vaddr_base = NaClRoundAllocPage(shm_vaddr_base);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);

  /*
   * Default is that there is no usable dynamic code area.
   */
  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_vaddr_base;
  if (!nap->use_shm_for_dynamic_text) {
    NaClLog(4,
            "NaClMakeDynamicTextShared:"
            "  rodata / data segments not allocation aligned\n");
    NaClLog(4,
            " not using shm for text\n");
    return LOAD_OK;
  }

  /*
   * Allocate a shm region the size of which is nap->rodata_start -
   * end-of-text.  This implies that the "core" text will not be
   * backed by shm.
   */
  shm_upper_bound = nap->rodata_start;
  if (0 == shm_upper_bound) {
    shm_upper_bound = NaClTruncAllocPage(nap->data_start);
  }
  if (0 == shm_upper_bound) {
    shm_upper_bound = shm_vaddr_base;
  }

  NaClLog(4, "shm_upper_bound = %08"NACL_PRIxPTR"\n", shm_upper_bound);

  dynamic_text_size = shm_upper_bound - shm_vaddr_base;
  NaClLog(4,
          "NaClMakeDynamicTextShared: dynamic_text_size = %"NACL_PRIxPTR"\n",
          dynamic_text_size);

  if (0 == dynamic_text_size) {
    NaClLog(4, "Empty JITtable region\n");
    return LOAD_OK;
  }

  shm = MakeImcShmDesc(dynamic_text_size);
  if (!shm) {
    return LOAD_NO_MEMORY_FOR_DYNAMIC_TEXT;
  }

  text_sysaddr = NaClUserToSys(nap, shm_vaddr_base);

  /*
   * On Windows, we must unmap this range before the OS will let us remap
   * it.  This involves opening up an address space hole, which is risky
   * because another thread might call mmap() and receive an allocation
   * inside that hole.  We don't need to take that risk on Unix, where
   * MAP_FIXED overwrites mappings atomically.
   *
   * We use NaClPageFree() here because the existing memory was mapped
   * using VirtualAlloc().
   */
  if (NACL_WINDOWS) {
    NaClPageFree((void *) text_sysaddr, dynamic_text_size);
  }

  /*
   * Unix allows us to map pages with PROT_NONE initially and later
   * increase the mapping permissions with mprotect().
   *
   * Windows does not allow this, however: the initial permissions are
   * an upper bound on what the permissions may later be changed to
   * with VirtualProtect() or VirtualAlloc().  Given this, using
   * PROT_NONE at this point does not even make sense.  On Windows,
   * the pages start off as uncommitted, which makes them inaccessible
   * regardless of the page permissions they are mapped with.
   *
   * Write permissions are included here for nacl64-gdb to set
   * breakpoints.
   */
#if NACL_WINDOWS
  mmap_protections =
    NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC | NACL_ABI_PROT_WRITE;
#else
  mmap_protections = NACL_ABI_PROT_NONE;
#endif
  NaClLog(4,
          "NaClMakeDynamicTextShared: Map(,,0x%"NACL_PRIxPTR",size = 0x%x,"
          " prot=0x%x, flags=0x%x, offset=0)\n",
          text_sysaddr,
          (int) dynamic_text_size,
          mmap_protections,
          NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED);
  mmap_ret = (*NACL_VTBL(NaClDesc, shm)->
              Map)(shm,
                   NaClDescEffectorTrustedMem(),
                   (void *) text_sysaddr,
                   dynamic_text_size,
                   mmap_protections,
                   NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                   0);
  if (text_sysaddr != mmap_ret) {
    NaClLog(LOG_FATAL, "Could not map in shm for dynamic text region\n");
  }

  nap->dynamic_page_bitmap =
    BitmapAllocate((uint32_t) (dynamic_text_size / NACL_MAP_PAGESIZE));
  if (NULL == nap->dynamic_page_bitmap) {
    NaClLog(LOG_FATAL, "NaClMakeDynamicTextShared: BitmapAllocate() failed\n");
  }

  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_upper_bound;
  nap->text_shm = shm;
  return LOAD_OK;
}

/*
 * Binary searches nap->dynamic_regions for the maximal region with
 * start <= ptr.  Caller must hold nap->dynamic_load_mutex, and must
 * discard the result once the lock is released.
 */
struct NaClDynamicRegion* NaClDynamicRegionFindClosestLEQ(struct NaClApp *nap,
                                                          uintptr_t ptr) {
  const int kBinarySearchToScanCutoff = 16;
  int begin = 0;
  int end = nap->num_dynamic_regions;
  if (0 == nap->num_dynamic_regions) {
    return NULL;
  }
  /* as an optimization, check the last region first */
  if (nap->dynamic_regions[nap->num_dynamic_regions-1].start <= ptr) {
    return nap->dynamic_regions + nap->num_dynamic_regions-1;
  }
  /* comes before everything */
  if (ptr < nap->dynamic_regions[0].start) {
    return NULL;
  }
  /* binary search, until range is small */
  while (begin + kBinarySearchToScanCutoff + 1 < end) {
    int mid = begin + (end - begin)/2;
    if (nap->dynamic_regions[mid].start <= ptr) {
      begin = mid;
    } else {
      end = mid;
    }
  }
  /* linear scan, faster for small ranges */
  while (begin + 1 < end && nap->dynamic_regions[begin + 1].start <= ptr) {
    begin++;
  }
  return nap->dynamic_regions + begin;
}
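
/*
 * Example: with regions starting at 0x10000 and 0x30000, a query for
 * ptr = 0x2ffff returns the region starting at 0x10000 even if that
 * region ends before ptr, so callers must still check the size, as
 * NaClDynamicRegionFind() does below.
 */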

struct NaClDynamicRegion* NaClDynamicRegionFind(struct NaClApp *nap,
                                                uintptr_t ptr,
                                                size_t size) {
  struct NaClDynamicRegion *p =
      NaClDynamicRegionFindClosestLEQ(nap, ptr + size - 1);
  return (p != NULL && ptr < p->start + p->size) ? p : NULL;
}

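/*
 * Inserts a new region into nap->dynamic_regions, keeping the array
 * sorted by start address so that NaClDynamicRegionFindClosestLEQ()
 * can binary search it.  Returns 1 on success, or 0 if the target
 * range overlaps an existing region.
 * Caller must hold nap->dynamic_load_mutex.
 */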
int NaClDynamicRegionCreate(struct NaClApp *nap,
                            uintptr_t start,
                            size_t size,
                            int is_mmap) {
  struct NaClDynamicRegion item, *regionp, *end;
  item.start = start;
  item.size = size;
  item.delete_generation = -1;
  item.is_mmap = is_mmap;
  if (nap->dynamic_regions_allocated == nap->num_dynamic_regions) {
    /* out of space, double buffer size */
    nap->dynamic_regions_allocated *= 2;
    if (nap->dynamic_regions_allocated < kMinDynamicRegionsAllocated) {
      nap->dynamic_regions_allocated = kMinDynamicRegionsAllocated;
    }
    nap->dynamic_regions = realloc(nap->dynamic_regions,
                sizeof(struct NaClDynamicRegion) *
                   nap->dynamic_regions_allocated);
    if (NULL == nap->dynamic_regions) {
      NaClLog(LOG_FATAL, "NaClDynamicRegionCreate: realloc failed");
      return 0;
    }
  }
  /* find preceding entry */
  regionp = NaClDynamicRegionFindClosestLEQ(nap, start + size - 1);
  if (regionp != NULL && start < regionp->start + regionp->size) {
    /* target already in use */
    return 0;
  }
  if (NULL == regionp) {
    /* start at beginning if we couldn't find predecessor */
    regionp = nap->dynamic_regions;
  }
  end = nap->dynamic_regions + nap->num_dynamic_regions;
  /* scroll to insertion point (this should scroll at most 1 element) */
  for (; regionp != end && regionp->start < item.start; ++regionp);
  /* insert and shift everything forward by 1 */
  for (; regionp != end; ++regionp) {
    /* swap(*regionp, item); */
    struct NaClDynamicRegion t = *regionp;
    *regionp = item;
    item = t;
  }
  *regionp = item;
  nap->num_dynamic_regions++;
  return 1;
}

void NaClDynamicRegionDelete(struct NaClApp *nap, struct NaClDynamicRegion* r) {
  struct NaClDynamicRegion *end = nap->dynamic_regions
                                + nap->num_dynamic_regions;
  /* shift everything down */
  for (; r + 1 < end; ++r) {
    r[0] = r[1];
  }
  nap->num_dynamic_regions--;

  if (nap->dynamic_regions_allocated > kMinDynamicRegionsAllocated
     && nap->dynamic_regions_allocated/4 > nap->num_dynamic_regions) {
    /* too much waste, shrink buffer */
    nap->dynamic_regions_allocated /= 2;
    nap->dynamic_regions = realloc(nap->dynamic_regions,
                sizeof(struct NaClDynamicRegion) *
                   nap->dynamic_regions_allocated);
    if (NULL == nap->dynamic_regions) {
      NaClLog(LOG_FATAL, "NaClDynamicRegionDelete: realloc failed");
      return;
    }
  }
}


void NaClSetThreadGeneration(struct NaClAppThread *natp, int generation) {
  /*
   * The outer check handles the fast case (no change).  Since threads
   * only set their own generation, the unlocked read is safe.
   */
  if (natp->dynamic_delete_generation != generation) {
    NaClXMutexLock(&natp->mu);
    CHECK(natp->dynamic_delete_generation <= generation);
    natp->dynamic_delete_generation = generation;
    NaClXMutexUnlock(&natp->mu);
  }
}

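/*
 * Returns the oldest (smallest) delete generation that any live
 * thread has acknowledged.  A region whose delete_generation is <=
 * this minimum can safely be reclaimed: every thread has updated its
 * generation since the region's bundle heads were replaced with
 * halts, so none can still be executing inside it.
 */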
int NaClMinimumThreadGeneration(struct NaClApp *nap) {
  size_t index;
  int rv = INT_MAX;
  NaClXMutexLock(&nap->threads_mu);
  for (index = 0; index < nap->threads.num_entries; ++index) {
    struct NaClAppThread *thread = NaClGetThreadMu(nap, (int) index);
    if (thread != NULL) {
      NaClXMutexLock(&thread->mu);
      if (rv > thread->dynamic_delete_generation) {
        rv = thread->dynamic_delete_generation;
      }
      NaClXMutexUnlock(&thread->mu);
    }
  }
  NaClXMutexUnlock(&nap->threads_mu);
  return rv;
}

static void CopyBundleTails(uint8_t *dest,
                            uint8_t *src,
                            int32_t size,
                            int     bundle_size) {
  /*
   * The order in which these locations are written does not matter:
   * none of the locations will be reachable, because the bundle heads
   * still contain HLTs.
   */
  int       bundle_mask = bundle_size - 1;
  uint32_t  *src_ptr;
  uint32_t  *dest_ptr;
  uint32_t  *end_ptr;

  CHECK(0 == ((uintptr_t) dest & 3));

  src_ptr = (uint32_t *) src;
  dest_ptr = (uint32_t *) dest;
  end_ptr = (uint32_t *) (dest + size);
  while (dest_ptr < end_ptr) {
    if ((((uintptr_t) dest_ptr) & bundle_mask) != 0) {
      *dest_ptr = *src_ptr;
    }
    dest_ptr++;
    src_ptr++;
  }
}

static void CopyBundleHeads(uint8_t  *dest,
                            uint8_t  *src,
                            uint32_t size,
                            int      bundle_size) {
  /* Again, the order in which these locations are written does not matter. */
  uint8_t *src_ptr;
  uint8_t *dest_ptr;
  uint8_t *end_ptr;

  /* dest must be aligned for the writes to be atomic. */
  CHECK(0 == ((uintptr_t) dest & 3));

  src_ptr = src;
  dest_ptr = dest;
  end_ptr = dest + size;
  while (dest_ptr < end_ptr) {
    /*
     * We assume that writing the 32-bit int here is atomic, which is
     * the case on x86 and ARM as long as the address is word-aligned.
     * The read does not have to be atomic.
     */
    *(uint32_t *) dest_ptr = *(uint32_t *) src_ptr;
    dest_ptr += bundle_size;
    src_ptr += bundle_size;
  }
}

static void ReplaceBundleHeadsWithHalts(uint8_t  *dest,
                                        uint32_t size,
                                        int      bundle_size) {
  uint32_t *dest_ptr = (uint32_t*) dest;
  uint32_t *end_ptr = (uint32_t*) (dest + size);
  while (dest_ptr < end_ptr) {
    /* don't assume a 1-byte halt; write the entire NACL_HALT_WORD */
    *dest_ptr = NACL_HALT_WORD;
    dest_ptr += bundle_size / sizeof(uint32_t);
  }
  NaClWriteMemoryBarrier();
}

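/*
 * Copies code into a region whose bundle heads currently hold halts:
 * tails are written first, then a write barrier is issued, and only
 * then are the heads written, so no thread can enter a bundle whose
 * tail is incomplete.
 */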
static INLINE void CopyCodeSafelyInitial(uint8_t  *dest,
                                         uint8_t  *src,
                                         uint32_t size,
                                         int      bundle_size) {
  CopyBundleTails(dest, src, size, bundle_size);
  NaClWriteMemoryBarrier();
  CopyBundleHeads(dest, src, size, bundle_size);
}

static void MakeDynamicCodePagesVisible(struct NaClApp *nap,
                                        uint32_t page_index_min,
                                        uint32_t page_index_max,
                                        uint8_t *writable_addr) {
  void *user_addr;
  uint32_t index;
  size_t size = (page_index_max - page_index_min) * NACL_MAP_PAGESIZE;

  for (index = page_index_min; index < page_index_max; index++) {
    CHECK(!BitmapIsBitSet(nap->dynamic_page_bitmap, index));
    BitmapSetBit(nap->dynamic_page_bitmap, index);
  }
  user_addr = (void *) NaClUserToSys(nap, nap->dynamic_text_start
                                     + page_index_min * NACL_MAP_PAGESIZE);

#if NACL_WINDOWS
  NaClUntrustedThreadsSuspendAll(nap, /* save_registers= */ 0);

  /*
   * The VirtualAlloc() call here has two effects:
   *
   *  1) It commits the page in the shared memory (SHM) object,
   *     allocating swap space and making the page accessible.  This
   *     affects our writable mapping of the shared memory object too.
   *     Before the VirtualAlloc() call, dereferencing writable_addr
   *     would fault.
   *  2) It changes the page permissions of the mapping to
   *     read+execute.  Since this exposes the page in its unsafe,
   *     non-HLT-filled state, this must be done with untrusted
   *     threads suspended.
   */
  {
    uintptr_t offset;
    for (offset = 0; offset < size; offset += NACL_MAP_PAGESIZE) {
      void *user_page_addr = (char *) user_addr + offset;
      if (VirtualAlloc(user_page_addr, NACL_MAP_PAGESIZE,
                       MEM_COMMIT, PAGE_EXECUTE_READ) != user_page_addr) {
        NaClLog(LOG_FATAL, "MakeDynamicCodePagesVisible: "
                "VirtualAlloc() failed -- probably out of swap space\n");
      }
    }
  }
#endif

  /* Sanity check:  Ensure the page is not already in use. */
  CHECK(*writable_addr == 0);

  NaClFillMemoryRegionWithHalt(writable_addr, size);

#if NACL_WINDOWS
  NaClUntrustedThreadsResumeAll(nap);
#else
  if (NaClMprotect(user_addr, size, PROT_READ | PROT_EXEC) != 0) {
    NaClLog(LOG_FATAL, "MakeDynamicCodePagesVisible: NaClMprotect() failed\n");
  }
#endif
}

/*
 * Maps a writable version of the code at [offset, offset+size) and returns a
 * pointer to the new mapping.  Internally caches the last mapping between
 * calls.  Pass offset=0,size=0 to clear the cache.
 * Caller must hold nap->dynamic_load_mutex.
 */
static uintptr_t CachedMapWritableText(struct NaClApp *nap,
                                       uint32_t offset,
                                       uint32_t size) {
  /*
   * The nap->* variables used in this function can be in two states:
   *
   * 1)
   * nap->dynamic_mapcache_size == 0
   * nap->dynamic_mapcache_ret == 0
   *
   * Initial state, nothing is cached.
   *
   * 2)
   * nap->dynamic_mapcache_size != 0
   * nap->dynamic_mapcache_ret != 0
   *
   * We have a cached mmap result stored, that must be unmapped.
   */
  struct NaClDesc            *shm = nap->text_shm;

  if (offset != nap->dynamic_mapcache_offset
          || size != nap->dynamic_mapcache_size) {
    /*
     * cache miss; first clear the old cache if needed
     */
    if (nap->dynamic_mapcache_size > 0) {
      NaClHostDescUnmapUnsafe((void *) nap->dynamic_mapcache_ret,
                              nap->dynamic_mapcache_size);
      nap->dynamic_mapcache_offset = 0;
      nap->dynamic_mapcache_size = 0;
      nap->dynamic_mapcache_ret = 0;
    }

    /*
     * update the cached version
     */
    if (size > 0) {
      uint32_t current_page_index;
      uint32_t end_page_index;

      uintptr_t mapping = (*((struct NaClDescVtbl const *)
            shm->base.vtbl)->
              Map)(shm,
                   NaClDescEffectorTrustedMem(),
                   NULL,
                   size,
                   NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                   NACL_ABI_MAP_SHARED,
                   offset);
      if (NaClPtrIsNegErrno(&mapping)) {
        return 0;
      }

      /*
       * To reduce the number of mprotect() system calls, we coalesce
       * MakeDynamicCodePagesVisible() calls for adjacent pages that
       * have not yet been allocated.
       */
      current_page_index = offset / NACL_MAP_PAGESIZE;
      end_page_index = (offset + size) / NACL_MAP_PAGESIZE;
      while (current_page_index < end_page_index) {
        uint32_t start_page_index = current_page_index;
        /* Find the end of this block of unallocated pages. */
        while (current_page_index < end_page_index &&
               !BitmapIsBitSet(nap->dynamic_page_bitmap, current_page_index)) {
          current_page_index++;
        }
        if (current_page_index > start_page_index) {
          uintptr_t writable_addr =
              mapping + (start_page_index * NACL_MAP_PAGESIZE - offset);
          MakeDynamicCodePagesVisible(nap, start_page_index, current_page_index,
                                      (uint8_t *) writable_addr);
        }
        current_page_index++;
      }

      nap->dynamic_mapcache_offset = offset;
      nap->dynamic_mapcache_size = size;
      nap->dynamic_mapcache_ret = mapping;
    }
  }
  return nap->dynamic_mapcache_ret;
}

/*
 * A wrapper around CachedMapWritableText that performs common address
 * calculations.
 * Outputs *mapped_addr.
 * Caller must hold nap->dynamic_load_mutex.
 * Returns boolean, true on success.
 */
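/*
 * Worked example (assuming NACL_MAP_PAGESIZE is 64KiB): for
 * dest = dynamic_text_start + 0x11000 and size = 0x2000, shm_offset
 * is 0x11000, so the mapping is requested at offset 0x10000 with
 * size 0x10000, and *mapped_addr points 0x1000 bytes into it.
 */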
static INLINE int NaClTextMapWrapper(struct NaClApp *nap,
                                     uint32_t dest,
                                     uint32_t size,
                                     uint8_t  **mapped_addr) {
  uint32_t  shm_offset;
  uint32_t  shm_map_offset;
  uint32_t  within_page_offset;
  uint32_t  shm_map_offset_end;
  uint32_t  shm_map_size;
  uintptr_t mmap_ret;
  uint8_t   *mmap_result;

  shm_offset = dest - (uint32_t) nap->dynamic_text_start;
  shm_map_offset = shm_offset & ~(NACL_MAP_PAGESIZE - 1);
  within_page_offset = shm_offset & (NACL_MAP_PAGESIZE - 1);
  shm_map_offset_end =
    (shm_offset + size + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1);
  shm_map_size = shm_map_offset_end - shm_map_offset;

  mmap_ret = CachedMapWritableText(nap,
                                   shm_map_offset,
                                   shm_map_size);
  if (0 == mmap_ret) {
    return 0;
  }
  mmap_result = (uint8_t *) mmap_ret;
  *mapped_addr = mmap_result + within_page_offset;
  return 1;
}

/*
 * Clear the mmap cache if multiple pages were mapped.
 * Caller must hold nap->dynamic_load_mutex.
 */
static INLINE void NaClTextMapClearCacheIfNeeded(struct NaClApp *nap,
                                                 uint32_t dest,
                                                 uint32_t size) {
  uint32_t                    shm_offset;
  uint32_t                    shm_map_offset;
  uint32_t                    shm_map_offset_end;
  uint32_t                    shm_map_size;
  shm_offset = dest - (uint32_t) nap->dynamic_text_start;
  shm_map_offset = shm_offset & ~(NACL_MAP_PAGESIZE - 1);
  shm_map_offset_end =
    (shm_offset + size + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1);
  shm_map_size = shm_map_offset_end - shm_map_offset;
  if (shm_map_size > NACL_MAP_PAGESIZE) {
    /* call with size==offset==0 to clear the cache */
    CachedMapWritableText(nap, 0, 0);
  }
}

int32_t NaClTextDyncodeCreate(struct NaClApp *nap,
                              uint32_t       dest,
                              void           *code_copy,
                              uint32_t       size,
                              const struct NaClValidationMetadata *metadata) {
  uintptr_t                   dest_addr;
  uint8_t                     *mapped_addr;
  int32_t                     retval = -NACL_ABI_EINVAL;
  int                         validator_result;
  struct NaClPerfCounter      time_dyncode_create;
  NaClPerfCounterCtor(&time_dyncode_create, "NaClTextDyncodeCreate");

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClTextDyncodeCreate: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }
  if (0 != (dest & (nap->bundle_size - 1)) ||
      0 != (size & (nap->bundle_size - 1))) {
    NaClLog(1, "NaClTextDyncodeCreate: Non-bundle-aligned address or size\n");
    return -NACL_ABI_EINVAL;
  }
  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  if (kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClTextDyncodeCreate: Dest address out of range\n");
    return -NACL_ABI_EFAULT;
  }
  if (dest < nap->dynamic_text_start) {
    NaClLog(1, "NaClTextDyncodeCreate: Below dynamic code area\n");
    return -NACL_ABI_EFAULT;
  }
  /*
   * We ensure that the final HLTs of the dynamic code region cannot
   * be overwritten, just in case of CPU bugs.
   */
  if (dest + size > nap->dynamic_text_end - NACL_HALT_SLED_SIZE) {
    NaClLog(1, "NaClTextDyncodeCreate: Above dynamic code area\n");
    return -NACL_ABI_EFAULT;
  }
  if (0 == size) {
    /* Nothing to load.  Succeed trivially. */
    return 0;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  /*
   * Validate the code before trying to create the region.  This
   * avoids the need to delete the region if validation fails.
   * See: http://code.google.com/p/nativeclient/issues/detail?id=2566
   */
  if (!nap->skip_validator) {
    validator_result = NaClValidateCode(nap, dest, code_copy, size, metadata);
  } else {
    NaClLog(LOG_ERROR, "VALIDATION SKIPPED.\n");
    validator_result = LOAD_OK;
  }

  NaClPerfCounterMark(&time_dyncode_create,
                      NACL_PERF_IMPORTANT_PREFIX "DynRegionValidate");
  NaClPerfCounterIntervalLast(&time_dyncode_create);

  if (validator_result != LOAD_OK
      && nap->ignore_validator_result) {
    NaClLog(LOG_ERROR, "VALIDATION FAILED for dynamically-loaded code: "
            "continuing anyway...\n");
    validator_result = LOAD_OK;
  }

  if (validator_result != LOAD_OK) {
    NaClLog(1, "NaClTextDyncodeCreate: "
            "Validation of dynamic code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (NaClDynamicRegionCreate(nap, dest_addr, size, 0) != 1) {
    /* target addr is in use */
    NaClLog(1, "NaClTextDyncodeCreate: Code range already allocated\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup_unlock;
  }

  CopyCodeSafelyInitial(mapped_addr, code_copy, size, nap->bundle_size);
  /*
   * Flush the processor's instruction cache.  This is not necessary
   * for security, because any old cached instructions will just be
   * safe halt instructions.  It is only necessary to ensure that
   * untrusted code runs correctly when it tries to execute the
   * dynamically-loaded code.
   */
  NaClFlushCacheForDoublyMappedCode(mapped_addr, (uint8_t *) dest_addr, size);

  retval = 0;

  NaClTextMapClearCacheIfNeeded(nap, dest, size);

 cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
  return retval;
}

int32_t NaClSysDyncodeCreate(struct NaClAppThread *natp,
                             uint32_t             dest,
                             uint32_t             src,
                             uint32_t             size) {
  struct NaClApp              *nap = natp->nap;
  uintptr_t                   src_addr;
  uint8_t                     *code_copy;
  int32_t                     retval = -NACL_ABI_EINVAL;

  if (!nap->enable_dyncode_syscalls) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeCreate: Dynamic code syscalls are disabled\n");
    return -NACL_ABI_ENOSYS;
  }

  src_addr = NaClUserToSysAddrRange(nap, src, size);
  if (kNaClBadAddress == src_addr) {
    NaClLog(1, "NaClSysDyncodeCreate: Source address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  /*
   * Make a private copy of the code, so that we can validate it
   * without a TOCTTOU race condition.
   */
  code_copy = malloc(size);
  if (NULL == code_copy) {
    return -NACL_ABI_ENOMEM;
  }
  memcpy(code_copy, (uint8_t*) src_addr, size);

  /* Unknown data source, no metadata. */
  retval = NaClTextDyncodeCreate(nap, dest, code_copy, size, NULL);

  free(code_copy);
  return retval;
}

int32_t NaClSysDyncodeModify(struct NaClAppThread *natp,
                             uint32_t             dest,
                             uint32_t             src,
                             uint32_t             size) {
  struct NaClApp              *nap = natp->nap;
  uintptr_t                   dest_addr;
  uintptr_t                   src_addr;
  uintptr_t                   beginbundle;
  uintptr_t                   endbundle;
  uintptr_t                   offset;
  uint8_t                     *mapped_addr;
  uint8_t                     *code_copy = NULL;
  uint8_t                     code_copy_buf[NACL_INSTR_BLOCK_SIZE];
  int                         validator_result;
  int32_t                     retval = -NACL_ABI_EINVAL;
  struct NaClDynamicRegion    *region;

  if (!nap->validator->code_replacement) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeModify: "
            "Dynamic code modification is not supported\n");
    return -NACL_ABI_ENOSYS;
  }

  if (!nap->enable_dyncode_syscalls) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeModify: Dynamic code syscalls are disabled\n");
    return -NACL_ABI_ENOSYS;
  }

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClSysDyncodeModify: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }

  if (0 == size) {
    /* Nothing to modify.  Succeed trivially. */
    return 0;
  }

  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  src_addr = NaClUserToSysAddrRange(nap, src, size);
  if (kNaClBadAddress == src_addr || kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClSysDyncodeModify: Address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  region = NaClDynamicRegionFind(nap, dest_addr, size);
  if (NULL == region ||
      region->start > dest_addr ||
      region->start + region->size < dest_addr + size ||
      region->is_mmap) {
    /*
     * The target is not a subregion of an existing region, or the
     * region came from a file-backed mmap.
     */
    NaClLog(1, "NaClSysDyncodeModify: Can't find region to modify\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unlock;
  }

  beginbundle = dest_addr & ~(nap->bundle_size - 1);
  endbundle   = (dest_addr + size - 1 + nap->bundle_size)
                  & ~(nap->bundle_size - 1);
  offset      = dest_addr &  (nap->bundle_size - 1);
  if (endbundle - beginbundle <= sizeof code_copy_buf) {
    /* usually patches are a single bundle, so stack allocate */
    code_copy = code_copy_buf;
  } else {
    /* in the general case, heap allocate */
    code_copy = malloc(endbundle - beginbundle);
    if (NULL == code_copy) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup_unlock;
    }
  }

  /* copy the bundles from already-inserted code */
  memcpy(code_copy, (uint8_t*) beginbundle, endbundle - beginbundle);

  /*
   * Make the requested change in a temporary location; this avoids a
   * TOCTTOU race.
   */
  memcpy(code_copy + offset, (uint8_t*) src_addr, size);

  /* update dest/size to refer to entire bundles */
  dest      &= ~(nap->bundle_size - 1);
  dest_addr &= ~((uintptr_t)nap->bundle_size - 1);
  /* since both are in sandbox memory this check should succeed */
  CHECK(endbundle - beginbundle < UINT32_MAX);
  size = (uint32_t)(endbundle - beginbundle);

  /* validate this code as a replacement */
  validator_result = NaClValidateCodeReplacement(nap,
                                                 dest,
                                                 (uint8_t*) dest_addr,
                                                 code_copy,
                                                 size);

  if (validator_result != LOAD_OK
      && nap->ignore_validator_result) {
    NaClLog(LOG_ERROR, "VALIDATION FAILED for dynamically-loaded code: "
                       "continuing anyway...\n");
    validator_result = LOAD_OK;
  }

  if (validator_result != LOAD_OK) {
    NaClLog(1, "NaClSysDyncodeModify: Validation of dynamic code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup_unlock;
  }

  if (LOAD_OK != NaClCopyCode(nap, dest, mapped_addr, code_copy, size)) {
    NaClLog(1, "NaClSysDyncodeModify: Copying of replacement code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }
  retval = 0;

  NaClTextMapClearCacheIfNeeded(nap, dest, size);

 cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);

  if (code_copy != code_copy_buf) {
    free(code_copy);
  }

  return retval;
}

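/*
 * Deletion is a two-phase protocol: the first call replaces the
 * region's bundle heads with halts and returns -NACL_ABI_EAGAIN, and
 * the region is only removed once every thread has advanced past the
 * recorded delete generation.  An illustrative caller-side sketch
 * (not from the original source) is simply a retry loop:
 *
 *   do {
 *     rv = NaClSysDyncodeDelete(natp, dest, size);
 *   } while (-NACL_ABI_EAGAIN == rv);
 */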
int32_t NaClSysDyncodeDelete(struct NaClAppThread *natp,
                             uint32_t             dest,
                             uint32_t             size) {
  struct NaClApp              *nap = natp->nap;
  uintptr_t                   dest_addr;
  uint8_t                     *mapped_addr;
  int32_t                     retval = -NACL_ABI_EINVAL;
  struct NaClDynamicRegion    *region;

  if (!nap->enable_dyncode_syscalls) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeDelete: Dynamic code syscalls are disabled\n");
    return -NACL_ABI_ENOSYS;
  }

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClSysDyncodeDelete: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }

  if (0 == size) {
    /* Nothing to delete.  Just update our generation. */
    int gen;
    /* fetch current generation */
    NaClXMutexLock(&nap->dynamic_load_mutex);
    gen = nap->dynamic_delete_generation;
    NaClXMutexUnlock(&nap->dynamic_load_mutex);
    /* set our generation */
    NaClSetThreadGeneration(natp, gen);
    return 0;
  }

  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  if (kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClSysDyncodeDelete: Address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  /*
   * This check ensures the to-be-deleted region is identical to a
   * previously inserted region, so there is no need to check
   * alignment/bounds/etc.
   */
  region = NaClDynamicRegionFind(nap, dest_addr, size);
  if (NULL == region ||
      region->start != dest_addr ||
      region->size != size ||
      region->is_mmap) {
    NaClLog(1, "NaClSysDyncodeDelete: Can't find region to delete\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unlock;
  }

  if (region->delete_generation < 0) {
    /* first deletion request */

    if (nap->dynamic_delete_generation == INT32_MAX) {
      NaClLog(1, "NaClSysDyncodeDelete: "
                 "Overflow, can only delete INT32_MAX regions\n");
      retval = -NACL_ABI_EFAULT;
      goto cleanup_unlock;
    }

    if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup_unlock;
    }

    /* make it so no new threads can enter the target region */
    ReplaceBundleHeadsWithHalts(mapped_addr, size, nap->bundle_size);

    /*
     * Flush the instruction cache.  In principle this is needed for
     * security on ARM so that, when new code is loaded, it is not
     * possible for it to jump to stale code that remains in the
     * icache.
     */
    NaClFlushCacheForDoublyMappedCode(mapped_addr, (uint8_t *) dest_addr, size);

    NaClTextMapClearCacheIfNeeded(nap, dest, size);

    /* increment and record the generation deletion was requested */
    region->delete_generation = ++nap->dynamic_delete_generation;
  }

  /* update our own generation */
  NaClSetThreadGeneration(natp, nap->dynamic_delete_generation);

  if (region->delete_generation <= NaClMinimumThreadGeneration(nap)) {
    /*
     * All threads have checked in since we marked the region for
     * deletion, so it is safe to remove it.
     *
     * No need to memset the region to HLT since the bundle heads are
     * HLTs and thus the bodies are unreachable.
     */
    NaClDynamicRegionDelete(nap, region);
    retval = 0;
  } else {
    /*
     * Still waiting for some threads to report in...
     */
    retval = -NACL_ABI_EAGAIN;
  }

 cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
  return retval;
}

void NaClDyncodeVisit(
    struct NaClApp *nap,
    void           (*fn)(void *state, struct NaClDynamicRegion *region),
    void           *state) {
  int            i;

  NaClXMutexLock(&nap->dynamic_load_mutex);
  for (i = 0; i < nap->num_dynamic_regions; ++i) {
    fn(state, &nap->dynamic_regions[i]);
  }
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
}