/*
 * Copyright (c) 2011 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/*
 * NaCl Simple/secure ELF loader (NaCl SEL) memory map.
 */

#include "native_client/src/include/portability.h"

#include <assert.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>

#include "native_client/src/include/nacl_platform.h"
#include "native_client/src/include/portability.h"

#include "native_client/src/shared/platform/nacl_check.h"
#include "native_client/src/shared/platform/nacl_log.h"
#include "native_client/src/shared/platform/nacl_host_desc.h"
#include "native_client/src/trusted/desc/nacl_desc_base.h"
#include "native_client/src/trusted/desc/nacl_desc_io.h"
#include "native_client/src/trusted/service_runtime/sel_mem.h"
#include "native_client/src/trusted/service_runtime/sel_util.h"
#include "native_client/src/trusted/service_runtime/nacl_config.h"

#include "native_client/src/trusted/service_runtime/include/sys/fcntl.h"
#include "native_client/src/trusted/service_runtime/include/sys/mman.h"

#define START_ENTRIES 5  /* tramp+text, rodata, data, bss, stack */
#define REMOVE_MARKED_DEBUG 0


/*
 * The memory map structure is a simple array of memory regions which
 * may have different access protections. We do not yet merge adjacent
 * regions with the same access protections in order to reduce the
 * number of regions, but may do so in the future.
 *
 * Regions are described by a (relative) starting page number, the
 * number of pages, and the protection that the pages should have.
 */
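
/*
 * Illustrative sketch (not part of the original code): a typical lifetime
 * of a map is Ctor, a series of Add calls describing the initial regions,
 * queries, then Dtor. The variable |map| and all page numbers, sizes and
 * protections below are hypothetical values chosen only to show the call
 * shapes.
 *
 *   struct NaClVmmap map;
 *   if (!NaClVmmapCtor(&map)) {
 *     ... handle allocation failure ...
 *   }
 *   ... a 16-page anonymous read/write region at page 0x100 ...
 *   NaClVmmapAdd(&map, 0x100, 16,
 *                NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
 *                NACL_ABI_MAP_PRIVATE,
 *                (struct NaClDesc *) NULL, 0, 0);
 *   ...
 *   NaClVmmapDtor(&map);
 */
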
struct NaClVmmapEntry *NaClVmmapEntryMake(uintptr_t page_num,
                                          size_t npages,
                                          int prot,
                                          int flags,
                                          struct NaClDesc *desc,
                                          nacl_off64_t offset,
                                          nacl_off64_t file_size) {
  struct NaClVmmapEntry *entry;

  NaClLog(4,
          "NaClVmmapEntryMake(0x%"NACL_PRIxPTR",0x%"NACL_PRIxS","
          "0x%x,0x%x,0x%"NACL_PRIxPTR",0x%"NACL_PRIx64")\n",
          page_num, npages, prot, flags, (uintptr_t) desc, offset);
  entry = (struct NaClVmmapEntry *) malloc(sizeof *entry);
  if (NULL == entry) {
    return 0;
  }
  NaClLog(4, "entry: 0x%"NACL_PRIxPTR"\n", (uintptr_t) entry);
  entry->page_num = page_num;
  entry->npages = npages;
  entry->prot = prot;
  entry->flags = flags;
  entry->removed = 0;
  entry->desc = desc;
  if (desc != NULL) {
    NaClDescRef(desc);
  }
  entry->offset = offset;
  entry->file_size = file_size;
  return entry;
}


void NaClVmmapEntryFree(struct NaClVmmapEntry *entry) {
  NaClLog(4,
          ("NaClVmmapEntryFree(0x%08"NACL_PRIxPTR
           "): (0x%"NACL_PRIxPTR",0x%"NACL_PRIxS","
           "0x%x,0x%x,0x%"NACL_PRIxPTR",0x%"NACL_PRIx64")\n"),
          (uintptr_t) entry,
          entry->page_num, entry->npages, entry->prot,
          entry->flags, (uintptr_t) entry->desc, entry->offset);

  if (entry->desc != NULL) {
    NaClDescSafeUnref(entry->desc);
  }
  free(entry);
}


/*
 * Print debug.
 */
void NaClVmentryPrint(void *state,
                      struct NaClVmmapEntry *vmep) {
  UNREFERENCED_PARAMETER(state);

  printf("page num 0x%06x\n", (uint32_t) vmep->page_num);
  printf("num pages %u\n", (uint32_t) vmep->npages);
  printf("prot bits %x\n", vmep->prot);
  printf("flags %x\n", vmep->flags);
  fflush(stdout);
}


void NaClVmmapDebug(struct NaClVmmap *self,
                    char *msg) {
  puts(msg);
  NaClVmmapVisit(self, NaClVmentryPrint, (void *) 0);
  fflush(stdout);
}


int NaClVmmapCtor(struct NaClVmmap *self) {
  self->size = START_ENTRIES;
  if (SIZE_T_MAX / sizeof *self->vmentry < self->size) {
    return 0;
  }
  self->vmentry = calloc(self->size, sizeof *self->vmentry);
  if (!self->vmentry) {
    return 0;
  }
  self->nvalid = 0;
  self->is_sorted = 1;
  return 1;
}


void NaClVmmapDtor(struct NaClVmmap *self) {
  size_t i;

  for (i = 0; i < self->nvalid; ++i) {
    NaClVmmapEntryFree(self->vmentry[i]);
  }
  free(self->vmentry);
  self->vmentry = 0;
}

/*
 * Comparison function for qsort. Should never encounter a
 * removed/invalid entry.
 */

static int NaClVmmapCmpEntries(void const *vleft,
                               void const *vright) {
  struct NaClVmmapEntry const *const *left =
      (struct NaClVmmapEntry const *const *) vleft;
  struct NaClVmmapEntry const *const *right =
      (struct NaClVmmapEntry const *const *) vright;

  return (int) ((*left)->page_num - (*right)->page_num);
}


static void NaClVmmapRemoveMarked(struct NaClVmmap *self) {
  size_t i;
  size_t last;

  if (0 == self->nvalid)
    return;

#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "Before RemoveMarked");
#endif
  /*
   * Linearly scan with a move-end-to-front strategy to get rid of
   * marked-to-be-removed entries.
   */

  /*
   * Invariant:
   *
   * forall j in [0, self->nvalid): NULL != self->vmentry[j]
   */
  for (last = self->nvalid; last > 0 && self->vmentry[--last]->removed; ) {
    NaClVmmapEntryFree(self->vmentry[last]);
    self->vmentry[last] = NULL;
  }
  if (last == 0 && NULL == self->vmentry[0]) {
    /* every entry was marked removed and was freed in the loop above */
    NaClLog(LOG_FATAL, "No valid entries in VM map\n");
    return;
  }

  /*
   * Post condition of above loop:
   *
   * forall j in [0, last]: NULL != self->vmentry[j]
   *
   * 0 <= last < self->nvalid && !self->vmentry[last]->removed
   */
  CHECK(last < self->nvalid);
  CHECK(!self->vmentry[last]->removed);
  /*
   * and,
   *
   * forall j in (last, self->nvalid): NULL == self->vmentry[j]
   */

  /*
   * Loop invariant: forall j in [0, i): !self->vmentry[j]->removed
   */
  for (i = 0; i < last; ++i) {
    if (!self->vmentry[i]->removed) {
      continue;
    }
    /*
     * post condition: self->vmentry[i]->removed
     *
     * swap with entry at self->vmentry[last].
     */

    NaClVmmapEntryFree(self->vmentry[i]);
    self->vmentry[i] = self->vmentry[last];
    self->vmentry[last] = NULL;

    /*
     * Invariants here:
     *
     * forall j in [last, self->nvalid): NULL == self->vmentry[j]
     *
     * forall j in [0, i]: !self->vmentry[j]->removed
     */

    while (--last > i && self->vmentry[last]->removed) {
      NaClVmmapEntryFree(self->vmentry[last]);
      self->vmentry[last] = NULL;
    }
    /*
     * since !self->vmentry[i]->removed, we are guaranteed that
     * !self->vmentry[last]->removed when the while loop terminates.
     *
     * forall j in (last, self->nvalid): NULL == self->vmentry[j]
     */
  }
  /* i == last */
  /* forall j in [0, last]: !self->vmentry[j]->removed */
  /* forall j in (last, self->nvalid): NULL == self->vmentry[j] */
  self->nvalid = last + 1;

  self->is_sorted = 0;
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After RemoveMarked");
#endif
}


void NaClVmmapMakeSorted(struct NaClVmmap *self) {
  if (self->is_sorted)
    return;

  NaClVmmapRemoveMarked(self);

  qsort(self->vmentry,
        self->nvalid,
        sizeof *self->vmentry,
        NaClVmmapCmpEntries);
  self->is_sorted = 1;
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After Sort");
#endif
}

void NaClVmmapAdd(struct NaClVmmap *self,
                  uintptr_t page_num,
                  size_t npages,
                  int prot,
                  int flags,
                  struct NaClDesc *desc,
                  nacl_off64_t offset,
                  nacl_off64_t file_size) {
  struct NaClVmmapEntry *entry;

  NaClLog(2,
          ("NaClVmmapAdd(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIxS", 0x%x, 0x%x, 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIx64")\n"),
          (uintptr_t) self, page_num, npages, prot, flags,
          (uintptr_t) desc, offset);
  if (self->nvalid == self->size) {
    size_t new_size = 2 * self->size;
    struct NaClVmmapEntry **new_map;

    new_map = realloc(self->vmentry, new_size * sizeof *new_map);
    if (NULL == new_map) {
      NaClLog(LOG_FATAL, "NaClVmmapAdd: could not allocate memory\n");
      return;
    }
    self->vmentry = new_map;
    self->size = new_size;
  }
  /* self->nvalid < self->size */
  entry = NaClVmmapEntryMake(page_num, npages, prot, flags,
                             desc, offset, file_size);
  if (NULL == entry) {
    NaClLog(LOG_FATAL, "NaClVmmapAdd: could not allocate entry\n");
    return;
  }

  self->vmentry[self->nvalid] = entry;
  self->is_sorted = 0;
  ++self->nvalid;
}

/*
 * Update the virtual memory map. Deletion is handled by a remove
 * flag, since a NULL desc just means that the memory is backed by the
 * system paging file.
 */
static void NaClVmmapUpdate(struct NaClVmmap *self,
                            uintptr_t page_num,
                            size_t npages,
                            int prot,
                            int flags,
                            int remove,
                            struct NaClDesc *desc,
                            nacl_off64_t offset,
                            nacl_off64_t file_size) {
  /* update existing entries or create new entry as needed */
  size_t i;
  uintptr_t new_region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapUpdate(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIxS", 0x%x, 0x%x, %d, 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIx64")\n"),
          (uintptr_t) self, page_num, npages, prot, flags,
          remove, (uintptr_t) desc, offset);
  NaClVmmapMakeSorted(self);

  CHECK(npages > 0);

  for (i = 0; i < self->nvalid; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t ent_end_page = ent->page_num + ent->npages;
    nacl_off64_t additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;


    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /*
       * Split existing mapping into two parts, with new mapping in
       * the middle.
       */
      NaClVmmapAdd(self,
                   new_region_end_page,
                   ent_end_page - new_region_end_page,
                   ent->prot,
                   ent->flags,
                   ent->desc,
                   ent->offset + additional_offset,
                   ent->file_size);
      ent->npages = page_num - ent->page_num;
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping. */
      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      ent->offset += additional_offset;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      ent->removed = 1;
    } else {
      /* No overlap */
      assert(new_region_end_page <= ent->page_num || ent_end_page <= page_num);
    }
  }

  if (!remove) {
    NaClVmmapAdd(self, page_num, npages, prot, flags, desc, offset, file_size);
  }

  NaClVmmapRemoveMarked(self);
}
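
/*
 * Worked example (illustrative numbers only, not from the original code):
 * suppose the map holds a single entry covering pages [0x10, 0x30) and
 * NaClVmmapUpdate is called with page_num = 0x18, npages = 8 (that is,
 * [0x18, 0x20)) and remove = 0. The split case above applies: the existing
 * entry is trimmed to [0x10, 0x18), a new entry for [0x20, 0x30) is added
 * with its offset advanced by (0x20 - 0x10) << NACL_PAGESHIFT bytes, and
 * the new [0x18, 0x20) mapping is appended by the final NaClVmmapAdd call.
 */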

void NaClVmmapAddWithOverwrite(struct NaClVmmap *self,
                               uintptr_t page_num,
                               size_t npages,
                               int prot,
                               int flags,
                               struct NaClDesc *desc,
                               nacl_off64_t offset,
                               nacl_off64_t file_size) {
  NaClVmmapUpdate(self,
                  page_num,
                  npages,
                  prot,
                  flags,
                  /* remove= */ 0,
                  desc,
                  offset,
                  file_size);
}

void NaClVmmapRemove(struct NaClVmmap *self,
                     uintptr_t page_num,
                     size_t npages) {
  NaClVmmapUpdate(self,
                  page_num,
                  npages,
                  /* prot= */ 0,
                  /* flags= */ 0,
                  /* remove= */ 1,
                  /* desc= */ NULL,
                  /* offset= */ 0,
                  /* file_size= */ 0);
}
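
/*
 * Illustrative call sequence (hypothetical variables |map|, |page_num| and
 * |npages|; not part of the original code): a caller that has just mapped
 * a private read-only region would record it, and later drop it again:
 *
 *   NaClVmmapAddWithOverwrite(&map, page_num, npages,
 *                             NACL_ABI_PROT_READ, NACL_ABI_MAP_PRIVATE,
 *                             (struct NaClDesc *) NULL, 0, 0);
 *   ...
 *   NaClVmmapRemove(&map, page_num, npages);
 */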

/*
 * NaClVmmapCheckExistingMapping checks whether there is an existing mapping
 * whose maximum protection is equal to or higher than the requested one.
 */
static int NaClVmmapCheckExistingMapping(struct NaClVmmap *self,
                                         uintptr_t page_num,
                                         size_t npages,
                                         int prot) {
  size_t i;
  uintptr_t region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapCheckExistingMapping(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR
           ", 0x%"NACL_PRIxS", 0x%x)\n"),
          (uintptr_t) self, page_num, npages, prot);

  if (0 == self->nvalid) {
    return 0;
  }
  NaClVmmapMakeSorted(self);

  for (i = 0; i < self->nvalid; ++i) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t ent_end_page = ent->page_num + ent->npages;
    int flags = NaClVmmapEntryMaxProt(ent);

    if (ent->page_num <= page_num && region_end_page <= ent_end_page) {
      /* The mapping is inside existing entry. */
      return 0 == (prot & (~flags));
    } else if (ent->page_num <= page_num && page_num < ent_end_page) {
      /* The mapping overlaps the entry. */
      if (0 != (prot & (~flags))) {
        return 0;
      }
      page_num = ent_end_page;
      npages = region_end_page - ent_end_page;
    } else if (page_num < ent->page_num) {
      /* This part of the requested region has no existing mapping behind it. */
      return 0;
    }
  }
  return 0;
}

int NaClVmmapChangeProt(struct NaClVmmap *self,
                        uintptr_t page_num,
                        size_t npages,
                        int prot) {
  size_t i;
  size_t nvalid;
  uintptr_t new_region_end_page = page_num + npages;

  /*
   * NaClVmmapCheckExistingMapping must always be called before
   * NaClVmmapChangeProt proceeds, to ensure that a valid mapping exists,
   * since modifications cannot be rolled back.
   */
  if (!NaClVmmapCheckExistingMapping(self, page_num, npages, prot)) {
    return 0;
  }

  NaClLog(2,
          ("NaClVmmapChangeProt(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR
           ", 0x%"NACL_PRIxS", 0x%x)\n"),
          (uintptr_t) self, page_num, npages, prot);
  NaClVmmapMakeSorted(self);

  /*
   * This loop and its interval boundary tests closely follow those in
   * NaClVmmapUpdate. When changing one of them, remember to update the
   * other where appropriate.
   * TODO(phosek): use a better data structure that supports intervals.
   */

  for (i = 0, nvalid = self->nvalid; i < nvalid && npages > 0; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t ent_end_page = ent->page_num + ent->npages;
    nacl_off64_t additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /* Split existing mapping into two parts */
      NaClVmmapAdd(self,
                   new_region_end_page,
                   ent_end_page - new_region_end_page,
                   ent->prot,
                   ent->flags,
                   ent->desc,
                   ent->offset + additional_offset,
                   ent->file_size);
      ent->npages = page_num - ent->page_num;
      /* Add the new mapping into the middle. */
      NaClVmmapAdd(self,
                   page_num,
                   npages,
                   prot,
                   ent->flags,
                   ent->desc,
                   /* the entry offset is a byte offset, so scale the page delta */
                   ent->offset + ((page_num - ent->page_num) << NACL_PAGESHIFT),
                   ent->file_size);
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
      /* Add the overlapping part of the mapping. */
      NaClVmmapAdd(self,
                   page_num,
                   ent_end_page - page_num,
                   prot,
                   ent->flags,
                   ent->desc,
                   /* the entry offset is a byte offset, so scale the page delta */
                   ent->offset + ((page_num - ent->page_num) << NACL_PAGESHIFT),
                   ent->file_size);
      /* The remaining part (if any) will be added in another iteration. */
      page_num = ent_end_page;
      npages = new_region_end_page - ent_end_page;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping, split it. */
      NaClVmmapAdd(self,
                   page_num,
                   npages,
                   prot,
                   ent->flags,
                   ent->desc,
                   ent->offset,
                   ent->file_size);
      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      ent->offset += additional_offset;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      page_num = ent_end_page;
      npages = new_region_end_page - ent_end_page;
      ent->prot = prot;
    } else {
      /* No overlap */
      assert(new_region_end_page <= ent->page_num || ent_end_page <= page_num);
    }
  }
  return 1;
}

int NaClVmmapEntryMaxProt(struct NaClVmmapEntry *entry) {
  int flags = NACL_ABI_PROT_NONE;

  if (entry->desc != NULL && 0 == (entry->flags & NACL_ABI_MAP_PRIVATE)) {
    int o_flags = (*NACL_VTBL(NaClDesc, entry->desc)->GetFlags)(entry->desc);
    switch (o_flags & NACL_ABI_O_ACCMODE) {
      case NACL_ABI_O_RDONLY:
        flags = NACL_ABI_PROT_READ;
        break;
      case NACL_ABI_O_WRONLY:
        flags = NACL_ABI_PROT_WRITE;
        break;
      case NACL_ABI_O_RDWR:
        flags = NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE;
        break;
      default:
        NaClLog(LOG_FATAL, "Internal error: illegal O_ACCMODE\n");
        break;
    }
  } else {
    flags = NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE;
  }

  return flags;
}
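
/*
 * Example of how the maximum protection is used (descriptive only): an
 * entry backed by a shared mapping of a descriptor opened NACL_ABI_O_RDONLY
 * has a maximum protection of NACL_ABI_PROT_READ, so a NaClVmmapChangeProt
 * request that includes NACL_ABI_PROT_WRITE over that entry is rejected by
 * NaClVmmapCheckExistingMapping. Private or anonymous entries permit
 * NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE.
 */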

static int NaClVmmapContainCmpEntries(void const *vkey,
                                      void const *vent) {
  struct NaClVmmapEntry const *const *key =
      (struct NaClVmmapEntry const *const *) vkey;
  struct NaClVmmapEntry const *const *ent =
      (struct NaClVmmapEntry const *const *) vent;

  NaClLog(5, "key->page_num = 0x%05"NACL_PRIxPTR"\n", (*key)->page_num);

  NaClLog(5, "entry->page_num = 0x%05"NACL_PRIxPTR"\n", (*ent)->page_num);
  NaClLog(5, "entry->npages = 0x%"NACL_PRIxS"\n", (*ent)->npages);

  if ((*key)->page_num < (*ent)->page_num) return -1;
  if ((*key)->page_num < (*ent)->page_num + (*ent)->npages) return 0;
  return 1;
}

struct NaClVmmapEntry const *NaClVmmapFindPage(struct NaClVmmap *self,
                                               uintptr_t pnum) {
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry *const *result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry *const *)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  return result_ptr ? *result_ptr : NULL;
}


struct NaClVmmapIter *NaClVmmapFindPageIter(struct NaClVmmap *self,
                                            uintptr_t pnum,
                                            struct NaClVmmapIter *space) {
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry **result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry **)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  space->vmmap = self;
  if (NULL == result_ptr) {
    space->entry_ix = self->nvalid;
  } else {
    space->entry_ix = result_ptr - self->vmentry;
  }
  return space;
}


int NaClVmmapIterAtEnd(struct NaClVmmapIter *nvip) {
  return nvip->entry_ix >= nvip->vmmap->nvalid;
}


/*
 * IterStar only permissible if not AtEnd
 */
struct NaClVmmapEntry *NaClVmmapIterStar(struct NaClVmmapIter *nvip) {
  return nvip->vmmap->vmentry[nvip->entry_ix];
}


void NaClVmmapIterIncr(struct NaClVmmapIter *nvip) {
  ++nvip->entry_ix;
}


/*
 * The iterator becomes invalid after Erase. We could have a version that
 * keeps the iterator valid by copying entries forward, but it is unclear
 * whether that is needed.
 */
void NaClVmmapIterErase(struct NaClVmmapIter *nvip) {
  struct NaClVmmap *nvp;

  nvp = nvip->vmmap;
  /* NaClVmmapEntryFree also drops the NaClDesc reference held by the entry. */
  NaClVmmapEntryFree(nvp->vmentry[nvip->entry_ix]);
  nvp->vmentry[nvip->entry_ix] = nvp->vmentry[--nvp->nvalid];
  nvp->is_sorted = 0;
}
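
/*
 * Illustrative iterator walk (hypothetical variables |map| and |pnum|; not
 * part of the original code): visit every entry from the one containing
 * page |pnum| to the end of the map. Erase invalidates the iterator, so
 * this sketch only reads entries.
 *
 *   struct NaClVmmapIter space;
 *   struct NaClVmmapIter *it;
 *   for (it = NaClVmmapFindPageIter(&map, pnum, &space);
 *        !NaClVmmapIterAtEnd(it);
 *        NaClVmmapIterIncr(it)) {
 *     struct NaClVmmapEntry *ent = NaClVmmapIterStar(it);
 *     ... inspect ent ...
 *   }
 */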


void NaClVmmapVisit(struct NaClVmmap *self,
                    void (*fn)(void *state,
                               struct NaClVmmapEntry *entry),
                    void *state) {
  size_t i;
  size_t nentries;

  NaClVmmapMakeSorted(self);
  for (i = 0, nentries = self->nvalid; i < nentries; ++i) {
    (*fn)(state, self->vmentry[i]);
  }
}


/*
 * Linear search, from high addresses down.
 */
uintptr_t NaClVmmapFindSpace(struct NaClVmmap *self,
                             size_t num_pages) {
  size_t i;
  struct NaClVmmapEntry *vmep;
  uintptr_t end_page;
  uintptr_t start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;  /* end page from previous */
    start_page = self->vmentry[i]->page_num;   /* start page from current */
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * in user addresses, page 0 is always trampoline, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}
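
/*
 * Illustrative use of the return convention (hypothetical caller; not part
 * of the original code): a result of 0 means "no gap found".
 *
 *   uintptr_t page = NaClVmmapFindSpace(&map, num_pages);
 *   if (0 == page) {
 *     ... no usable gap, fail the request ...
 *   }
 *   ... otherwise the gap starts at (page << NACL_PAGESHIFT) ...
 */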


/*
 * Linear search, from high addresses down. For mmap, so the starting
 * address of the region found must be NACL_MAP_PAGESIZE aligned.
 *
 * For general mmap it is better to use as high an address as
 * possible, since the stack size for the main thread is currently
 * fixed, and the heap is the only thing that grows.
 */
uintptr_t NaClVmmapFindMapSpace(struct NaClVmmap *self,
                                size_t num_pages) {
  size_t i;
  struct NaClVmmapEntry *vmep;
  uintptr_t end_page;
  uintptr_t start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;  /* end page from previous */
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;  /* start page from current */
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {

      start_page = NaClTruncPageNumDownToMapMultiple(start_page);

      if (start_page <= end_page) {
        continue;
      }
    }
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * in user addresses, page 0 is always trampoline, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}


/*
 * Linear search, from uaddr up.
 */
uintptr_t NaClVmmapFindMapSpaceAboveHint(struct NaClVmmap *self,
                                         uintptr_t uaddr,
                                         size_t num_pages) {
  size_t nvalid;
  size_t i;
  struct NaClVmmapEntry *vmep;
  uintptr_t usr_page;
  uintptr_t start_page;
  uintptr_t end_page;

  NaClVmmapMakeSorted(self);

  usr_page = uaddr >> NACL_PAGESHIFT;
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  nvalid = self->nvalid;

  for (i = 1; i < nvalid; ++i) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {

      start_page = NaClTruncPageNumDownToMapMultiple(start_page);

      if (start_page <= end_page) {
        continue;
      }
    }
    if (end_page <= usr_page && usr_page < start_page) {
      end_page = usr_page;
    }
    if (usr_page <= end_page && (start_page - end_page) >= num_pages) {
      /* found a gap at or after uaddr that's big enough */
      return end_page;
    }
  }
  return 0;
}