1 /*
2  * Copyright (c) 2012 The Native Client Authors. All rights reserved.
3  * Use of this source code is governed by a BSD-style license that can be
4  * found in the LICENSE file.
5  */
6 
7 #include "native_client/tests/dynamic_code_loading/dynamic_load_test.h"
8 
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#include <nacl/nacl_dyncode.h>

#include "native_client/src/include/arm_sandbox.h"
#include "native_client/tests/dynamic_code_loading/dynamic_segment.h"
#include "native_client/tests/dynamic_code_loading/templates.h"
#include "native_client/tests/inbrowser_test_runner/test_runner.h"
21 
22 #if defined(__x86_64__)
23 /* On x86-64, template functions do not fit in 32-byte buffers */
24 #define BUF_SIZE 64
25 #elif defined(__i386__) || defined(__arm__)
26 #define BUF_SIZE 32
27 #else
28 #error "Unknown Platform"
29 #endif
30 
31 /*
32  * TODO(bsy): get this value from the toolchain.  Get the toolchain
33  * team to provide this value.
34  */
35 #define NUM_BUNDLES_FOR_HLT 3
36 
/*
 * Load code via the dyncode syscall, folding the wrapper's
 * errno-based failure reporting into a single return value: 0 on
 * success, -errno on failure.  This makes the assertions below
 * simpler to write.
 */
int nacl_load_code(void *dest, void *src, int size) {
  if (nacl_dyncode_create(dest, src, size) == 0)
    return 0;
  return -errno;
}
45 
46 char *next_addr = NULL;
47 
allocate_code_space(int pages)48 char *allocate_code_space(int pages) {
49   char *addr;
50   if (next_addr == NULL) {
51     next_addr = (char *) DYNAMIC_CODE_SEGMENT_START;
52   }
53   addr = next_addr;
54   next_addr += PAGE_SIZE * pages;
55   assert(next_addr < (char *) DYNAMIC_CODE_SEGMENT_END);
56   return addr;
57 }
58 
/*
 * Fill a buffer with repeated copies of a 32-bit value.
 * The buffer size must be a multiple of 4 and the pointer
 * 4-byte-aligned (both asserted).
 */
void fill_int32(uint8_t *data, size_t size, int32_t value) {
  /*
   * Use size_t for the index: the original "int i" compared against
   * "size / 4" mixed signed and unsigned and would truncate for very
   * large buffers.
   */
  size_t i;
  assert(size % 4 == 0);
  /*
   * All the archs we target support unaligned word read/write, but
   * check that the pointer is aligned anyway.
   */
  assert(((uintptr_t) data) % 4 == 0);
  for (i = 0; i < size / 4; i++)
    ((uint32_t *) data)[i] = value;
}
70 
/* Overwrite a buffer with architecture-appropriate NOP instructions. */
void fill_nops(uint8_t *data, size_t size) {
#if defined(__i386__) || defined(__x86_64__)
  /* 0x90 is the single-byte x86 NOP. */
  memset(data, 0x90, size);
#elif defined(__arm__)
  /* "MOV r0, r0" acts as a NOP on ARM. */
  fill_int32(data, size, 0xe1a00000);
#else
# error "Unknown arch"
#endif
}
80 
/* Overwrite a buffer with architecture-appropriate halt instructions. */
void fill_hlts(uint8_t *data, size_t size) {
#if defined(__i386__) || defined(__x86_64__)
  /* 0xf4 is the x86 HLT instruction. */
  memset(data, 0xf4, size);
#elif defined(__arm__)
  fill_int32(data, size, NACL_INSTR_ARM_HALT_FILL);
#else
# error "Unknown arch"
#endif
}
90 
91 /*
92  * Getting the assembler to pad our code fragments in templates.S is
93  * awkward because we have to output them in data mode, in which the
94  * assembler wants to output zeroes instead of NOPs for padding.
95  */
copy_and_pad_fragment(void * dest,int dest_size,const char * fragment_start,const char * fragment_end)96 void copy_and_pad_fragment(void *dest,
97                            int dest_size,
98                            const char *fragment_start,
99                            const char *fragment_end) {
100   int fragment_size = fragment_end - fragment_start;
101   assert(dest_size % NACL_BUNDLE_SIZE == 0);
102   assert(fragment_size <= dest_size);
103   fill_nops(dest, dest_size);
104   memcpy(dest, fragment_start, fragment_size);
105 }
106 
/* Check that we can load and run code. */
void test_loading_code(void) {
  void *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;
  int (*func)(void);

  /* Build a NOP-padded copy of the template function in a local buffer. */
  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);

  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);
  /* The loaded region must be an exact copy of what we supplied. */
  assert(memcmp(load_area, buf, sizeof(buf)) == 0);
  /*
   * Need double cast otherwise gcc complains with "ISO C forbids
   * conversion of object pointer to function pointer type
   * [-pedantic]".
   */
  func = (int (*)(void)) (uintptr_t) load_area;
  /* Executing the code proves it was actually made runnable. */
  rc = func();
  assert(rc == MARKER_OLD);
}
128 
/*
 * This is mostly the same as test_loading_code() except that we
 * repeat the test many times within the same page.  Unlike the other
 * tests, this will consistently fail on ARM if we do not flush the
 * instruction cache, so it reproduces the bug
 * http://code.google.com/p/nativeclient/issues/detail?id=699
 */
void test_stress(void) {
  void *load_area = allocate_code_space(1);
  uint8_t *dest;
  uint8_t *dest_max;
  uint8_t buf[BUF_SIZE];

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);

  /* Tile the whole dynamic-code page with back-to-back copies. */
  dest_max = (uint8_t *) load_area + DYNAMIC_CODE_PAGE_SIZE;
  for (dest = load_area; dest < dest_max; dest += sizeof(buf)) {
    int (*func)(void);
    int rc;

    rc = nacl_load_code(dest, buf, sizeof(buf));
    assert(rc == 0);
    /* Run each copy so a stale icache entry would be caught here. */
    func = (int (*)(void)) (uintptr_t) dest;
    rc = func();
    assert(rc == MARKER_OLD);
  }
}
156 
/*
 * The syscall may have to mmap() shared memory temporarily,
 * so there is some interaction with page size.
 * Check that we can load to non-page-aligned addresses.
 */
void test_loading_code_non_page_aligned(void) {
  char *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);

  /* First load at the page-aligned start of the area. */
  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);
  assert(memcmp(load_area, buf, sizeof(buf)) == 0);

  /* Second load one buffer further in, which is not page-aligned. */
  load_area += sizeof(buf);
  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);
  assert(memcmp(load_area, buf, sizeof(buf)) == 0);
}
178 
/*
 * Since there is an interaction with page size, we also test loading
 * a multi-page chunk of code.
 */
void test_loading_large_chunk(void) {
  char *load_area = allocate_code_space(2);
  int size = 0x20000;
  /* NOTE(review): 128k via alloca() assumes a generous stack — confirm. */
  uint8_t *data = alloca(size);
  int rc;

  fill_nops(data, size);
  rc = nacl_load_code(load_area, data, size);
  assert(rc == 0);
  assert(memcmp(load_area, data, size) == 0);
}
194 
test_loading_zero_size(void)195 void test_loading_zero_size(void) {
196   char *load_area = allocate_code_space(1);
197   int rc = nacl_load_code(load_area, &template_func, 0);
198   assert(rc == 0);
199 }
200 
201 /*
202  * In general, the failure tests don't check that loading fails for
203  * the reason we expect.  TODO(mseaborn): We could do this by
204  * comparing with expected log output.
205  */
206 
/* Loading a fragment that fails validation must be rejected with EINVAL. */
void test_fail_on_validation_error(void) {
  void *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;

  copy_and_pad_fragment(buf, sizeof(buf), &invalid_code, &invalid_code_end);

  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == -EINVAL);
}
217 
/*
 * A failed load must leave the target region reusable: a subsequent
 * valid load at the same address must succeed.
 */
void test_validation_error_does_not_leak(void) {
  void *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;

  copy_and_pad_fragment(buf, sizeof(buf), &invalid_code, &invalid_code_end);
  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == -EINVAL);

  /*
   * Make sure that the failed validation didn't claim the memory.
   * See: http://code.google.com/p/nativeclient/issues/detail?id=2566
   */
  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);
  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);
}
235 
/*
 * Destination address and size must both be bundle-aligned; anything
 * else is rejected with EINVAL.
 */
void test_fail_on_non_bundle_aligned_dest_addresses(void) {
  char *load_area = allocate_code_space(1);
  int rc;
  uint8_t nops[BUF_SIZE];

  fill_nops(nops, sizeof(nops));

  /* Test unaligned destination. */
  rc = nacl_load_code(load_area + 1, nops, NACL_BUNDLE_SIZE);
  assert(rc == -EINVAL);
  rc = nacl_load_code(load_area + 4, nops, NACL_BUNDLE_SIZE);
  assert(rc == -EINVAL);

  /* Test unaligned size. */
  rc = nacl_load_code(load_area, nops + 1, NACL_BUNDLE_SIZE - 1);
  assert(rc == -EINVAL);
  rc = nacl_load_code(load_area, nops + 4, NACL_BUNDLE_SIZE - 4);
  assert(rc == -EINVAL);

  /* Check that the code we're trying works otherwise. */
  rc = nacl_load_code(load_area, nops, NACL_BUNDLE_SIZE);
  assert(rc == 0);
}
259 
/*
 * In principle we could load into the initially-loaded executable's
 * code area, but at the moment we don't allow it.
 */
void test_fail_on_load_to_static_code_area(void) {
  /* hlts/hlts_end presumably bracket a chunk of the static code
   * segment (declared via templates.h) — confirm against templates. */
  int size = &hlts_end - &hlts;
  int rc = nacl_load_code(&hlts, &hlts, size);
  assert(rc == -EFAULT);
}
269 
270 uint8_t block_in_data_segment[64];
271 
test_fail_on_load_to_data_area(void)272 void test_fail_on_load_to_data_area(void) {
273   uint8_t *data;
274   int rc;
275 
276   fill_hlts(block_in_data_segment, sizeof(block_in_data_segment));
277 
278   /*
279    * Align to bundle size so that we don't fail for a reason we're not
280    * testing for.
281    */
282   data = block_in_data_segment;
283   while (((int) data) % NACL_BUNDLE_SIZE != 0)
284     data++;
285   rc = nacl_load_code(data, data, NACL_BUNDLE_SIZE);
286   assert(rc == -EFAULT);
287 }
288 
/*
 * Loading code twice at the same address must fail the second time:
 * dynamic code is write-once until explicitly deleted.
 */
void test_fail_on_overwrite(void) {
  void *load_area = allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);

  /* First load succeeds. */
  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == 0);

  copy_and_pad_fragment(buf, sizeof(buf), &template_func,
                                          &template_func_end);

  /* Second load at the same address is rejected. */
  rc = nacl_load_code(load_area, buf, sizeof(buf));
  assert(rc == -EINVAL);
}
305 
306 
/* Allowing mmap() to overwrite the dynamic code area would be unsafe. */
void test_fail_on_mmap_to_dyncode_area(void) {
  void *addr = allocate_code_space(1);
  size_t page_size = 0x10000; /* 64k NaCl page */
  void *result;
  int rc;

  assert((uintptr_t) addr % page_size == 0);
  /* A MAP_FIXED mapping over the dyncode area must be refused. */
  result = mmap(addr, page_size, PROT_READ | PROT_WRITE,
                MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(result == MAP_FAILED);
  assert(errno == EINVAL);

  /* Unmapping the dyncode area must be refused too. */
  rc = munmap(addr, page_size);
  assert(rc == -1);
  assert(errno == EINVAL);

  /* TODO(mseaborn): Test mprotect() once NaCl provides it. */
}
326 
/*
 * Load two adjacent fragments that (judging by their names) branch
 * forwards/backwards outside their own chunk; both loads should
 * validate successfully.
 */
void test_branches_outside_chunk(void) {
  char *load_area = allocate_code_space(1);
  int rc;
  int size = &branch_forwards_end - &branch_forwards;
  /* Expect one or two bundles depending on architecture. */
  assert(size == 16 || size == 32);
  assert(&branch_backwards_end - &branch_backwards == size);

  rc = nacl_load_code(load_area, &branch_forwards, size);
  assert(rc == 0);
  rc = nacl_load_code(load_area + size, &branch_backwards, size);
  assert(rc == 0);
}
339 
/* Loads at or near the very end of the code region must be rejected. */
void test_end_of_code_region(void) {
  int rc;
  void *dest;
  uint8_t data[BUF_SIZE];
  fill_nops(data, sizeof(data));

  /*
   * This tries to load into the data segment, which is definitely not
   * allowed.
   */
  dest = (uint8_t *) DYNAMIC_CODE_SEGMENT_END;
  rc = nacl_load_code(dest, data, sizeof(data));
  assert(rc == -EFAULT);

  /*
   * This tries to load into the last bundle of the code region, which
   * sel_ldr disallows just in case there is some CPU bug in which the
   * CPU fails to check for running off the end of an x86 code
   * segment.  This is applied to other architectures for
   * consistency.
   */
  dest = (uint8_t *) DYNAMIC_CODE_SEGMENT_END - sizeof(data);
  rc = nacl_load_code(dest, data, sizeof(data));
  assert(rc == -EFAULT);

  /* One chunk earlier is still inside the allowed region. */
  dest = (uint8_t *) DYNAMIC_CODE_SEGMENT_END - sizeof(data) * 2;
  rc = nacl_load_code(dest, data, sizeof(data));
  assert(rc == 0);
}
369 
/*
 * Code containing a HLT-filled bundle is accepted (in every bundle
 * position), but the write-once rule still applies.
 */
void test_hlt_filled_bundle(void) {
  uint8_t bad_code[NUM_BUNDLES_FOR_HLT * NACL_BUNDLE_SIZE];
  void *load_area;
  int ix;

  /* Move the HLT-filled bundle through each position in the chunk. */
  for (ix = 0; ix < NUM_BUNDLES_FOR_HLT; ++ix) {
    fill_nops(bad_code, sizeof bad_code);
    fill_hlts(bad_code + ix * NACL_BUNDLE_SIZE, NACL_BUNDLE_SIZE);

    load_area = allocate_code_space(1);
    /* hlts are now allowed */
    assert(0 == nacl_load_code(load_area, bad_code, sizeof bad_code));
    /* but not twice... */
    assert(0 != nacl_load_code(load_area, bad_code, sizeof bad_code));
  }
}
386 
387 /*
388  * If threading tests have run before in this process, nacl_dyncode_delete will
389  * return EAGAIN if the threads have not finished trusted-side cleanup yet.
390  * (this is related to the
391  * https://code.google.com/p/nativeclient/issues/detail?id=1028).
392  * If we have joined the thread already, then we just need to wait until it
393  * finishes untrusted-side cleanup and calls IRT thread_exit. Doing this allows
394  * the tests to run in any order. Only the first deletion in a non-threaded test
395  * needs to do this.
396  */
/* Delete dynamic code, retrying while the syscall reports EAGAIN. */
int dyncode_delete_with_retry(void *dest, size_t size) {
  int result;
  for (;;) {
    result = nacl_dyncode_delete(dest, size);
    /* Stop on success or on any error other than EAGAIN. */
    if (result == 0 || errno != EAGAIN)
      break;
  }
  return result;
}
404 
/* Check that we can dynamically delete code. */
void test_deleting_code(void) {
  uint8_t *load_area = (uint8_t *) allocate_code_space(1);
  uint8_t buf[BUF_SIZE];
  int rc;
  int (*func)(void);

  copy_and_pad_fragment(buf, sizeof(buf), &template_func, &template_func_end);
  rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
  assert(rc == 0);
  /* Sanity check: the code runs before we delete it. */
  func = (int (*)(void)) (uintptr_t) load_area;
  rc = func();
  assert(rc == MARKER_OLD);

  rc = dyncode_delete_with_retry(load_area, sizeof(buf));
  assert(rc == 0);
  /* Deletion rewrites the region, so the first byte must differ now. */
  assert(load_area[0] != buf[0]);

  /* Attempting to unload the code again should fail. */
  rc = nacl_dyncode_delete(load_area, sizeof(buf));
  assert(rc == -1);
  assert(errno == EFAULT);

  /*
   * We should be able to load new code at the same address.  This
   * assumes that no other threads are running, otherwise this request
   * can be rejected.
   *
   * This fails under ARM QEMU.  QEMU will flush its instruction
   * translation cache based on writes to the same virtual address,
   * but it ignores our explicit cache flush system calls.  Valgrind
   * has a similar problem, except that there is no cache flush system
   * call on x86.
   */
  if (getenv("UNDER_QEMU_ARM") != NULL ||
      getenv("RUNNING_ON_VALGRIND") != NULL) {
    printf("Skipping loading new code under emulator\n");
  } else {
    printf("Testing loading new code...\n");
    copy_and_pad_fragment(buf, sizeof(buf), &template_func_replacement,
                          &template_func_replacement_end);
    rc = nacl_dyncode_create(load_area, buf, sizeof(buf));
    assert(rc == 0);
    func = (int (*)(void)) (uintptr_t) load_area;
    rc = func();
    assert(rc == MARKER_NEW);

    rc = nacl_dyncode_delete(load_area, sizeof(buf));
    assert(rc == 0);
    assert(load_area[0] != buf[0]);
  }
}
457 
/* Deleting an empty range is a trivial success. */
void test_deleting_zero_size(void) {
  uint8_t *area = (uint8_t *) allocate_code_space(1);
  assert(nacl_dyncode_delete(area, 0) == 0);
}
464 
/*
 * Deletion requests that do not exactly match a previously loaded
 * chunk must fail with EFAULT.
 */
void test_deleting_code_from_invalid_ranges(void) {
  /* Offset by 32 so that "load_addr - NACL_BUNDLE_SIZE" below is
   * still inside our page. */
  uint8_t *load_addr = (uint8_t *) allocate_code_space(1) + 32;
  uint8_t buf[64];
  int rc;

  /* We specifically want to test using multiple instruction bundles. */
  assert(sizeof(buf) / NACL_BUNDLE_SIZE >= 2);
  assert(sizeof(buf) % NACL_BUNDLE_SIZE == 0);

  /* Deleting code that was never loaded fails. */
  rc = dyncode_delete_with_retry(load_addr, sizeof(buf));
  assert(rc == -1);
  assert(errno == EFAULT);

  fill_hlts(buf, sizeof(buf));
  rc = nacl_dyncode_create(load_addr, buf, sizeof(buf));
  assert(rc == 0);

  /* Overlapping before. */
  rc = nacl_dyncode_delete(load_addr - NACL_BUNDLE_SIZE,
                           sizeof(buf) + NACL_BUNDLE_SIZE);
  assert(rc == -1);
  assert(errno == EFAULT);
  /* Overlapping after. */
  rc = nacl_dyncode_delete(load_addr, sizeof(buf) + NACL_BUNDLE_SIZE);
  assert(rc == -1);
  assert(errno == EFAULT);
  /* Missing the end of the loaded chunk. */
  rc = nacl_dyncode_delete(load_addr, sizeof(buf) - NACL_BUNDLE_SIZE);
  assert(rc == -1);
  assert(errno == EFAULT);
  /* Missing the start of the loaded chunk. */
  rc = nacl_dyncode_delete(load_addr + NACL_BUNDLE_SIZE,
                           sizeof(buf) - NACL_BUNDLE_SIZE);
  assert(rc == -1);
  assert(errno == EFAULT);
  /* The correct range should work, though. */
  rc = nacl_dyncode_delete(load_addr, sizeof(buf));
  assert(rc == 0);
}
504 
/*
 * Assert that every halt-instruction-sized slot in [data, data+size)
 * contains the architecture's halt instruction.
 */
void check_region_is_filled_with_hlts(const char *data, size_t size) {
#if defined(__i386__) || defined(__x86_64__)
  uint8_t halts = 0xf4; /* HLT */
#elif defined(__arm__)
  uint32_t halts = NACL_INSTR_ARM_HALT_FILL;
#else
# error "Unknown arch"
#endif
  const char *cursor = data;
  const char *end = data + size;
  while (cursor < end) {
    assert(memcmp(cursor, &halts, sizeof(halts)) == 0);
    cursor += sizeof(halts);
  }
}
518 
/*
 * Check that regions surrounding the region we load code into are
 * correctly filled with halt instructions.  Loading code causes the
 * pages to become allocated, and unused parts of these pages should
 * be filled with halts.
 */
void test_demand_alloc_surrounding_hlt_filling(void) {
  int pad_size = 0x4000; /* This must be less than one 64k page. */
  int code_size = 0x28000;
  int total_size = pad_size * 2 + code_size;
  assert(total_size % DYNAMIC_CODE_PAGE_SIZE == 0);
  char *load_area = allocate_code_space(total_size / DYNAMIC_CODE_PAGE_SIZE);
  uint8_t *data = alloca(code_size);
  int rc;

  fill_nops(data, code_size);
  /* Load into the middle, leaving pad_size bytes free on each side. */
  rc = nacl_load_code(load_area + pad_size, data, code_size);
  assert(rc == 0);
  /* Both pads must have been halt-filled; the middle is our code. */
  check_region_is_filled_with_hlts(load_area, pad_size);
  assert(memcmp(load_area + pad_size, data, code_size) == 0);
  check_region_is_filled_with_hlts(load_area + pad_size + code_size, pad_size);
}
541 
/*
 * Check that dyncode_create() works on a set of pages when a strict
 * subset of those pages were allocated by a previous dyncode_create()
 * call.  This provides some coverage of the coalescing of mprotect()
 * calls that dyncode_create() does.
 */
void test_demand_alloc_of_fragmented_pages(void) {
  int smaller_size = 2 * DYNAMIC_CODE_PAGE_SIZE;
  int smaller_size_load_offset = 2 * DYNAMIC_CODE_PAGE_SIZE;
  int larger_size = 6 * DYNAMIC_CODE_PAGE_SIZE;
  char *load_area = allocate_code_space(6);
  uint8_t *data = alloca(larger_size);
  int rc;

  fill_nops(data, larger_size);

  /* Cause pages 2 and 3 to be allocated. */
  rc = nacl_load_code(load_area + smaller_size_load_offset, data, smaller_size);
  assert(rc == 0);

  /* Delete the code but leave the pages allocated. */
  rc = dyncode_delete_with_retry(load_area + smaller_size_load_offset,
                                 smaller_size);
  assert(rc == 0);

  /* Cause pages 0, 1, 4 and 5 to be allocated as well. */
  rc = nacl_load_code(load_area, data, larger_size);
  assert(rc == 0);
}
570 
/* Announce a test by name on stdout, then invoke it. */
void run_test(const char *test_name, void (*test_func)(void)) {
  printf("Running %s...\n", test_name);
  (*test_func)();
}
575 
576 #define RUN_TEST(test_func) (run_test(#test_func, test_func))
577 
/* Run every dyncode test, in an order chosen to cover edge cases. */
int TestMain(void) {
  /*
   * This should come first, so that we test loading code into the first page.
   * See http://code.google.com/p/nativeclient/issues/detail?id=1143
   */
  RUN_TEST(test_loading_code);

  RUN_TEST(test_loading_code_non_page_aligned);
  RUN_TEST(test_loading_large_chunk);
  RUN_TEST(test_loading_zero_size);
  RUN_TEST(test_fail_on_validation_error);
  RUN_TEST(test_validation_error_does_not_leak);
  RUN_TEST(test_fail_on_non_bundle_aligned_dest_addresses);
  RUN_TEST(test_fail_on_load_to_static_code_area);
  RUN_TEST(test_fail_on_load_to_data_area);
  RUN_TEST(test_fail_on_overwrite);
  RUN_TEST(test_fail_on_mmap_to_dyncode_area);
  RUN_TEST(test_branches_outside_chunk);
  RUN_TEST(test_end_of_code_region);
  RUN_TEST(test_hlt_filled_bundle);
  /*
   * dyncode_delete() tests have been broken inside Chromium by the
   * switch to the new Chrome-IPC-based PPAPI proxy.  The new proxy
   * uses a background thread which does not "check in" in a way to
   * satisfy dyncode_delete()'s requirements.
   * See https://code.google.com/p/nativeclient/issues/detail?id=3199
   * TODO(mseaborn): Re-enable these again when they work.
   */
  if (!TestRunningInBrowser()) {
    RUN_TEST(test_deleting_code);
    RUN_TEST(test_deleting_zero_size);
    RUN_TEST(test_deleting_code_from_invalid_ranges);
    /* test_threaded_delete is declared in dynamic_load_test.h;
     * presumably defined in a sibling source file — confirm. */
    RUN_TEST(test_threaded_delete);
  }
  RUN_TEST(test_demand_alloc_surrounding_hlt_filling);
  /*
   * This test uses dyncode_delete() which is broken inside Chromium
   * with the new PPAPI proxy.  See above.  TODO(mseaborn): Re-enable.
   */
  if (!TestRunningInBrowser()) {
    RUN_TEST(test_demand_alloc_of_fragmented_pages);
  }
  RUN_TEST(test_stress);
  /*
   * TODO(ncbray) reenable when kernel bug is fixed.
   * http://code.google.com/p/nativeclient/issues/detail?id=2678
   */
#ifndef __arm__
  /*
   * TODO(ncbray) reenable when cause of flake is understood.
   * http://code.google.com/p/chromium/issues/detail?id=120355
   */
  if (!TestRunningInBrowser())
    RUN_TEST(test_threaded_loads);
#endif

  /* Test again to make sure we didn't run out of space. */
  RUN_TEST(test_loading_code);

  return 0;
}
639 
/* Process entry point: run TestMain under the in-browser test harness. */
int main(void) {
  return RunTests(TestMain);
}
643