1 /* i386-linux.elf-main.c -- stub loader for Linux x86 ELF executable
2
3 This file is part of the UPX executable compressor.
4
5 Copyright (C) 1996-2020 Markus Franz Xaver Johannes Oberhumer
6 Copyright (C) 1996-2020 Laszlo Molnar
7 Copyright (C) 2000-2020 John F. Reiser
8 All Rights Reserved.
9
10 UPX and the UCL library are free software; you can redistribute them
11 and/or modify them under the terms of the GNU General Public License as
12 published by the Free Software Foundation; either version 2 of
13 the License, or (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; see the file COPYING.
22 If not, write to the Free Software Foundation, Inc.,
23 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24
25 Markus F.X.J. Oberhumer Laszlo Molnar
26 <markus@oberhumer.com> <ezerotven+github@gmail.com>
27
28 John F. Reiser
29 <jreiser@users.sourceforge.net>
30 */
31
32 #ifndef DEBUG /*{*/
33 #define DEBUG 0
34 #endif /*}*/
35
36 #include "include/linux.h"
37 void *mmap(void *, size_t, int, int, int, off_t);
38 #if defined(__i386__) || defined(__mips__) || defined(__powerpc__) //{
39 # define mmap_privanon(addr,len,prot,flgs) mmap((addr),(len),(prot), \
40 MAP_PRIVATE|MAP_ANONYMOUS|(flgs),-1,0)
41 #else //}{
42 void *mmap_privanon(void *, size_t, int, int);
43 #endif //}
44 ssize_t write(int, void const *, size_t);
45
46
47 /*************************************************************************
48 // configuration section
49 **************************************************************************/
50
// In order to make it much easier to move this code at runtime and execute
// it at an address different from its load address: there must be no
// static data, and no string constants.
54
#if !DEBUG //{
#define DPRINTF(fmt, args...) /*empty*/
#else //}{
// DPRINTF is defined as an expression using "({ ... })"
// so that DPRINTF can be invoked inside an expression,
// and then followed by a comma to ignore the return value.
// The only complication is that percent and backslash
// must be doubled in the format string, because the format
// string is processed twice: once at compile-time by 'asm'
// to produce the assembled value, and once at runtime to use it.
//
// Each arch variant embeds the format string in the instruction stream and
// recovers its address at run time (bl/call/mov pc/bal + label trick), so
// the stub keeps its "no static data, no string constants" property and
// stays position-independent.
#if defined(__powerpc__) //{
#define DPRINTF(fmt, args...) ({ \
    char const *r_fmt; \
    asm("bl 0f; .string \"" fmt "\"; .balign 4; 0: mflr %0" \
/*out*/ : "=r"(r_fmt) \
/* in*/ : \
/*und*/ : "lr"); \
    dprintf(r_fmt, args); \
})
#elif defined(__x86_64) || defined(__i386__) //}{
#define DPRINTF(fmt, args...) ({ \
    char const *r_fmt; \
    asm("call 0f; .asciz \"" fmt "\"; 0: pop %0" \
/*out*/ : "=r"(r_fmt) ); \
    dprintf(r_fmt, args); \
})
#elif defined(__arm__) /*}{*/
#define DPRINTF(fmt, args...) ({ \
    char const *r_fmt; \
    asm("mov %0,pc; b 0f; \
        .asciz \"" fmt "\"; .balign 4; \
0: " \
/*out*/ : "=r"(r_fmt) ); \
    dprintf(r_fmt, args); \
})
#elif defined(__mips__) /*}{*/
#define DPRINTF(fmt, args...) ({ \
    char const *r_fmt; \
    asm(".set noreorder; bal L%=j; move %0,$31; .set reorder; \
        .asciz \"" fmt "\"; .balign 4; \
L%=j: " \
/*out*/ : "=r"(r_fmt) \
/* in*/ : \
/*und*/ : "ra"); \
    dprintf(r_fmt, args); \
})
#endif //}
102
103 static int dprintf(char const *fmt, ...); // forward
104
#ifdef __arm__ /*{*/
extern unsigned div10(unsigned);
#else /*}{*/
// Unsigned divide-by-ten; out-of-line assembly on ARM (see above), plain
// C division everywhere else.  Used by the decimal formatters below.
static unsigned
div10(unsigned v)
{
    unsigned const quotient = v / 10u;
    return quotient;
}
#endif /*}*/
114
// Format the unsigned value x in decimal into ptr starting at offset n.
// Recursion emits the most-significant digits first; returns the offset
// just past the last digit written.
static int
unsimal(unsigned x, char *ptr, int n)
{
    if (x >= 10) {
        unsigned const high = div10(x);  // all digits except the last
        n = unsimal(high, ptr, n);
        x -= 10 * high;                  // x is now the final digit
    }
    ptr[n] = (char)('0' + x);
    return n + 1;
}
126
// Format the signed value x in decimal into ptr starting at offset n;
// returns the offset just past the last character written.
// Negation is done in unsigned arithmetic: the original 'x = -x' is
// undefined behavior when x == INT_MIN (signed overflow), whereas
// 0u - (unsigned)x yields the correct magnitude for every input.
static int
decimal(int x, char *ptr, int n)
{
    unsigned magnitude = (unsigned)x;
    if (x < 0) {
        ptr[n++] = '-';
        magnitude = 0u - magnitude;  // safe for INT_MIN
    }
    return unsimal(magnitude, ptr, n);
}
136
// Format x in lowercase hexadecimal (no leading zeros, at least one digit)
// into ptr starting at offset n; returns the offset just past the output.
static int
heximal(unsigned long x, char *ptr, int n)
{
    unsigned shift = 4u * (-1 + 2*sizeof(unsigned long));
    // Skip leading zero nibbles, but keep the last nibble even when x==0.
    while (shift != 0 && 0 == (0xful & (x >> shift))) {
        shift -= 4;
    }
    for (;;) {
        unsigned const digit = 0xful & (x >> shift);
        ptr[n++] = (char)(digit + ((digit < 10) ? '0' : ('a' - 10)));
        if (0 == shift) {
            break;
        }
        shift -= 4;
    }
    return n;
}
151
#define va_arg      __builtin_va_arg
#define va_end      __builtin_va_end
#define va_list     __builtin_va_list
#define va_start    __builtin_va_start

// Minimal printf-to-stderr for debugging the stub.  Handles only
// %u %d %p %x (no 'long' length modifiers); any other conversion echoes
// its letter.  Output goes via raw write(2); returns total bytes written.
// Runs of literal characters are batched into a single write().
static int
dprintf(char const *fmt, ...)
{
    int n= 0;
    char const *literal = 0;  // NULL; when set: 1 beyond start of literal run
    char buf[24];  // ~0ull == 18446744073709551615 ==> 20 chars
    va_list va; va_start(va, fmt);
    for (;;) {
        char c = *fmt++;
        if (!c) { // end of fmt
            if (literal) {
                goto finish;  // flush trailing literal run, then stop
            }
            break;  // goto done
        }
        if ('%'!=c) {
            if (!literal) {
                literal = fmt;  // 1 beyond start of literal
            }
            continue;
        }
        // '%' == c
        if (literal) {
finish:
            // literal points 1 past the run's first char, so the run is
            // [literal-1, fmt-1) and its length is (fmt - literal).
            n += write(2, -1+ literal, fmt - literal);
            literal = 0;  // NULL
            if (!c) { // fmt already ended
                break;  // goto done
            }
        }
        switch (c= *fmt++) { // deficiency: does not handle _long_
        default: { // un-implemented conversion
            n+= write(2, -1+ fmt, 1);
        } break;
        case 0: { // fmt ends with "%\0" ==> ignore
            goto done;
        } break;
        case 'u': {
            n+= write(2, buf, unsimal(va_arg(va, unsigned), buf, 0));
        } break;
        case 'd': {
            n+= write(2, buf, decimal(va_arg(va, int), buf, 0));
        } break;
        case 'p': {
            buf[0] = '0';
            buf[1] = 'x';
            n+= write(2, buf, heximal((unsigned long)va_arg(va, void *), buf, 2));
        } break;
        case 'x': {
            buf[0] = '0';
            buf[1] = 'x';
            n+= write(2, buf, heximal(va_arg(va, int), buf, 2));
        } break;
        } // 'switch'
    }
done:
    va_end(va);
    return n;
}
216 #endif /*}*/
217
// Scratch-buffer size used when reading ELF headers (own file, PT_INTERP).
#define MAX_ELF_HDR 512  // Elf32_Ehdr + n*Elf32_Phdr must fit in this
219
220
221 /*************************************************************************
222 // "file" util
223 **************************************************************************/
224
// A sliding window over a byte buffer: 'buf' advances and 'size' shrinks
// as bytes are consumed (xread) or produced (unpackExtent).
typedef struct {
    size_t size;  // must be first to match size[0] uncompressed size
    char *buf;
} Extent;
229
230 static void
231 #if (ACC_CC_GNUC >= 0x030300) && defined(__i386__) /*{*/
232 __attribute__((__noinline__, __used__, regparm(3), stdcall))
233 #endif /*}*/
xread(Extent * x,char * buf,size_t count)234 xread(Extent *x, char *buf, size_t count)
235 {
236 char *p=x->buf, *q=buf;
237 size_t j;
238 DPRINTF("xread %%p(%%x %%p) %%p %%x\\n", x, x->size, x->buf, buf, count);
239 if (x->size < count) {
240 exit(127);
241 }
242 for (j = count; 0!=j--; ++p, ++q) {
243 *q = *p;
244 }
245 x->buf += count;
246 x->size -= count;
247 }
248
249
250 /*************************************************************************
251 // util
252 **************************************************************************/
253
#if !DEBUG //{ save space
// Release build: every failure path jumps to one shared 'error:' label
// (planted by ERR_LAB inside an enclosing function) which exits 127.
#define ERR_LAB error: exit(127);
#define err_exit(a) goto error
#else //}{ save debugging time
#define ERR_LAB /*empty*/

extern void my_bkpt(int, ...);

// Debug build: print the numeric error code before exiting; on PowerPC
// also trap into the debugger via my_bkpt.
static void __attribute__ ((__noreturn__))
err_exit(int a)
{
    DPRINTF("err_exit %%x\\n", a);
    (void)a;  // debugging convenience
#if defined(__powerpc__) //{
    my_bkpt(a);
#endif //}
    exit(127);
}
#endif //}
273
// Thin wrapper around brk(); called from do_xmap to set the program break
// past the PT_LOAD hull for a compressed ET_EXEC.
static void *
do_brk(void *addr)
{
    return brk(addr);
}
279
280
281 /*************************************************************************
282 // UPX & NRV stuff
283 **************************************************************************/
284
// Run-time unfilter routine: undoes the compile-time filter (e.g. the
// call-trick transform) that was applied to executable text before
// compression.
typedef void f_unfilter(
    nrv_byte *,  // also addvalue
    nrv_uint,
    unsigned cto8, // junk in high 24 bits
    unsigned ftid
);
// Decompressor entry: (src, src_len, dst, &dst_len, method); returns 0 on
// success (see the j != 0 check in unpackExtent).
typedef int f_expand(
    const nrv_byte *, nrv_uint,
    nrv_byte *, size_t *, unsigned );
294
295
// Decompress (or copy) a sequence of b_info-framed blocks from xi into xo
// until xo is exactly filled.  Each block is a b_info header (sz_unc,
// sz_cpr, method/filter bytes) followed by sz_cpr payload bytes; a header
// with sz_unc==0 is the end marker and must carry the UPX magic in sz_cpr.
static void
unpackExtent(
    Extent *const xi,  // input
    Extent *const xo,  // output
    f_expand *const f_exp,
    f_unfilter *f_unf
)
{
    DPRINTF("unpackExtent in=%%p(%%x %%p)  out=%%p(%%x %%p)  %%p %%p\\n",
        xi, xi->size, xi->buf, xo, xo->size, xo->buf, f_exp, f_unf);
    while (xo->size) {
        struct b_info h;
        //   Note: if h.sz_unc == h.sz_cpr then the block was not
        //   compressible and is stored in its uncompressed form.

        // Read and check block sizes.
        xread(xi, (char *)&h, sizeof(h));
        if (h.sz_unc == 0) {                     // uncompressed size 0 -> EOF
            if (h.sz_cpr != UPX_MAGIC_LE32)      // h.sz_cpr must be h->magic
                err_exit(2);
            if (xi->size != 0)                 // all bytes must be written
                err_exit(3);
            break;
        }
        if (h.sz_cpr <= 0) {
            err_exit(4);
ERR_LAB
        }
        if (h.sz_cpr > h.sz_unc
        ||  h.sz_unc > xo->size ) {
            // Corrupt header: block claims to expand, or would overflow xo.
            DPRINTF("sz_cpr=%%x  sz_unc=%%x  xo->size=%%x\\n", h.sz_cpr, h.sz_unc, xo->size);
            err_exit(5);
        }
        // Now we have:
        //   assert(h.sz_cpr <= h.sz_unc);
        //   assert(h.sz_unc > 0 && h.sz_unc <= blocksize);
        //   assert(h.sz_cpr > 0 && h.sz_cpr <= blocksize);

        if (h.sz_cpr < h.sz_unc) { // Decompress block
            size_t out_len = h.sz_unc;  // EOF for lzma
            int const j = (*f_exp)((unsigned char *)xi->buf, h.sz_cpr,
                (unsigned char *)xo->buf, &out_len,
#if defined(__i386__) //{
                // i386 decompressor ABI takes the method/filter bytes as one
                // packed word -- assumes b_method..b_cto8 are contiguous.
                *(int *)(void *)&h.b_method
#else
                h.b_method
#endif
                );
            if (j != 0 || out_len != (nrv_uint)h.sz_unc)
                err_exit(7);
            // Skip Ehdr+Phdrs: separate 1st block, not filtered
            if (h.b_ftid!=0 && f_unf  // have filter
            &&  ((512 < out_len)  // this block is longer than Ehdr+Phdrs
              || (xo->size==(unsigned)h.sz_unc) )  // block is last in Extent
            ) {
                (*f_unf)((unsigned char *)xo->buf, out_len, h.b_cto8, h.b_ftid);
            }
            xi->buf  += h.sz_cpr;
            xi->size -= h.sz_cpr;
        }
        else { // copy literal block
            xread(xi, xo->buf, h.sz_cpr);
        }
        xo->buf  += h.sz_unc;
        xo->size -= h.sz_unc;
    }
}
363
364
#if defined(__i386__) /*{*/
// Create (or find) an escape hatch to use when munmapping ourselves the stub.
// Called by do_xmap to create it; remembered in AT_NULL.d_val
// The hatch is 4 bytes of machine code ("int $0x80; popa; ret") placed in an
// executable page: first choice is the fragment after .text, second the
// padding inside the Elf32_Ehdr e_ident, last resort a freshly mmapped page.
static void *
make_hatch_x86(Elf32_Phdr const *const phdr, ptrdiff_t reloc)
{
    unsigned xprot = 0;  // set when we allocate a page that needs mprotect
    unsigned *hatch = 0;
    DPRINTF("make_hatch %%p %%x %%x\\n",phdr,reloc,0);
    if (phdr->p_type==PT_LOAD && phdr->p_flags & PF_X) {
        // The format of the 'if' is
        //   if ( ( (hatch = loc1), test_loc1 )
        //   ||   ( (hatch = loc2), test_loc2 ) ) {
        //      action
        //   }
        // which uses the comma to save bytes when test_locj involves locj
        // and the action is the same when either test succeeds.

        if (
        // Try page fragmentation just beyond .text .
            ( (hatch = (void *)(phdr->p_memsz + phdr->p_vaddr + reloc)),
                ( phdr->p_memsz==phdr->p_filesz  // don't pollute potential .bss
                &&  4<=(~PAGE_MASK & -(int)hatch) ) ) // space left on page
        // Try Elf32_Ehdr.e_ident[12..15] .  warning: 'const' cast away
        ||  ( (hatch = (void *)(&((Elf32_Ehdr *)phdr->p_vaddr + reloc)->e_ident[12])),
                (phdr->p_offset==0) )
        // Allocate and use a new page.
        ||  (  xprot = 1, hatch = mmap(0, PAGE_SIZE, PROT_WRITE|PROT_READ,
                MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) )
        ) {
            // Omitting 'const' saves repeated literal in gcc.
            unsigned /*const*/ escape = 0xc36180cd;  // "int $0x80; popa; ret"
            // Don't store into read-only page if value is already there.
            if (* (volatile unsigned*) hatch != escape) {
                * hatch  = escape;
            }
            if (xprot) {
                mprotect(hatch, 1*sizeof(unsigned), PROT_EXEC|PROT_READ);
            }
            DPRINTF(" hatch at %%p\\n", hatch);
        }
        else {
            hatch = 0;
        }
    }
    return hatch;
}
#elif defined(__arm__) /*}{*/
extern unsigned get_sys_munmap(void);

// ARM escape hatch: two words (the munmap syscall instruction fetched from
// get_sys_munmap, then "mov pc,lr" to return).  Placement strategy mirrors
// make_hatch_x86: text fragment, then e_ident[8..15], then a fresh page.
static void *
make_hatch_arm(
    Elf32_Phdr const *const phdr,
    ptrdiff_t reloc
)
{
    unsigned const sys_munmap = get_sys_munmap();
    unsigned xprot = 0;  // set when we allocate a page that needs mprotect
    unsigned *hatch = 0;
    DPRINTF("make_hatch %%p %%x %%x\\n",phdr,reloc,sys_munmap);
    if (phdr->p_type==PT_LOAD && phdr->p_flags & PF_X) {
        // The format of the 'if' is
        //   if ( ( (hatch = loc1), test_loc1 )
        //   ||   ( (hatch = loc2), test_loc2 ) ) {
        //      action
        //   }
        // which uses the comma to save bytes when test_locj involves locj
        // and the action is the same when either test succeeds.

        if (
        // Try page fragmentation just beyond .text .
            // (rounded up to 4-byte alignment for the two instruction words)
            ( (hatch = (void *)(~3u & (3+ phdr->p_memsz + phdr->p_vaddr + reloc))),
                ( phdr->p_memsz==phdr->p_filesz  // don't pollute potential .bss
                &&  (2*4)<=(~PAGE_MASK & -(int)hatch) ) ) // space left on page
        // Try Elf32_Ehdr.e_ident[8..15] .  warning: 'const' cast away
        ||  ( (hatch = (void *)(&((Elf32_Ehdr *)phdr->p_vaddr + reloc)->e_ident[8])),
                (phdr->p_offset==0) )
        // Allocate and use a new page.
        ||  (  xprot = 1, hatch = mmap(0, PAGE_SIZE, PROT_WRITE|PROT_READ,
                MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) )
        ) {
            hatch[0]= sys_munmap;  // syscall __NR_unmap
            hatch[1] = 0xe1a0f00e;  // mov pc,lr
            __clear_cache(&hatch[0], &hatch[2]);  // ? needed before mprotect()
            if (xprot) {
                mprotect(hatch, 2*sizeof(unsigned), PROT_EXEC|PROT_READ);
            }
        }
        else {
            hatch = 0;
        }
    }
    return hatch;
}
#elif defined(__mips__) /*}{*/
// MIPS escape hatch: three words -- "syscall; jr $30; nop" (delay slot).
// frag_mask is ~page_mask, passed in because the page size is dynamic.
// No e_ident fallback here: only the text fragment or a fresh page.
static void *
make_hatch_mips(
    Elf32_Phdr const *const phdr,
    ptrdiff_t reloc,
    unsigned const frag_mask)
{
    unsigned xprot = 0;  // set when we allocate a page that needs mprotect
    unsigned *hatch = 0;
    DPRINTF("make_hatch %%p %%x %%x\\n",phdr,reloc,frag_mask);
    if (phdr->p_type==PT_LOAD && phdr->p_flags & PF_X) {
        if (
        // Try page fragmentation just beyond .text .
            ( (hatch = (void *)(phdr->p_memsz + phdr->p_vaddr + reloc)),
                ( phdr->p_memsz==phdr->p_filesz  // don't pollute potential .bss
                &&  (3*4)<=(frag_mask & -(int)hatch) ) ) // space left on page
        // Allocate and use a new page.
        ||  (  xprot = 1, hatch = mmap(0, PAGE_SIZE, PROT_WRITE|PROT_READ,
                MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) )
        ) {
            hatch[0]= 0x0000000c;  // syscall
#define RS(r) ((037&(r))<<21)
#define JR 010
            hatch[1] = RS(30)|JR;  // jr $30  # s8
            hatch[2] = 0x00000000;  //   nop
            if (xprot) {
                mprotect(hatch, 3*sizeof(unsigned), PROT_EXEC|PROT_READ);
            }
        }
        else {
            hatch = 0;
        }
    }
    return hatch;
}
#elif defined(__powerpc__) /*}{*/
// PowerPC escape hatch: two words -- "sc; blr".  Placement mirrors the
// other arches: text fragment, then e_ident[8..15], then a fresh page.
static void *
make_hatch_ppc32(
    Elf32_Phdr const *const phdr,
    ptrdiff_t reloc,
    unsigned const frag_mask)
{
    unsigned xprot = 0;  // set when we allocate a page that needs mprotect
    unsigned *hatch = 0;
    DPRINTF("make_hatch %%p %%x %%x\\n",phdr,reloc,frag_mask);
    if (phdr->p_type==PT_LOAD && phdr->p_flags & PF_X) {
        if (
        // Try page fragmentation just beyond .text .
            ( (hatch = (void *)(phdr->p_memsz + phdr->p_vaddr + reloc)),
                ( phdr->p_memsz==phdr->p_filesz  // don't pollute potential .bss
                &&  (2*4)<=(frag_mask & -(int)hatch) ) ) // space left on page
        // Try Elf32_Ehdr.e_ident[8..15] .  warning: 'const' cast away
        ||  ( (hatch = (void *)(&((Elf32_Ehdr *)phdr->p_vaddr + reloc)->e_ident[8])),
                (phdr->p_offset==0) )
        // Allocate and use a new page.
        ||  (  xprot = 1, hatch = mmap(0, PAGE_SIZE, PROT_WRITE|PROT_READ,
                MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) )
        ) {
            hatch[0] = 0x44000002;  // sc
            hatch[1] = 0x4e800020;  // blr
            if (xprot) {
                mprotect(hatch, 2*sizeof(unsigned), PROT_EXEC|PROT_READ);
            }
        }
        else {
            hatch = 0;
        }
    }
    return hatch;
}
#endif /*}*/
530
// Zero-fill 'len' bytes starting at p; a no-op when len is 0.
// Hand-rolled because the stub must not depend on libc.
static void
#if defined(__i386__) /*{*/
__attribute__((regparm(2), stdcall))
#endif /*}*/
upx_bzero(char *p, size_t len)
{
    while (len != 0) {
        *p = 0;
        ++p;
        --len;
    }
}
#define bzero upx_bzero
542
543
544 static Elf32_auxv_t *
545 #if defined(__i386__) /*{*/
546 __attribute__((regparm(2), stdcall))
547 #endif /*}*/
auxv_find(Elf32_auxv_t * av,unsigned const type)548 auxv_find(Elf32_auxv_t *av, unsigned const type)
549 {
550 Elf32_auxv_t *avail = 0;
551 if (av
552 #if defined(__i386__) /*{*/
553 && 0==(1&(int)av) /* PT_INTERP usually inhibits, except for hatch */
554 #endif /*}*/
555 ) {
556 for (;; ++av) {
557 if (av->a_type==type)
558 return av;
559 if (av->a_type==AT_IGNORE)
560 avail = av;
561 }
562 if (0!=avail && AT_NULL!=type) {
563 av = avail;
564 av->a_type = type;
565 return av;
566 }
567 }
568 return 0;
569 }
570
571
572 static void
573 #if defined(__i386__) /*{*/
574 __attribute__((regparm(3), stdcall))
575 #endif /*}*/
auxv_up(Elf32_auxv_t * av,unsigned const type,unsigned const value)576 auxv_up(Elf32_auxv_t *av, unsigned const type, unsigned const value)
577 {
578 DPRINTF("auxv_up %%p %%x %%x\\n",av,type,value);
579 av = auxv_find(av, type);
580 if (av) {
581 av->a_un.a_val = value;
582 }
583 }
584
// The PF_* and PROT_* bits are {1,2,4}; the conversion table fits in 32 bits.
// REP8 tiles a 4-bit value across all 8 nibbles; EXP8 selects which nibbles
// of the table have a given PF_* bit set.  The OR of the three masked tables
// is a packed 8-entry lookup table, indexed by shifting 4*(pf&7) bits.
#define REP8(x) \
    ((x)|((x)<<4)|((x)<<8)|((x)<<12)|((x)<<16)|((x)<<20)|((x)<<24)|((x)<<28))
#define EXP8(y) \
    ((1&(y)) ? 0xf0f0f0f0 : (2&(y)) ? 0xff00ff00 : (4&(y)) ? 0xffff0000 : 0)
#define PF_TO_PROT(pf) \
    ((PROT_READ|PROT_WRITE|PROT_EXEC) & ( \
        ( (REP8(PROT_EXEC ) & EXP8(PF_X)) \
         |(REP8(PROT_READ ) & EXP8(PF_R)) \
         |(REP8(PROT_WRITE) & EXP8(PF_W)) \
        ) >> ((pf & (PF_R|PF_W|PF_X))<<2) ))
596
597
#if defined(__powerpc__) //{
extern
size_t get_page_mask(void);  // variable page size AT_PAGESZ; see *-fold.S
#elif defined(__mips__) //}{
// empty -- mips passes page_mask down explicitly as a parameter instead
#else //}{  // FIXME for __mips__
size_t get_page_mask(void) { return PAGE_MASK; }  // compile-time constant
#endif //}
606
// Find convex hull of PT_LOAD (the minimal interval which covers all PT_LOAD),
// and mmap that much, to be sure that a kernel using exec-shield-randomize
// won't place the first piece in a way that leaves no room for the rest.
// Reserves the hull with PROT_NONE (do_xmap later maps each segment over it),
// stores the end of the hull into *p_brk, and returns the relocation delta
// (actual map address minus lowest requested p_vaddr; 0 for MAP_FIXED).
static ptrdiff_t  // returns relocation constant
#if defined(__i386__) /*{*/
__attribute__((regparm(3), stdcall))
#endif /*}*/
xfind_pages(unsigned mflags, Elf32_Phdr const *phdr, int phnum,
    Elf32_Addr *const p_brk
#if defined (__mips__) //{
    , size_t const page_mask
#endif //}
)
{
#if !defined(__mips__) //{
    size_t const page_mask = get_page_mask();
#endif //}
    Elf32_Addr lo= ~0, hi= 0, addr = 0;
    DPRINTF("xfind_pages  %%x  %%p  %%d  %%p\\n", mflags, phdr, phnum, p_brk);
    for (; --phnum>=0; ++phdr) if (PT_LOAD==phdr->p_type
#if defined(__arm__) /*{*/
                             &&  phdr->p_memsz
// Android < 4.1 (kernel < 3.0.31) often has PT_INTERP of /system/bin/linker
// with bad PT_LOAD[0].  https://sourceforge.net/p/upx/bugs/221
//   Type: EXEC (Executable file)
//
//   Program Headers:
//   Type           Offset   VirtAddr   PhysAddr   FileSiz MemSiz  Flg Align
//   LOAD           0x0000d4 0x00000000 0xb0000000 0x00000 0x00000 R   0x1000
//   LOAD           0x001000 0xb0001000 0xb0001000 0x07614 0x07614 R E 0x1000
//   LOAD           0x009000 0xb0009000 0xb0009000 0x006f8 0x0ccdc RW  0x1000
#endif /*}*/
                                                  ) {
        // Track lowest start and highest end over all PT_LOAD segments.
        if (phdr->p_vaddr < lo) {
            lo = phdr->p_vaddr;
        }
        if (hi < (phdr->p_memsz + phdr->p_vaddr)) {
            hi =  phdr->p_memsz + phdr->p_vaddr;
        }
    }
    lo -= ~page_mask & lo;  // round down to page boundary
    hi  =  page_mask & (hi - lo - page_mask -1);  // page length
    DPRINTF("  addr=%%p  lo=%%p  hi=%%p\\n", addr, lo, hi);
    addr = (Elf32_Addr)mmap_privanon((void *)lo, hi, PROT_NONE, mflags);
    DPRINTF("  addr=%%p\\n", addr);
    *p_brk = hi + addr;  // the logical value of brk(0)
    return (ptrdiff_t)addr - lo;
}
655
656
657 static Elf32_Addr // entry address
do_xmap(int const fdi,Elf32_Ehdr const * const ehdr,Extent * const xi,Elf32_auxv_t * const av,unsigned * const p_reloc,f_unfilter * const f_unf,size_t const page_mask)658 do_xmap(int const fdi, Elf32_Ehdr const *const ehdr, Extent *const xi,
659 Elf32_auxv_t *const av, unsigned *const p_reloc, f_unfilter *const f_unf
660 #if defined(__mips__) //{
661 , size_t const page_mask
662 #endif //}
663 )
664 {
665 #if defined(__mips__) //{
666 unsigned const frag_mask = ~page_mask;
667 #else //}{
668 unsigned const frag_mask = ~get_page_mask();
669 #endif //}
670 Elf32_Phdr const *phdr = (Elf32_Phdr const *) (ehdr->e_phoff +
671 (void const *)ehdr);
672 Elf32_Addr v_brk;
673
674 ptrdiff_t reloc = xfind_pages(((ET_EXEC==ehdr->e_type) ? MAP_FIXED : 0),
675 phdr, ehdr->e_phnum, &v_brk
676 #if defined(__mips__) //{
677 , page_mask
678 #endif //}
679 );
680
681 #if DEBUG &&!defined(__mips__) //{
682 size_t const page_mask = 0;
683 #endif //}
684 DPRINTF("do_xmap fdi=%%x ehdr=%%p xi=%%p(%%x %%p)\\n"
685 " av=%%p page_mask=%%p reloc=%%p p_reloc=%%p/%%p f_unf=%%p\\n",
686 fdi, ehdr, xi, (xi? xi->size: 0), (xi? xi->buf: 0),
687 av, page_mask, reloc, p_reloc, *p_reloc, f_unf);
688 int j;
689 for (j=0; j < ehdr->e_phnum; ++phdr, ++j)
690 if (xi && PT_PHDR==phdr->p_type) {
691 auxv_up(av, AT_PHDR, phdr->p_vaddr + reloc);
692 } else
693 if (PT_LOAD==phdr->p_type
694 #if defined(__arm__) /*{*/
695 && phdr->p_memsz
696 #endif /*}*/
697 ) {
698 if (xi && !phdr->p_offset /*&& ET_EXEC==ehdr->e_type*/) { // 1st PT_LOAD
699 // ? Compressed PT_INTERP must not overwrite values from compressed a.out?
700 auxv_up(av, AT_PHDR, phdr->p_vaddr + reloc + ehdr->e_phoff);
701 auxv_up(av, AT_PHNUM, ehdr->e_phnum);
702 auxv_up(av, AT_PHENT, ehdr->e_phentsize); /* ancient kernels might omit! */
703 //auxv_up(av, AT_PAGESZ, PAGE_SIZE); /* ld-linux.so.2 does not need this */
704 }
705 unsigned const prot = PF_TO_PROT(phdr->p_flags);
706 Extent xo;
707 size_t mlen = xo.size = phdr->p_filesz;
708 char * addr = xo.buf = (char *)(phdr->p_vaddr + reloc);
709 char *const haddr = phdr->p_memsz + addr;
710 size_t frag = (int)addr & frag_mask;
711 mlen += frag;
712 addr -= frag;
713 DPRINTF(" phdr type=%%x offset=%%x vaddr=%%x paddr=%%x filesz=%%x memsz=%%x flags=%%x align=%%x\\n",
714 phdr->p_type, phdr->p_offset, phdr->p_vaddr, phdr->p_paddr,
715 phdr->p_filesz, phdr->p_memsz, phdr->p_flags, phdr->p_align);
716 DPRINTF(" addr=%%x mlen=%%x frag=%%x prot=%%x\\n", addr, mlen, frag, prot);
717
718 #if defined(__i386__) /*{*/
719 // Decompressor can overrun the destination by 3 bytes.
720 # define LEN_OVER 3
721 #else /*}{*/
722 # define LEN_OVER 0
723 #endif /*}*/
724
725 if (xi) { // compresed source: mprotect(,,prot) later
726 if (addr != mmap_privanon(addr, LEN_OVER + mlen,
727 PROT_WRITE | PROT_READ, MAP_FIXED) )
728 err_exit(6);
729 unpackExtent(xi, &xo, (f_expand *)fdi,
730 ((PROT_EXEC & prot) ? f_unf : 0) );
731 }
732 else { // PT_INTERP
733 if (addr != mmap(addr, mlen, prot, MAP_FIXED | MAP_PRIVATE,
734 fdi, phdr->p_offset - frag) )
735 err_exit(8);
736 }
737 // Linux does not fixup the low end, so neither do we.
738 // Indeed, must leave it alone because some PT_GNU_RELRO
739 // dangle below PT_LOAD (but still on the low page)!
740 //if (PROT_WRITE & prot) {
741 // bzero(addr, frag); // fragment at lo end
742 //}
743 frag = (-mlen) & frag_mask; // distance to next page boundary
744 if (PROT_WRITE & prot) { // note: read-only .bss not supported here
745 bzero(mlen+addr, frag); // fragment at hi end
746 }
747 if (xi) {
748 #if defined(__i386__) /*{*/
749 void *const hatch = make_hatch_x86(phdr, reloc);
750 if (0!=hatch) {
751 /* always update AT_NULL, especially for compressed PT_INTERP */
752 auxv_up((Elf32_auxv_t *)(~1 & (int)av), AT_NULL, (unsigned)hatch);
753 }
754 #elif defined(__arm__) /*}{*/
755 void *const hatch = make_hatch_arm(phdr, reloc);
756 if (0!=hatch) {
757 auxv_up(av, AT_NULL, (unsigned)hatch);
758 }
759 #elif defined(__mips__) /*}{*/
760 void *const hatch = make_hatch_mips(phdr, reloc, frag_mask);
761 if (0!=hatch) {
762 auxv_up(av, AT_NULL, (unsigned)hatch);
763 }
764 #elif defined(__powerpc__) /*}{*/
765 void *const hatch = make_hatch_ppc32(phdr, reloc, frag_mask);
766 if (0!=hatch) {
767 auxv_up(av, AT_NULL, (unsigned)hatch);
768 }
769 #endif /*}*/
770 if (0!=mprotect(addr, mlen, prot)) {
771 err_exit(10);
772 ERR_LAB
773 }
774 }
775 addr += mlen + frag; /* page boundary on hi end */
776 if (addr < haddr) { // need pages for .bss
777 DPRINTF("addr=%%p haddr=%%p\\n", addr, haddr);
778 if (addr != mmap_privanon(addr, haddr - addr, prot, MAP_FIXED)) {
779 for(;;);
780 err_exit(9);
781 }
782 }
783 #if defined(__i386__) /*{*/
784 else if (xi) { // cleanup if decompressor overrun crosses page boundary
785 mlen = frag_mask & (3+ mlen);
786 if (mlen<=3) { // page fragment was overrun buffer only
787 munmap(addr, mlen);
788 }
789 }
790 #endif /*}*/
791 }
792 if (xi && ET_DYN!=ehdr->e_type) {
793 // Needed only if compressed shell script invokes compressed shell.
794 do_brk((void *)v_brk);
795 }
796 if (0!=p_reloc) {
797 *p_reloc = reloc;
798 }
799 return ehdr->e_entry + reloc;
800 }
801
#if 0 && defined(__arm__)  //{
// Disabled helper: encode the low 5*n bits of v as n base-32 characters
// (A..Z then 0..5), writing backwards into the buffer that ends at p;
// returns the unconsumed high bits of v.
static uint32_t ascii5(char *p, uint32_t v, unsigned n)
{
    do {
        unsigned char d = (0x1f & v) + 'A';
        if ('Z' < d) d += '0' - (1+ 'Z');
        *--p = d;
        v >>= 5;
    } while (--n > 0);
    return v;
}
#endif  //}
814
815
816 /*************************************************************************
817 // upx_main - called by our entry code
818 //
819 // This function is optimized for size.
820 **************************************************************************/
821
822 #if defined(__mips__) /*{*/
823 void *upx_main( // returns entry address
824 struct b_info const *const bi, // 1st block header
825 size_t const sz_compressed, // total length
826 Elf32_Ehdr *const ehdr, // temp char[sz_ehdr] for decompressing
827 Elf32_auxv_t *const av,
828 f_expand *const f_exp,
829 f_unfilter *const f_unf,
830 Elf32_Addr const elfaddr,
831 size_t const page_mask
832 ) __asm__("upx_main");
upx_main(struct b_info const * const bi,size_t const sz_compressed,Elf32_Ehdr * const ehdr,Elf32_auxv_t * const av,f_expand * const f_exp,f_unfilter * const f_unf,Elf32_Addr const elfaddr,size_t const page_mask)833 void *upx_main( // returns entry address
834 struct b_info const *const bi, // 1st block header
835 size_t const sz_compressed, // total length
836 Elf32_Ehdr *const ehdr, // temp char[sz_ehdr] for decompressing
837 Elf32_auxv_t *const av,
838 f_expand *const f_exp,
839 f_unfilter *const f_unf,
840 Elf32_Addr const elfaddr,
841 size_t const page_mask
842 )
843
844 #elif defined(__powerpc__) //}{
845 void *upx_main( // returns entry address
846 struct b_info const *const bi, // 1st block header
847 size_t const sz_compressed, // total length
848 Elf32_Ehdr *const ehdr, // temp char[sz_ehdr] for decompressing
849 Elf32_auxv_t *const av,
850 f_expand *const f_exp,
851 f_unfilter *const f_unf,
852 Elf32_Addr elfaddr
853 ) __asm__("upx_main");
854 void *upx_main( // returns entry address
855 struct b_info const *const bi, // 1st block header
856 size_t const sz_compressed, // total length
857 Elf32_Ehdr *const ehdr, // temp char[sz_ehdr] for decompressing
858 Elf32_auxv_t *const av,
859 f_expand *const f_exp,
860 f_unfilter *const f_unf,
861 Elf32_Addr elfaddr
862 )
863
864 #else /*}{ !__mips__ && !__powerpc__ */
865 void *upx_main(
866 Elf32_auxv_t *const av,
867 unsigned const sz_compressed,
868 f_expand *const f_exp,
869 f_unfilter * /*const*/ f_unfilter,
870 Extent xo,
871 Extent xi,
872 Elf32_Addr const volatile elfaddr
873 ) __asm__("upx_main");
874 void *upx_main(
875 Elf32_auxv_t *const av,
876 unsigned const sz_compressed,
877 f_expand *const f_exp,
878 f_unfilter * /*const*/ f_unf,
879 Extent xo, // {sz_unc, ehdr} for ELF headers
880 Extent xi, // {sz_cpr, &b_info} for ELF headers
881 Elf32_Addr const volatile elfaddr // value+result: compiler must not change
882 )
883 #endif /*}*/
884 {
885 #if defined(__i386__) //{
886 f_unf = (0xeb != *(unsigned char *)f_exp) // 2-byte jmp around unfilter
887 ? 0
888 : (f_unfilter *)(2+ (long)f_exp);
889 #endif //}
890
891 #if !defined(__mips__) && !defined(__powerpc__) /*{*/
892 Elf32_Ehdr *const ehdr = (Elf32_Ehdr *)(void *)xo.buf; // temp char[MAX_ELF_HDR+OVERHEAD]
893 // sizeof(Ehdr+Phdrs), compressed; including b_info header
894 size_t const sz_first = xi.size;
895 #endif /*}*/
896
897 #if defined(__powerpc__) //{
898 size_t const sz_first = sizeof(*bi) + bi->sz_cpr;
899 Extent xo, xi;
900 xo.buf = (char *)ehdr; xo.size = bi->sz_unc;
901 xi.buf = CONST_CAST(char *, bi); xi.size = sz_compressed;
902 #endif //}
903
904 #if defined(__mips__) /*{*/
905 Extent xo, xi, xj;
906 xo.buf = (char *)ehdr; xo.size = bi->sz_unc;
907 xi.buf = CONST_CAST(char *, bi); xi.size = sz_compressed;
908 xj.buf = CONST_CAST(char *, bi); xj.size = sizeof(*bi) + bi->sz_cpr;
909 #endif //}
910
911 DPRINTF("upx_main av=%%p szc=%%x f_exp=%%p f_unf=%%p "
912 " xo=%%p(%%x %%p) xi=%%p(%%x %%p) elfaddr=%%x\\n",
913 av, sz_compressed, f_exp, f_unf, &xo, xo.size, xo.buf,
914 &xi, xi.size, xi.buf, elfaddr);
915
916 #if defined(__mips__) //{
917 // ehdr = Uncompress Ehdr and Phdrs
918 unpackExtent(&xj, &xo, f_exp, 0);
919 #else //}{ !defined(__mips__)
920 // Uncompress Ehdr and Phdrs.
921 unpackExtent(&xi, &xo, f_exp, 0);
922 // Prepare to decompress the Elf headers again, into the first PT_LOAD.
923 xi.buf -= sz_first;
924 xi.size = sz_compressed;
925 #endif // !__mips__ }
926
927 Elf32_Addr reloc = elfaddr;
928 DPRINTF("upx_main1 .e_entry=%%p reloc=%%p\\n", ehdr->e_entry, reloc);
929 Elf32_Phdr *phdr = (Elf32_Phdr *)(1+ ehdr);
930
931 // De-compress Ehdr again into actual position, then de-compress the rest.
932 Elf32_Addr entry = do_xmap((int)f_exp, ehdr, &xi, av, &reloc, f_unf
933 #if defined(__mips__) //{
934 , page_mask
935 #endif //}
936 );
937 DPRINTF("upx_main2 entry=%%p reloc=%%p\\n", entry, reloc);
938 auxv_up(av, AT_ENTRY , entry);
939
940 { // Map PT_INTERP program interpreter
941 int j;
942 for (j=0, phdr = (Elf32_Phdr *)(1+ ehdr); j < ehdr->e_phnum; ++phdr, ++j)
943 if (PT_INTERP==phdr->p_type) {
944 int const fdi = open(reloc + (char const *)phdr->p_vaddr, O_RDONLY, 0);
945 if (0 > fdi) {
946 err_exit(18);
947 }
948 if (MAX_ELF_HDR!=read(fdi, (void *)ehdr, MAX_ELF_HDR)) {
949 ERR_LAB
950 err_exit(19);
951 }
952 entry = do_xmap(fdi, ehdr, 0, av, &reloc, 0
953 #if defined(__mips__) //{
954 , page_mask
955 #endif //}
956 );
957 DPRINTF("upx_main3 entry=%%p reloc=%%p\\n", entry, reloc);
958 auxv_up(av, AT_BASE, reloc); // uClibc and musl
959 close(fdi);
960 break;
961 }
962 }
963
964 return (void *)entry;
965 }
966
967 /* vim:set ts=4 sw=4 et: */
968