
/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.      syswrap-x86-freebsd.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2008 Nicholas Nethercote
      njn@valgrind.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_x86_freebsd)

/* TODO/FIXME jrs 20050207: assignments to the syscall return result
   in interrupted_syscall() need to be reviewed.  They don't seem
   to assign the shadow state.
*/

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_libcsetjmp.h"    // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"      // For VG_(sigframe_destroy)()
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_stacks.h"        // VG_(register_stack)

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"    /* for decls of generic wrappers */
#include "priv_syswrap-freebsd.h"      /* for decls of FreeBSD-ish wrappers */
#include "priv_syswrap-main.h"

/* ---------------------------------------------------------------------
   clone() handling
   ------------------------------------------------------------------ */

/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack, and use 'retaddr' as f's return-to address.  Also, clear all
   the integer registers before entering f.*/
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1) ( Addr stack,
                                  Addr retaddr,
                                  void (*f)(Word),
                                  Word arg1 );
//  4(%esp) == stack
//  8(%esp) == retaddr
// 12(%esp) == f
// 16(%esp) == arg1
asm(
".text\n"
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movl %esp, %esi\n"     // remember old stack pointer
"   movl 4(%esi), %esp\n"  // set stack
"   pushl 16(%esi)\n"      // arg1 to stack
"   pushl  8(%esi)\n"      // retaddr to stack
"   pushl 12(%esi)\n"      // f to stack
"   movl $0, %eax\n"       // zero all GP regs
"   movl $0, %ebx\n"
"   movl $0, %ecx\n"
"   movl $0, %edx\n"
"   movl $0, %esi\n"
"   movl $0, %edi\n"
"   movl $0, %ebp\n"
"   ret\n"                 // jump to f
"   ud2\n"                 // should never get here
".previous\n"
);
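/* How the stack switch above works: after moving %esp to the new
   stack we push arg1, then retaddr, then f.  The final 'ret' pops f
   and jumps to it, which leaves retaddr on top of the new stack as
   f's return address and arg1 just above it as f's (cdecl) argument. */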


#if 0
/*
        Perform a rfork system call.  rfork is strange because it has
        fork()-like return-twice semantics, so it needs special
        handling here.

        Upon entry, we have:

            int (fn)(void*)     in  0+FSZ(%esp)
            void* child_stack   in  4+FSZ(%esp)
            int flags           in  8+FSZ(%esp)
            void* arg           in 12+FSZ(%esp)
            pid_t* child_tid    in 16+FSZ(%esp)
            pid_t* parent_tid   in 20+FSZ(%esp)
            void* tls_ptr       in 24+FSZ(%esp)

        System call requires:

            int    $__NR_clone  in %eax
            int    flags        in %ebx
            void*  child_stack  in %ecx
            pid_t* parent_tid   in %edx
            pid_t* child_tid    in %edi
            void*  tls_ptr      in %esi

	Returns an Int encoded in the linux-x86 way, not a SysRes.
 */
#define FSZ               "4+4+4+4" /* frame size = retaddr+ebx+edi+esi */
#define __NR_CLONE        VG_STRINGIFY(__NR_clone)
#define __NR_EXIT         VG_STRINGIFY(__NR_exit)

extern
Int do_syscall_clone_x86_freebsd ( Word (*fn)(void *),
                                 void* stack,
                                 Int   flags,
                                 void* arg,
                                 Int*  child_tid,
                                 Int*  parent_tid,
                                 vki_modify_ldt_t * );
asm(
".text\n"
"do_syscall_clone_x86_freebsd:\n"
"        push    %ebx\n"
"        push    %edi\n"
"        push    %esi\n"

         /* set up child stack with function and arg */
"        movl     4+"FSZ"(%esp), %ecx\n"    /* syscall arg2: child stack */
"        movl    12+"FSZ"(%esp), %ebx\n"    /* fn arg */
"        movl     0+"FSZ"(%esp), %eax\n"    /* fn */
"        lea     -8(%ecx), %ecx\n"          /* make space on stack */
"        movl    %ebx, 4(%ecx)\n"           /*   fn arg */
"        movl    %eax, 0(%ecx)\n"           /*   fn */

         /* get other args to clone */
"        movl     8+"FSZ"(%esp), %ebx\n"    /* syscall arg1: flags */
"        movl    20+"FSZ"(%esp), %edx\n"    /* syscall arg3: parent tid * */
"        movl    16+"FSZ"(%esp), %edi\n"    /* syscall arg5: child tid * */
"        movl    24+"FSZ"(%esp), %esi\n"    /* syscall arg4: tls_ptr * */
"        movl    $"__NR_CLONE", %eax\n"
"        int     $0x80\n"                   /* clone() */
"        testl   %eax, %eax\n"              /* child if retval == 0 */
"        jnz     1f\n"

         /* CHILD - call thread function */
"        popl    %eax\n"
"        call    *%eax\n"                   /* call fn */

         /* exit with result */
"        movl    %eax, %ebx\n"              /* arg1: return value from fn */
"        movl    $"__NR_EXIT", %eax\n"
"        int     $0x80\n"

         /* Hm, exit returned */
"        ud2\n"

"1:\n"   /* PARENT or ERROR */
"        pop     %esi\n"
"        pop     %edi\n"
"        pop     %ebx\n"
"        ret\n"
".previous\n"
);

#undef FSZ
#undef __NR_CLONE
#undef __NR_EXIT


// forward declarations
static void setup_child ( ThreadArchState*, ThreadArchState*, Bool );

/*
   When a client clones, we need to keep track of the new thread.  This means:
   1. allocate a ThreadId+ThreadState+stack for the thread

   2. initialize the thread's new VCPU state

   3. create the thread using the same args as the client requested,
   but using the scheduler entrypoint for EIP, and a separate stack
   for ESP.
 */
static SysRes do_rfork ( ThreadId ptid,
                         UInt flags)
{
   static const Bool debug = False;

   Addr         esp;
   ThreadId     ctid = VG_(alloc_ThreadState)();
   ThreadState* ptst = VG_(get_ThreadState)(ptid);
   ThreadState* ctst = VG_(get_ThreadState)(ctid);
   UWord*       stack;
   NSegment const* seg;
   SysRes       res;
   Int          eax;
   vki_sigset_t blockall, savedmask;

   VG_(sigfillset)(&blockall);

   vg_assert(VG_(is_running_thread)(ptid));
   vg_assert(VG_(is_valid_tid)(ctid));

   stack = (UWord*)ML_(allocstack)(ctid);
   if (stack == NULL) {
      res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
      goto out;
   }

   /* Copy register state

      Both parent and child return to the same place, and the code
      following the clone syscall works out which is which, so we
      don't need to worry about it.

      The parent gets the child's new tid returned from clone, but the
      child gets 0.

      If the clone call specifies a NULL esp for the new thread, then
      it actually gets a copy of the parent's esp.
   */
   /* Note: the clone call done by the Quadrics Elan3 driver specifies
      clone flags of 0xF00, and it seems to rely on the assumption
      that the child inherits a copy of the parent's GDT.
      setup_child takes care of setting that up. */
   setup_child( &ctst->arch, &ptst->arch, True );

   /* Make sys_clone appear to have returned Success(0) in the
      child. */
   ctst->arch.vex.guest_EAX = 0;

   /* Assume the linuxthreads port stores its intended stack pointer in %esi */
   esp = ctst->arch.vex.guest_ESI;

   ctst->os_state.parent = ptid;

   /* inherit signal mask */
   ctst->sig_mask     = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   /* We don't really know where the client stack is, because it's
      allocated by the client.  The best we can do is look at the
      memory mappings and try to derive some useful information.  We
      assume that esp starts near its highest possible value, and can
      only go down to the start of the mmaped segment. */
   seg = VG_(am_find_nsegment)((Addr)esp);
   if (seg && seg->kind != SkResvn) {
      ctst->client_stack_highest_byte = (Addr)VG_PGROUNDUP(esp);
      ctst->client_stack_szB = ctst->client_stack_highest_byte - seg->start;

      VG_(register_stack)(seg->start, ctst->client_stack_highest_byte);

      if (debug)
	 VG_(printf)("tid %d: guessed client stack range %#lx-%#lx\n",
		     ctid, seg->start, VG_PGROUNDUP(esp));
   } else {
      VG_(message)(Vg_UserMsg, "!? New thread %d starts with ESP(%#lx) unmapped\n",
		   ctid, esp);
      ctst->client_stack_szB  = 0;
   }

   /* Assume the clone will succeed, and tell any tool that wants to
      know that this thread has come into existence.  We cannot defer
      it beyond this point because sys_set_thread_area, just below,
      causes tCheck to assert by making references to the new ThreadId
      if we don't state the new thread exists prior to that point.
      If the clone fails, we'll send out a ll_exit notification for it
      at the out: label below, to clean up. */
   VG_TRACK ( pre_thread_ll_create, ptid, ctid );

   /* start the thread with everything blocked */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);

   /* Create the new thread */
   /* XXX need to see what happens with tids etc with rfork */
   eax = do_syscall_clone_x86_freebsd(
            ML_(start_thread_NORETURN), stack, flags /*, &VG_(threads)[ctid], NULL*/ );
   res = VG_(mk_SysRes_x86_freebsd)( eax ); /* XXX edx returns too! */

   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

  out:
   if (res.isError) {
      /* clone failed */
      VG_(cleanup_thread)(&ctst->arch);
      ctst->status = VgTs_Empty;
      /* oops.  Better tell the tool the thread exited in a hurry :-) */
      VG_TRACK( pre_thread_ll_exit, ctid );
   }

   return res;
}
#endif

/* Translate a client-supplied flat segment base address into a
   VexGuestX86SegDescr (hardware descriptor format). */

static
void translate_to_hw_format ( /* IN  */ void* base,
                              /* OUT */ VexGuestX86SegDescr* out)
{
   UInt entry_1, entry_2;
   UInt base_addr = (UInt) base;
   vg_assert(8 == sizeof(VexGuestX86SegDescr));

   if (0)
      VG_(printf)("translate_to_hw_format: base %p\n", base );

   /* Allow LDTs to be cleared by the user. */
   if (base == 0) {
      entry_1 = 0;
      entry_2 = 0;
      goto install;
   }
   /* base as specified, no limit, read/write/accessed etc */
   entry_1 = ((base_addr & 0x0000ffff) << 16) | 0x0ffff;
   entry_2 = (base_addr & 0xff000000) |
             ((base_addr & 0x00ff0000) >> 16) | 0x00cff300;
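   /* These two words follow the x86 descriptor layout: entry_1 holds
      base[15:0] in its upper half and limit[15:0] = 0xffff in its lower
      half.  entry_2 holds base[31:24] in its top byte, then G=1, D/B=1
      and limit[19:16] = 0xf (the 0x00cf0000 part), then P=1, DPL=3, S=1
      and a read/write/accessed data type (the 0x0000f300 part), with
      base[23:16] in its low byte. */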

   /* Install the new entry ...  */
  install:
   out->LdtEnt.Words.word1 = entry_1;
   out->LdtEnt.Words.word2 = entry_2;
}

/* Create a zeroed-out GDT. */
static VexGuestX86SegDescr* alloc_zeroed_x86_GDT ( void )
{
   Int nbytes = VEX_GUEST_X86_GDT_NENT * sizeof(VexGuestX86SegDescr);
   return VG_(arena_calloc)(VG_AR_CORE, "di.syswrap-x86.azxG.1", nbytes, 1);
}

#if 0
/* Create a zeroed-out LDT. */
static VexGuestX86SegDescr* alloc_zeroed_x86_LDT ( void )
{
   Int nbytes = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr);
   return VG_(arena_calloc)(VG_AR_CORE, "di.syswrap-x86.azxL.1", nbytes, 1);
}

/* Free up an LDT or GDT allocated by the above fns. */
static void free_LDT_or_GDT ( VexGuestX86SegDescr* dt )
{
   vg_assert(dt);
   VG_(arena_free)(VG_AR_CORE, (void*)dt);
}

/* Copy contents between two existing LDTs. */
static void copy_LDT_from_to ( VexGuestX86SegDescr* src,
                               VexGuestX86SegDescr* dst )
{
   Int i;
   vg_assert(src);
   vg_assert(dst);
   for (i = 0; i < VEX_GUEST_X86_LDT_NENT; i++)
      dst[i] = src[i];
}

/* Copy contents between two existing GDTs. */
static void copy_GDT_from_to ( VexGuestX86SegDescr* src,
                               VexGuestX86SegDescr* dst )
{
   Int i;
   vg_assert(src);
   vg_assert(dst);
   for (i = 0; i < VEX_GUEST_X86_GDT_NENT; i++)
      dst[i] = src[i];
}

/* Free this thread's DTs, if it has any. */
static void deallocate_LGDTs_for_thread ( VexGuestX86State* vex )
{
   vg_assert(sizeof(HWord) == sizeof(void*));

   if (0)
      VG_(printf)("deallocate_LGDTs_for_thread: "
                  "ldt = 0x%x, gdt = 0x%x\n",
                  vex->guest_LDT, vex->guest_GDT );

   if (vex->guest_LDT != (HWord)NULL) {
      free_LDT_or_GDT( (VexGuestX86SegDescr*)vex->guest_LDT );
      vex->guest_LDT = (HWord)NULL;
   }

   if (vex->guest_GDT != (HWord)NULL) {
      free_LDT_or_GDT( (VexGuestX86SegDescr*)vex->guest_GDT );
      vex->guest_GDT = (HWord)NULL;
   }
}
#endif

static SysRes sys_set_thread_area ( ThreadId tid, Int *idxptr, void *base)
{
   VexGuestX86SegDescr* gdt;
   Int idx;

   vg_assert(8 == sizeof(VexGuestX86SegDescr));
   vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));

   gdt = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_GDT;

   /* If the thread doesn't have a GDT, allocate it now. */
   if (!gdt) {
      gdt = alloc_zeroed_x86_GDT();
      VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt;
   }

   idx = *idxptr;
   if (idx == -1) {
      /* Find and use the first free entry.  Don't allocate entry
         zero, because the hardware will never do that, and apparently
         doing so confuses some code (perhaps stuff running on
         Wine). */
      for (idx = 1; idx < VEX_GUEST_X86_GDT_NENT; idx++) {
         if (gdt[idx].LdtEnt.Words.word1 == 0
             && gdt[idx].LdtEnt.Words.word2 == 0)
            break;
      }

      if (idx == VEX_GUEST_X86_GDT_NENT)
         return VG_(mk_SysRes_Error)( VKI_ESRCH );
   } else if (idx < 0 || idx == 0 || idx >= VEX_GUEST_X86_GDT_NENT) {
      /* Similarly, reject attempts to use GDT[0]. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   translate_to_hw_format(base, &gdt[idx]);

   *idxptr = idx;
   return VG_(mk_SysRes_Success)( 0 );
}

static SysRes sys_get_thread_area ( ThreadId tid, Int idx, void ** basep )
{
   VexGuestX86SegDescr* gdt;
   UInt base;

   vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
   vg_assert(8 == sizeof(VexGuestX86SegDescr));

   gdt = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_GDT;

   /* If the thread doesn't have a GDT, allocate it now. */
   if (!gdt) {
      gdt = alloc_zeroed_x86_GDT();
      VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt;
   }

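   /* Reassemble the flat 32-bit base address from the descriptor's
      three split fields (bits 31..24, 23..16 and 15..0). */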
   base = ( gdt[idx].LdtEnt.Bits.BaseHi << 24 ) |
          ( gdt[idx].LdtEnt.Bits.BaseMid << 16 ) |
            gdt[idx].LdtEnt.Bits.BaseLow;
   *basep = (void *)base;

   return VG_(mk_SysRes_Success)( 0 );
}

/* ---------------------------------------------------------------------
   More thread stuff
   ------------------------------------------------------------------ */

void VG_(cleanup_thread) ( ThreadArchState* arch )
{
}


#if 0
static void setup_child ( /*OUT*/ ThreadArchState *child,
                          /*IN*/  ThreadArchState *parent,
                          Bool inherit_parents_GDT )
{
   /* We inherit our parent's guest state. */
   child->vex = parent->vex;
   child->vex_shadow1 = parent->vex_shadow1;
   child->vex_shadow2 = parent->vex_shadow2;

   /* We inherit our parent's LDT. */
   if (parent->vex.guest_LDT == (HWord)NULL) {
      /* We hope this is the common case. */
      child->vex.guest_LDT = (HWord)NULL;
   } else {
      /* No luck .. we have to take a copy of the parent's. */
      child->vex.guest_LDT = (HWord)alloc_zeroed_x86_LDT();
      copy_LDT_from_to( (VexGuestX86SegDescr*)parent->vex.guest_LDT,
                        (VexGuestX86SegDescr*)child->vex.guest_LDT );
   }

   /* Either we start with an empty GDT (the usual case) or inherit a
      copy of our parents' one (Quadrics Elan3 driver -style clone
      only). */
   child->vex.guest_GDT = (HWord)NULL;

   if (inherit_parents_GDT && parent->vex.guest_GDT != (HWord)NULL) {
      child->vex.guest_GDT = (HWord)alloc_zeroed_x86_GDT();
      copy_GDT_from_to( (VexGuestX86SegDescr*)parent->vex.guest_GDT,
                        (VexGuestX86SegDescr*)child->vex.guest_GDT );
   }
}
#endif

/* ---------------------------------------------------------------------
   PRE/POST wrappers for x86/FreeBSD-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(freebsd, name)
#define POST(name)      DEFN_POST_TEMPLATE(freebsd, name)

#if 0
struct thr_param {
    void        (*start_func)(void *);  /* thread entry function. */
    void        *arg;                   /* argument for entry function. */
    char        *stack_base;            /* stack base address. */
    size_t      stack_size;             /* stack size. */
    char        *tls_base;              /* tls base address. */
    size_t      tls_size;               /* tls size. */
    long        *child_tid;             /* address to store new TID. */
    long        *parent_tid;            /* parent accesses the new TID here. */
    int         flags;                  /* thread flags. */
    struct rtprio       *rtp;           /* Real-time scheduling priority */
    void        *spare[3];              /* TODO: cpu affinity mask etc. */
};
int thr_new(struct thr_param *param, int param_size);
#endif
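/* For orientation, a rough sketch (not code from this file) of how a
   libthr-style caller uses thr_new: it fills in start_func/arg,
   stack_base/stack_size and tls_base in a struct thr_param and calls
   thr_new(&param, sizeof(param)); the kernel then starts the new
   thread directly at start_func(arg) on the supplied stack.  The
   wrapper below intercepts that call, substitutes Valgrind's own
   stack and trampoline, and arranges for the client's start_func to
   be run by the scheduler instead. */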

PRE(sys_thr_new)
{
   static const Bool debug = False;

   ThreadId     ctid = VG_(alloc_ThreadState)();
   ThreadState* ptst = VG_(get_ThreadState)(tid);
   ThreadState* ctst = VG_(get_ThreadState)(ctid);
   SysRes       res;
   vki_sigset_t blockall, savedmask;
   struct vki_thr_param tp;
   Int idx = -1;
   Addr stk;

   PRINT("thr_new ( %#lx, %ld )",ARG1,ARG2);
   PRE_REG_READ2(int, "thr_new",
                 struct thr_param *, param,
                 int, param_size);

   PRE_MEM_READ( "thr_new(param)", ARG1, offsetof(struct vki_thr_param, spare));
   if (!ML_(safe_to_deref)( (void*)ARG1, offsetof(struct vki_thr_param, spare))) {
      SET_STATUS_Failure( VKI_EFAULT );
      return;
   }
   VG_(memset)(&tp, 0, sizeof(tp));
   VG_(memcpy)(&tp, (void *)ARG1, offsetof(struct vki_thr_param, spare));
   PRE_MEM_WRITE("clone(parent_tidptr)", (Addr)tp.parent_tid, sizeof(long));
   PRE_MEM_WRITE("clone(child_tidptr)", (Addr)tp.child_tid, sizeof(long));

   VG_(sigfillset)(&blockall);

   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_valid_tid)(ctid));

   /* Copy register state

      On linux, both parent and child return to the same place, and the code
      following the clone syscall works out which is which, so we
      don't need to worry about it.
      On FreeBSD, thr_new arranges a direct call.  We don't actually need any
      of this gunk.

      The parent gets the child's new tid returned from clone, but the
      child gets 0.

      If the clone call specifies a NULL rsp for the new thread, then
      it actually gets a copy of the parent's rsp.
   */
   /* We inherit our parent's guest state. */
   ctst->arch.vex = ptst->arch.vex;
   ctst->arch.vex_shadow1 = ptst->arch.vex_shadow1;
   ctst->arch.vex_shadow2 = ptst->arch.vex_shadow2;

   /* Make sys_clone appear to have returned Success(0) in the
      child. */
   ctst->arch.vex.guest_EAX = 0;
   ctst->arch.vex.guest_EDX = 0;
   LibVEX_GuestX86_put_eflag_c(0, &ctst->arch.vex);
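   /* On FreeBSD the carry flag signals syscall failure, so clearing it
      (together with EAX = EDX = 0 above) makes the child observe a
      successful, zero-valued return. */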

   ctst->os_state.parent = tid;

   /* inherit signal mask */
   ctst->sig_mask = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   /* Linux has to guess, we don't */
   VG_(register_stack)((Addr)tp.stack_base, (Addr)tp.stack_base + tp.stack_size);

   /* Assume the clone will succeed, and tell any tool that wants to
      know that this thread has come into existence.  If the clone
      fails, we'll send out a ll_exit notification for it at the out:
      label below, to clean up. */
   VG_TRACK ( pre_thread_ll_create, tid, ctid );

   if (debug)
      VG_(printf)("clone child has SETTLS: tls at %#lx\n", (Addr)tp.tls_base);
   sys_set_thread_area( ctid, &idx, tp.tls_base );
   ctst->arch.vex.guest_GS = (idx << 3) | 3;   /* GSEL(GUGS_SEL, SEL_UPL) */
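   /* Selector layout: bits 15..3 are the descriptor index, bit 2 is the
      table indicator (0 = GDT), bits 1..0 are the RPL; so (idx << 3) | 3
      selects GDT entry 'idx' at user privilege. */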
   tp.tls_base = 0;	/* Don't have the kernel do it too */

   /* start the thread with everything blocked */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);

   /* Set the client state for scheduler to run libthr's trampoline */
   ctst->arch.vex.guest_ESP = (Addr)tp.stack_base + tp.stack_size - 8;
   ctst->arch.vex.guest_EIP = (Addr)tp.start_func;
   *(UWord *)(ctst->arch.vex.guest_ESP + 4) = (UWord)tp.arg;	/* Client arg */
   *(UWord *)(ctst->arch.vex.guest_ESP + 0) = 0;		/* fake return addr */
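   /* The client's initial stack now looks like an ordinary cdecl call
      frame: a fake (zero) return address at ESP+0 and the single
      argument for start_func at ESP+4. */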

   /* Set up valgrind's trampoline on its own stack */
   stk = ML_(allocstack)(ctid);
   tp.stack_base = (void *)ctst->os_state.valgrind_stack_base;
   tp.stack_size = (Addr)stk - (Addr)tp.stack_base;
   /* This is for thr_new() to run valgrind's trampoline */
   tp.start_func = (void *)ML_(start_thread_NORETURN);
   tp.arg = &VG_(threads)[ctid];

   /* Create the new thread */
   res = VG_(do_syscall2)(__NR_thr_new, (UWord)&tp, sizeof(tp));

   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

   if (sr_isError(res)) {
      /* clone failed */
      VG_(cleanup_thread)(&ctst->arch);
      ctst->status = VgTs_Empty;
      /* oops.  Better tell the tool the thread exited in a hurry :-) */
      VG_TRACK( pre_thread_ll_exit, ctid );
   } else {

      POST_MEM_WRITE((Addr)tp.parent_tid, sizeof(long));
      POST_MEM_WRITE((Addr)tp.child_tid, sizeof(long));
      POST_MEM_WRITE((Addr)ctst->arch.vex.guest_ESP, 8);

      /* Thread creation was successful; let the child have the chance
         to run */
      *flags |= SfYieldAfter;
   }

   /* "Complete" the syscall so that the wrapper doesn't call the kernel again. */
   SET_STATUS_from_SysRes(res);
}


PRE(sys_rfork)
{
   PRINT("sys_rfork ( %lx )",ARG1);
   PRE_REG_READ1(int, "rfork",
                 unsigned int, flags);

#if 0
   cloneflags = ARG1;

   if (!ML_(client_signal_OK)(ARG1 & VKI_CSIGNAL)) {
      SET_STATUS_Failure( VKI_EINVAL );
      return;
   }

   SET_STATUS_from_SysRes( do_clone(tid, ARG1));

   if (SUCCESS) {
      *flags |= SfYieldAfter;
   }
#else
   VG_(message)(Vg_UserMsg, "No rfork for you!");
   VG_(unimplemented)
         ("Valgrind does not support rfork() yet.");
   SET_STATUS_Failure( VKI_ENOSYS );
#endif
}

PRE(sys_sigreturn)
{
   PRINT("sys_sigreturn ( %#lx )", ARG1);
   PRE_REG_READ1(long, "sigreturn",
                 struct vki_ucontext *, ucp);

   PRE_MEM_READ( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
}

PRE(sys_fake_sigreturn)
{
   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
      an explanation of what follows. */

   ThreadState* tst;
   struct vki_ucontext *uc;
   PRINT("sys_sigreturn ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust esp to point to start of frame; skip back up over handler
      ret addr */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_ESP -= sizeof(Addr);	/* QQQ should be redundant */

   uc = (struct vki_ucontext *)ARG1;
   if (uc == NULL || uc->uc_mcontext.len != sizeof(uc->uc_mcontext)) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }

   /* This is only so that the EIP is (might be) useful to report if
      something goes wrong in the sigreturn */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   /* Restore register state from frame and remove it */
   VG_(sigframe_destroy)(tid);

   /*
    * Signal handler might have changed the signal mask.  Respect that.
    */
   tst->sig_mask = uc->uc_sigmask;
   tst->tmp_sig_mask = uc->uc_sigmask;

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

#if 0	/* QQQ keep for 6.x signals */
PRE(sys_rt_sigreturn)
{
   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
      an explanation of what follows. */

   ThreadState* tst;
   PRINT("sys_rt_sigreturn ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust esp to point to start of frame; skip back up over handler
      ret addr */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_ESP -= sizeof(Addr);

   /* This is only so that the EIP is (might be) useful to report if
      something goes wrong in the sigreturn */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   /* Restore register state from frame and remove it */
   VG_(sigframe_destroy)(tid, True);

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}
#endif

static void restore_mcontext(ThreadState *tst, struct vki_mcontext *sc)
{
   tst->arch.vex.guest_EAX     = sc->eax;
   tst->arch.vex.guest_ECX     = sc->ecx;
   tst->arch.vex.guest_EDX     = sc->edx;
   tst->arch.vex.guest_EBX     = sc->ebx;
   tst->arch.vex.guest_EBP     = sc->ebp;
   tst->arch.vex.guest_ESP     = sc->esp;
   tst->arch.vex.guest_ESI     = sc->esi;
   tst->arch.vex.guest_EDI     = sc->edi;
   tst->arch.vex.guest_EIP     = sc->eip;
   tst->arch.vex.guest_CS      = sc->cs;
   tst->arch.vex.guest_SS      = sc->ss;
   tst->arch.vex.guest_DS      = sc->ds;
   tst->arch.vex.guest_ES      = sc->es;
   tst->arch.vex.guest_FS      = sc->fs;
   tst->arch.vex.guest_GS      = sc->gs;
   /*
    * XXX: missing support for other flags.
    */
   if (sc->eflags & 0x0001)
      LibVEX_GuestX86_put_eflag_c(1, &tst->arch.vex);
   else
      LibVEX_GuestX86_put_eflag_c(0, &tst->arch.vex);
}

static void fill_mcontext(ThreadState *tst, struct vki_mcontext *sc)
{
   sc->eax = tst->arch.vex.guest_EAX;
   sc->ecx = tst->arch.vex.guest_ECX;
   sc->edx = tst->arch.vex.guest_EDX;
   sc->ebx = tst->arch.vex.guest_EBX;
   sc->ebp = tst->arch.vex.guest_EBP;
   sc->esp = tst->arch.vex.guest_ESP;
   sc->esi = tst->arch.vex.guest_ESI;
   sc->edi = tst->arch.vex.guest_EDI;
   sc->eip = tst->arch.vex.guest_EIP;
   sc->cs = tst->arch.vex.guest_CS;
   sc->ss = tst->arch.vex.guest_SS;
   sc->ds = tst->arch.vex.guest_DS;
   sc->es = tst->arch.vex.guest_ES;
   sc->fs = tst->arch.vex.guest_FS;
   sc->gs = tst->arch.vex.guest_GS;
   sc->eflags = LibVEX_GuestX86_get_eflags(&tst->arch.vex);
/*
   not yet.
   VG_(memcpy)(&sc->fpstate, fpstate, sizeof(*fpstate));
*/
   sc->fpformat = VKI_FPFMT_NODEV;
   sc->ownedfp = VKI_FPOWNED_NONE;
   sc->len = sizeof(*sc);
   VG_(memset)(sc->spare2, 0, sizeof(sc->spare2));
}


PRE(sys_getcontext)
{
   ThreadState* tst;
   struct vki_ucontext *uc;

   PRINT("sys_getcontext ( %#lx )", ARG1);
   PRE_REG_READ1(long, "getcontext",
                 struct vki_ucontext *, ucp);
   PRE_MEM_WRITE( "getcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );
   uc = (struct vki_ucontext *)ARG1;
   if (uc == NULL) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }
   tst = VG_(get_ThreadState)(tid);
   fill_mcontext(tst, &uc->uc_mcontext);
   uc->uc_mcontext.eax = 0;
   uc->uc_mcontext.edx = 0;
   uc->uc_mcontext.eflags &= ~0x0001;   /* PSL_C */
   uc->uc_sigmask = tst->sig_mask;
   VG_(memset)(uc->__spare__, 0, sizeof(uc->__spare__));
   SET_STATUS_Success(0);
}

PRE(sys_setcontext)
{
   ThreadState* tst;
   struct vki_ucontext *uc;

   PRINT("sys_setcontext ( %#lx )", ARG1);
   PRE_REG_READ1(long, "setcontext",
                 struct vki_ucontext *, ucp);

   PRE_MEM_READ( "setcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "setcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   tst = VG_(get_ThreadState)(tid);
   uc = (struct vki_ucontext *)ARG1;
   if (uc == NULL || uc->uc_mcontext.len != sizeof(uc->uc_mcontext)) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }

   restore_mcontext(tst, &uc->uc_mcontext);
   tst->sig_mask = uc->uc_sigmask;

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

PRE(sys_swapcontext)
{
   struct vki_ucontext *ucp, *oucp;
   ThreadState* tst;

   PRINT("sys_swapcontext ( %#lx, %#lx )", ARG1, ARG2);
   PRE_REG_READ2(long, "swapcontext",
                 struct vki_ucontext *, oucp, struct vki_ucontext *, ucp);

   PRE_MEM_READ( "swapcontext(ucp)", ARG2, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "swapcontext(oucp)", ARG1, sizeof(struct vki_ucontext) );

   oucp = (struct vki_ucontext *)ARG1;
   ucp = (struct vki_ucontext *)ARG2;
   if (oucp == NULL || ucp == NULL || ucp->uc_mcontext.len != sizeof(ucp->uc_mcontext)) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }
   tst = VG_(get_ThreadState)(tid);

   /*
    * Save the context.
    */
   fill_mcontext(tst, &oucp->uc_mcontext);
   oucp->uc_mcontext.eax = 0;
   oucp->uc_mcontext.edx = 0;
   oucp->uc_mcontext.eflags &= ~0x0001; /* PSL_C */
   oucp->uc_sigmask = tst->sig_mask;
   VG_(memset)(oucp->__spare__, 0, sizeof(oucp->__spare__));

   /*
    * Switch to new one.
    */
   restore_mcontext(tst, &ucp->uc_mcontext);
   tst->sig_mask = ucp->uc_sigmask;

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

/* This is here because on x86 the off_t is passed in 2 regs. Don't ask about pad.  */

/* caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); */
/*              ARG1           ARG2       ARG3      ARG4       ARG5    ARG6     ARG7+ARG8 */
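/* The 64-bit offset is reassembled below as pos = lo | ((off_t)hi << 32);
   the pad argument appears to exist only to keep the 64-bit pair aligned
   in the syscall argument slots. */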

PRE(sys_mmap)
{
   SysRes r;

   PRINT("sys_mmap ( %#lx, %lu, %ld, %ld, %ld, pad%ld, lo0x%lx hi0x%lx)",
         ARG1, (UWord)ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8 );
   PRE_REG_READ8(long, "mmap",
                 char *, addr, unsigned long, len, int, prot,  int, flags,
                 int, fd,  int, pad, unsigned long, lo, unsigned long, hi);

   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG7 + ((Off64T)ARG8 << 32) );
   SET_STATUS_from_SysRes(r);
}

PRE(sys_mmap7)
{
   SysRes r;

   PRINT("sys_mmap ( %#lx, %lu, %ld, %ld, %ld, lo0x%lx hi0x%lx)",
         ARG1, (UWord)ARG2, ARG3, ARG4, ARG5, ARG6, ARG7 );
   PRE_REG_READ7(long, "mmap",
                 char *, addr, unsigned long, len, int, prot,  int, flags,
                 int, fd, unsigned long, lo, unsigned long, hi);

   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6 + ((Off64T)ARG7 << 32) );
   SET_STATUS_from_SysRes(r);
}

PRE(sys_lseek)
{
   PRINT("sys_lseek ( %ld, 0x%lx, 0x%lx, %ld )", ARG1,ARG3,ARG4,ARG5);
   PRE_REG_READ5(long, "lseek",
                 unsigned int, fd, int, pad, unsigned int, offset_low,
                 unsigned int, offset_high, unsigned int, whence);
}

PRE(sys_lseek7)
{
   PRINT("sys_lseek ( %ld, 0x%lx, 0x%lx, %ld )", ARG1,ARG2,ARG3,ARG4);
   PRE_REG_READ4(long, "lseek",
                 unsigned int, fd, unsigned int, offset_low,
                 unsigned int, offset_high, unsigned int, whence);
}

PRE(sys_pread)
{
   *flags |= SfMayBlock;
   PRINT("sys_pread ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG5, ARG6);
   PRE_REG_READ6(ssize_t, "read",
                 unsigned int, fd, char *, buf, vki_size_t, count,
                 int, pad, unsigned int, off_low, unsigned int, off_high);

   if (!ML_(fd_allowed)(ARG1, "read", tid, False))
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_WRITE( "read(buf)", ARG2, ARG3 );
}

POST(sys_pread)
{
   vg_assert(SUCCESS);
   POST_MEM_WRITE( ARG2, RES );
}

PRE(sys_pread7)
{
   *flags |= SfMayBlock;
   PRINT("sys_pread ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(ssize_t, "read",
                 unsigned int, fd, char *, buf, vki_size_t, count,
                 unsigned int, off_low, unsigned int, off_high);

   if (!ML_(fd_allowed)(ARG1, "read", tid, False))
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_WRITE( "read(buf)", ARG2, ARG3 );
}

POST(sys_pread7)
{
   vg_assert(SUCCESS);
   POST_MEM_WRITE( ARG2, RES );
}

PRE(sys_pwrite)
{
   Bool ok;
   *flags |= SfMayBlock;
   PRINT("sys_pwrite ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG5, ARG6);
   PRE_REG_READ6(ssize_t, "write",
                 unsigned int, fd, const char *, buf, vki_size_t, count,
                 int, pad, unsigned int, off_low, unsigned int, off_high);
   /* check to see if it is allowed.  If not, try for an exemption from
      --sim-hints=enable-outer (used for self hosting). */
   ok = ML_(fd_allowed)(ARG1, "write", tid, False);
   if (!ok && ARG1 == 2/*stderr*/
           && VG_(strstr)(VG_(clo_sim_hints),"enable-outer"))
      ok = True;
   if (!ok)
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_READ( "write(buf)", ARG2, ARG3 );
}

PRE(sys_pwrite7)
{
   Bool ok;
   *flags |= SfMayBlock;
   PRINT("sys_pwrite ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(ssize_t, "write",
                 unsigned int, fd, const char *, buf, vki_size_t, count,
                 unsigned int, off_low, unsigned int, off_high);
   /* check to see if it is allowed.  If not, try for an exemption from
      --sim-hints=enable-outer (used for self hosting). */
   ok = ML_(fd_allowed)(ARG1, "write", tid, False);
   if (!ok && ARG1 == 2/*stderr*/
           && VG_(strstr)(VG_(clo_sim_hints),"enable-outer"))
      ok = True;
   if (!ok)
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_READ( "write(buf)", ARG2, ARG3 );
}

PRE(sys_ftruncate)
{
   *flags |= SfMayBlock;
   PRINT("sys_ftruncate ( %ld, %lu, %lu )", ARG1,ARG3,ARG4);
   PRE_REG_READ4(long, "ftruncate", unsigned int, fd, int, pad,
                 unsigned int, length_low, unsigned int, length_high);
}

PRE(sys_ftruncate7)
{
   *flags |= SfMayBlock;
   PRINT("sys_ftruncate ( %ld, %lu, %lu )", ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "ftruncate", unsigned int, fd,
                 unsigned int, length_low, unsigned int, length_high);
}

PRE(sys_truncate)
{
   *flags |= SfMayBlock;
   PRINT("sys_truncate ( %#lx(%s), %lu, %lu )", ARG1,(char *)ARG1,ARG3,ARG4);
   PRE_REG_READ4(long, "truncate",
                 const char *, path, int, pad,
                 unsigned int, length_low, unsigned int, length_high);
   PRE_MEM_RASCIIZ( "truncate(path)", ARG1 );
}

PRE(sys_truncate7)
{
   *flags |= SfMayBlock;
   PRINT("sys_truncate ( %#lx(%s), %lu, %lu )", ARG1,(char *)ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "truncate",
                 const char *, path,
                 unsigned int, length_low, unsigned int, length_high);
   PRE_MEM_RASCIIZ( "truncate(path)", ARG1 );
}

PRE(sys_sysarch)
{
   ThreadState *tst;
   Int idx;
   void **p;

   PRINT("sys_sysarch ( %ld, %#lx )", ARG1, ARG2);
   PRE_REG_READ2(int, "sysarch",
                 int, number, void *, args);
   switch (ARG1) {
   case VKI_I386_SET_GSBASE:
      PRINT("sys_i386_set_gsbase ( %#lx )", ARG2);
      PRE_REG_READ1(int, "i386_set_gsbase", void *, base)

      /* On FreeBSD, the syscall loads the %gs selector for us, so do it now. */
      tst = VG_(get_ThreadState)(tid);
      p = (void**)ARG2;
      tst->arch.vex.guest_GS = (1 << 3) | 3;   /* GSEL(GUGS_SEL, SEL_UPL) */
      /* "do" the syscall ourselves; the kernel never sees it */
      idx = 1;
      SET_STATUS_from_SysRes( sys_set_thread_area( tid, &idx, *p ) );

      break;
   case VKI_I386_GET_GSBASE:
      PRINT("sys_i386_get_gsbase ( %#lx )", ARG2);
      PRE_REG_READ1(int, "i386_get_gsbase", void *, basep)
      PRE_MEM_WRITE( "i386_get_gsbase(basep)", ARG2, sizeof(void *) );

      /* "do" the syscall ourselves; the kernel never sees it */
      SET_STATUS_from_SysRes( sys_get_thread_area( tid, 2, (void **)ARG2 ) );

      if (SUCCESS) {
	 POST_MEM_WRITE( ARG2, sizeof(void *) );
      }
      break;
   case VKI_I386_GET_XFPUSTATE:
      PRINT("sys_i386_get_xfpustate ( %#lx )", ARG2);
      PRE_REG_READ1(int, "i386_get_xfpustate", void *, basep)
      PRE_MEM_WRITE( "i386_get_xfpustate(basep)", ARG2, sizeof(void *) );

      /* "do" the syscall ourselves; the kernel never sees it */
      tst = VG_(get_ThreadState)(tid);
      SET_STATUS_Success2( tst->arch.vex.guest_FPTAG[0], tst->arch.vex.guest_FPTAG[0] );
      POST_MEM_WRITE( ARG2, sizeof(void *) );
      break;
   default:
      VG_(message) (Vg_UserMsg, "unhandled sysarch cmd %ld", ARG1);
      VG_(unimplemented) ("unhandled sysarch cmd");
      break;
   }
}

#undef PRE
#undef POST

#endif /* defined(VGP_x86_freebsd) */


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/
