/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.    syswrap-amd64-freebsd.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2005 Nicholas Nethercote
      njn@valgrind.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#if defined(VGP_amd64_freebsd)

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_libcsetjmp.h"    // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_stacks.h"        // VG_(register_stack)

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"    /* for decls of generic wrappers */
#include "priv_syswrap-freebsd.h"    /* for decls of freebsd-ish wrappers */
#include "priv_syswrap-main.h"

/* ---------------------------------------------------------------------
   clone() handling
   ------------------------------------------------------------------ */

/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack, and use 'retaddr' as f's return-to address.  Also, clear all
   the integer registers before entering f. */
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1) ( Addr stack,
                                  Addr retaddr,
                                  void (*f)(Word),
                                  Word arg1 );
// %rdi == stack
// %rsi == retaddr
// %rdx == f
// %rcx == arg1
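//
// After the three pushes below the new stack holds, from the top down:
// arg1, f, retaddr.  The popq reloads arg1 into %rdi, and the final ret
// pops f and jumps to it, leaving retaddr on the stack as f's return
// address.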
asm(
".text\n"
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movq   %rdi, %rsp\n"   // set stack
"   pushq  %rsi\n"         // retaddr to stack
"   pushq  %rdx\n"         // f to stack
"   pushq  %rcx\n"         // arg1 to stack
"   movq $0, %rax\n"       // zero all GP regs
"   movq $0, %rbx\n"
"   movq $0, %rcx\n"
"   movq $0, %rdx\n"
"   movq $0, %rsi\n"
"   movq $0, %rdi\n"
"   movq $0, %rbp\n"
"   movq $0, %r8\n"
"   movq $0, %r9\n"
"   movq $0, %r10\n"
"   movq $0, %r11\n"
"   movq $0, %r12\n"
"   movq $0, %r13\n"
"   movq $0, %r14\n"
"   movq $0, %r15\n"
"   popq   %rdi\n"         // arg1 to correct arg reg
"   ret\n"                 // jump to f
"   ud2\n"                 // should never get here
".previous\n"
);


/* ---------------------------------------------------------------------
   More thread stuff
   ------------------------------------------------------------------ */

void VG_(cleanup_thread) ( ThreadArchState *arch )
{
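   /* Nothing amd64/FreeBSD-specific to clean up per thread. */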
}

/* ---------------------------------------------------------------------
   PRE/POST wrappers for amd64/FreeBSD-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(freebsd, name)
#define POST(name)      DEFN_POST_TEMPLATE(freebsd, name)

PRE(sys_thr_new)
{
   static const Bool debug = False;

   ThreadId     ctid = VG_(alloc_ThreadState)();
   ThreadState* ptst = VG_(get_ThreadState)(tid);
   ThreadState* ctst = VG_(get_ThreadState)(ctid);
   SysRes       res;
   vki_sigset_t blockall, savedmask;
   struct vki_thr_param tp;
   Addr stk;

   PRINT("thr_new ( %#lx, %ld )",ARG1,ARG2);
   PRE_REG_READ2(int, "thr_new",
                 struct thr_param *, param,
                 int, param_size);

   PRE_MEM_READ( "thr_new(param)", ARG1, offsetof(struct vki_thr_param, spare));
   if (!ML_(safe_to_deref)( (void*)ARG1, offsetof(struct vki_thr_param, spare))) {
      SET_STATUS_Failure( VKI_EFAULT );
      return;
   }
   VG_(memset)(&tp, 0, sizeof(tp));
   VG_(memcpy)(&tp, (void *)ARG1, offsetof(struct vki_thr_param, spare));
   PRE_MEM_WRITE("thr_new(parent_tidptr)", (Addr)tp.parent_tid, sizeof(long));
   PRE_MEM_WRITE("thr_new(child_tidptr)", (Addr)tp.child_tid, sizeof(long));

   VG_(sigfillset)(&blockall);

   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_valid_tid)(ctid));

   /* Copy register state

      On Linux, both parent and child return to the same place, and the
      code following the clone syscall works out which is which, so we
      don't need to worry about it.  On FreeBSD, thr_new arranges a
      direct call to the start function, so we don't actually need any
      of that gunk.

      The parent gets the child's new tid returned from clone, but the
      child gets 0.

      If the clone call specifies a NULL rsp for the new thread, then
      it actually gets a copy of the parent's rsp.
   */
   /* We inherit our parent's guest state. */
   ctst->arch.vex = ptst->arch.vex;
   ctst->arch.vex_shadow1 = ptst->arch.vex_shadow1;
   ctst->arch.vex_shadow2 = ptst->arch.vex_shadow2;

   /* Make thr_new appear to have returned Success(0) in the
      child. */
   ctst->arch.vex.guest_RAX = 0;
   ctst->arch.vex.guest_RDX = 0;
   LibVEX_GuestAMD64_put_rflag_c(0, &ctst->arch.vex);

   ctst->os_state.parent = tid;

   /* inherit signal mask */
   ctst->sig_mask = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   /* Linux has to guess, we don't */
   ctst->client_stack_highest_byte = (Addr)tp.stack_base + tp.stack_size;
   ctst->client_stack_szB = tp.stack_size;
   VG_(register_stack)((Addr)tp.stack_base, (Addr)tp.stack_base + tp.stack_size);

   /* Assume the thr_new will succeed, and tell any tool that wants to
      know that this thread has come into existence.  If the thr_new
      fails, we'll send out a ll_exit notification for it at the fail:
      label below, to clean up. */
   VG_TRACK ( pre_thread_ll_create, tid, ctid );

   if (debug)
      VG_(printf)("clone child has SETTLS: tls at %#lx\n", (Addr)tp.tls_base);
   ctst->arch.vex.guest_FS_CONST = (UWord)tp.tls_base;
   tp.tls_base = 0;   /* Don't have the kernel do it too */

   /* start the thread with everything blocked */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);

   /* Set the client state for scheduler to run libthr's trampoline */
   ctst->arch.vex.guest_RDI = (Addr)tp.arg;
   /* XXX: align on 16-byte boundary? */
   ctst->arch.vex.guest_RSP = (Addr)tp.stack_base + tp.stack_size - 8;
   ctst->arch.vex.guest_RIP = (Addr)tp.start_func;

   /* But this is for thr_new() to run valgrind's trampoline */
   tp.start_func = (void *)ML_(start_thread_NORETURN);
   tp.arg = &VG_(threads)[ctid];
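
   /* So there are now two views of the new thread: the guest state set up
      above says it begins in the client's start_func(arg) on the client's
      stack, while the thr_param actually handed to the kernel starts it in
      ML_(start_thread_NORETURN) on a Valgrind-allocated stack (set up just
      below), which eventually runs the client code from the guest state. */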

   /* And valgrind's trampoline on its own stack */
   stk = ML_(allocstack)(ctid);
   if (stk == (Addr)NULL) {
      res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
      goto fail;
   }
   tp.stack_base = (void *)ctst->os_state.valgrind_stack_base;
   tp.stack_size = (Addr)stk - (Addr)tp.stack_base;

   /* Create the new thread */
   res = VG_(do_syscall2)(__NR_thr_new, (UWord)&tp, sizeof(tp));

   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

fail:
   if (sr_isError(res)) {
      /* thr_new failed */
      VG_(cleanup_thread)(&ctst->arch);
      ctst->status = VgTs_Empty;
      /* oops.  Better tell the tool the thread exited in a hurry :-) */
      VG_TRACK( pre_thread_ll_exit, ctid );
   } else {

      POST_MEM_WRITE((Addr)tp.parent_tid, sizeof(long));
      POST_MEM_WRITE((Addr)tp.child_tid, sizeof(long));

      /* Thread creation was successful; let the child have the chance
         to run */
      *flags |= SfYieldAfter;
   }

   /* "Complete" the syscall so that the wrapper doesn't call the kernel again. */
   SET_STATUS_from_SysRes(res);
}

PRE(sys_rfork)
{
   PRINT("sys_rfork ( %#lx )", ARG1 );
   PRE_REG_READ1(long, "rfork", int, flags);

   VG_(message)(Vg_UserMsg, "rfork() not implemented");
   VG_(unimplemented)("Valgrind does not support rfork().");

   SET_STATUS_Failure(VKI_ENOSYS);
}

PRE(sys_sigreturn)
{
   PRINT("sys_sigreturn ( %#lx )", ARG1);
   PRE_REG_READ1(long, "sigreturn",
                 struct vki_ucontext *, ucp);

   PRE_MEM_READ( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
}

PRE(sys_fake_sigreturn)
{
   ThreadState* tst;
   struct vki_ucontext *uc;
   int rflags;

   PRINT("sys_sigreturn ( %#lx )", ARG1);
   PRE_REG_READ1(long, "sigreturn",
                 struct vki_ucontext *, ucp);

   PRE_MEM_READ( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust rsp to point to start of frame; skip back up over handler
      ret addr */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_RSP -= sizeof(Addr);

   uc = (struct vki_ucontext *)ARG1;
   if (uc == NULL || uc->uc_mcontext.len != sizeof(uc->uc_mcontext)) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }

   /* This is only so that the RIP is (might be) useful to report if
      something goes wrong in the sigreturn */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   VG_(sigframe_destroy)(tid);

   /* For unclear reasons, it appears we need the syscall to return
      without changing %RAX.  Since %RAX is the return value, and can
      denote either success or failure, we must set up so that the
      driver logic copies it back unchanged.  Also, note that %RAX is
      one of the guest registers written by VG_(sigframe_destroy). */
   rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
   SET_STATUS_from_SysRes( VG_(mk_SysRes_amd64_freebsd)( tst->arch.vex.guest_RAX,
       tst->arch.vex.guest_RDX, (rflags & 1) != 0 ? True : False) );

   /*
    * The signal handler might have changed the signal mask.  Respect that.
    */
   tst->sig_mask = uc->uc_sigmask;
   tst->tmp_sig_mask = uc->uc_sigmask;

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

static void restore_mcontext(ThreadState *tst, struct vki_mcontext *sc)
{
   tst->arch.vex.guest_RAX     = sc->rax;
   tst->arch.vex.guest_RCX     = sc->rcx;
   tst->arch.vex.guest_RDX     = sc->rdx;
   tst->arch.vex.guest_RBX     = sc->rbx;
   tst->arch.vex.guest_RBP     = sc->rbp;
   tst->arch.vex.guest_RSP     = sc->rsp;
   tst->arch.vex.guest_RSI     = sc->rsi;
   tst->arch.vex.guest_RDI     = sc->rdi;
   tst->arch.vex.guest_R8      = sc->r8;
   tst->arch.vex.guest_R9      = sc->r9;
   tst->arch.vex.guest_R10     = sc->r10;
   tst->arch.vex.guest_R11     = sc->r11;
   tst->arch.vex.guest_R12     = sc->r12;
   tst->arch.vex.guest_R13     = sc->r13;
   tst->arch.vex.guest_R14     = sc->r14;
   tst->arch.vex.guest_R15     = sc->r15;
   tst->arch.vex.guest_RIP     = sc->rip;
   /*
    * XXX: missing support for other flags.
    */
   if (sc->rflags & 0x0001)
      LibVEX_GuestAMD64_put_rflag_c(1, &tst->arch.vex);
   else
      LibVEX_GuestAMD64_put_rflag_c(0, &tst->arch.vex);
}

static void fill_mcontext(ThreadState *tst, struct vki_mcontext *sc)
{
   sc->rax = tst->arch.vex.guest_RAX;
   sc->rcx = tst->arch.vex.guest_RCX;
   sc->rdx = tst->arch.vex.guest_RDX;
   sc->rbx = tst->arch.vex.guest_RBX;
   sc->rbp = tst->arch.vex.guest_RBP;
   sc->rsp = tst->arch.vex.guest_RSP;
   sc->rsi = tst->arch.vex.guest_RSI;
   sc->rdi = tst->arch.vex.guest_RDI;
   sc->r8 = tst->arch.vex.guest_R8;
   sc->r9 = tst->arch.vex.guest_R9;
   sc->r10 = tst->arch.vex.guest_R10;
   sc->r11 = tst->arch.vex.guest_R11;
   sc->r12 = tst->arch.vex.guest_R12;
   sc->r13 = tst->arch.vex.guest_R13;
   sc->r14 = tst->arch.vex.guest_R14;
   sc->r15 = tst->arch.vex.guest_R15;
   sc->rip = tst->arch.vex.guest_RIP;
/*
   Not supported by VEX.
   sc->cs = tst->arch.vex.guest_CS;
   sc->ss = tst->arch.vex.guest_SS;
   sc->ds = tst->arch.vex.guest_DS;
   sc->es = tst->arch.vex.guest_ES;
   sc->fs = tst->arch.vex.guest_FS;
   sc->gs = tst->arch.vex.guest_GS;
*/
   sc->rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
/*
   not yet.
   VG_(memcpy)(&sc->fpstate, fpstate, sizeof(*fpstate));
*/
   sc->fpformat = VKI_FPFMT_NODEV;
   sc->ownedfp = VKI_FPOWNED_NONE;
   sc->len = sizeof(*sc);
   VG_(memset)(sc->spare2, 0, sizeof(sc->spare2));
}
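
/* Note: fill_mcontext() only captures the general-purpose registers and
   rflags; segment registers and the FPU/SSE state are left out (the context
   is marked VKI_FPFMT_NODEV / VKI_FPOWNED_NONE) because the VEX guest state
   does not expose them here. */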

PRE(sys_getcontext)
{
   ThreadState* tst;
   struct vki_ucontext *uc;

   PRINT("sys_getcontext ( %#lx )", ARG1);
   PRE_REG_READ1(long, "getcontext",
                 struct vki_ucontext *, ucp);
   PRE_MEM_WRITE( "getcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );
   uc = (struct vki_ucontext *)ARG1;
   if (uc == NULL) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }
   tst = VG_(get_ThreadState)(tid);
   fill_mcontext(tst, &uc->uc_mcontext);
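   /* Doctor the saved context so that, if it is later resumed via
      setcontext/swapcontext, it looks as though getcontext() itself
      returned 0 with the carry (error) flag clear. */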
   uc->uc_mcontext.rax = 0;
   uc->uc_mcontext.rdx = 0;
   uc->uc_mcontext.rflags &= ~0x0001;   /* PSL_C */
   uc->uc_sigmask = tst->sig_mask;
   VG_(memset)(uc->__spare__, 0, sizeof(uc->__spare__));
   SET_STATUS_Success(0);
}

PRE(sys_setcontext)
{
   ThreadState* tst;
   struct vki_ucontext *uc;

   PRINT("sys_setcontext ( %#lx )", ARG1);
   PRE_REG_READ1(long, "setcontext",
                 struct vki_ucontext *, ucp);

   PRE_MEM_READ( "setcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "setcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   tst = VG_(get_ThreadState)(tid);
   uc = (struct vki_ucontext *)ARG1;
   if (uc == NULL || uc->uc_mcontext.len != sizeof(uc->uc_mcontext)) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }

   restore_mcontext(tst, &uc->uc_mcontext);
   tst->sig_mask = uc->uc_sigmask;

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

PRE(sys_swapcontext)
{
   struct vki_ucontext *ucp, *oucp;
   ThreadState* tst;

   PRINT("sys_swapcontext ( %#lx, %#lx )", ARG1, ARG2);
   PRE_REG_READ2(long, "swapcontext",
                 struct vki_ucontext *, oucp, struct vki_ucontext *, ucp);

   PRE_MEM_READ( "swapcontext(ucp)", ARG2, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "swapcontext(oucp)", ARG1, sizeof(struct vki_ucontext) );

   oucp = (struct vki_ucontext *)ARG1;
   ucp = (struct vki_ucontext *)ARG2;
   if (oucp == NULL || ucp == NULL || ucp->uc_mcontext.len != sizeof(ucp->uc_mcontext)) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }
   tst = VG_(get_ThreadState)(tid);

   /*
    * Save the current context.
    */
   fill_mcontext(tst, &oucp->uc_mcontext);
   oucp->uc_mcontext.rax = 0;
   oucp->uc_mcontext.rdx = 0;
   oucp->uc_mcontext.rflags &= ~0x0001;   /* PSL_C */
   oucp->uc_sigmask = tst->sig_mask;
   VG_(memset)(oucp->__spare__, 0, sizeof(oucp->__spare__));

   /*
    * Switch to the new one.
    */
   restore_mcontext(tst, &ucp->uc_mcontext);
   tst->sig_mask = ucp->uc_sigmask;

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}


/* This is here because on x86 the off_t is passed in 2 registers.
   Don't ask about the pad argument. */

/* caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); */
/*              ARG1           ARG2       ARG3      ARG4       ARG5    ARG6     ARG7 */
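/* On amd64 the off_t fits in a single register, so the pad argument (ARG6)
   is simply ignored here and the file offset is taken from ARG7. */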

PRE(sys_mmap)
{
   SysRes r;

   PRINT("sys_mmap ( %#lx, %lu, %ld, %ld, %ld, pad%ld, 0x%lx)",
         ARG1, (UWord)ARG2, ARG3, ARG4, ARG5, ARG6, ARG7 );
   PRE_REG_READ7(long, "mmap",
                 char *, addr, unsigned long, len, int, prot,  int, flags,
                 int, fd,  int, pad, unsigned long, pos);

   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG7 );
   SET_STATUS_from_SysRes(r);
}

/* FreeBSD-7 introduces a "regular" version of mmap etc. */
PRE(sys_mmap7)
{
   SysRes r;

   PRINT("sys_mmap ( %#lx, %lu, %ld, %ld, %ld, 0x%lx)",
         ARG1, (UWord)ARG2, ARG3, ARG4, ARG5, ARG6 );
   PRE_REG_READ6(long, "mmap",
                 char *, addr, unsigned long, len, int, prot,  int, flags,
                 int, fd,  unsigned long, pos);

   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6 );
   SET_STATUS_from_SysRes(r);
}

PRE(sys_lseek)
{
   PRINT("sys_lseek ( %ld, 0x%lx, %#lx, %ld )", ARG1,ARG2,ARG3,ARG4);
   PRE_REG_READ4(long, "lseek",
                 unsigned int, fd, int, pad, unsigned long, offset,
                 unsigned int, whence);
}

PRE(sys_lseek7)
{
   PRINT("sys_lseek ( %ld, 0x%lx, %ld )", ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "lseek",
                 unsigned int, fd, unsigned long, offset,
                 unsigned int, whence);
}

PRE(sys_pread)
{
   *flags |= SfMayBlock;
   PRINT("sys_pread ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(ssize_t, "read",
                 unsigned int, fd, char *, buf, vki_size_t, count,
                 int, pad, unsigned long, off);

   if (!ML_(fd_allowed)(ARG1, "read", tid, False))
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_WRITE( "read(buf)", ARG2, ARG3 );
}

POST(sys_pread)
{
   vg_assert(SUCCESS);
   POST_MEM_WRITE( ARG2, RES );
}

PRE(sys_pread7)
{
   *flags |= SfMayBlock;
   PRINT("sys_pread ( %ld, %#lx, %lu, %lu )", ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(ssize_t, "read",
                 unsigned int, fd, char *, buf, vki_size_t, count,
                 unsigned long, off);

   if (!ML_(fd_allowed)(ARG1, "read", tid, False))
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_WRITE( "read(buf)", ARG2, ARG3 );
}

POST(sys_pread7)
{
   vg_assert(SUCCESS);
   POST_MEM_WRITE( ARG2, RES );
}

PRE(sys_pwrite)
{
   Bool ok;
   *flags |= SfMayBlock;
   PRINT("sys_pwrite ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(ssize_t, "write",
                 unsigned int, fd, const char *, buf, vki_size_t, count,
                 int, pad, unsigned long, off);
   /* check to see if it is allowed.  If not, try for an exemption from
      --sim-hints=enable-outer (used for self hosting). */
   ok = ML_(fd_allowed)(ARG1, "write", tid, False);
   if (!ok && ARG1 == 2/*stderr*/
           && SimHintiS(SimHint_enable_outer, VG_(clo_sim_hints)))
      ok = True;
   if (!ok)
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_READ( "write(buf)", ARG2, ARG3 );
}

PRE(sys_pwrite7)
{
   Bool ok;
   *flags |= SfMayBlock;
   PRINT("sys_pwrite ( %ld, %#lx, %lu, %lu )", ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(ssize_t, "write",
                 unsigned int, fd, const char *, buf, vki_size_t, count,
                 unsigned long, off);
   /* check to see if it is allowed.  If not, try for an exemption from
      --sim-hints=enable-outer (used for self hosting). */
   ok = ML_(fd_allowed)(ARG1, "write", tid, False);
   if (!ok && ARG1 == 2/*stderr*/
           && SimHintiS(SimHint_enable_outer, VG_(clo_sim_hints)))
      ok = True;
   if (!ok)
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_READ( "write(buf)", ARG2, ARG3 );
}

PRE(sys_ftruncate)
{
   *flags |= SfMayBlock;
   PRINT("sys_ftruncate ( %ld, %lu )", ARG1,ARG3);
   PRE_REG_READ3(long, "ftruncate", unsigned int, fd, int, pad,
                 unsigned int, length);
}

PRE(sys_ftruncate7)
{
   *flags |= SfMayBlock;
   PRINT("sys_ftruncate ( %ld, %lu )", ARG1,ARG2);
   PRE_REG_READ2(long, "ftruncate", unsigned int, fd,
                 unsigned long, length);
}

PRE(sys_truncate)
{
   *flags |= SfMayBlock;
   PRINT("sys_truncate ( %#lx(%s), %lu )", ARG1,(char *)ARG1,ARG3);
   PRE_REG_READ3(long, "truncate",
                 const char *, path, int, pad, unsigned int, length);
   PRE_MEM_RASCIIZ( "truncate(path)", ARG1 );
}

PRE(sys_truncate7)
{
   *flags |= SfMayBlock;
   PRINT("sys_truncate ( %#lx(%s), %lu )", ARG1,(char *)ARG1,ARG2);
   PRE_REG_READ2(long, "truncate",
                 const char *, path, unsigned long, length);
   PRE_MEM_RASCIIZ( "truncate(path)", ARG1 );
}

PRE(sys_sysarch)
{
   ThreadState *tst;
   void **p;

   PRINT("sys_sysarch ( %ld, %#lx )", ARG1, ARG2);
   PRE_REG_READ2(int, "sysarch",
                 int, number, void *, args);
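
   /* The FSBASE sub-commands are emulated entirely here: the guest's %fs
      base lives in guest_FS_CONST, so we read or write that directly and
      never let the kernel see the call (see the "do the syscall ourselves"
      cases below). */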
   switch (ARG1) {
   case VKI_AMD64_SET_FSBASE:
      PRINT("sys_amd64_set_fsbase ( %#lx )", ARG2);
      PRE_REG_READ1(long, "amd64_set_fsbase", void *, base)

      /* On FreeBSD, the real syscall would set the %fs base for us, so do
         it now ourselves. */
      tst = VG_(get_ThreadState)(tid);
      p = (void**)ARG2;
      tst->arch.vex.guest_FS_CONST = (UWord)*p;
      /* "do" the syscall ourselves; the kernel never sees it */
      SET_STATUS_Success2((ULong)*p, tst->arch.vex.guest_RDX );

      break;
   case VKI_AMD64_GET_FSBASE:
      PRINT("sys_amd64_get_fsbase ( %#lx )", ARG2);
      PRE_REG_READ1(int, "amd64_get_fsbase", void *, basep)
      PRE_MEM_WRITE( "amd64_get_fsbase(basep)", ARG2, sizeof(void *) );

      /* "do" the syscall ourselves; the kernel never sees it */
      tst = VG_(get_ThreadState)(tid);
      SET_STATUS_Success2( tst->arch.vex.guest_FS_CONST, tst->arch.vex.guest_RDX );
      POST_MEM_WRITE( ARG2, sizeof(void *) );
      break;
   case VKI_AMD64_GET_XFPUSTATE:
      PRINT("sys_amd64_get_xfpustate ( %#lx )", ARG2);
      PRE_REG_READ1(int, "amd64_get_xfpustate", void *, basep)
      PRE_MEM_WRITE( "amd64_get_xfpustate(basep)", ARG2, sizeof(void *) );

      /* "do" the syscall ourselves; the kernel never sees it */
      tst = VG_(get_ThreadState)(tid);
      SET_STATUS_Success2( tst->arch.vex.guest_FPTAG[0], tst->arch.vex.guest_FPTAG[0] );
      POST_MEM_WRITE( ARG2, sizeof(void *) );
      break;
   default:
      VG_(message) (Vg_UserMsg, "unhandled sysarch cmd %ld", ARG1);
      VG_(unimplemented) ("unhandled sysarch cmd");
      break;
   }
}

#undef PRE
#undef POST

#endif /* defined(VGP_amd64_freebsd) */

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/