
/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.    syswrap-amd64-dragonfly.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2005 Nicholas Nethercote
      njn@valgrind.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_amd64_dragonfly)

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_libcsetjmp.h"    // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_stacks.h"        // VG_(register_stack)

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"    /* for decls of generic wrappers */
#include "priv_syswrap-dragonfly.h"    /* for decls of dragonfly-ish wrappers */
#include "priv_syswrap-main.h"
/* ---------------------------------------------------------------------
   clone() handling
   ------------------------------------------------------------------ */

/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack, and use 'retaddr' as f's return-to address.  Also, clear all
   the integer registers before entering f. */
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1) ( Addr stack,
                                  Addr retaddr,
                                  void (*f)(Word),
                                  Word arg1 );
// %rdi == stack
// %rsi == retaddr
// %rdx == f
// %rcx == arg1
asm(
".text\n"
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movq   %rdi, %rsp\n"   // set stack
"   pushq  %rsi\n"         // retaddr to stack
"   pushq  %rdx\n"         // f to stack
"   pushq  %rcx\n"         // arg1 to stack
"   movq $0, %rax\n"       // zero all GP regs
"   movq $0, %rbx\n"
"   movq $0, %rcx\n"
"   movq $0, %rdx\n"
"   movq $0, %rsi\n"
"   movq $0, %rdi\n"
"   movq $0, %rbp\n"
"   movq $0, %r8\n"
"   movq $0, %r9\n"
"   movq $0, %r10\n"
"   movq $0, %r11\n"
"   movq $0, %r12\n"
"   movq $0, %r13\n"
"   movq $0, %r14\n"
"   movq $0, %r15\n"
"   popq   %rdi\n"         // arg1 to correct arg reg
"   ret\n"                 // jump to f
"   ud2\n"                 // should never get here
".previous\n"
);


/* ---------------------------------------------------------------------
   More thread stuff
   ------------------------------------------------------------------ */

void VG_(cleanup_thread) ( ThreadArchState *arch )
{
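   /* Nothing to do here: this port keeps no extra per-thread
      architecture-specific state that needs tearing down. */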
}

/* ---------------------------------------------------------------------
   PRE/POST wrappers for amd64/Dragonfly-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(dragonfly, name)
#define POST(name)      DEFN_POST_TEMPLATE(dragonfly, name)

#if 0

PRE(sys_thr_new)
{
   static const Bool debug = False;

   ThreadId     ctid = VG_(alloc_ThreadState)();
   ThreadState* ptst = VG_(get_ThreadState)(tid);
   ThreadState* ctst = VG_(get_ThreadState)(ctid);
   SysRes       res;
   vki_sigset_t blockall, savedmask;
   struct vki_thr_param tp;
   Addr stk;

   PRINT("thr_new ( %#lx, %ld )",ARG1,ARG2);
   PRE_REG_READ2(int, "thr_new",
                 struct thr_param *, param,
                 int, param_size);

   PRE_MEM_READ( "thr_new(param)", ARG1, offsetof(struct vki_thr_param, spare));
   if (!ML_(safe_to_deref)( (void*)ARG1, offsetof(struct vki_thr_param, spare))) {
      SET_STATUS_Failure( VKI_EFAULT );
      return;
   }
   VG_(memset)(&tp, 0, sizeof(tp));
   VG_(memcpy)(&tp, (void *)ARG1, offsetof(struct vki_thr_param, spare));
   PRE_MEM_WRITE("thr_new(parent_tidptr)", (Addr)tp.parent_tid, sizeof(long));
   PRE_MEM_WRITE("thr_new(child_tidptr)", (Addr)tp.child_tid, sizeof(long));

   VG_(sigfillset)(&blockall);

   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_valid_tid)(ctid));

   /* Copy register state

      On Linux, both parent and child return to the same place, and
      the code following the clone syscall works out which is which,
      so we don't need to worry about it.
      On Dragonfly, thr_new arranges a direct call.  We don't actually
      need any of this gunk.

      The parent gets the child's new tid returned from clone, but the
      child gets 0.

      If the clone call specifies a NULL rsp for the new thread, then
      it actually gets a copy of the parent's rsp.
   */
   /* We inherit our parent's guest state. */
   ctst->arch.vex = ptst->arch.vex;
   ctst->arch.vex_shadow1 = ptst->arch.vex_shadow1;
   ctst->arch.vex_shadow2 = ptst->arch.vex_shadow2;

   /* Make thr_new appear to have returned Success(0) in the
      child. */
   ctst->arch.vex.guest_RAX = 0;
   ctst->arch.vex.guest_RDX = 0;
   LibVEX_GuestAMD64_put_rflag_c(0, &ctst->arch.vex);

   ctst->os_state.parent = tid;

   /* inherit signal mask */
   ctst->sig_mask = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   /* Linux has to guess, we don't */
   ctst->client_stack_highest_byte = (Addr)tp.stack_base + tp.stack_size;
   ctst->client_stack_szB = tp.stack_size;
   VG_(register_stack)((Addr)tp.stack_base, (Addr)tp.stack_base + tp.stack_size);

   /* Assume the thr_new will succeed, and tell any tool that wants to
      know that this thread has come into existence.  If the thr_new
      fails, we'll send out an ll_exit notification for it at the
      fail: label below, to clean up. */
   VG_TRACK ( pre_thread_ll_create, tid, ctid );

   if (debug)
      VG_(printf)("clone child has SETTLS: tls at %#lx\n", (Addr)tp.tls_base);
   ctst->arch.vex.guest_FS_CONST = (UWord)tp.tls_base;
   tp.tls_base = 0;   /* Don't have the kernel do it too */

   /* start the thread with everything blocked */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);

   /* Set the client state for the scheduler to run libthr's trampoline */
   ctst->arch.vex.guest_RDI = (Addr)tp.arg;
   /* XXX: align on 16-byte boundary? */
   ctst->arch.vex.guest_RSP = (Addr)tp.stack_base + tp.stack_size - 8;
   ctst->arch.vex.guest_RIP = (Addr)tp.start_func;

   /* But this is for thr_new() to run valgrind's trampoline */
   tp.start_func = (void *)ML_(start_thread_NORETURN);
   tp.arg = &VG_(threads)[ctid];

   /* And valgrind's trampoline on its own stack */
   stk = ML_(allocstack)(ctid);
   if (stk == (Addr)NULL) {
      res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
      goto fail;
   }
   tp.stack_base = (void *)ctst->os_state.valgrind_stack_base;
   tp.stack_size = (Addr)stk - (Addr)tp.stack_base;

   /* Create the new thread */
   res = VG_(do_syscall2)(__NR_thr_new, (UWord)&tp, sizeof(tp));

fail:
   /* Restore the signal mask on both the success and the failure
      paths; the goto above would otherwise leave everything blocked. */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

   if (sr_isError(res)) {
      /* thr_new failed */
      VG_(cleanup_thread)(&ctst->arch);
      ctst->status = VgTs_Empty;
      /* oops.  Better tell the tool the thread exited in a hurry :-) */
      VG_TRACK( pre_thread_ll_exit, ctid );
   } else {

      POST_MEM_WRITE((Addr)tp.parent_tid, sizeof(long));
      POST_MEM_WRITE((Addr)tp.child_tid, sizeof(long));

      /* Thread creation was successful; let the child have the chance
         to run */
      *flags |= SfYieldAfter;
   }

   /* "Complete" the syscall so that the wrapper doesn't call the kernel again. */
   SET_STATUS_from_SysRes(res);
}

#endif

PRE(sys_rfork)
{
   PRINT("sys_rfork ( %#lx )", ARG1 );
   PRE_REG_READ1(long, "rfork", int, flags);

   VG_(message)(Vg_UserMsg, "rfork() not implemented");
   VG_(unimplemented)("Valgrind does not support rfork().");

   SET_STATUS_Failure(VKI_ENOSYS);
}

PRE(sys_sigreturn)
{
   PRINT("sys_sigreturn ( %#lx )", ARG1);
   PRE_REG_READ1(long, "sigreturn",
                 struct vki_ucontext *, ucp);

   PRE_MEM_READ( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
}

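/* Handle a sigreturn on a signal frame that Valgrind itself built
   (via VG_(sigframe_create)), as opposed to a kernel-built frame. */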
PRE(sys_fake_sigreturn)
{
   ThreadState* tst;
   struct vki_ucontext *uc;
   int rflags;

   PRINT("sys_sigreturn ( %#lx )", ARG1);
   PRE_REG_READ1(long, "sigreturn",
                 struct vki_ucontext *, ucp);

   PRE_MEM_READ( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust rsp to point to the start of the frame; skip back up over
      the handler's return address. */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_RSP -= sizeof(Addr);

   uc = (struct vki_ucontext *)ARG1;
   if (uc == NULL || uc->uc_mcontext.len != sizeof(uc->uc_mcontext)) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }

   /* This is only so that the RIP is (might be) useful to report if
      something goes wrong in the sigreturn. */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   VG_(sigframe_destroy)(tid);

   /* For unclear reasons, it appears we need the syscall to return
      without changing %RAX.  Since %RAX is the return value, and can
      denote either success or failure, we must set up so that the
      driver logic copies it back unchanged.  Also, note %RAX is one
      of the guest registers written by VG_(sigframe_destroy). */
   rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
   SET_STATUS_from_SysRes( VG_(mk_SysRes_amd64_dragonfly)( tst->arch.vex.guest_RAX,
       tst->arch.vex.guest_RDX, (rflags & 1) != 0 ? True : False) );

   /*
    * The signal handler might have changed the signal mask.  Respect that.
    */
   tst->sig_mask = uc->uc_sigmask;
   tst->tmp_sig_mask = uc->uc_sigmask;

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

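/* Copy register state from a user-supplied mcontext back into the
   guest state.  Note that only the carry bit of rflags is restored;
   VEX does not track the remaining flags individually here. */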
static void restore_mcontext(ThreadState *tst, struct vki_mcontext *sc)
{
   tst->arch.vex.guest_RAX     = sc->rax;
   tst->arch.vex.guest_RCX     = sc->rcx;
   tst->arch.vex.guest_RDX     = sc->rdx;
   tst->arch.vex.guest_RBX     = sc->rbx;
   tst->arch.vex.guest_RBP     = sc->rbp;
   tst->arch.vex.guest_RSP     = sc->rsp;
   tst->arch.vex.guest_RSI     = sc->rsi;
   tst->arch.vex.guest_RDI     = sc->rdi;
   tst->arch.vex.guest_R8      = sc->r8;
   tst->arch.vex.guest_R9      = sc->r9;
   tst->arch.vex.guest_R10     = sc->r10;
   tst->arch.vex.guest_R11     = sc->r11;
   tst->arch.vex.guest_R12     = sc->r12;
   tst->arch.vex.guest_R13     = sc->r13;
   tst->arch.vex.guest_R14     = sc->r14;
   tst->arch.vex.guest_R15     = sc->r15;
   tst->arch.vex.guest_RIP     = sc->rip;
   /*
    * XXX: missing support for other flags.
    */
   if (sc->rflags & 0x0001)
      LibVEX_GuestAMD64_put_rflag_c(1, &tst->arch.vex);
   else
      LibVEX_GuestAMD64_put_rflag_c(0, &tst->arch.vex);
}

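/* Capture the current guest state into a user-visible mcontext.
   Segment registers and the FPU area are left unfilled; VEX does not
   expose them at this point (see the commented-out blocks below). */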
static void fill_mcontext(ThreadState *tst, struct vki_mcontext *sc)
{
   sc->rax = tst->arch.vex.guest_RAX;
   sc->rcx = tst->arch.vex.guest_RCX;
   sc->rdx = tst->arch.vex.guest_RDX;
   sc->rbx = tst->arch.vex.guest_RBX;
   sc->rbp = tst->arch.vex.guest_RBP;
   sc->rsp = tst->arch.vex.guest_RSP;
   sc->rsi = tst->arch.vex.guest_RSI;
   sc->rdi = tst->arch.vex.guest_RDI;
   sc->r8 = tst->arch.vex.guest_R8;
   sc->r9 = tst->arch.vex.guest_R9;
   sc->r10 = tst->arch.vex.guest_R10;
   sc->r11 = tst->arch.vex.guest_R11;
   sc->r12 = tst->arch.vex.guest_R12;
   sc->r13 = tst->arch.vex.guest_R13;
   sc->r14 = tst->arch.vex.guest_R14;
   sc->r15 = tst->arch.vex.guest_R15;
   sc->rip = tst->arch.vex.guest_RIP;
/*
   Not supported by VEX.
   sc->cs = tst->arch.vex.guest_CS;
   sc->ss = tst->arch.vex.guest_SS;
   sc->ds = tst->arch.vex.guest_DS;
   sc->es = tst->arch.vex.guest_ES;
   sc->fs = tst->arch.vex.guest_FS;
   sc->gs = tst->arch.vex.guest_GS;
*/
   sc->rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
/*
   not yet.
   VG_(memcpy)(&sc->fpstate, fpstate, sizeof(*fpstate));
*/
   sc->fpformat = VKI_FPFMT_NODEV;
   sc->ownedfp = VKI_FPOWNED_NONE;
   sc->len = sizeof(*sc);
   VG_(memset)(sc->spare2, 0, sizeof(sc->spare2));
}

PRE(sys_getcontext)
{
   ThreadState* tst;
   struct vki_ucontext *uc;

   PRINT("sys_getcontext ( %#lx )", ARG1);
   PRE_REG_READ1(long, "getcontext",
                 struct vki_ucontext *, ucp);
   PRE_MEM_WRITE( "getcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );
   uc = (struct vki_ucontext *)ARG1;
   if (uc == NULL) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }
   tst = VG_(get_ThreadState)(tid);
   fill_mcontext(tst, &uc->uc_mcontext);
   uc->uc_mcontext.rax = 0;
   uc->uc_mcontext.rdx = 0;
   uc->uc_mcontext.rflags &= ~0x0001;   /* PSL_C */
   uc->uc_sigmask = tst->sig_mask;
   VG_(memset)(uc->__spare__, 0, sizeof(uc->__spare__));
   SET_STATUS_Success(0);
}

PRE(sys_setcontext)
{
   ThreadState* tst;
   struct vki_ucontext *uc;

   PRINT("sys_setcontext ( %#lx )", ARG1);
   PRE_REG_READ1(long, "setcontext",
                 struct vki_ucontext *, ucp);

   PRE_MEM_READ( "setcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "setcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   tst = VG_(get_ThreadState)(tid);
   uc = (struct vki_ucontext *)ARG1;
   if (uc == NULL || uc->uc_mcontext.len != sizeof(uc->uc_mcontext)) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }

   restore_mcontext(tst, &uc->uc_mcontext);
   tst->sig_mask = uc->uc_sigmask;

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

PRE(sys_swapcontext)
{
   struct vki_ucontext *ucp, *oucp;
   ThreadState* tst;

   PRINT("sys_swapcontext ( %#lx, %#lx )", ARG1, ARG2);
   PRE_REG_READ2(long, "swapcontext",
                 struct vki_ucontext *, oucp, struct vki_ucontext *, ucp);

   PRE_MEM_READ( "swapcontext(ucp)", ARG2, sizeof(struct vki_ucontext) );
   PRE_MEM_WRITE( "swapcontext(oucp)", ARG1, sizeof(struct vki_ucontext) );

   oucp = (struct vki_ucontext *)ARG1;
   ucp = (struct vki_ucontext *)ARG2;
   if (oucp == NULL || ucp == NULL || ucp->uc_mcontext.len != sizeof(ucp->uc_mcontext)) {
      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }
   tst = VG_(get_ThreadState)(tid);

   /*
    * Save the current context.
    */
   fill_mcontext(tst, &oucp->uc_mcontext);
   oucp->uc_mcontext.rax = 0;
   oucp->uc_mcontext.rdx = 0;
   oucp->uc_mcontext.rflags &= ~0x0001;   /* PSL_C */
   oucp->uc_sigmask = tst->sig_mask;
   VG_(memset)(oucp->__spare__, 0, sizeof(oucp->__spare__));

   /*
    * Switch to the new one.
    */
   restore_mcontext(tst, &ucp->uc_mcontext);
   tst->sig_mask = ucp->uc_sigmask;

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}


/* This is here because on x86 the 64-bit off_t is passed in two
   registers, hence the extra pad argument. */

/* caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); */
/*              ARG1           ARG2       ARG3      ARG4       ARG5    ARG6     ARG7 */

PRE(sys_mmap)
{
   SysRes r;

   PRINT("sys_mmap ( %#lx, %lu, %ld, %ld, %ld, pad%ld, 0x%lx)",
         ARG1, (UWord)ARG2, ARG3, ARG4, ARG5, ARG6, ARG7 );
   PRE_REG_READ7(long, "mmap",
                 char *, addr, unsigned long, len, int, prot,  int, flags,
                 int, fd,  int, pad, unsigned long, pos);

   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG7 );
   SET_STATUS_from_SysRes(r);
}

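/* The *7 wrappers below appear to cover the newer syscall forms that
   drop the pad argument before the 64-bit offset (compare sys_mmap
   with sys_mmap7). */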
PRE(sys_mmap7)
{
   SysRes r;

   PRINT("sys_mmap7 ( %#lx, %lu, %ld, %ld, %ld, 0x%lx)",
         ARG1, (UWord)ARG2, ARG3, ARG4, ARG5, ARG6 );
   PRE_REG_READ6(long, "mmap",
                 char *, addr, unsigned long, len, int, prot,  int, flags,
                 int, fd,  unsigned long, pos);

   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6 );
   SET_STATUS_from_SysRes(r);
}

PRE(sys_lseek)
{
   PRINT("sys_lseek ( %ld, 0x%lx, %#lx, %ld )", ARG1,ARG2,ARG3,ARG4);
   PRE_REG_READ4(long, "lseek",
                 unsigned int, fd, int, pad, unsigned long, offset,
                 unsigned int, whence);
}

PRE(sys_lseek7)
{
   PRINT("sys_lseek ( %ld, 0x%lx, %ld )", ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "lseek",
                 unsigned int, fd, unsigned long, offset,
                 unsigned int, whence);
}

PRE(sys_pread)
{
   *flags |= SfMayBlock;
   PRINT("sys_pread ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(ssize_t, "read",
                 unsigned int, fd, char *, buf, vki_size_t, count,
                 int, pad, unsigned long, off);

   if (!ML_(fd_allowed)(ARG1, "read", tid, False))
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_WRITE( "read(buf)", ARG2, ARG3 );
}

POST(sys_pread)
{
   vg_assert(SUCCESS);
   POST_MEM_WRITE( ARG2, RES );
}

PRE(sys_preadv)
{
   *flags |= SfMayBlock;
   PRINT("sys_extpreadv ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(ssize_t, "extpreadv",
                 int, fd, struct vki_iovec*, iov, int, iovcnt, int, flags,
                 vki_off_t, offset);

   if (!ML_(fd_allowed)(ARG1, "extpreadv", tid, False))
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_READ("extpreadv(iov)", ARG2, ARG3 * sizeof(struct vki_iovec));
}

POST(sys_preadv)
{
   int i;
   struct vki_iovec *v = (struct vki_iovec *)ARG2;

   vg_assert(SUCCESS);

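   /* Note: this conservatively marks every iovec as fully written,
      even though the kernel may have filled fewer bytes overall. */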
   for (i = 0; i < ARG3; i++)
      POST_MEM_WRITE((Addr)v[i].iov_base, v[i].iov_len);
}


PRE(sys_pread7)
{
   *flags |= SfMayBlock;
   PRINT("sys_pread ( %ld, %#lx, %lu, %lu )", ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(ssize_t, "read",
                 unsigned int, fd, char *, buf, vki_size_t, count,
                 unsigned long, off);

   if (!ML_(fd_allowed)(ARG1, "read", tid, False))
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_WRITE( "read(buf)", ARG2, ARG3 );
}

POST(sys_pread7)
{
   vg_assert(SUCCESS);
   POST_MEM_WRITE( ARG2, RES );
}

PRE(sys_pwrite)
{
   Bool ok;
   *flags |= SfMayBlock;
   PRINT("sys_pwrite ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(ssize_t, "write",
                 unsigned int, fd, const char *, buf, vki_size_t, count,
                 int, pad, unsigned long, off);
   /* check to see if it is allowed.  If not, try for an exemption from
      --sim-hints=enable-outer (used for self hosting). */
   ok = ML_(fd_allowed)(ARG1, "write", tid, False);
   if (!ok && ARG1 == 2/*stderr*/
           && SimHintiS(SimHint_enable_outer, VG_(clo_sim_hints)))
      ok = True;
   if (!ok)
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_READ( "write(buf)", ARG2, ARG3 );
}

PRE(sys_pwritev)
{
   Bool ok;
   *flags |= SfMayBlock;
   PRINT("sys_extpwritev ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(ssize_t, "extpwritev",
                 int, fd, const struct vki_iovec*, iov, int, iovcnt, int, flags,
                 vki_off_t, offset);
   ok = ML_(fd_allowed)(ARG1, "extpwritev", tid, False);
   if (!ok && ARG1 == 2/*stderr*/
           && SimHintiS(SimHint_enable_outer, VG_(clo_sim_hints)))
      ok = True;
   if (!ok)
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_READ("extpwritev(iov)", ARG2, ARG3 * sizeof(struct vki_iovec));
}


PRE(sys_pwrite7)
{
   Bool ok;
   *flags |= SfMayBlock;
   PRINT("sys_pwrite ( %ld, %#lx, %lu, %lu )", ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(ssize_t, "write",
                 unsigned int, fd, const char *, buf, vki_size_t, count,
                 unsigned long, off);
   /* check to see if it is allowed.  If not, try for an exemption from
      --sim-hints=enable-outer (used for self hosting). */
   ok = ML_(fd_allowed)(ARG1, "write", tid, False);
   if (!ok && ARG1 == 2/*stderr*/
           && SimHintiS(SimHint_enable_outer, VG_(clo_sim_hints)))
      ok = True;
   if (!ok)
      SET_STATUS_Failure( VKI_EBADF );
   else
      PRE_MEM_READ( "write(buf)", ARG2, ARG3 );
}

PRE(sys_ftruncate)
{
   *flags |= SfMayBlock;
   PRINT("sys_ftruncate ( %ld, %lu )", ARG1,ARG3);
   PRE_REG_READ3(long, "ftruncate", unsigned int, fd, int, pad,
                 unsigned int, length);
}

PRE(sys_ftruncate7)
{
   *flags |= SfMayBlock;
   PRINT("sys_ftruncate ( %ld, %lu )", ARG1,ARG2);
   PRE_REG_READ2(long, "ftruncate", unsigned int, fd,
                 unsigned long, length);
}

PRE(sys_truncate)
{
   *flags |= SfMayBlock;
   PRINT("sys_truncate ( %#lx(%s), %lu )", ARG1,(char *)ARG1,ARG3);
   PRE_REG_READ3(long, "truncate",
                 const char *, path, int, pad, unsigned int, length);
   PRE_MEM_RASCIIZ( "truncate(path)", ARG1 );
}

PRE(sys_truncate7)
{
   *flags |= SfMayBlock;
   PRINT("sys_truncate ( %#lx(%s), %lu )", ARG1,(char *)ARG1,ARG2);
   PRE_REG_READ2(long, "truncate",
                 const char *, path, unsigned long, length);
   PRE_MEM_RASCIIZ( "truncate(path)", ARG1 );
}

PRE(sys_sysarch)
{
   ThreadState *tst;
   void **p;

   PRINT("sys_sysarch ( %ld, %#lx )", ARG1, ARG2);
   PRE_REG_READ2(int, "sysarch",
                 int, number, void *, args);
   switch (ARG1) {
   case VKI_AMD64_SET_FSBASE:
      PRINT("sys_amd64_set_fsbase ( %#lx )", ARG2);
      PRE_REG_READ1(long, "amd64_set_fsbase", void *, base)

      /* On Dragonfly, the syscall sets the %fs base for us, so do it now. */
      tst = VG_(get_ThreadState)(tid);
      p = (void**)ARG2;
      tst->arch.vex.guest_FS_CONST = (UWord)*p;
      /* "do" the syscall ourselves; the kernel never sees it */
      SET_STATUS_Success2((ULong)*p, tst->arch.vex.guest_RDX );

      break;
   case VKI_AMD64_GET_FSBASE:
      PRINT("sys_amd64_get_fsbase ( %#lx )", ARG2);
      PRE_REG_READ1(int, "amd64_get_fsbase", void *, basep)
      PRE_MEM_WRITE( "amd64_get_fsbase(basep)", ARG2, sizeof(void *) );

      /* "do" the syscall ourselves; the kernel never sees it */
      tst = VG_(get_ThreadState)(tid);
      SET_STATUS_Success2( tst->arch.vex.guest_FS_CONST, tst->arch.vex.guest_RDX );
      POST_MEM_WRITE( ARG2, sizeof(void *) );
      break;
   case VKI_AMD64_GET_XFPUSTATE:
      PRINT("sys_amd64_get_xfpustate ( %#lx )", ARG2);
      PRE_REG_READ1(int, "amd64_get_xfpustate", void *, basep)
      PRE_MEM_WRITE( "amd64_get_xfpustate(basep)", ARG2, sizeof(void *) );

      /* "do" the syscall ourselves; the kernel never sees it */
      tst = VG_(get_ThreadState)(tid);
      SET_STATUS_Success2( tst->arch.vex.guest_FPTAG[0], tst->arch.vex.guest_FPTAG[0] );
      POST_MEM_WRITE( ARG2, sizeof(void *) );
      break;
   default:
      VG_(message) (Vg_UserMsg, "unhandled sysarch cmd %ld", ARG1);
      VG_(unimplemented) ("unhandled sysarch cmd");
      break;
   }
}


#undef PRE
#undef POST

#endif /* defined(VGP_amd64_dragonfly) */

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/