// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build msan

#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"

// This is like race_amd64.s, but for the msan calls.
// See race_amd64.s for detailed comments.

// First two C ABI integer-argument registers. They differ between the
// Windows x64 calling convention (CX, DX, R8, R9) and the System V ABI
// used on all other amd64 targets (DI, SI, DX, CX).
#ifdef GOOS_windows
#define RARG0 CX
#define RARG1 DX
#define RARG2 R8
#define RARG3 R9
#else
#define RARG0 DI
#define RARG1 SI
#define RARG2 DX
#define RARG3 CX
#endif

// Each trampoline below loads its Go arguments into the C argument
// registers, puts the target C function's address in AX, and tail-jumps
// to msancall<>, which performs the stack switch and the actual CALL.

// func runtime·domsanread(addr unsafe.Pointer, sz uintptr)
// Called from msanread.
TEXT	runtime·domsanread(SB), NOSPLIT, $0-16
	MOVQ	addr+0(FP), RARG0
	MOVQ	size+8(FP), RARG1
	// void __msan_read_go(void *addr, uintptr_t sz);
	MOVQ	$__msan_read_go(SB), AX
	JMP	msancall<>(SB)

// func runtime·msanwrite(addr unsafe.Pointer, sz uintptr)
// Called from instrumented code.
TEXT	runtime·msanwrite(SB), NOSPLIT, $0-16
	MOVQ	addr+0(FP), RARG0
	MOVQ	size+8(FP), RARG1
	// void __msan_write_go(void *addr, uintptr_t sz);
	MOVQ	$__msan_write_go(SB), AX
	JMP	msancall<>(SB)

// func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr)
TEXT	runtime·msanmalloc(SB), NOSPLIT, $0-16
	MOVQ	addr+0(FP), RARG0
	MOVQ	size+8(FP), RARG1
	// void __msan_malloc_go(void *addr, uintptr_t sz);
	MOVQ	$__msan_malloc_go(SB), AX
	JMP	msancall<>(SB)

// func runtime·msanfree(addr unsafe.Pointer, sz uintptr)
TEXT	runtime·msanfree(SB), NOSPLIT, $0-16
	MOVQ	addr+0(FP), RARG0
	MOVQ	size+8(FP), RARG1
	// void __msan_free_go(void *addr, uintptr_t sz);
	MOVQ	$__msan_free_go(SB), AX
	JMP	msancall<>(SB)

// Switches SP to g0 stack and calls (AX). Arguments already set.
TEXT	msancall<>(SB), NOSPLIT, $0-0
	get_tls(R12)
	MOVQ	g(R12), R14	// current g, or 0 if called from a system stack
	MOVQ	SP, R12		// callee-saved, preserved across the CALL
	CMPQ	R14, $0
	JE	call		// no g; still on a system stack

	MOVQ	g_m(R14), R13
	// Switch to g0 stack.
	MOVQ	m_g0(R13), R10
	CMPQ	R10, R14
	JE	call		// already on g0

	// Load g0's saved SP; the C callee runs on the g0 stack.
	MOVQ	(g_sched+gobuf_sp)(R10), SP
call:
	ANDQ	$~15, SP	// alignment for gcc ABI
	CALL	AX
	MOVQ	R12, SP		// restore the caller's SP saved above
	RET