//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  else
#    include <sys/cachectl.h>
#  endif
#endif

#ifdef __APPLE__
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void*);
#endif

namespace {

int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ |
       llvm::sys::Memory::MF_WRITE |
       llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences. The function InvalidateInstruction-
    // Cache uses instructions dcbf and icbi, both of which are treated by
    // the processor as loads. If the page has no read permissions,
    // executing these instructions will result in a segmentation fault.
    // Somehow, this problem is not present on Linux, but it does happen
    // on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // namespace
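
// Illustrative note (not part of the original file): callers are expected to
// pass an exact bitwise-OR of the MF_* enumerators to the helper above, e.g.
//
//   unsigned PF = llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE;
//   int Prot = getPosixProtectionFlags(PF); // yields PROT_READ | PROT_WRITE
//
// Any other bit pattern falls through to llvm_unreachable(), so a new MF_*
// flag would need a corresponding case in the switch.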

namespace llvm {
namespace sys {

MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting address
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);

    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}

std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.Size))
    return std::error_code(errno, std::generic_category());

  M.Address = nullptr;
  M.Size = 0;

  return std::error_code();
}

std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);

#if !defined(__minix)
  int Result = ::mprotect(M.Address, M.Size, Protect);

  if (Result != 0)
#else
  errno = ENOSYS;
#endif /* !defined(__minix) */
    return std::error_code(errno, std::generic_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return std::error_code();
}
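
// Illustrative sketch (not from the original file): a typical caller drives the
// three functions above by allocating writable pages, emitting code into them,
// and flipping the protection to read/execute before running it. The symbols
// `CodeSize` and `emitCodeInto` are hypothetical placeholders for the caller's
// own sizes and code generator.
//
//   using namespace llvm;
//   std::error_code EC;
//   sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
//       CodeSize, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
//   if (EC)
//     report_fatal_error("allocateMappedMemory: " + EC.message());
//
//   emitCodeInto(MB.base(), MB.size());   // hypothetical code emitter
//
//   EC = sys::Memory::protectMappedMemory(
//       MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC);
//   if (EC)
//     report_fatal_error("protectMappedMemory: " + EC.message());
//   // protectMappedMemory already invalidates the instruction cache when
//   // MF_EXEC is requested, so the block is now safe to execute.
//
//   EC = sys::Memory::releaseMappedMemory(MB);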

/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions. This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it. Getting this type of memory
/// is very OS specific.
///
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  size_t PageSize = Process::getPageSize();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int flags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : nullptr;

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint
      return AllocateRWX(NumBytes, nullptr);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}

bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}

bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#elif defined(__arm__) || defined(__aarch64__)
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  return true;
#else
  return true;
#endif
}

bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}
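
// Illustrative sketch (not from the original file): the legacy AllocateRWX
// interface above is typically used the same way, with setWritable and
// setExecutable bracketing code emission (on ARM Darwin they re-protect the
// pages via vm_protect; on most other hosts they are effectively no-ops).
// `CodeSize` and `emitCodeInto` are hypothetical placeholders.
//
//   using namespace llvm;
//   std::string Err;
//   sys::MemoryBlock MB = sys::Memory::AllocateRWX(CodeSize, nullptr, &Err);
//   if (MB.base() == nullptr)
//     report_fatal_error(Err);
//
//   sys::Memory::setWritable(MB, &Err);    // make the pages writable
//   emitCodeInto(MB.base(), MB.size());    // hypothetical code emitter
//   sys::Memory::setExecutable(MB, &Err);  // back to read/execute
//   sys::Memory::InvalidateInstructionCache(MB.base(), MB.size());
//
//   // ... call into MB.base() ...
//
//   sys::Memory::ReleaseRWX(MB, &Err);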

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
       defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__) || defined(__mips__)) && \
        defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  endif

#endif  // end apple

  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm