//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#ifdef __Fuchsia__
#include <zircon/syscalls.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  elif !defined(__FreeBSD__)
#    include <sys/cachectl.h>
#  endif
#endif

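// Forward declarations for the platform instruction-cache flush routines used
// below: sys_icache_invalidate() is Darwin's libkern interface, and
// __clear_cache() is the builtin provided by libgcc/compiler-rt elsewhere.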
#if defined(__APPLE__)
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void*);
#endif

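// Translate an llvm::sys::Memory::MF_* protection mask into the equivalent
// POSIX PROT_* bits for use with mmap() and mprotect().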
static int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags & llvm::sys::Memory::MF_RWE_MASK) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
      llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if (defined(__FreeBSD__) || defined(__POWERPC__) || defined (__ppc__) || \
     defined(_POWER) || defined(_ARCH_PPC))
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences.  The function InvalidateInstruction-
    // Cache uses instructions dcbf and icbi, both of which are treated by
    // the processor as loads.  If the page has no read permissions,
    // executing these instructions will result in a segmentation fault.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

namespace llvm {
namespace sys {

MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  // On platforms that have it, we can use MAP_ANON to get a memory-mapped
  // page without file backing, but we need a fallback of opening /dev/zero
  // for strictly POSIX platforms instead.
  int fd;
#if defined(MAP_ANON)
  fd = -1;
#else
  fd = open("/dev/zero", O_RDWR);
  if (fd == -1) {
    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }
#endif

  int MMFlags = MAP_PRIVATE;
#if defined(MAP_ANON)
  MMFlags |= MAP_ANON;
#endif
  int Protect = getPosixProtectionFlags(PFlags);

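  // On NetBSD, PaX MPROTECT can forbid adding permissions after the fact, so
  // reserve the right to later upgrade this mapping to RWX via PROT_MPROTECT.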
#if defined(__NetBSD__) && defined(PROT_MPROTECT)
  Protect |= PROT_MPROTECT(PROT_READ | PROT_WRITE | PROT_EXEC);
#endif

  // Use any near hint and the page size to set a page-aligned starting address
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->allocatedSize() : 0;
  static const size_t PageSize = Process::getPageSizeEstimate();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  // FIXME: Handle huge page requests (MF_HUGE_HINT).
  void *Addr = ::mmap(reinterpret_cast<void *>(Start), PageSize*NumPages, Protect,
                      MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) { // Try again without a near hint.
#if !defined(MAP_ANON)
      close(fd);
#endif
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);
    }

    EC = std::error_code(errno, std::generic_category());
#if !defined(MAP_ANON)
    close(fd);
#endif
    return MemoryBlock();
  }

#if !defined(MAP_ANON)
  close(fd);
#endif

  MemoryBlock Result;
  Result.Address = Addr;
  Result.AllocatedSize = PageSize*NumPages;
  Result.Flags = PFlags;

  // Rely on protectMappedMemory to invalidate instruction cache.
  if (PFlags & MF_EXEC) {
    EC = Memory::protectMappedMemory(Result, PFlags);
    if (EC != std::error_code())
      return MemoryBlock();
  }

  return Result;
}

std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.AllocatedSize == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.AllocatedSize))
    return std::error_code(errno, std::generic_category());

  M.Address = nullptr;
  M.AllocatedSize = 0;

  return std::error_code();
}

std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  static const Align PageSize = Align(Process::getPageSizeEstimate());
  if (M.Address == nullptr || M.AllocatedSize == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);
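  // Round the block's start down and its end up to page boundaries so that
  // mprotect() is applied to every page overlapping the block.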
  uintptr_t Start = alignAddr((const uint8_t *)M.Address - PageSize.value() + 1, PageSize);
  uintptr_t End = alignAddr((const uint8_t *)M.Address + M.AllocatedSize, PageSize);

  bool InvalidateCache = (Flags & MF_EXEC);

#if defined(__arm__) || defined(__aarch64__)
  // Certain ARM implementations treat the icache clear instruction as a memory
  // read, and the CPU segfaults when clearing the cache on a !PROT_READ page.
  // Therefore we temporarily add PROT_READ to flush the instruction caches.
  if (InvalidateCache && !(Protect & PROT_READ)) {
    int Result = ::mprotect((void *)Start, End - Start, Protect | PROT_READ);
    if (Result != 0)
      return std::error_code(errno, std::generic_category());

    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
    InvalidateCache = false;
  }
#endif

  int Result = ::mprotect((void *)Start, End - Start, Protect);

  if (Result != 0)
    return std::error_code(errno, std::generic_category());

  if (InvalidateCache)
    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);

  return std::error_code();
}

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
       defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#elif defined(__Fuchsia__)

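  // Fuchsia exposes cache maintenance as a syscall; ZX_CACHE_FLUSH_INSN
  // synchronizes the instruction cache with prior data writes to the range.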
  zx_status_t Status = zx_cache_flush(Addr, Len, ZX_CACHE_FLUSH_INSN);
  assert(Status == ZX_OK && "cannot invalidate instruction cache");

#else

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
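  // Conservatively assume 32-byte cache lines: flush each data cache line to
  // memory (dcbf), wait for the flushes to complete (sync), invalidate the
  // corresponding instruction cache lines (icbi), then discard any
  // already-fetched instructions (isync).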
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__) || defined(__mips__)) && \
        defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  endif

#endif  // end apple

  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm