//===- MemoryMapper.cpp - Cross-process memory mapper ------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/Orc/MemoryMapper.h"

#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/WindowsError.h"

#include <future>

#if defined(LLVM_ON_UNIX)
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#elif defined(_WIN32)
#include <windows.h>
#endif

namespace llvm {
namespace orc {

MemoryMapper::~MemoryMapper() {}

InProcessMemoryMapper::InProcessMemoryMapper(size_t PageSize)
    : PageSize(PageSize) {}

Expected<std::unique_ptr<InProcessMemoryMapper>>
InProcessMemoryMapper::Create() {
  auto PageSize = sys::Process::getPageSize();
  if (!PageSize)
    return PageSize.takeError();
  return std::make_unique<InProcessMemoryMapper>(*PageSize);
}

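// Reserve a range of pages in this process. The memory is allocated
// read/write up front and the reservation is recorded so that initialize() and
// release() can find it by base address later.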
void InProcessMemoryMapper::reserve(size_t NumBytes,
                                    OnReservedFunction OnReserved) {
  std::error_code EC;
  auto MB = sys::Memory::allocateMappedMemory(
      NumBytes, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);

  if (EC)
    return OnReserved(errorCodeToError(EC));

  {
    std::lock_guard<std::mutex> Lock(Mutex);
    Reservations[MB.base()].Size = MB.allocatedSize();
  }

  OnReserved(
      ExecutorAddrRange(ExecutorAddr::fromPtr(MB.base()), MB.allocatedSize()));
}

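// No separate working memory is needed in-process: content can be written
// directly at its target address.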
char *InProcessMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
  return Addr.toPtr<char *>();
}

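// Zero-fill each segment's trailing bytes, apply the requested protections,
// invalidate the instruction cache for executable segments, then run the
// finalize actions. The resulting deallocation actions are recorded under the
// lowest segment address, which also serves as the allocation's key.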
void InProcessMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
                                       OnInitializedFunction OnInitialized) {
  ExecutorAddr MinAddr(~0ULL);

  for (auto &Segment : AI.Segments) {
    auto Base = AI.MappingBase + Segment.Offset;
    auto Size = Segment.ContentSize + Segment.ZeroFillSize;

    if (Base < MinAddr)
      MinAddr = Base;

    std::memset((Base + Segment.ContentSize).toPtr<void *>(), 0,
                Segment.ZeroFillSize);

    if (auto EC = sys::Memory::protectMappedMemory({Base.toPtr<void *>(), Size},
                                                   Segment.Prot)) {
      return OnInitialized(errorCodeToError(EC));
    }
    if (Segment.Prot & sys::Memory::MF_EXEC)
      sys::Memory::InvalidateInstructionCache(Base.toPtr<void *>(), Size);
  }

  auto DeinitializeActions = shared::runFinalizeActions(AI.Actions);
  if (!DeinitializeActions)
    return OnInitialized(DeinitializeActions.takeError());

  {
    std::lock_guard<std::mutex> Lock(Mutex);
    Allocations[MinAddr].DeinitializationActions =
        std::move(*DeinitializeActions);
    Reservations[AI.MappingBase.toPtr<void *>()].Allocations.push_back(MinAddr);
  }

  OnInitialized(MinAddr);
}

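// Run the recorded deallocation actions for each allocation base and drop the
// corresponding bookkeeping entries, joining all failures into a single Error.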
void InProcessMemoryMapper::deinitialize(
    ArrayRef<ExecutorAddr> Bases,
    MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
  Error AllErr = Error::success();

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    for (auto Base : Bases) {

      if (Error Err = shared::runDeallocActions(
              Allocations[Base].DeinitializationActions)) {
        AllErr = joinErrors(std::move(AllErr), std::move(Err));
      }

      Allocations.erase(Base);
    }
  }

  OnDeinitialized(std::move(AllErr));
}

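// Releasing a reservation first deinitializes any sub-allocations still live
// inside it (waiting on a promise/future pair since deinitialize is
// asynchronous), then unmaps the memory and erases the reservation record.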
void InProcessMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
                                    OnReleasedFunction OnReleased) {
  Error Err = Error::success();

  for (auto Base : Bases) {
    std::vector<ExecutorAddr> AllocAddrs;
    size_t Size;
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      auto &R = Reservations[Base.toPtr<void *>()];
      Size = R.Size;
      AllocAddrs.swap(R.Allocations);
    }

    // Deinitialize sub-allocations.
    std::promise<MSVCPError> P;
    auto F = P.get_future();
    deinitialize(AllocAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
    if (Error E = F.get()) {
      Err = joinErrors(std::move(Err), std::move(E));
    }

    // Free the memory.
    auto MB = sys::MemoryBlock(Base.toPtr<void *>(), Size);

    auto EC = sys::Memory::releaseMappedMemory(MB);
    if (EC) {
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    }

    std::lock_guard<std::mutex> Lock(Mutex);
    Reservations.erase(Base.toPtr<void *>());
  }

  OnReleased(std::move(Err));
}

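// Release any remaining reservations synchronously on destruction; failures
// here are treated as fatal.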
InProcessMemoryMapper::~InProcessMemoryMapper() {
  std::vector<ExecutorAddr> ReservationAddrs;
  {
    std::lock_guard<std::mutex> Lock(Mutex);

    ReservationAddrs.reserve(Reservations.size());
    for (const auto &R : Reservations) {
      ReservationAddrs.push_back(ExecutorAddr::fromPtr(R.getFirst()));
    }
  }

  std::promise<MSVCPError> P;
  auto F = P.get_future();
  release(ReservationAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
  cantFail(F.get());
}

// SharedMemoryMapper

SharedMemoryMapper::SharedMemoryMapper(ExecutorProcessControl &EPC,
                                       SymbolAddrs SAs, size_t PageSize)
    : EPC(EPC), SAs(SAs), PageSize(PageSize) {}

Expected<std::unique_ptr<SharedMemoryMapper>>
SharedMemoryMapper::Create(ExecutorProcessControl &EPC, SymbolAddrs SAs) {
  auto PageSize = sys::Process::getPageSize();
  if (!PageSize)
    return PageSize.takeError();

  return std::make_unique<SharedMemoryMapper>(EPC, SAs, *PageSize);
}

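// Reservation is a two-step process: the executor-side mapper service creates
// a named shared memory object and returns its name and target base address,
// and this controller-side callback then maps the same object locally
// (shm_open/mmap on Unix, OpenFileMappingW/MapViewOfFile on Windows) so that
// content written here becomes visible at the target address.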
void SharedMemoryMapper::reserve(size_t NumBytes,
                                 OnReservedFunction OnReserved) {
#if defined(LLVM_ON_UNIX) || defined(_WIN32)

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceReserveSignature>(
      SAs.Reserve,
      [this, NumBytes, OnReserved = std::move(OnReserved)](
          Error SerializationErr,
          Expected<std::pair<ExecutorAddr, std::string>> Result) mutable {
        if (SerializationErr) {
          cantFail(Result.takeError());
          return OnReserved(std::move(SerializationErr));
        }

        if (!Result)
          return OnReserved(Result.takeError());

        ExecutorAddr RemoteAddr;
        std::string SharedMemoryName;
        std::tie(RemoteAddr, SharedMemoryName) = std::move(*Result);

        void *LocalAddr = nullptr;

#if defined(LLVM_ON_UNIX)

        int SharedMemoryFile = shm_open(SharedMemoryName.c_str(), O_RDWR, 0700);
        if (SharedMemoryFile < 0) {
          return OnReserved(errorCodeToError(
              std::error_code(errno, std::generic_category())));
        }

        // This prevents other processes from accessing the object by name.
        shm_unlink(SharedMemoryName.c_str());

        LocalAddr = mmap(nullptr, NumBytes, PROT_READ | PROT_WRITE, MAP_SHARED,
                         SharedMemoryFile, 0);
        if (LocalAddr == MAP_FAILED) {
          // Capture errno before close() so the reported error is the mmap
          // failure, and avoid leaking the file descriptor on this path.
          auto EC = std::error_code(errno, std::generic_category());
          close(SharedMemoryFile);
          return OnReserved(errorCodeToError(EC));
        }

        close(SharedMemoryFile);

#elif defined(_WIN32)

        std::wstring WideSharedMemoryName(SharedMemoryName.begin(),
                                          SharedMemoryName.end());
        HANDLE SharedMemoryFile = OpenFileMappingW(
            FILE_MAP_ALL_ACCESS, FALSE, WideSharedMemoryName.c_str());
        if (!SharedMemoryFile)
          return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));

        LocalAddr =
            MapViewOfFile(SharedMemoryFile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
        if (!LocalAddr) {
          CloseHandle(SharedMemoryFile);
          return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
        }

        CloseHandle(SharedMemoryFile);

#endif
        {
          std::lock_guard<std::mutex> Lock(Mutex);
          Reservations.insert({RemoteAddr, {LocalAddr, NumBytes}});
        }

        OnReserved(ExecutorAddrRange(RemoteAddr, NumBytes));
      },
      SAs.Instance, static_cast<uint64_t>(NumBytes));

#else
  OnReserved(make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode()));
#endif
}

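// Map a target (executor) address to the corresponding local working address
// by locating the reservation that contains it.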
char *SharedMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
  auto R = Reservations.upper_bound(Addr);
  assert(R != Reservations.begin() && "Attempt to prepare unknown range");
  R--;

  ExecutorAddrDiff Offset = Addr - R->first;

  return static_cast<char *>(R->second.LocalAddr) + Offset;
}

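// Zero-fill is applied to the local working copy; protection changes and
// finalize actions are encoded in a SharedMemoryFinalizeRequest and carried
// out by the executor-side mapper service, which replies with an address
// identifying the allocation.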
void SharedMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
                                    OnInitializedFunction OnInitialized) {
  auto Reservation = Reservations.find(AI.MappingBase);
  assert(Reservation != Reservations.end() &&
         "Attempt to initialize unreserved range");

  tpctypes::SharedMemoryFinalizeRequest FR;

  AI.Actions.swap(FR.Actions);

  FR.Segments.reserve(AI.Segments.size());

  for (auto Segment : AI.Segments) {
    char *Base =
        static_cast<char *>(Reservation->second.LocalAddr) + Segment.Offset;
    std::memset(Base + Segment.ContentSize, 0, Segment.ZeroFillSize);

    tpctypes::SharedMemorySegFinalizeRequest SegReq;
    SegReq.Prot = tpctypes::toWireProtectionFlags(
        static_cast<sys::Memory::ProtectionFlags>(Segment.Prot));
    SegReq.Addr = AI.MappingBase + Segment.Offset;
    SegReq.Size = Segment.ContentSize + Segment.ZeroFillSize;

    FR.Segments.push_back(SegReq);
  }

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceInitializeSignature>(
      SAs.Initialize,
      [OnInitialized = std::move(OnInitialized)](
          Error SerializationErr, Expected<ExecutorAddr> Result) mutable {
        if (SerializationErr) {
          cantFail(Result.takeError());
          return OnInitialized(std::move(SerializationErr));
        }

        OnInitialized(std::move(Result));
      },
      SAs.Instance, AI.MappingBase, std::move(FR));
}

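// Deinitialization runs entirely on the executor side; this call just forwards
// the allocation addresses to the mapper service and relays any error.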
void SharedMemoryMapper::deinitialize(
    ArrayRef<ExecutorAddr> Allocations,
    MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceDeinitializeSignature>(
      SAs.Deinitialize,
      [OnDeinitialized = std::move(OnDeinitialized)](Error SerializationErr,
                                                     Error Result) mutable {
        if (SerializationErr) {
          cantFail(std::move(Result));
          return OnDeinitialized(std::move(SerializationErr));
        }

        OnDeinitialized(std::move(Result));
      },
      SAs.Instance, Allocations);
}

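// Unmap the local views of the given reservations first, then ask the
// executor-side service to release the target-side mappings; local and remote
// errors are joined before being passed to OnReleased.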
void SharedMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
                                 OnReleasedFunction OnReleased) {
#if defined(LLVM_ON_UNIX) || defined(_WIN32)
  Error Err = Error::success();

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    for (auto Base : Bases) {

#if defined(LLVM_ON_UNIX)

      if (munmap(Reservations[Base].LocalAddr, Reservations[Base].Size) != 0)
        Err = joinErrors(std::move(Err), errorCodeToError(std::error_code(
                                             errno, std::generic_category())));

#elif defined(_WIN32)

      if (!UnmapViewOfFile(Reservations[Base].LocalAddr))
        Err = joinErrors(std::move(Err),
                         errorCodeToError(mapWindowsError(GetLastError())));

#endif

      Reservations.erase(Base);
    }
  }

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceReleaseSignature>(
      SAs.Release,
      [OnReleased = std::move(OnReleased),
       Err = std::move(Err)](Error SerializationErr, Error Result) mutable {
        if (SerializationErr) {
          cantFail(std::move(Result));
          return OnReleased(
              joinErrors(std::move(Err), std::move(SerializationErr)));
        }

        return OnReleased(joinErrors(std::move(Err), std::move(Result)));
      },
      SAs.Instance, Bases);
#else
  OnReleased(make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode()));
#endif
}

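// Release all outstanding reservations synchronously on destruction.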
SharedMemoryMapper::~SharedMemoryMapper() {
  std::vector<ExecutorAddr> ReservationAddrs;
  {
    std::lock_guard<std::mutex> Lock(Mutex);
    ReservationAddrs.reserve(Reservations.size());
    for (const auto &R : Reservations)
      ReservationAddrs.push_back(R.first);
  }

  std::promise<MSVCPError> P;
  auto F = P.get_future();
  release(ReservationAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
  // FIXME: Release can actually fail; the error should be propagated. Until
  // then, callers that care about release failures should call release()
  // explicitly before destruction.
  cantFail(F.get());
}

} // namespace orc
} // namespace llvm