/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

#ifndef mozilla_interceptor_MMPolicies_h
#define mozilla_interceptor_MMPolicies_h

#include "mozilla/Assertions.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DynamicallyLinkedFunctionPtr.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Maybe.h"
#include "mozilla/Span.h"
#include "mozilla/TypedEnumBits.h"
#include "mozilla/Types.h"
#include "mozilla/WindowsMapRemoteView.h"

#include <windows.h>

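// VirtualAlloc2 and MapViewOfFile3 are declared in the SDK headers only as of
// Windows 10 RS4, and not at all by MinGW, so we declare them ourselves where
// needed.  They are resolved dynamically from kernelbase.dll at runtime (see
// the StaticDynamicallyLinkedFunctionPtr uses below).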
#if (NTDDI_VERSION < NTDDI_WIN10_RS4) || defined(__MINGW32__)
PVOID WINAPI VirtualAlloc2(HANDLE Process, PVOID BaseAddress, SIZE_T Size,
                           ULONG AllocationType, ULONG PageProtection,
                           MEM_EXTENDED_PARAMETER* ExtendedParameters,
                           ULONG ParameterCount);
PVOID WINAPI MapViewOfFile3(HANDLE FileMapping, HANDLE Process,
                            PVOID BaseAddress, ULONG64 Offset, SIZE_T ViewSize,
                            ULONG AllocationType, ULONG PageProtection,
                            MEM_EXTENDED_PARAMETER* ExtendedParameters,
                            ULONG ParameterCount);
#endif  // (NTDDI_VERSION < NTDDI_WIN10_RS4) || defined(__MINGW32__)

// _CRT_RAND_S is not defined everywhere, but we need it.
#if !defined(_CRT_RAND_S)
extern "C" errno_t rand_s(unsigned int* randomValue);
#endif  // !defined(_CRT_RAND_S)

// We declare only the functions we need from NativeNt.h here; including the
// entire NativeNt.h would create a circular dependency.
namespace mozilla {
namespace nt {
SIZE_T WINAPI VirtualQueryEx(HANDLE aProcess, LPCVOID aAddress,
                             PMEMORY_BASIC_INFORMATION aMemInfo,
                             SIZE_T aMemInfoLen);

SIZE_T WINAPI VirtualQuery(LPCVOID aAddress, PMEMORY_BASIC_INFORMATION aMemInfo,
                           SIZE_T aMemInfoLen);
}  // namespace nt
}  // namespace mozilla

namespace mozilla {
namespace interceptor {

// This class implements memory operations that do not rely on any functions
// from kernel32.dll, so that derived classes can use them.
class MOZ_TRIVIAL_CTOR_DTOR MMPolicyInProcessPrimitive {
 protected:
  bool ProtectInternal(decltype(&::VirtualProtect) aVirtualProtect,
                       void* aVAddress, size_t aSize, uint32_t aProtFlags,
                       uint32_t* aPrevProtFlags) const {
    MOZ_ASSERT(aPrevProtFlags);
    BOOL ok = aVirtualProtect(aVAddress, aSize, aProtFlags,
                              reinterpret_cast<PDWORD>(aPrevProtFlags));
    if (!ok && aPrevProtFlags) {
      // VirtualProtect can fail but still set valid protection flags.
      // Let's clear those upon failure.
      *aPrevProtFlags = 0;
    }

    return !!ok;
  }

 public:
  bool Read(void* aToPtr, const void* aFromPtr, size_t aLen) const {
    ::memcpy(aToPtr, aFromPtr, aLen);
    return true;
  }

  bool Write(void* aToPtr, const void* aFromPtr, size_t aLen) const {
    ::memcpy(aToPtr, aFromPtr, aLen);
    return true;
  }

  /**
   * @return true if the page that hosts aVAddress is accessible.
   */
  bool IsPageAccessible(uintptr_t aVAddress) const {
    MEMORY_BASIC_INFORMATION mbi;
    SIZE_T result = nt::VirtualQuery(reinterpret_cast<LPCVOID>(aVAddress), &mbi,
                                     sizeof(mbi));

    return result && mbi.AllocationProtect && mbi.State == MEM_COMMIT &&
           mbi.Protect != PAGE_NOACCESS;
  }
};

class MOZ_TRIVIAL_CTOR_DTOR MMPolicyBase {
 protected:
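  // The alignment helpers below use standard two's-complement bit tricks: for
  // a power-of-two |aAlignTo|, |x & -aAlignTo| clears the low bits (round
  // down), and |x + (-x & (aAlignTo - 1))| adds the distance to the next
  // boundary (round up).  For example, with aAlignTo == 0x10000:
  //   AlignDown(0x12345, 0x10000) == 0x10000
  //   AlignUp(0x12345, 0x10000)   == 0x20000
  // The pragmas suppress MSVC warning C4146 (negating an unsigned value),
  // which is exactly the operation we want here.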
  static uintptr_t AlignDown(const uintptr_t aUnaligned,
                             const uintptr_t aAlignTo) {
    MOZ_ASSERT(IsPowerOfTwo(aAlignTo));
#pragma warning(suppress : 4146)
    return aUnaligned & (-aAlignTo);
  }

  static uintptr_t AlignUp(const uintptr_t aUnaligned,
                           const uintptr_t aAlignTo) {
    MOZ_ASSERT(IsPowerOfTwo(aAlignTo));
#pragma warning(suppress : 4146)
    return aUnaligned + ((-aUnaligned) & (aAlignTo - 1));
  }

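  // Aligns |aUnaligned| up to the next |aAlignTo| boundary within a region of
  // |aLen| bytes, and returns that aligned address only if at least
  // |aDesiredLen| bytes remain past it; otherwise returns nullptr.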
  static PVOID AlignUpToRegion(PVOID aUnaligned, uintptr_t aAlignTo,
                               size_t aLen, size_t aDesiredLen) {
    uintptr_t unaligned = reinterpret_cast<uintptr_t>(aUnaligned);
    uintptr_t aligned = AlignUp(unaligned, aAlignTo);
    MOZ_ASSERT(aligned >= unaligned);

    if (aLen < aligned - unaligned) {
      return nullptr;
    }

    aLen -= (aligned - unaligned);
    return reinterpret_cast<PVOID>((aLen >= aDesiredLen) ? aligned : 0);
  }

 public:
#if defined(NIGHTLY_BUILD)
  Maybe<DetourError> mLastError;
  const Maybe<DetourError>& GetLastDetourError() const { return mLastError; }
  template <typename... Args>
  void SetLastDetourError(Args&&... aArgs) {
    mLastError = Some(DetourError(std::forward<Args>(aArgs)...));
  }
#else
  template <typename... Args>
  void SetLastDetourError(Args&&... aArgs) {}
#endif  // defined(NIGHTLY_BUILD)

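  // Rounds the requested size up to a whole multiple of the allocation
  // granularity.  For example, with the typical 64KiB granularity, a request
  // of 0x12345 bytes yields 0x20000.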
  DWORD ComputeAllocationSize(const uint32_t aRequestedSize) const {
    MOZ_ASSERT(aRequestedSize);
    DWORD result = aRequestedSize;

    const uint32_t granularity = GetAllocGranularity();

    uint32_t mod = aRequestedSize % granularity;
    if (mod) {
      result += (granularity - mod);
    }

    return result;
  }

  DWORD GetAllocGranularity() const {
    static const DWORD kAllocGranularity = []() -> DWORD {
      SYSTEM_INFO sysInfo;
      ::GetSystemInfo(&sysInfo);
      return sysInfo.dwAllocationGranularity;
    }();

    return kAllocGranularity;
  }

  DWORD GetPageSize() const {
    static const DWORD kPageSize = []() -> DWORD {
      SYSTEM_INFO sysInfo;
      ::GetSystemInfo(&sysInfo);
      return sysInfo.dwPageSize;
    }();

    return kPageSize;
  }

  uintptr_t GetMaxUserModeAddress() const {
    static const uintptr_t kMaxUserModeAddr = []() -> uintptr_t {
      SYSTEM_INFO sysInfo;
      ::GetSystemInfo(&sysInfo);
      return reinterpret_cast<uintptr_t>(sysInfo.lpMaximumApplicationAddress);
    }();

    return kMaxUserModeAddr;
  }

  static const uint8_t* GetLowerBound(const Span<const uint8_t>& aBounds) {
    return &(*aBounds.cbegin());
  }

  static const uint8_t* GetUpperBoundIncl(const Span<const uint8_t>& aBounds) {
    // We return an upper bound that is inclusive.
    return &(*(aBounds.cend() - 1));
  }

  static const uint8_t* GetUpperBoundExcl(const Span<const uint8_t>& aBounds) {
    // We return an upper bound that is exclusive by adding 1 to the inclusive
    // upper bound.
    return GetUpperBoundIncl(aBounds) + 1;
  }

  /**
   * It is convenient for us to provide address range information based on a
   * "pivot" and a distance from that pivot, as branch instructions operate
   * within a range of the program counter. OTOH, to actually manage the
   * regions of memory, it is easier to think about them in terms of their
   * lower and upper bounds. This function converts from the former format to
   * the latter format.
   */
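  // Illustrative example (hypothetical numbers): with a pivot of 0x41000000,
  // a max distance of 0x40000000 (1GiB), and aSize of 0x10000, the returned
  // span is approximately [0x01000000, 0x80FF0000), after clamping to the 1MB
  // floor and the maximum user-mode address, and aligning both bounds to the
  // allocation granularity.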
  Maybe<Span<const uint8_t>> SpanFromPivotAndDistance(
      const uint32_t aSize, const uintptr_t aPivotAddr,
      const uint32_t aMaxDistanceFromPivot) const {
    if (!aPivotAddr || !aMaxDistanceFromPivot) {
      return Nothing();
    }

    // We don't allow regions below 1MB so that we're not allocating near any
    // sensitive areas in our address space.
    const uintptr_t kMinAllowableAddress = 0x100000;

    const uintptr_t kGranularity(GetAllocGranularity());

    // We subtract the max distance from the pivot to determine our lower bound.
    CheckedInt<uintptr_t> lowerBound(aPivotAddr);
    lowerBound -= aMaxDistanceFromPivot;
    if (lowerBound.isValid()) {
      // In this case, the subtraction has not underflowed, but we still want
      // the lower bound to be at least kMinAllowableAddress.
      lowerBound = std::max(lowerBound.value(), kMinAllowableAddress);
    } else {
      // In this case, we underflowed. Forcibly set the lower bound to
      // kMinAllowableAddress.
      lowerBound = CheckedInt<uintptr_t>(kMinAllowableAddress);
    }

    // Align up to the next unit of allocation granularity when necessary.
    lowerBound = AlignUp(lowerBound.value(), kGranularity);
    MOZ_ASSERT(lowerBound.isValid());
    if (!lowerBound.isValid()) {
      return Nothing();
    }

    // We must ensure that our region is below the maximum allowable user-mode
    // address, or our reservation will fail.
    const uintptr_t kMaxUserModeAddr = GetMaxUserModeAddress();

    // We add the max distance from the pivot to determine our upper bound.
    CheckedInt<uintptr_t> upperBound(aPivotAddr);
    upperBound += aMaxDistanceFromPivot;
    if (upperBound.isValid()) {
      // In this case, the addition has not overflowed, but we still want
      // the upper bound to be at most kMaxUserModeAddr.
      upperBound = std::min(upperBound.value(), kMaxUserModeAddr);
    } else {
      // In this case, we overflowed. Forcibly set the upper bound to
      // kMaxUserModeAddr.
      upperBound = CheckedInt<uintptr_t>(kMaxUserModeAddr);
    }

    // Subtract the desired allocation size so that any chunk allocated in the
    // region will be reachable.
    upperBound -= aSize;
    if (!upperBound.isValid()) {
      return Nothing();
    }

    // Align down to the next unit of allocation granularity when necessary.
    upperBound = AlignDown(upperBound.value(), kGranularity);
    if (!upperBound.isValid()) {
      return Nothing();
    }

    MOZ_ASSERT(lowerBound.value() < upperBound.value());
    if (lowerBound.value() >= upperBound.value()) {
      return Nothing();
    }

    // Return the result as a Span
    return Some(Span(reinterpret_cast<const uint8_t*>(lowerBound.value()),
                     upperBound.value() - lowerBound.value()));
  }

  /**
   * This function locates a virtual memory region of |aDesiredBytesLen| that
   * resides in the interval [aRangeMin, aRangeMax). We do this by scanning the
   * virtual memory space for a block of unallocated memory that is sufficiently
   * large.
   */
  PVOID FindRegion(HANDLE aProcess, const size_t aDesiredBytesLen,
                   const uint8_t* aRangeMin, const uint8_t* aRangeMax) {
    // Convert the given pointers to uintptr_t because we should not
    // compare two pointers unless they are from the same array or object.
    uintptr_t rangeMin = reinterpret_cast<uintptr_t>(aRangeMin);
    uintptr_t rangeMax = reinterpret_cast<uintptr_t>(aRangeMax);

    const DWORD kGranularity = GetAllocGranularity();
    if (!aDesiredBytesLen) {
      SetLastDetourError(MMPOLICY_RESERVE_FINDREGION_INVALIDLEN);
      return nullptr;
    }

    MOZ_ASSERT(rangeMin < rangeMax);
    if (rangeMin >= rangeMax) {
      SetLastDetourError(MMPOLICY_RESERVE_FINDREGION_INVALIDRANGE);
      return nullptr;
    }

    // Generate a randomized base address that falls within the interval
    // [aRangeMin, aRangeMax - aDesiredBytesLen]
    unsigned int rnd = 0;
    rand_s(&rnd);

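    // Illustrative math (hypothetical numbers): with a 2MB range, 64KiB
    // granularity, and a 64KiB request, maxOffset is 31, so the search starts
    // at one of 32 evenly spaced, granularity-aligned offsets.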
    // Reduce rnd to a value that falls within the acceptable range
    uintptr_t maxOffset =
        (rangeMax - rangeMin - aDesiredBytesLen) / kGranularity;
    // Take rnd modulo (maxOffset + 1) because an offset of
    // maxOffset * kGranularity is still acceptable.
    uintptr_t offset = (uintptr_t(rnd) % (maxOffset + 1)) * kGranularity;

    // Start searching at this address
    const uintptr_t searchStart = rangeMin + offset;
    // The max address needs to incorporate the desired length
    const uintptr_t kMaxPtr = rangeMax - aDesiredBytesLen;

    MOZ_DIAGNOSTIC_ASSERT(searchStart <= kMaxPtr);

    MEMORY_BASIC_INFORMATION mbi;
    SIZE_T len = sizeof(mbi);

    // Scan the range for a free chunk that is at least as large as
    // aDesiredBytesLen
    // Scan [searchStart, kMaxPtr]
    for (uintptr_t address = searchStart; address <= kMaxPtr;) {
      if (nt::VirtualQueryEx(aProcess, reinterpret_cast<uint8_t*>(address),
                             &mbi, len) != len) {
        SetLastDetourError(MMPOLICY_RESERVE_FINDREGION_VIRTUALQUERY_ERROR,
                           ::GetLastError());
        return nullptr;
      }

      if (mbi.State == MEM_FREE) {
        // |mbi.BaseAddress| is aligned with the page granularity, but may not
        // be aligned with the allocation granularity.  VirtualAlloc does not
        // accept such a non-aligned address unless the corresponding allocation
        // region is free.  So we get the next boundary's start address.
        PVOID regionStart = AlignUpToRegion(mbi.BaseAddress, kGranularity,
                                            mbi.RegionSize, aDesiredBytesLen);
        if (regionStart) {
          return regionStart;
        }
      }

      address = reinterpret_cast<uintptr_t>(mbi.BaseAddress) + mbi.RegionSize;
    }

    // Scan [aRangeMin, searchStart)
    for (uintptr_t address = rangeMin; address < searchStart;) {
      if (nt::VirtualQueryEx(aProcess, reinterpret_cast<uint8_t*>(address),
                             &mbi, len) != len) {
        SetLastDetourError(MMPOLICY_RESERVE_FINDREGION_VIRTUALQUERY_ERROR,
                           ::GetLastError());
        return nullptr;
      }

      if (mbi.State == MEM_FREE) {
        PVOID regionStart = AlignUpToRegion(mbi.BaseAddress, kGranularity,
                                            mbi.RegionSize, aDesiredBytesLen);
        if (regionStart) {
          return regionStart;
        }
      }

      address = reinterpret_cast<uintptr_t>(mbi.BaseAddress) + mbi.RegionSize;
    }

    SetLastDetourError(MMPOLICY_RESERVE_FINDREGION_NO_FREE_REGION,
                       ::GetLastError());
    return nullptr;
  }

  /**
   * This function reserves a |aSize| block of virtual memory.
   *
   * When |aBounds| is Nothing, it just calls |aReserveFn| and lets Windows
   * choose the base address.
   *
   * Otherwise, it tries to call |aReserveRangeFn| to reserve the memory within
   * the bounds provided by |aBounds|. It is advantageous to use this function
   * because the OS's VM manager has better information as to which base
   * addresses are the best to use.
   *
   * If |aReserveRangeFn| returns Nothing, this means that the platform support
   * is not available. In that case, we fall back to manually computing a region
   * to use for reserving the memory by calling |FindRegion|.
   */
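  // A minimal sketch of a matching |aReserveFn| (illustrative only; the real
  // in-process and out-of-process pairs are defined in the Reserve methods of
  // the derived classes below):
  //
  //   auto reserveFn = [](HANDLE, PVOID aBase, uint32_t aSize) -> PVOID {
  //     return ::VirtualAlloc(aBase, aSize, MEM_RESERVE, PAGE_NOACCESS);
  //   };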
  template <typename ReserveFnT, typename ReserveRangeFnT>
  PVOID Reserve(HANDLE aProcess, const uint32_t aSize,
                const ReserveFnT& aReserveFn,
                const ReserveRangeFnT& aReserveRangeFn,
                const Maybe<Span<const uint8_t>>& aBounds) {
    if (!aBounds) {
      // No restrictions, let the OS choose the base address
      PVOID ret = aReserveFn(aProcess, nullptr, aSize);
      if (!ret) {
        SetLastDetourError(MMPOLICY_RESERVE_NOBOUND_RESERVE_ERROR,
                           ::GetLastError());
      }
      return ret;
    }

    const uint8_t* lowerBound = GetLowerBound(aBounds.ref());
    const uint8_t* upperBoundExcl = GetUpperBoundExcl(aBounds.ref());

    Maybe<PVOID> result =
        aReserveRangeFn(aProcess, aSize, lowerBound, upperBoundExcl);
    if (result) {
      return result.value();
    }

    // aReserveRangeFn is not available on this machine. We'll do a manual
    // search.

    size_t curAttempt = 0;
    const size_t kMaxAttempts = 8;

    // We loop here because |FindRegion| may return a base address that
    // is reserved elsewhere before we have had a chance to reserve it
    // ourselves.
    while (curAttempt < kMaxAttempts) {
      PVOID base = FindRegion(aProcess, aSize, lowerBound, upperBoundExcl);
      if (!base) {
        return nullptr;
      }

      result = Some(aReserveFn(aProcess, base, aSize));
      if (result.value()) {
        return result.value();
      }

      ++curAttempt;
    }

    // If we run out of attempts, we fall through to the default case where
    // the system chooses any base address it wants. In that case, the hook
    // will be set on a best-effort basis.
    PVOID ret = aReserveFn(aProcess, nullptr, aSize);
    if (!ret) {
      SetLastDetourError(MMPOLICY_RESERVE_FINAL_RESERVE_ERROR,
                         ::GetLastError());
    }
    return ret;
  }
};

class MOZ_TRIVIAL_CTOR_DTOR MMPolicyInProcess
    : public MMPolicyInProcessPrimitive,
      public MMPolicyBase {
 public:
  typedef MMPolicyInProcess MMPolicyT;

  constexpr MMPolicyInProcess()
      : mBase(nullptr), mReservationSize(0), mCommitOffset(0) {}

  MMPolicyInProcess(const MMPolicyInProcess&) = delete;
  MMPolicyInProcess& operator=(const MMPolicyInProcess&) = delete;

  MMPolicyInProcess(MMPolicyInProcess&& aOther)
      : mBase(nullptr), mReservationSize(0), mCommitOffset(0) {
    *this = std::move(aOther);
  }

  MMPolicyInProcess& operator=(MMPolicyInProcess&& aOther) {
    mBase = aOther.mBase;
    aOther.mBase = nullptr;

    mCommitOffset = aOther.mCommitOffset;
    aOther.mCommitOffset = 0;

    mReservationSize = aOther.mReservationSize;
    aOther.mReservationSize = 0;

    return *this;
  }

  explicit operator bool() const { return !!mBase; }

  /**
   * Should we unhook everything upon destruction?
   */
  bool ShouldUnhookUponDestruction() const { return true; }

#if defined(_M_IX86)
  bool WriteAtomic(void* aDestPtr, const uint16_t aValue) const {
    *static_cast<uint16_t*>(aDestPtr) = aValue;
    return true;
  }
#endif  // defined(_M_IX86)

  bool Protect(void* aVAddress, size_t aSize, uint32_t aProtFlags,
               uint32_t* aPrevProtFlags) const {
    return ProtectInternal(::VirtualProtect, aVAddress, aSize, aProtFlags,
                           aPrevProtFlags);
  }

  bool FlushInstructionCache() const {
    return !!::FlushInstructionCache(::GetCurrentProcess(), nullptr, 0);
  }

  static DWORD GetTrampWriteProtFlags() { return PAGE_EXECUTE_READWRITE; }

#if defined(_M_X64)
  bool IsTrampolineSpaceInLowest2GB() const {
    return (mBase + mReservationSize) <=
           reinterpret_cast<uint8_t*>(0x0000000080000000ULL);
  }
#endif  // defined(_M_X64)

 protected:
  uint8_t* GetLocalView() const { return mBase; }

  uintptr_t GetRemoteView() const {
    // Same as local view for in-process
    return reinterpret_cast<uintptr_t>(mBase);
  }

  /**
   * @return the effective number of bytes reserved, or 0 on failure
   */
  uint32_t Reserve(const uint32_t aSize,
                   const Maybe<Span<const uint8_t>>& aBounds) {
    if (!aSize) {
      return 0;
    }

    if (mBase) {
      MOZ_ASSERT(mReservationSize >= aSize);
      return mReservationSize;
    }

    mReservationSize = ComputeAllocationSize(aSize);

    auto reserveFn = [](HANDLE aProcess, PVOID aBase, uint32_t aSize) -> PVOID {
      return ::VirtualAlloc(aBase, aSize, MEM_RESERVE, PAGE_NOACCESS);
    };

    auto reserveWithinRangeFn =
        [](HANDLE aProcess, uint32_t aSize, const uint8_t* aRangeMin,
           const uint8_t* aRangeMaxExcl) -> Maybe<PVOID> {
      static const StaticDynamicallyLinkedFunctionPtr<
          decltype(&::VirtualAlloc2)>
          pVirtualAlloc2(L"kernelbase.dll", "VirtualAlloc2");
      if (!pVirtualAlloc2) {
        return Nothing();
      }

      // NB: MEM_ADDRESS_REQUIREMENTS::HighestEndingAddress is *inclusive*
      MEM_ADDRESS_REQUIREMENTS memReq = {
          const_cast<uint8_t*>(aRangeMin),
          const_cast<uint8_t*>(aRangeMaxExcl - 1)};

      MEM_EXTENDED_PARAMETER memParam = {};
      memParam.Type = MemExtendedParameterAddressRequirements;
      memParam.Pointer = &memReq;

      return Some(pVirtualAlloc2(aProcess, nullptr, aSize, MEM_RESERVE,
                                 PAGE_NOACCESS, &memParam, 1));
    };

    mBase = static_cast<uint8_t*>(
        MMPolicyBase::Reserve(::GetCurrentProcess(), mReservationSize,
                              reserveFn, reserveWithinRangeFn, aBounds));

    if (!mBase) {
      return 0;
    }

    return mReservationSize;
  }

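  // Commits at most one more page of the reservation when the requested range
  // extends past the current commit offset; trampoline space is consumed
  // sequentially, so one page at a time suffices.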
  bool MaybeCommitNextPage(const uint32_t aRequestedOffset,
                           const uint32_t aRequestedLength) {
    if (!(*this)) {
      return false;
    }

    uint32_t limit = aRequestedOffset + aRequestedLength - 1;
    if (limit < mCommitOffset) {
      // No commit required
      return true;
    }

    MOZ_DIAGNOSTIC_ASSERT(mCommitOffset < mReservationSize);
    if (mCommitOffset >= mReservationSize) {
      return false;
    }

    PVOID local = ::VirtualAlloc(mBase + mCommitOffset, GetPageSize(),
                                 MEM_COMMIT, PAGE_EXECUTE_READ);
    if (!local) {
      return false;
    }

    mCommitOffset += GetPageSize();
    return true;
  }

 private:
  uint8_t* mBase;
  uint32_t mReservationSize;
  uint32_t mCommitOffset;
};

// This class manages in-process memory access without using functions
// imported from kernel32.dll.  Instead, it uses functions in its own
// function table that are provided from outside.
class MMPolicyInProcessEarlyStage : public MMPolicyInProcessPrimitive {
 public:
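  // The function table mentioned above.  The embedding code resolves these
  // kernel32 exports itself and passes the table in, since this class may run
  // before the IAT has been populated.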
  struct Kernel32Exports {
    decltype(&::FlushInstructionCache) mFlushInstructionCache;
    decltype(&::GetModuleHandleW) mGetModuleHandleW;
    decltype(&::GetSystemInfo) mGetSystemInfo;
    decltype(&::VirtualProtect) mVirtualProtect;
  };

 private:
  static DWORD GetPageSize(const Kernel32Exports& aK32Exports) {
    SYSTEM_INFO sysInfo;
    aK32Exports.mGetSystemInfo(&sysInfo);
    return sysInfo.dwPageSize;
  }

  const Kernel32Exports& mK32Exports;
  const DWORD mPageSize;

 public:
  explicit MMPolicyInProcessEarlyStage(const Kernel32Exports& aK32Exports)
      : mK32Exports(aK32Exports), mPageSize(GetPageSize(mK32Exports)) {}

  // The pattern of constructing a local static variable with a lambda, as
  // seen in MMPolicyBase, compiles into code that uses critical section APIs
  // such as EnterCriticalSection, imported from kernel32.dll.  Because this
  // class needs to be able to run in a process's early stage, when the IAT is
  // not yet resolved, we cannot use that pattern; instead we simply cache the
  // value in a member variable.
  DWORD GetPageSize() const { return mPageSize; }

  bool Protect(void* aVAddress, size_t aSize, uint32_t aProtFlags,
               uint32_t* aPrevProtFlags) const {
    return ProtectInternal(mK32Exports.mVirtualProtect, aVAddress, aSize,
                           aProtFlags, aPrevProtFlags);
  }

  bool FlushInstructionCache() const {
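    // (HANDLE)-1 is the pseudo-handle that GetCurrentProcess() returns; we
    // hardcode it here to avoid depending on another kernel32 import.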
    const HANDLE kCurrentProcess = reinterpret_cast<HANDLE>(-1);
    return !!mK32Exports.mFlushInstructionCache(kCurrentProcess, nullptr, 0);
  }
};

class MMPolicyOutOfProcess : public MMPolicyBase {
 public:
  typedef MMPolicyOutOfProcess MMPolicyT;

  explicit MMPolicyOutOfProcess(HANDLE aProcess)
      : mProcess(nullptr),
        mMapping(nullptr),
        mLocalView(nullptr),
        mRemoteView(nullptr),
        mReservationSize(0),
        mCommitOffset(0) {
    MOZ_ASSERT(aProcess);
    ::DuplicateHandle(::GetCurrentProcess(), aProcess, ::GetCurrentProcess(),
                      &mProcess, kAccessFlags, FALSE, 0);
    MOZ_ASSERT(mProcess);
  }

  explicit MMPolicyOutOfProcess(DWORD aPid)
      : mProcess(::OpenProcess(kAccessFlags, FALSE, aPid)),
        mMapping(nullptr),
        mLocalView(nullptr),
        mRemoteView(nullptr),
        mReservationSize(0),
        mCommitOffset(0) {
    MOZ_ASSERT(mProcess);
  }

  ~MMPolicyOutOfProcess() { Destroy(); }

  MMPolicyOutOfProcess(MMPolicyOutOfProcess&& aOther)
      : mProcess(nullptr),
        mMapping(nullptr),
        mLocalView(nullptr),
        mRemoteView(nullptr),
        mReservationSize(0),
        mCommitOffset(0) {
    *this = std::move(aOther);
  }

  MMPolicyOutOfProcess(const MMPolicyOutOfProcess& aOther) = delete;
  MMPolicyOutOfProcess& operator=(const MMPolicyOutOfProcess&) = delete;

  MMPolicyOutOfProcess& operator=(MMPolicyOutOfProcess&& aOther) {
    Destroy();

    mProcess = aOther.mProcess;
    aOther.mProcess = nullptr;

    mMapping = aOther.mMapping;
    aOther.mMapping = nullptr;

    mLocalView = aOther.mLocalView;
    aOther.mLocalView = nullptr;

    mRemoteView = aOther.mRemoteView;
    aOther.mRemoteView = nullptr;

    mReservationSize = aOther.mReservationSize;
    aOther.mReservationSize = 0;

    mCommitOffset = aOther.mCommitOffset;
    aOther.mCommitOffset = 0;

    return *this;
  }

  explicit operator bool() const {
    return mProcess && mMapping && mLocalView && mRemoteView;
  }

  bool ShouldUnhookUponDestruction() const {
    // We don't clean up hooks for remote processes; they are expected to
    // outlive our process.
    return false;
  }

  // This function reads as many bytes as |aLen| from the target process and
  // succeeds only when the entire area to be read is accessible.
  bool Read(void* aToPtr, const void* aFromPtr, size_t aLen) const {
    MOZ_ASSERT(mProcess);
    if (!mProcess) {
      return false;
    }

    SIZE_T numBytes = 0;
    BOOL ok = ::ReadProcessMemory(mProcess, aFromPtr, aToPtr, aLen, &numBytes);
    return ok && numBytes == aLen;
  }

  // This function reads as many bytes as possible from the target process, up
  // to |aLen| bytes, and returns the number of bytes actually read.
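  // For example, if the tail of the requested range crosses into an
  // inaccessible page, the loop below retries with |rangeEnd| clamped back to
  // successive page boundaries until a prefix of the range can be read.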
  size_t TryRead(void* aToPtr, const void* aFromPtr, size_t aLen) const {
    MOZ_ASSERT(mProcess);
    if (!mProcess) {
      return 0;
    }

    uint32_t pageSize = GetPageSize();
    uintptr_t pageMask = pageSize - 1;

    auto rangeStart = reinterpret_cast<uintptr_t>(aFromPtr);
    auto rangeEnd = rangeStart + aLen;

    while (rangeStart < rangeEnd) {
      SIZE_T numBytes = 0;
      BOOL ok = ::ReadProcessMemory(mProcess, aFromPtr, aToPtr,
                                    rangeEnd - rangeStart, &numBytes);
      if (ok) {
        return numBytes;
      }

      // If ReadProcessMemory fails, try to read up to each page boundary from
      // the end of the requested area one by one.
      if (rangeEnd & pageMask) {
        rangeEnd &= ~pageMask;
      } else {
        rangeEnd -= pageSize;
      }
    }

    return 0;
  }

  bool Write(void* aToPtr, const void* aFromPtr, size_t aLen) const {
    MOZ_ASSERT(mProcess);
    if (!mProcess) {
      return false;
    }

    SIZE_T numBytes = 0;
    BOOL ok = ::WriteProcessMemory(mProcess, aToPtr, aFromPtr, aLen, &numBytes);
    return ok && numBytes == aLen;
  }

  bool Protect(void* aVAddress, size_t aSize, uint32_t aProtFlags,
               uint32_t* aPrevProtFlags) const {
    MOZ_ASSERT(mProcess);
    if (!mProcess) {
      return false;
    }

    MOZ_ASSERT(aPrevProtFlags);
    BOOL ok = ::VirtualProtectEx(mProcess, aVAddress, aSize, aProtFlags,
                                 reinterpret_cast<PDWORD>(aPrevProtFlags));
    if (!ok && aPrevProtFlags) {
      // VirtualProtectEx can fail but still set valid protection flags.
      // Let's clear those upon failure.
      *aPrevProtFlags = 0;
    }

    return !!ok;
  }

  /**
   * @return true if the page that hosts aVAddress is accessible.
   */
  bool IsPageAccessible(uintptr_t aVAddress) const {
    MEMORY_BASIC_INFORMATION mbi;
    SIZE_T result = nt::VirtualQueryEx(
        mProcess, reinterpret_cast<LPCVOID>(aVAddress), &mbi, sizeof(mbi));

    return result && mbi.AllocationProtect && mbi.State == MEM_COMMIT &&
           mbi.Protect != PAGE_NOACCESS;
  }

  bool FlushInstructionCache() const {
    return !!::FlushInstructionCache(mProcess, nullptr, 0);
  }

  static DWORD GetTrampWriteProtFlags() { return PAGE_READWRITE; }

#if defined(_M_X64)
  bool IsTrampolineSpaceInLowest2GB() const {
    return (GetRemoteView() + mReservationSize) <= 0x0000000080000000ULL;
  }
#endif  // defined(_M_X64)

 protected:
  uint8_t* GetLocalView() const { return mLocalView; }

  uintptr_t GetRemoteView() const {
    return reinterpret_cast<uintptr_t>(mRemoteView);
  }

  /**
   * @return the effective number of bytes reserved, or 0 on failure
   */
  uint32_t Reserve(const uint32_t aSize,
                   const Maybe<Span<const uint8_t>>& aBounds) {
    if (!aSize || !mProcess) {
      SetLastDetourError(MMPOLICY_RESERVE_INVALIDARG);
      return 0;
    }

    if (mRemoteView) {
      MOZ_ASSERT(mReservationSize >= aSize);
      SetLastDetourError(MMPOLICY_RESERVE_ZERO_RESERVATIONSIZE);
      return mReservationSize;
    }

    mReservationSize = ComputeAllocationSize(aSize);

    mMapping = ::CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr,
                                    PAGE_EXECUTE_READWRITE | SEC_RESERVE, 0,
                                    mReservationSize, nullptr);
    if (!mMapping) {
      SetLastDetourError(MMPOLICY_RESERVE_CREATEFILEMAPPING, ::GetLastError());
      return 0;
    }

    mLocalView = static_cast<uint8_t*>(
        ::MapViewOfFile(mMapping, FILE_MAP_WRITE, 0, 0, 0));
    if (!mLocalView) {
      SetLastDetourError(MMPOLICY_RESERVE_MAPVIEWOFFILE, ::GetLastError());
      return 0;
    }

    auto reserveFn = [mapping = mMapping](HANDLE aProcess, PVOID aBase,
                                          uint32_t aSize) -> PVOID {
      return mozilla::MapRemoteViewOfFile(mapping, aProcess, 0ULL, aBase, 0, 0,
                                          PAGE_EXECUTE_READ);
    };

    auto reserveWithinRangeFn =
        [mapping = mMapping](HANDLE aProcess, uint32_t aSize,
                             const uint8_t* aRangeMin,
                             const uint8_t* aRangeMaxExcl) -> Maybe<PVOID> {
      static const StaticDynamicallyLinkedFunctionPtr<
          decltype(&::MapViewOfFile3)>
          pMapViewOfFile3(L"kernelbase.dll", "MapViewOfFile3");
      if (!pMapViewOfFile3) {
        return Nothing();
      }

      // NB: MEM_ADDRESS_REQUIREMENTS::HighestEndingAddress is *inclusive*
      MEM_ADDRESS_REQUIREMENTS memReq = {
          const_cast<uint8_t*>(aRangeMin),
          const_cast<uint8_t*>(aRangeMaxExcl - 1)};

      MEM_EXTENDED_PARAMETER memParam = {};
      memParam.Type = MemExtendedParameterAddressRequirements;
      memParam.Pointer = &memReq;

      return Some(pMapViewOfFile3(mapping, aProcess, nullptr, 0, aSize, 0,
                                  PAGE_EXECUTE_READ, &memParam, 1));
    };

    mRemoteView = MMPolicyBase::Reserve(mProcess, mReservationSize, reserveFn,
                                        reserveWithinRangeFn, aBounds);
    if (!mRemoteView) {
      return 0;
    }

    return mReservationSize;
  }

  bool MaybeCommitNextPage(const uint32_t aRequestedOffset,
                           const uint32_t aRequestedLength) {
    if (!(*this)) {
      return false;
    }

    uint32_t limit = aRequestedOffset + aRequestedLength - 1;
    if (limit < mCommitOffset) {
      // No commit required
      return true;
    }

    MOZ_DIAGNOSTIC_ASSERT(mCommitOffset < mReservationSize);
    if (mCommitOffset >= mReservationSize) {
      return false;
    }

    PVOID local = ::VirtualAlloc(mLocalView + mCommitOffset, GetPageSize(),
                                 MEM_COMMIT, PAGE_READWRITE);
    if (!local) {
      return false;
    }

    PVOID remote = ::VirtualAllocEx(
        mProcess, static_cast<uint8_t*>(mRemoteView) + mCommitOffset,
        GetPageSize(), MEM_COMMIT, PAGE_EXECUTE_READ);
    if (!remote) {
      return false;
    }

    mCommitOffset += GetPageSize();
    return true;
  }

 private:
  void Destroy() {
    // We always leak the remote view
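    // (the remote process continues to execute the trampolines there; see
    // ShouldUnhookUponDestruction above).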
    if (mLocalView) {
      ::UnmapViewOfFile(mLocalView);
      mLocalView = nullptr;
    }

    if (mMapping) {
      ::CloseHandle(mMapping);
      mMapping = nullptr;
    }

    if (mProcess) {
      ::CloseHandle(mProcess);
      mProcess = nullptr;
    }
  }

 private:
  HANDLE mProcess;
  HANDLE mMapping;
  uint8_t* mLocalView;
  PVOID mRemoteView;
  uint32_t mReservationSize;
  uint32_t mCommitOffset;

  static const DWORD kAccessFlags = PROCESS_QUERY_INFORMATION |
                                    PROCESS_VM_OPERATION | PROCESS_VM_READ |
                                    PROCESS_VM_WRITE;
};

}  // namespace interceptor
}  // namespace mozilla

#endif  // mozilla_interceptor_MMPolicies_h