/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

#ifndef mozilla_interceptor_MMPolicies_h
#define mozilla_interceptor_MMPolicies_h

#include "mozilla/Assertions.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DynamicallyLinkedFunctionPtr.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Maybe.h"
#include "mozilla/Span.h"
#include "mozilla/TypedEnumBits.h"
#include "mozilla/Types.h"
#include "mozilla/WindowsMapRemoteView.h"

#include <windows.h>

#if (NTDDI_VERSION < NTDDI_WIN10_RS4) || defined(__MINGW32__)
PVOID WINAPI VirtualAlloc2(HANDLE Process, PVOID BaseAddress, SIZE_T Size,
                           ULONG AllocationType, ULONG PageProtection,
                           MEM_EXTENDED_PARAMETER* ExtendedParameters,
                           ULONG ParameterCount);
PVOID WINAPI MapViewOfFile3(HANDLE FileMapping, HANDLE Process,
                            PVOID BaseAddress, ULONG64 Offset, SIZE_T ViewSize,
                            ULONG AllocationType, ULONG PageProtection,
                            MEM_EXTENDED_PARAMETER* ExtendedParameters,
                            ULONG ParameterCount);
#endif  // (NTDDI_VERSION < NTDDI_WIN10_RS4) || defined(__MINGW32__)

// _CRT_RAND_S is not defined everywhere, but we need it.
#if !defined(_CRT_RAND_S)
extern "C" errno_t rand_s(unsigned int* randomValue);
#endif  // !defined(_CRT_RAND_S)

// We declare only the functions we need from NativeNt.h; including the entire
// header here would create a circular dependency.
namespace mozilla {
namespace nt {
SIZE_T WINAPI VirtualQueryEx(HANDLE aProcess, LPCVOID aAddress,
                             PMEMORY_BASIC_INFORMATION aMemInfo,
                             SIZE_T aMemInfoLen);

SIZE_T WINAPI VirtualQuery(LPCVOID aAddress,
                           PMEMORY_BASIC_INFORMATION aMemInfo,
                           SIZE_T aMemInfoLen);
}  // namespace nt
}  // namespace mozilla

namespace mozilla {
namespace interceptor {

// This class implements memory operations that do not rely on any kernel32
// functions, so that derived classes can use them.
class MOZ_TRIVIAL_CTOR_DTOR MMPolicyInProcessPrimitive {
 protected:
  bool ProtectInternal(decltype(&::VirtualProtect) aVirtualProtect,
                       void* aVAddress, size_t aSize, uint32_t aProtFlags,
                       uint32_t* aPrevProtFlags) const {
    MOZ_ASSERT(aPrevProtFlags);
    BOOL ok = aVirtualProtect(aVAddress, aSize, aProtFlags,
                              reinterpret_cast<PDWORD>(aPrevProtFlags));
    if (!ok && aPrevProtFlags) {
      // VirtualProtect can fail but still set valid protection flags.
      // Let's clear those upon failure.
      *aPrevProtFlags = 0;
    }

    return !!ok;
  }

 public:
  bool Read(void* aToPtr, const void* aFromPtr, size_t aLen) const {
    ::memcpy(aToPtr, aFromPtr, aLen);
    return true;
  }

  bool Write(void* aToPtr, const void* aFromPtr, size_t aLen) const {
    ::memcpy(aToPtr, aFromPtr, aLen);
    return true;
  }
  /**
   * @return true if the page that hosts aVAddress is accessible.
   */
  bool IsPageAccessible(void* aVAddress) const {
    MEMORY_BASIC_INFORMATION mbi;
    SIZE_T result = nt::VirtualQuery(aVAddress, &mbi, sizeof(mbi));

    return result && mbi.AllocationProtect && (mbi.Type & MEM_IMAGE) &&
           mbi.State == MEM_COMMIT && mbi.Protect != PAGE_NOACCESS;
  }
};

class MOZ_TRIVIAL_CTOR_DTOR MMPolicyBase {
 protected:
  static uintptr_t AlignDown(const uintptr_t aUnaligned,
                             const uintptr_t aAlignTo) {
    MOZ_ASSERT(IsPowerOfTwo(aAlignTo));
#pragma warning(suppress : 4146)
    return aUnaligned & (-aAlignTo);
  }

  static uintptr_t AlignUp(const uintptr_t aUnaligned,
                           const uintptr_t aAlignTo) {
    MOZ_ASSERT(IsPowerOfTwo(aAlignTo));
#pragma warning(suppress : 4146)
    return aUnaligned + ((-aUnaligned) & (aAlignTo - 1));
  }

 public:
  DWORD ComputeAllocationSize(const uint32_t aRequestedSize) const {
    MOZ_ASSERT(aRequestedSize);
    DWORD result = aRequestedSize;

    const uint32_t granularity = GetAllocGranularity();

    uint32_t mod = aRequestedSize % granularity;
    if (mod) {
      result += (granularity - mod);
    }

    return result;
  }

  DWORD GetAllocGranularity() const {
    static const DWORD kAllocGranularity = []() -> DWORD {
      SYSTEM_INFO sysInfo;
      ::GetSystemInfo(&sysInfo);
      return sysInfo.dwAllocationGranularity;
    }();

    return kAllocGranularity;
  }

  DWORD GetPageSize() const {
    static const DWORD kPageSize = []() -> DWORD {
      SYSTEM_INFO sysInfo;
      ::GetSystemInfo(&sysInfo);
      return sysInfo.dwPageSize;
    }();

    return kPageSize;
  }

  uintptr_t GetMaxUserModeAddress() const {
    static const uintptr_t kMaxUserModeAddr = []() -> uintptr_t {
      SYSTEM_INFO sysInfo;
      ::GetSystemInfo(&sysInfo);
      return reinterpret_cast<uintptr_t>(sysInfo.lpMaximumApplicationAddress);
    }();

    return kMaxUserModeAddr;
  }

  static const uint8_t* GetLowerBound(const Span<const uint8_t>& aBounds) {
    return &(*aBounds.cbegin());
  }

  static const uint8_t* GetUpperBoundIncl(const Span<const uint8_t>& aBounds) {
    // We return an upper bound that is inclusive.
    return &(*(aBounds.cend() - 1));
  }

  static const uint8_t* GetUpperBoundExcl(const Span<const uint8_t>& aBounds) {
    // We return an upper bound that is exclusive by adding 1 to the inclusive
    // upper bound.
    return GetUpperBoundIncl(aBounds) + 1;
  }
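  // Worked example for the helpers above (illustrative values only, assuming
  // the common 64KiB allocation granularity reported by GetSystemInfo):
  //
  //   AlignDown(0x1234F000, 0x10000)  == 0x12340000
  //   AlignUp(0x1234F000, 0x10000)    == 0x12350000
  //   ComputeAllocationSize(0x1800)   == 0x10000   (rounded up to 64KiB)
  //   ComputeAllocationSize(0x10000)  == 0x10000   (already a multiple)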
  /**
   * It is convenient for us to provide address range information based on a
   * "pivot" and a distance from that pivot, as branch instructions operate
   * within a range of the program counter. OTOH, to actually manage the
   * regions of memory, it is easier to think about them in terms of their
   * lower and upper bounds. This function converts from the former format to
   * the latter format.
   */
  Maybe<Span<const uint8_t>> SpanFromPivotAndDistance(
      const uint32_t aSize, const uintptr_t aPivotAddr,
      const uint32_t aMaxDistanceFromPivot) const {
    if (!aPivotAddr || !aMaxDistanceFromPivot) {
      return Nothing();
    }

    // We don't allow regions below 1MB so that we're not allocating near any
    // sensitive areas in our address space.
    const uintptr_t kMinAllowableAddress = 0x100000;

    const uintptr_t kGranularity(GetAllocGranularity());

    // We subtract the max distance from the pivot to determine our lower
    // bound.
    CheckedInt<uintptr_t> lowerBound(aPivotAddr);
    lowerBound -= aMaxDistanceFromPivot;
    if (lowerBound.isValid()) {
      // In this case, the subtraction has not underflowed, but we still want
      // the lower bound to be at least kMinAllowableAddress.
      lowerBound = std::max(lowerBound.value(), kMinAllowableAddress);
    } else {
      // In this case, we underflowed. Forcibly set the lower bound to
      // kMinAllowableAddress.
      lowerBound = CheckedInt<uintptr_t>(kMinAllowableAddress);
    }

    // Align up to the next unit of allocation granularity when necessary.
    lowerBound = AlignUp(lowerBound.value(), kGranularity);
    MOZ_ASSERT(lowerBound.isValid());
    if (!lowerBound.isValid()) {
      return Nothing();
    }

    // We must ensure that our region is below the maximum allowable
    // user-mode address, or our reservation will fail.
    const uintptr_t kMaxUserModeAddr = GetMaxUserModeAddress();

    // We add the max distance from the pivot to determine our upper bound.
    CheckedInt<uintptr_t> upperBound(aPivotAddr);
    upperBound += aMaxDistanceFromPivot;
    if (upperBound.isValid()) {
      // In this case, the addition has not overflowed, but we still want
      // the upper bound to be at most kMaxUserModeAddr.
      upperBound = std::min(upperBound.value(), kMaxUserModeAddr);
    } else {
      // In this case, we overflowed. Forcibly set the upper bound to
      // kMaxUserModeAddr.
      upperBound = CheckedInt<uintptr_t>(kMaxUserModeAddr);
    }

    // Subtract the desired allocation size so that any chunk allocated in the
    // region will be reachable.
    upperBound -= aSize;
    if (!upperBound.isValid()) {
      return Nothing();
    }

    // Align down to the next unit of allocation granularity when necessary.
    upperBound = AlignDown(upperBound.value(), kGranularity);
    if (!upperBound.isValid()) {
      return Nothing();
    }

    MOZ_ASSERT(lowerBound.value() < upperBound.value());
    if (lowerBound.value() >= upperBound.value()) {
      return Nothing();
    }

    // Return the result as a Span
    return Some(MakeSpan(reinterpret_cast<const uint8_t*>(lowerBound.value()),
                         upperBound.value() - lowerBound.value()));
  }
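  // Illustrative example (assumed values, not taken from any caller in this
  // header): a 5-byte rel32 jump can only reach targets within +/-2GB of the
  // instruction pointer, so a caller hooking code near pivot 0x7FFE12340000
  // on x64 might ask for
  //
  //   SpanFromPivotAndDistance(/* aSize */ 0x10000,
  //                            /* aPivotAddr */ 0x7FFE12340000,
  //                            /* aMaxDistanceFromPivot */ 0x7FFFFFFF);
  //
  // The returned span starts roughly 2GB below the pivot (aligned up to the
  // allocation granularity) and ends roughly 2GB above it, after being
  // clamped to [1MB, maximum user-mode address] and shrunk by aSize so that
  // any chunk reserved inside it remains reachable from the pivot.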
  /**
   * This function locates a virtual memory region of |aDesiredBytesLen| that
   * resides in the interval [aRangeMin, aRangeMax). We do this by scanning
   * the virtual memory space for a block of unallocated memory that is
   * sufficiently large.
   */
  PVOID FindRegion(HANDLE aProcess, const size_t aDesiredBytesLen,
                   const uint8_t* aRangeMin, const uint8_t* aRangeMax) const {
    const DWORD kGranularity = GetAllocGranularity();
    MOZ_ASSERT(aDesiredBytesLen >= kGranularity);
    if (!aDesiredBytesLen) {
      return nullptr;
    }

    MOZ_ASSERT(aRangeMin < aRangeMax);
    if (aRangeMin >= aRangeMax) {
      return nullptr;
    }

    // Generate a randomized base address that falls within the interval
    // [aRangeMin, aRangeMax - aDesiredBytesLen]
    unsigned int rnd = 0;
    rand_s(&rnd);

    // Reduce rnd to a value that falls within the acceptable range
    uintptr_t maxOffset =
        (aRangeMax - aRangeMin - aDesiredBytesLen) / kGranularity;
    uintptr_t offset = (uintptr_t(rnd) % maxOffset) * kGranularity;

    // Start searching at this address
    const uint8_t* address = aRangeMin + offset;
    // The max address needs to incorporate the desired length
    const uint8_t* const kMaxPtr = aRangeMax - aDesiredBytesLen;

    MOZ_DIAGNOSTIC_ASSERT(address <= kMaxPtr);

    MEMORY_BASIC_INFORMATION mbi;
    SIZE_T len = sizeof(mbi);

    // Scan the range for a free chunk that is at least as large as
    // aDesiredBytesLen
    while (address <= kMaxPtr &&
           nt::VirtualQueryEx(aProcess, address, &mbi, len)) {
      if (mbi.State == MEM_FREE && mbi.RegionSize >= aDesiredBytesLen) {
        return mbi.BaseAddress;
      }

      address =
          reinterpret_cast<const uint8_t*>(mbi.BaseAddress) + mbi.RegionSize;
    }

    return nullptr;
  }

  /**
   * This function reserves a |aSize| block of virtual memory.
   *
   * When |aBounds| is Nothing, it just calls |aReserveFn| and lets Windows
   * choose the base address.
   *
   * Otherwise, it tries to call |aReserveRangeFn| to reserve the memory
   * within the bounds provided by |aBounds|. It is advantageous to use this
   * function because the OS's VM manager has better information as to which
   * base addresses are the best to use.
   *
   * If |aReserveRangeFn| returns Nothing, this means that the platform
   * support is not available. In that case, we fall back to manually
   * computing a region to use for reserving the memory by calling
   * |FindRegion|.
   */
  template <typename ReserveFnT, typename ReserveRangeFnT>
  PVOID Reserve(HANDLE aProcess, const uint32_t aSize,
                const ReserveFnT& aReserveFn,
                const ReserveRangeFnT& aReserveRangeFn,
                const Maybe<Span<const uint8_t>>& aBounds) const {
    if (!aBounds) {
      // No restrictions, let the OS choose the base address
      return aReserveFn(aProcess, nullptr, aSize);
    }

    const uint8_t* lowerBound = GetLowerBound(aBounds.ref());
    const uint8_t* upperBoundExcl = GetUpperBoundExcl(aBounds.ref());

    Maybe<PVOID> result =
        aReserveRangeFn(aProcess, aSize, lowerBound, upperBoundExcl);
    if (result) {
      return result.value();
    }

    // aReserveRangeFn is not available on this machine. We'll do a manual
    // search.

    size_t curAttempt = 0;
    const size_t kMaxAttempts = 8;

    // We loop here because |FindRegion| may return a base address that
    // is reserved elsewhere before we have had a chance to reserve it
    // ourselves.
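    // (The race is inherent: |FindRegion| only observes via VirtualQueryEx
    // that a candidate region is currently free; another allocation can land
    // there before |aReserveFn| runs, in which case |aReserveFn| fails and we
    // retry with a fresh randomized candidate, up to kMaxAttempts times.)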
    while (curAttempt < kMaxAttempts) {
      PVOID base = FindRegion(aProcess, aSize, lowerBound, upperBoundExcl);
      if (!base) {
        return nullptr;
      }

      result = Some(aReserveFn(aProcess, base, aSize));
      if (result.value()) {
        return result.value();
      }

      ++curAttempt;
    }

    // If we run out of attempts, we fall through to the default case where
    // the system chooses any base address it wants. In that case, the hook
    // will be set on a best-effort basis.

    return aReserveFn(aProcess, nullptr, aSize);
  }
};

class MOZ_TRIVIAL_CTOR_DTOR MMPolicyInProcess
    : public MMPolicyInProcessPrimitive,
      public MMPolicyBase {
 public:
  typedef MMPolicyInProcess MMPolicyT;

  constexpr MMPolicyInProcess()
      : mBase(nullptr), mReservationSize(0), mCommitOffset(0) {}

  MMPolicyInProcess(const MMPolicyInProcess&) = delete;
  MMPolicyInProcess& operator=(const MMPolicyInProcess&) = delete;

  MMPolicyInProcess(MMPolicyInProcess&& aOther)
      : mBase(nullptr), mReservationSize(0), mCommitOffset(0) {
    *this = std::move(aOther);
  }

  MMPolicyInProcess& operator=(MMPolicyInProcess&& aOther) {
    mBase = aOther.mBase;
    aOther.mBase = nullptr;

    mCommitOffset = aOther.mCommitOffset;
    aOther.mCommitOffset = 0;

    mReservationSize = aOther.mReservationSize;
    aOther.mReservationSize = 0;

    return *this;
  }

  explicit operator bool() const { return !!mBase; }

  /**
   * Should we unhook everything upon destruction?
   */
  bool ShouldUnhookUponDestruction() const { return true; }

#if defined(_M_IX86)
  bool WriteAtomic(void* aDestPtr, const uint16_t aValue) const {
    *static_cast<uint16_t*>(aDestPtr) = aValue;
    return true;
  }
#endif  // defined(_M_IX86)

  bool Protect(void* aVAddress, size_t aSize, uint32_t aProtFlags,
               uint32_t* aPrevProtFlags) const {
    return ProtectInternal(::VirtualProtect, aVAddress, aSize, aProtFlags,
                           aPrevProtFlags);
  }

  bool FlushInstructionCache() const {
    return !!::FlushInstructionCache(::GetCurrentProcess(), nullptr, 0);
  }

  static DWORD GetTrampWriteProtFlags() { return PAGE_EXECUTE_READWRITE; }

#if defined(_M_X64)
  bool IsTrampolineSpaceInLowest2GB() const {
    return (mBase + mReservationSize) <=
           reinterpret_cast<uint8_t*>(0x0000000080000000ULL);
  }
#endif  // defined(_M_X64)

 protected:
  uint8_t* GetLocalView() const { return mBase; }

  uintptr_t GetRemoteView() const {
    // Same as local view for in-process
    return reinterpret_cast<uintptr_t>(mBase);
  }

  /**
   * @return the effective number of bytes reserved, or 0 on failure
   */
  uint32_t Reserve(const uint32_t aSize,
                   const Maybe<Span<const uint8_t>>& aBounds) {
    if (!aSize) {
      return 0;
    }

    if (mBase) {
      MOZ_ASSERT(mReservationSize >= aSize);
      return mReservationSize;
    }

    mReservationSize = ComputeAllocationSize(aSize);

    auto reserveFn = [](HANDLE aProcess, PVOID aBase,
                        uint32_t aSize) -> PVOID {
      return ::VirtualAlloc(aBase, aSize, MEM_RESERVE, PAGE_NOACCESS);
    };
    auto reserveWithinRangeFn =
        [](HANDLE aProcess, uint32_t aSize, const uint8_t* aRangeMin,
           const uint8_t* aRangeMaxExcl) -> Maybe<PVOID> {
      static const StaticDynamicallyLinkedFunctionPtr<decltype(
          &::VirtualAlloc2)>
          pVirtualAlloc2(L"kernelbase.dll", "VirtualAlloc2");
      if (!pVirtualAlloc2) {
        return Nothing();
      }

      // NB: MEM_ADDRESS_REQUIREMENTS::HighestEndingAddress is *inclusive*
      MEM_ADDRESS_REQUIREMENTS memReq = {
          const_cast<uint8_t*>(aRangeMin),
          const_cast<uint8_t*>(aRangeMaxExcl - 1)};

      MEM_EXTENDED_PARAMETER memParam = {};
      memParam.Type = MemExtendedParameterAddressRequirements;
      memParam.Pointer = &memReq;

      return Some(pVirtualAlloc2(aProcess, nullptr, aSize, MEM_RESERVE,
                                 PAGE_NOACCESS, &memParam, 1));
    };

    mBase = static_cast<uint8_t*>(
        MMPolicyBase::Reserve(::GetCurrentProcess(), mReservationSize,
                              reserveFn, reserveWithinRangeFn, aBounds));

    if (!mBase) {
      return 0;
    }

    return mReservationSize;
  }

  bool MaybeCommitNextPage(const uint32_t aRequestedOffset,
                           const uint32_t aRequestedLength) {
    if (!(*this)) {
      return false;
    }

    uint32_t limit = aRequestedOffset + aRequestedLength - 1;
    if (limit < mCommitOffset) {
      // No commit required
      return true;
    }

    MOZ_DIAGNOSTIC_ASSERT(mCommitOffset < mReservationSize);
    if (mCommitOffset >= mReservationSize) {
      return false;
    }

    PVOID local = ::VirtualAlloc(mBase + mCommitOffset, GetPageSize(),
                                 MEM_COMMIT, PAGE_EXECUTE_READ);
    if (!local) {
      return false;
    }

    mCommitOffset += GetPageSize();
    return true;
  }

 private:
  uint8_t* mBase;
  uint32_t mReservationSize;
  uint32_t mCommitOffset;
};

// This class manages in-process memory access without using functions
// imported from kernel32.dll. Instead, it uses functions in its own
// function table that are provided from outside.
class MMPolicyInProcessEarlyStage : public MMPolicyInProcessPrimitive {
 public:
  struct Kernel32Exports {
    decltype(&::FlushInstructionCache) mFlushInstructionCache;
    decltype(&::GetSystemInfo) mGetSystemInfo;
    decltype(&::VirtualProtect) mVirtualProtect;
  };

 private:
  static DWORD GetPageSize(const Kernel32Exports& aK32Exports) {
    SYSTEM_INFO sysInfo;
    aK32Exports.mGetSystemInfo(&sysInfo);
    return sysInfo.dwPageSize;
  }

  const Kernel32Exports& mK32Exports;
  const DWORD mPageSize;

 public:
  explicit MMPolicyInProcessEarlyStage(const Kernel32Exports& aK32Exports)
      : mK32Exports(aK32Exports), mPageSize(GetPageSize(mK32Exports)) {}
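  // Illustrative sketch of how this class is meant to be driven (the caller
  // and variable names below are assumptions, not part of this header): a
  // component running before the IAT is resolved fills in a Kernel32Exports
  // table with function pointers it has located itself, then patches memory
  // through this policy.
  //
  //   MMPolicyInProcessEarlyStage::Kernel32Exports k32 = {/* resolved by the
  //                                                          caller */};
  //   MMPolicyInProcessEarlyStage policy(k32);
  //   uint32_t prevProt = 0;
  //   if (policy.Protect(target, len, PAGE_EXECUTE_READWRITE, &prevProt)) {
  //     policy.Write(target, newBytes, len);
  //     uint32_t unused = 0;
  //     policy.Protect(target, len, prevProt, &unused);
  //     policy.FlushInstructionCache();
  //   }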
  // The pattern of constructing a local static variable with a lambda,
  // which can be seen in MMPolicyBase, is compiled into code that uses
  // critical-section APIs such as EnterCriticalSection imported from
  // kernel32.dll. Because this class needs to be able to run in a process's
  // early stage, when the IAT is not yet resolved, we cannot use that
  // pattern, so we simply cache the value as a member of the class.
  DWORD GetPageSize() const { return mPageSize; }

  bool Protect(void* aVAddress, size_t aSize, uint32_t aProtFlags,
               uint32_t* aPrevProtFlags) const {
    return ProtectInternal(mK32Exports.mVirtualProtect, aVAddress, aSize,
                           aProtFlags, aPrevProtFlags);
  }

  bool FlushInstructionCache() const {
    const HANDLE kCurrentProcess = reinterpret_cast<HANDLE>(-1);
    return !!mK32Exports.mFlushInstructionCache(kCurrentProcess, nullptr, 0);
  }
};

class MMPolicyOutOfProcess : public MMPolicyBase {
 public:
  typedef MMPolicyOutOfProcess MMPolicyT;

  explicit MMPolicyOutOfProcess(HANDLE aProcess)
      : mProcess(nullptr),
        mMapping(nullptr),
        mLocalView(nullptr),
        mRemoteView(nullptr),
        mReservationSize(0),
        mCommitOffset(0) {
    MOZ_ASSERT(aProcess);
    ::DuplicateHandle(::GetCurrentProcess(), aProcess, ::GetCurrentProcess(),
                      &mProcess, kAccessFlags, FALSE, 0);
    MOZ_ASSERT(mProcess);
  }

  explicit MMPolicyOutOfProcess(DWORD aPid)
      : mProcess(::OpenProcess(kAccessFlags, FALSE, aPid)),
        mMapping(nullptr),
        mLocalView(nullptr),
        mRemoteView(nullptr),
        mReservationSize(0),
        mCommitOffset(0) {
    MOZ_ASSERT(mProcess);
  }

  ~MMPolicyOutOfProcess() { Destroy(); }

  MMPolicyOutOfProcess(MMPolicyOutOfProcess&& aOther)
      : mProcess(nullptr),
        mMapping(nullptr),
        mLocalView(nullptr),
        mRemoteView(nullptr),
        mReservationSize(0),
        mCommitOffset(0) {
    *this = std::move(aOther);
  }

  MMPolicyOutOfProcess(const MMPolicyOutOfProcess& aOther) = delete;
  MMPolicyOutOfProcess& operator=(const MMPolicyOutOfProcess&) = delete;

  MMPolicyOutOfProcess& operator=(MMPolicyOutOfProcess&& aOther) {
    Destroy();

    mProcess = aOther.mProcess;
    aOther.mProcess = nullptr;

    mMapping = aOther.mMapping;
    aOther.mMapping = nullptr;

    mLocalView = aOther.mLocalView;
    aOther.mLocalView = nullptr;

    mRemoteView = aOther.mRemoteView;
    aOther.mRemoteView = nullptr;

    mReservationSize = aOther.mReservationSize;
    aOther.mReservationSize = 0;

    mCommitOffset = aOther.mCommitOffset;
    aOther.mCommitOffset = 0;

    return *this;
  }

  explicit operator bool() const {
    return mProcess && mMapping && mLocalView && mRemoteView;
  }

  bool ShouldUnhookUponDestruction() const {
    // We don't clean up hooks for remote processes; they are expected to
    // outlive our process.
    return false;
  }

  // This function reads as many bytes as |aLen| from the target process and
  // succeeds only when the entire area to be read is accessible.
  bool Read(void* aToPtr, const void* aFromPtr, size_t aLen) const {
    MOZ_ASSERT(mProcess);
    if (!mProcess) {
      return false;
    }

    SIZE_T numBytes = 0;
    BOOL ok = ::ReadProcessMemory(mProcess, aFromPtr, aToPtr, aLen, &numBytes);
    return ok && numBytes == aLen;
  }

  // This function reads as many bytes as possible from the target process up
  // to |aLen| bytes and returns the number of bytes that were actually read.
  size_t TryRead(void* aToPtr, const void* aFromPtr, size_t aLen) const {
    MOZ_ASSERT(mProcess);
    if (!mProcess) {
      return 0;
    }

    uint32_t pageSize = GetPageSize();
    uintptr_t pageMask = pageSize - 1;

    auto rangeStart = reinterpret_cast<uintptr_t>(aFromPtr);
    auto rangeEnd = rangeStart + aLen;

    while (rangeStart < rangeEnd) {
      SIZE_T numBytes = 0;
      BOOL ok = ::ReadProcessMemory(mProcess, aFromPtr, aToPtr,
                                    rangeEnd - rangeStart, &numBytes);
      if (ok) {
        return numBytes;
      }

      // If ReadProcessMemory fails, try to read up to each page boundary from
      // the end of the requested area one by one.
      if (rangeEnd & pageMask) {
        rangeEnd &= ~pageMask;
      } else {
        rangeEnd -= pageSize;
      }
    }

    return 0;
  }

  bool Write(void* aToPtr, const void* aFromPtr, size_t aLen) const {
    MOZ_ASSERT(mProcess);
    if (!mProcess) {
      return false;
    }

    SIZE_T numBytes = 0;
    BOOL ok = ::WriteProcessMemory(mProcess, aToPtr, aFromPtr, aLen, &numBytes);
    return ok && numBytes == aLen;
  }

  bool Protect(void* aVAddress, size_t aSize, uint32_t aProtFlags,
               uint32_t* aPrevProtFlags) const {
    MOZ_ASSERT(mProcess);
    if (!mProcess) {
      return false;
    }

    MOZ_ASSERT(aPrevProtFlags);
    BOOL ok = ::VirtualProtectEx(mProcess, aVAddress, aSize, aProtFlags,
                                 reinterpret_cast<PDWORD>(aPrevProtFlags));
    if (!ok && aPrevProtFlags) {
      // VirtualProtectEx can fail but still set valid protection flags.
      // Let's clear those upon failure.
      *aPrevProtFlags = 0;
    }

    return !!ok;
  }
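  // Worked example for TryRead (illustrative numbers, assuming a 0x1000-byte
  // page size): the caller asks for 0x2500 bytes starting at 0x10000800, but
  // the target page at 0x10002000 is not readable. The first
  // ReadProcessMemory call for the full 0x2500 bytes fails, so the end of the
  // range (0x10002D00) is trimmed down to the page boundary 0x10002000; the
  // retry then reads the remaining 0x1800 accessible bytes successfully and
  // TryRead returns 0x1800.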
  /**
   * @return true if the page that hosts aVAddress is accessible.
   */
  bool IsPageAccessible(void* aVAddress) const {
    MEMORY_BASIC_INFORMATION mbi;
    SIZE_T result = nt::VirtualQueryEx(mProcess, aVAddress, &mbi, sizeof(mbi));

    return result && mbi.AllocationProtect && (mbi.Type & MEM_IMAGE) &&
           mbi.State == MEM_COMMIT && mbi.Protect != PAGE_NOACCESS;
  }

  bool FlushInstructionCache() const {
    return !!::FlushInstructionCache(mProcess, nullptr, 0);
  }

  static DWORD GetTrampWriteProtFlags() { return PAGE_READWRITE; }

#if defined(_M_X64)
  bool IsTrampolineSpaceInLowest2GB() const {
    return (GetRemoteView() + mReservationSize) <= 0x0000000080000000ULL;
  }
#endif  // defined(_M_X64)

 protected:
  uint8_t* GetLocalView() const { return mLocalView; }

  uintptr_t GetRemoteView() const {
    return reinterpret_cast<uintptr_t>(mRemoteView);
  }

  /**
   * @return the effective number of bytes reserved, or 0 on failure
   */
  uint32_t Reserve(const uint32_t aSize,
                   const Maybe<Span<const uint8_t>>& aBounds) {
    if (!aSize || !mProcess) {
      return 0;
    }

    if (mRemoteView) {
      MOZ_ASSERT(mReservationSize >= aSize);
      return mReservationSize;
    }

    mReservationSize = ComputeAllocationSize(aSize);

    mMapping = ::CreateFileMapping(INVALID_HANDLE_VALUE, nullptr,
                                   PAGE_EXECUTE_READWRITE | SEC_RESERVE, 0,
                                   mReservationSize, nullptr);
    if (!mMapping) {
      return 0;
    }

    mLocalView = static_cast<uint8_t*>(
        ::MapViewOfFile(mMapping, FILE_MAP_WRITE, 0, 0, 0));
    if (!mLocalView) {
      return 0;
    }

    auto reserveFn = [mapping = mMapping](HANDLE aProcess, PVOID aBase,
                                          uint32_t aSize) -> PVOID {
      return mozilla::MapRemoteViewOfFile(mapping, aProcess, 0ULL, aBase, 0, 0,
                                          PAGE_EXECUTE_READ);
    };

    auto reserveWithinRangeFn =
        [mapping = mMapping](HANDLE aProcess, uint32_t aSize,
                             const uint8_t* aRangeMin,
                             const uint8_t* aRangeMaxExcl) -> Maybe<PVOID> {
      static const StaticDynamicallyLinkedFunctionPtr<decltype(
          &::MapViewOfFile3)>
          pMapViewOfFile3(L"kernelbase.dll", "MapViewOfFile3");
      if (!pMapViewOfFile3) {
        return Nothing();
      }

      // NB: MEM_ADDRESS_REQUIREMENTS::HighestEndingAddress is *inclusive*
      MEM_ADDRESS_REQUIREMENTS memReq = {
          const_cast<uint8_t*>(aRangeMin),
          const_cast<uint8_t*>(aRangeMaxExcl - 1)};

      MEM_EXTENDED_PARAMETER memParam = {};
      memParam.Type = MemExtendedParameterAddressRequirements;
      memParam.Pointer = &memReq;

      return Some(pMapViewOfFile3(mapping, aProcess, nullptr, 0, aSize, 0,
                                  PAGE_EXECUTE_READ, &memParam, 1));
    };

    mRemoteView = MMPolicyBase::Reserve(mProcess, mReservationSize, reserveFn,
                                        reserveWithinRangeFn, aBounds);
    if (!mRemoteView) {
      return 0;
    }

    return mReservationSize;
  }

  bool MaybeCommitNextPage(const uint32_t aRequestedOffset,
                           const uint32_t aRequestedLength) {
    if (!(*this)) {
      return false;
    }

    uint32_t limit = aRequestedOffset + aRequestedLength - 1;
    if (limit < mCommitOffset) {
      // No commit required
      return true;
    }

    MOZ_DIAGNOSTIC_ASSERT(mCommitOffset < mReservationSize);
    if (mCommitOffset >= mReservationSize) {
      return false;
    }

    PVOID local = ::VirtualAlloc(mLocalView + mCommitOffset, GetPageSize(),
                                 MEM_COMMIT, PAGE_READWRITE);
    if (!local) {
      return false;
    }

    PVOID remote = ::VirtualAllocEx(
        mProcess, static_cast<uint8_t*>(mRemoteView) + mCommitOffset,
        GetPageSize(), MEM_COMMIT, PAGE_EXECUTE_READ);
    if (!remote) {
      return false;
    }

    mCommitOffset += GetPageSize();
    return true;
  }

 private:
  void Destroy() {
    // We always leak the remote view
    if (mLocalView) {
      ::UnmapViewOfFile(mLocalView);
      mLocalView = nullptr;
    }

    if (mMapping) {
      ::CloseHandle(mMapping);
      mMapping = nullptr;
    }

    if (mProcess) {
      ::CloseHandle(mProcess);
      mProcess = nullptr;
    }
  }

 private:
  HANDLE mProcess;
  HANDLE mMapping;
  uint8_t* mLocalView;
  PVOID mRemoteView;
  uint32_t mReservationSize;
  uint32_t mCommitOffset;

  static const DWORD kAccessFlags = PROCESS_QUERY_INFORMATION |
                                    PROCESS_VM_OPERATION | PROCESS_VM_READ |
                                    PROCESS_VM_WRITE;
};

}  // namespace interceptor
}  // namespace mozilla

#endif  // mozilla_interceptor_MMPolicies_h