/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     BSD-3-Clause (https://spdx.org/licenses/BSD-3-Clause)
 * FILE:        ntoskrnl/mm/ARM3/wslist.cpp
 * PURPOSE:     Working set list management
 * PROGRAMMERS: Jérôme Gardou
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>

#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "miarm.h"

/* GLOBALS ********************************************************************/

PMMWSL MmWorkingSetList;
KEVENT MmWorkingSetManagerEvent;

/* LOCAL FUNCTIONS ************************************************************/

static MMPTE GetPteTemplateForWsList(PMMWSL WsList)
{
    return (WsList == MmSystemCacheWorkingSetList) ? ValidKernelPte : ValidKernelPteLocal;
}

static ULONG GetNextPageColorForWsList(PMMWSL WsList)
{
    return (WsList == MmSystemCacheWorkingSetList) ? MI_GET_NEXT_COLOR() : MI_GET_NEXT_PROCESS_COLOR(PsGetCurrentProcess());
}

static void FreeWsleIndex(PMMWSL WsList, ULONG Index)
{
    PMMWSLE Wsle = WsList->Wsle;
    ULONG& LastEntry = WsList->LastEntry;
    ULONG& FirstFree = WsList->FirstFree;
    ULONG& LastInitializedWsle = WsList->LastInitializedWsle;

    /* Erase it now */
    Wsle[Index].u1.Long = 0;

    if (Index == (LastEntry - 1))
    {
        /* We're freeing the last index of our list. */
        while (Wsle[Index].u1.e1.Valid == 0)
            Index--;

        /* Should we bother about the Free entries */
        if (FirstFree < Index)
        {
            /* Try getting the index of the last free entry */
            ASSERT(Wsle[Index + 1].u1.Free.MustBeZero == 0);
            ULONG PreviousFree = Wsle[Index + 1].u1.Free.PreviousFree;
            ASSERT(PreviousFree < LastEntry);
            ULONG LastFree = Index + 1 - PreviousFree;
#ifdef MMWSLE_PREVIOUS_FREE_JUMP
            while (Wsle[LastFree].u1.e1.Valid)
            {
                ASSERT(LastFree > MMWSLE_PREVIOUS_FREE_JUMP);
                LastFree -= MMWSLE_PREVIOUS_FREE_JUMP;
            }
#endif
            /* Update */
            ASSERT(LastFree >= FirstFree);
            Wsle[FirstFree].u1.Free.PreviousFree = (Index + 1 - LastFree) & MMWSLE_PREVIOUS_FREE_MASK;
            Wsle[LastFree].u1.Free.NextFree = 0;
        }
        else
        {
            /* No more free entries in our array */
            FirstFree = ULONG_MAX;
        }

        /* This is the new size of our array */
        LastEntry = Index + 1;

        /* Should we shrink the alloc? */
        while ((LastInitializedWsle - LastEntry) > (PAGE_SIZE / sizeof(MMWSLE)))
        {
            PMMPTE PointerPte = MiAddressToPte(Wsle + LastInitializedWsle - 1);

            /* We must not free ourself! */
            ASSERT(MiPteToAddress(PointerPte) != WsList);

            PFN_NUMBER Page = PFN_FROM_PTE(PointerPte);

            {
                ntoskrnl::MiPfnLockGuard PfnLock;

                PMMPFN Pfn = MiGetPfnEntry(Page);
                MI_SET_PFN_DELETED(Pfn);
                MiDecrementShareCount(MiGetPfnEntry(Pfn->u4.PteFrame), Pfn->u4.PteFrame);
                MiDecrementShareCount(Pfn, Page);
            }

            PointerPte->u.Long = 0;

            KeInvalidateTlbEntry(Wsle + LastInitializedWsle - 1);

            LastInitializedWsle -= PAGE_SIZE / sizeof(MMWSLE);
        }

        return;
    }

    if (FirstFree == ULONG_MAX)
    {
        /* We're the first one. */
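        /* N.B.: the head of the free list also tracks the tail: its PreviousFree
         * field stores the distance from LastEntry back to the last free entry
         * (kept consistent here, in the shrink path above, and in the insertion
         * paths below). */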
        FirstFree = Index;
        Wsle[FirstFree].u1.Free.PreviousFree = (LastEntry - FirstFree) & MMWSLE_PREVIOUS_FREE_MASK;
        return;
    }

    /* We must find where to place ourself */
    ULONG NextFree = FirstFree;
    ULONG PreviousFree = 0;
    while (NextFree < Index)
    {
        ASSERT(Wsle[NextFree].u1.Free.MustBeZero == 0);
        if (Wsle[NextFree].u1.Free.NextFree == 0)
            break;
        PreviousFree = NextFree;
        NextFree += Wsle[NextFree].u1.Free.NextFree;
    }

    if (NextFree < Index)
    {
        /* This is actually the last free entry */
        Wsle[NextFree].u1.Free.NextFree = Index - NextFree;
        Wsle[Index].u1.Free.PreviousFree = (Index - NextFree) & MMWSLE_PREVIOUS_FREE_MASK;
        Wsle[FirstFree].u1.Free.PreviousFree = (LastEntry - Index) & MMWSLE_PREVIOUS_FREE_MASK;
        return;
    }

    if (PreviousFree == 0)
    {
        /* This is the first free */
        Wsle[Index].u1.Free.NextFree = FirstFree - Index;
        Wsle[Index].u1.Free.PreviousFree = Wsle[FirstFree].u1.Free.PreviousFree;
        Wsle[FirstFree].u1.Free.PreviousFree = (FirstFree - Index) & MMWSLE_PREVIOUS_FREE_MASK;
        FirstFree = Index;
        return;
    }

    /* Insert */
    Wsle[PreviousFree].u1.Free.NextFree = (Index - PreviousFree);
    Wsle[Index].u1.Free.PreviousFree = (Index - PreviousFree) & MMWSLE_PREVIOUS_FREE_MASK;
    Wsle[Index].u1.Free.NextFree = NextFree - Index;
    Wsle[NextFree].u1.Free.PreviousFree = (NextFree - Index) & MMWSLE_PREVIOUS_FREE_MASK;
}

static ULONG GetFreeWsleIndex(PMMWSL WsList)
{
    ULONG Index;

    if (WsList->FirstFree != ULONG_MAX)
    {
        Index = WsList->FirstFree;
        ASSERT(Index < WsList->LastInitializedWsle);
        MMWSLE_FREE_ENTRY& FreeWsle = WsList->Wsle[Index].u1.Free;
        ASSERT(FreeWsle.MustBeZero == 0);
        if (FreeWsle.NextFree != 0)
        {
            WsList->FirstFree += FreeWsle.NextFree;
            WsList->Wsle[WsList->FirstFree].u1.Free.PreviousFree = FreeWsle.PreviousFree;
        }
        else
        {
            WsList->FirstFree = ULONG_MAX;
        }
    }
    else
    {
        Index = WsList->LastEntry++;
        if (Index >= WsList->LastInitializedWsle)
        {
            /* Grow our array */
            PMMPTE PointerPte = MiAddressToPte(&WsList->Wsle[WsList->LastInitializedWsle]);
            ASSERT(PointerPte->u.Hard.Valid == 0);

            MMPTE TempPte = GetPteTemplateForWsList(WsList);

            {
                ntoskrnl::MiPfnLockGuard PfnLock;

                TempPte.u.Hard.PageFrameNumber = MiRemoveAnyPage(GetNextPageColorForWsList(WsList));
                MiInitializePfnAndMakePteValid(TempPte.u.Hard.PageFrameNumber, PointerPte, TempPte);
            }

            WsList->LastInitializedWsle += PAGE_SIZE / sizeof(MMWSLE);
        }
    }

    WsList->Wsle[Index].u1.Long = 0;

    return Index;
}

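/* Unlink an address from a working set list and return its WSLE index to the
 * free list. The caller must hold the working set lock exclusively, and the
 * page must be a resident, private (non-prototype) page. */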
static
VOID
RemoveFromWsList(PMMWSL WsList, PVOID Address)
{
    /* Make sure that we are holding the right locks. */
    ASSERT(MM_ANY_WS_LOCK_HELD_EXCLUSIVE(PsGetCurrentThread()));

    PMMPTE PointerPte = MiAddressToPte(Address);

    /* Make sure we are removing a paged-in address */
    ASSERT(PointerPte->u.Hard.Valid == 1);
    PMMPFN Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
    ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);

    /* Shared pages not supported yet */
    ASSERT(Pfn1->u3.e1.PrototypePte == 0);

    /* Nor are "ROS PFN" */
    ASSERT(MI_IS_ROS_PFN(Pfn1) == FALSE);

    /* And we should have a valid index here */
    ASSERT(Pfn1->u1.WsIndex != 0);

    FreeWsleIndex(WsList, Pfn1->u1.WsIndex);
}

static
ULONG
TrimWsList(PMMWSL WsList)
{
    /* This should be done under WS lock */
    ASSERT(MM_ANY_WS_LOCK_HELD(PsGetCurrentThread()));

    ULONG Ret = 0;

    /* Walk the array */
    for (ULONG i = WsList->FirstDynamic; i < WsList->LastEntry; i++)
    {
        MMWSLE& Entry = WsList->Wsle[i];
        if (!Entry.u1.e1.Valid)
            continue;

        /* Only direct entries for now */
        ASSERT(Entry.u1.e1.Direct == 1);

        /* Check the PTE */
        PMMPTE PointerPte = MiAddressToPte(Entry.u1.VirtualAddress);

        /* This must be valid */
        ASSERT(PointerPte->u.Hard.Valid);

        /* If the PTE was accessed, simply reset and that's the end of it */
        if (PointerPte->u.Hard.Accessed)
        {
            Entry.u1.e1.Age = 0;
            PointerPte->u.Hard.Accessed = 0;
            KeInvalidateTlbEntry(Entry.u1.VirtualAddress);
            continue;
        }

        /* If the entry is not so old, just age it */
        if (Entry.u1.e1.Age < 3)
        {
            Entry.u1.e1.Age++;
            continue;
        }

        if ((Entry.u1.e1.LockedInMemory) || (Entry.u1.e1.LockedInWs))
        {
            /* This one is locked. Next time, maybe... */
            continue;
        }

        /* FIXME: Invalidating PDEs breaks legacy MMs */
        if (MI_IS_PAGE_TABLE_ADDRESS(Entry.u1.VirtualAddress))
            continue;

        /* Please put yourself aside and make room for the younger ones */
        PFN_NUMBER Page = PFN_FROM_PTE(PointerPte);
        {
            ntoskrnl::MiPfnLockGuard PfnLock;

            PMMPFN Pfn = MiGetPfnEntry(Page);

            /* Not supported yet */
            ASSERT(Pfn->u3.e1.PrototypePte == 0);
            ASSERT(!MI_IS_ROS_PFN(Pfn));

            /* FIXME: Remove this hack when possible */
            if (Pfn->Wsle.u1.e1.LockedInMemory || (Pfn->Wsle.u1.e1.LockedInWs))
            {
                continue;
            }

            /* We can remove it from the list. Save Protection first */
            ULONG Protection = Entry.u1.e1.Protection;
            RemoveFromWsList(WsList, Entry.u1.VirtualAddress);

            /* Dirtify the page, if needed */
            if (PointerPte->u.Hard.Dirty)
                Pfn->u3.e1.Modified = 1;

            /* Make this a transition PTE and flush the address it mapped */
            MI_MAKE_TRANSITION_PTE(PointerPte, Page, Protection);
            KeInvalidateTlbEntry(MiPteToAddress(PointerPte));

            /* Drop the share count. This will take care of putting it in the standby or modified list. */
            MiDecrementShareCount(Pfn, Page);
        }

        Ret++;
    }

    return Ret;
}

/* GLOBAL FUNCTIONS ***********************************************************/

extern "C"
{

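/* Insert a freshly made-valid, private page into the working set list of Vm
 * and account for it in the working set size. */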
_Use_decl_annotations_
VOID
NTAPI
MiInsertInWorkingSetList(
    _Inout_ PMMSUPPORT Vm,
    _In_ PVOID Address,
    _In_ ULONG Protection)
{
    PMMWSL WsList = Vm->VmWorkingSetList;

    /* Make sure that we are holding the WS lock. */
    ASSERT(MM_ANY_WS_LOCK_HELD_EXCLUSIVE(PsGetCurrentThread()));

    PMMPTE PointerPte = MiAddressToPte(Address);

    /* Make sure we are adding a paged-in address */
    ASSERT(PointerPte->u.Hard.Valid == 1);
    PMMPFN Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
    ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);

    /* Shared pages not supported yet */
    ASSERT(Pfn1->u1.WsIndex == 0);
    ASSERT(Pfn1->u3.e1.PrototypePte == 0);

    /* Nor are "ROS PFN" */
    ASSERT(MI_IS_ROS_PFN(Pfn1) == FALSE);

    Pfn1->u1.WsIndex = GetFreeWsleIndex(WsList);

    MMWSLENTRY& NewWsle = WsList->Wsle[Pfn1->u1.WsIndex].u1.e1;
    NewWsle.VirtualPageNumber = reinterpret_cast<ULONG_PTR>(Address) >> PAGE_SHIFT;
    NewWsle.Protection = Protection;
    NewWsle.Direct = 1;
    NewWsle.Hashed = 0;
    NewWsle.LockedInMemory = 0;
    NewWsle.LockedInWs = 0;
    NewWsle.Age = 0;
    NewWsle.Valid = 1;

    Vm->WorkingSetSize += PAGE_SIZE;
    if (Vm->WorkingSetSize > Vm->PeakWorkingSetSize)
        Vm->PeakWorkingSetSize = Vm->WorkingSetSize;
}

_Use_decl_annotations_
VOID
NTAPI
MiRemoveFromWorkingSetList(
    _Inout_ PMMSUPPORT Vm,
    _In_ PVOID Address)
{
    RemoveFromWsList(Vm->VmWorkingSetList, Address);

    Vm->WorkingSetSize -= PAGE_SIZE;
}

_Use_decl_annotations_
VOID
NTAPI
MiInitializeWorkingSetList(_Inout_ PMMSUPPORT WorkingSet)
{
    PMMWSL WsList = WorkingSet->VmWorkingSetList;

    /* Initialize some fields */
    WsList->FirstFree = ULONG_MAX;
    WsList->Wsle = reinterpret_cast<PMMWSLE>(WsList + 1);
    WsList->LastEntry = 0;
    /* The first page is already allocated */
    WsList->LastInitializedWsle = (PAGE_SIZE - sizeof(*WsList)) / sizeof(MMWSLE);

    /* Insert the addresses we already know: our PDE base and the Working Set List */
    if (MI_IS_PROCESS_WORKING_SET(WorkingSet))
    {
        ASSERT(WorkingSet->VmWorkingSetList == MmWorkingSetList);
#if _MI_PAGING_LEVELS == 4
        MiInsertInWorkingSetList(WorkingSet, (PVOID)PXE_BASE, 0U);
#elif _MI_PAGING_LEVELS == 3
        MiInsertInWorkingSetList(WorkingSet, (PVOID)PPE_BASE, 0U);
#elif _MI_PAGING_LEVELS == 2
        MiInsertInWorkingSetList(WorkingSet, (PVOID)PDE_BASE, 0U);
#endif
    }

#if _MI_PAGING_LEVELS == 4
    MiInsertInWorkingSetList(WorkingSet, MiAddressToPpe(WorkingSet->VmWorkingSetList), 0UL);
#endif
#if _MI_PAGING_LEVELS >= 3
    MiInsertInWorkingSetList(WorkingSet, MiAddressToPde(WorkingSet->VmWorkingSetList), 0UL);
#endif
    MiInsertInWorkingSetList(WorkingSet, (PVOID)MiAddressToPte(WorkingSet->VmWorkingSetList), 0UL);
    MiInsertInWorkingSetList(WorkingSet, (PVOID)WorkingSet->VmWorkingSetList, 0UL);

    /* From now on, every added page can be trimmed at any time */
    WsList->FirstDynamic = WsList->LastEntry;

    /* We can add this to our list */
    ExInterlockedInsertTailList(&MmWorkingSetExpansionHead, &WorkingSet->WorkingSetExpansionLinks, &MmExpansionLock);
}

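/* Walk the working sets registered in MmWorkingSetExpansionHead and, while the
 * number of available pages runs low, trim the ones that exceed their limits. */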
VOID
NTAPI
MmWorkingSetManager(VOID)
{
    PLIST_ENTRY VmListEntry;
    PMMSUPPORT Vm = NULL;
    KIRQL OldIrql;

    OldIrql = MiAcquireExpansionLock();

    for (VmListEntry = MmWorkingSetExpansionHead.Flink;
         VmListEntry != &MmWorkingSetExpansionHead;
         VmListEntry = VmListEntry->Flink)
    {
        BOOLEAN TrimHard = MmAvailablePages < MmMinimumFreePages;
        PEPROCESS Process = NULL;

        /* Don't do anything if we have plenty of free pages. */
        if ((MmAvailablePages + MmModifiedPageListHead.Total) >= MmPlentyFreePages)
            break;

        Vm = CONTAINING_RECORD(VmListEntry, MMSUPPORT, WorkingSetExpansionLinks);

        /* Let the legacy Mm System space alone */
        if (Vm == MmGetKernelAddressSpace())
            continue;

        if (MI_IS_PROCESS_WORKING_SET(Vm))
        {
            Process = CONTAINING_RECORD(Vm, EPROCESS, Vm);

            /* Make sure the process is not terminating and attach to it */
            if (!ExAcquireRundownProtection(&Process->RundownProtect))
                continue;
            ASSERT(!KeIsAttachedProcess());
            KeAttachProcess(&Process->Pcb);
        }
        else
        {
            /* FIXME: Session & system space unsupported */
            continue;
        }

        MiReleaseExpansionLock(OldIrql);

        /* Share-lock for now, we're only reading */
        MiLockWorkingSetShared(PsGetCurrentThread(), Vm);

        if (((Vm->WorkingSetSize > Vm->MaximumWorkingSetSize) ||
             (TrimHard && (Vm->WorkingSetSize > Vm->MinimumWorkingSetSize))) &&
            MiConvertSharedWorkingSetLockToExclusive(PsGetCurrentThread(), Vm))
        {
            /* This working set is over its limit: trim it */
            Vm->Flags.BeingTrimmed = 1;

            ULONG Trimmed = TrimWsList(Vm->VmWorkingSetList);

            /* We're done */
            Vm->WorkingSetSize -= Trimmed * PAGE_SIZE;
            Vm->Flags.BeingTrimmed = 0;
            MiUnlockWorkingSet(PsGetCurrentThread(), Vm);
        }
        else
        {
            MiUnlockWorkingSetShared(PsGetCurrentThread(), Vm);
        }

        /* Lock again */
        OldIrql = MiAcquireExpansionLock();

        if (Process)
        {
            KeDetachProcess();
            ExReleaseRundownProtection(&Process->RundownProtect);
        }
    }

    MiReleaseExpansionLock(OldIrql);
}

} // extern "C"