1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /*!
25  * @file
26  * @brief The PMA implementation file.
27  * This file implements the PMA object and the public interfaces.
28  *
29  * @bug
30  *  1. SLI broadcast -- Not implemented
31  */
32 
33 #include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h"
34 #include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.h"
35 #include "gpu/mem_mgr/phys_mem_allocator/numa.h"
36 #include "gpu/mem_mgr/mem_scrub.h"
37 #include "utils/nvprintf.h"
38 #include "utils/nvassert.h"
39 
40 #if !defined(SRT_BUILD)
41 // These files are not found on SRT builds
42 #include "os/os.h"
43 #else
44 NV_STATUS pmaNumaAllocate
45 (
46     PMA                    *pPma,
47     NvLength                allocationCount,
48     NvU64                   pageSize,
49     PMA_ALLOCATION_OPTIONS *allocationOptions,
50     NvU64                  *pPages
51 )
52 {
53     return NV_ERR_GENERIC;
54 }
55 
56 void pmaNumaFreeInternal
57 (
58     PMA   *pPma,
59     NvU64 *pPages,
60     NvU64  pageCount,
61     NvU64  size,
62     NvU32  flag
63 )
64 {
65     return;
66 }
67 void pmaNumaSetReclaimSkipThreshold(PMA *pPma, NvU32 data)
68 {
69     return;
70 }
71 #endif
72 
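//
// Signature shared by the contiguous and discontiguous map scan callbacks
// (pmaMapScanContiguous/pmaMapScanDiscontiguous). Judging by the call site in
// pmaAllocatePages, the parameters are: the map, the region base address, the
// scan range start and end (offsets within the region), the number of pages
// to allocate, the output page array, the page size, the alignment, the
// output count of pages allocated, whether eviction is disallowed, and
// whether to allocate in reverse order.
//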
73 typedef NV_STATUS (*scanFunc)(void *, NvU64, NvU64, NvU64, NvU64, NvU64*, NvU64, NvU64, NvU64*, NvBool, NvBool);
74 
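//
// Reverts a partially completed allocation: the first failCount pages are
// returned to oldState in full, and for the page at index failCount (which
// may have failed half-way through) only the first failFrame frames are
// reverted.
//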
75 static void
76 _pmaRollback
77 (
78     PMA           *pPma,
79     NvU64         *pPages,
80     NvU32          failCount,
81     NvU32          failFrame,
82     NvU64          pageSize,
83     PMA_PAGESTATUS oldState
84 )
85 {
86     NvU32 framesPerPage, regId, i, j;
87     NvU64 frameNum, addrBase;
88 
89     framesPerPage = (NvU32)(pageSize >> PMA_PAGE_SHIFT);
90     if (failCount != 0)
91     {
        for (i = 0; i < failCount; i++)
93         {
94             regId = findRegionID(pPma, pPages[i]);
95             addrBase = pPma->pRegDescriptors[regId]->base;
96             frameNum = PMA_ADDR2FRAME(pPages[i], addrBase);
97 
98             for (j = 0; j < framesPerPage; j++)
99             {
100                 pPma->pMapInfo->pmaMapChangeState(pPma->pRegions[regId], (frameNum + j), oldState);
101             }
102         }
103     }
104 
    if (failFrame != 0)
    {
        // The page at index failCount might have failed half-way through;
        // only the frames that were already changed need to be rolled back.
        regId = findRegionID(pPma, pPages[failCount]);
        addrBase = pPma->pRegDescriptors[regId]->base;
        frameNum = PMA_ADDR2FRAME(pPages[failCount], addrBase);
        for (i = 0; i < failFrame; i++)
112         {
113             pPma->pMapInfo->pmaMapChangeState(pPma->pRegions[regId], (frameNum + i), oldState);
114         }
115     }
116 }
117 
118 ///////////////////////////////////////////////////////////////////////////////
119 //
120 // Public interfaces
121 //
122 
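//
// Illustrative bring-up sequence (a sketch only; exact flags, descriptor
// values, and error handling are caller-specific):
//
//     PMA pma = {0};
//     PMA_REGION_DESCRIPTOR desc = {0};   // fill in base/limit/bProtected
//     NV_STATUS status = pmaInitialize(&pma, PMA_INIT_SCRUB_ON_FREE);
//     if (status == NV_OK)
//         status = pmaRegisterRegion(&pma, 0, NV_FALSE, &desc, 0, NULL);
//     ...
//     pmaDestroy(&pma);
//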
123 NV_STATUS
124 pmaInitialize(PMA *pPma, NvU32 initFlags)
125 {
126     NV_STATUS status = NV_OK;
127     PMA_MAP_INFO *pMapInfo;
128 
129     if (pPma == NULL)
130     {
131         return NV_ERR_INVALID_ARGUMENT;
132     }
133 
134     pPma->pPmaLock = NULL;
135     pPma->pEvictionCallbacksLock = NULL;
136 
137     // Assume portMemInitialize() has been called
138     pPma->pPmaLock = (PORT_SPINLOCK *)portMemAllocNonPaged(portSyncSpinlockSize);
139     if (pPma->pPmaLock == NULL)
140     {
141         status = NV_ERR_NO_MEMORY;
142         goto error;
143     }
144 
145     status = portSyncSpinlockInitialize(pPma->pPmaLock);
146     if (status != NV_OK)
147     {
148         portMemFree(pPma->pPmaLock);
149         pPma->pPmaLock = NULL;
150         goto error;
151     }
152 
153     pPma->pEvictionCallbacksLock = (PORT_MUTEX *)portMemAllocNonPaged(portSyncMutexSize);
154     if (pPma->pEvictionCallbacksLock == NULL)
155     {
156         status = NV_ERR_NO_MEMORY;
157         goto error;
158     }
159 
160     status = portSyncMutexInitialize(pPma->pEvictionCallbacksLock);
161     if (status != NV_OK)
162     {
163         portMemFree(pPma->pEvictionCallbacksLock);
164         pPma->pEvictionCallbacksLock = NULL;
165         goto error;
166     }
167 
168     pPma->pAllocLock = (PORT_MUTEX *)portMemAllocNonPaged(portSyncMutexSize);
169     if (pPma->pAllocLock == NULL)
170     {
171         status = NV_ERR_NO_MEMORY;
172         goto error;
173     }
174 
175     status = portSyncMutexInitialize(pPma->pAllocLock);
176     if (status != NV_OK)
177     {
178         portMemFree(pPma->pAllocLock);
179         pPma->pAllocLock = NULL;
180         goto error;
181     }
182 
183     pPma->pScrubberValidLock = (PORT_RWLOCK *)portMemAllocNonPaged(portSyncRwLockSize);
184     if (pPma->pScrubberValidLock == NULL)
185     {
186         status = NV_ERR_NO_MEMORY;
187         goto error;
188     }
189 
    //
    // Initialize the rwlock right after allocating it, so that no error path
    // can reach pmaDestroy() with an allocated but uninitialized lock.
    //
    status = portSyncRwLockInitialize(pPma->pScrubberValidLock);
    if (status != NV_OK)
    {
        portMemFree(pPma->pScrubberValidLock);
        pPma->pScrubberValidLock = NULL;
        goto error;
    }

    pMapInfo = (PMA_MAP_INFO *)portMemAllocNonPaged(sizeof(struct _PMA_MAP_INFO));
    if (pMapInfo == NULL)
    {
        status = NV_ERR_NO_MEMORY;
        goto error;
    }
204 
205     //
206     // Initialize all the scanning callbacks to lower layer
207     // Default use regmap
208     //
209     pMapInfo->pmaMapInit = pmaRegmapInit;
210     pMapInfo->pmaMapDestroy = pmaRegmapDestroy;
211     pMapInfo->pmaMapChangeState = pmaRegmapChangeState;
212     pMapInfo->pmaMapChangeStateAttrib = pmaRegmapChangeStateAttrib;
213     pMapInfo->pmaMapChangeStateAttribEx = pmaRegmapChangeStateAttribEx;
214     pMapInfo->pmaMapChangePageStateAttrib = pmaRegmapChangePageStateAttrib;
215     pMapInfo->pmaMapRead = pmaRegmapRead;
216     pMapInfo->pmaMapScanContiguous = pmaRegmapScanContiguous;
217     pMapInfo->pmaMapScanDiscontiguous = pmaRegmapScanDiscontiguous;
218     pMapInfo->pmaMapGetSize = pmaRegmapGetSize;
219     pMapInfo->pmaMapGetLargestFree = pmaRegmapGetLargestFree;
220     pMapInfo->pmaMapScanContiguousNumaEviction = pmaRegMapScanContiguousNumaEviction;
221     pMapInfo->pmaMapGetEvictingFrames = pmaRegmapGetEvictingFrames;
222     pMapInfo->pmaMapSetEvictingFrames = pmaRegmapSetEvictingFrames;
223 
224     if (initFlags != PMA_INIT_NONE)
225     {
226         pPma->bForcePersistence = !!(initFlags & PMA_INIT_FORCE_PERSISTENCE);
227 
        // If the scrub-on-free feature is enabled, PMA is not valid until scrubber registration
229         if (initFlags & PMA_INIT_SCRUB_ON_FREE)
230         {
231             portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_INVALID);
232         }
233         pPma->bScrubOnFree = !!(initFlags & PMA_INIT_SCRUB_ON_FREE);
234 
        // If running on a NUMA system, we cannot allocate from the OS until the node is onlined
236         if (initFlags & PMA_INIT_NUMA)
237         {
238             pPma->nodeOnlined = NV_FALSE;
239         }
240         pPma->bNuma = !!(initFlags & PMA_INIT_NUMA);
241 
242         pPma->bNumaAutoOnline = !!(initFlags & PMA_INIT_NUMA_AUTO_ONLINE);
243 
244         // If we want to run with address tree instead of regmap
245         if (initFlags & PMA_INIT_ADDRTREE)
246         {
247             pMapInfo->pmaMapInit = pmaAddrtreeInit;
248             pMapInfo->pmaMapDestroy = pmaAddrtreeDestroy;
249             pMapInfo->pmaMapChangeState = pmaAddrtreeChangeState;
250             pMapInfo->pmaMapChangeStateAttrib = pmaAddrtreeChangeStateAttrib;
251             pMapInfo->pmaMapChangeStateAttribEx = pmaAddrtreeChangeStateAttribEx;
252             pMapInfo->pmaMapChangePageStateAttrib = pmaAddrtreeChangePageStateAttrib;
253             pMapInfo->pmaMapRead = pmaAddrtreeRead;
254             pMapInfo->pmaMapScanContiguous = pmaAddrtreeScanContiguous;
255             pMapInfo->pmaMapScanDiscontiguous = pmaAddrtreeScanDiscontiguous;
256             pMapInfo->pmaMapGetSize = pmaAddrtreeGetSize;
257             pMapInfo->pmaMapGetLargestFree = pmaAddrtreeGetLargestFree;
258             pMapInfo->pmaMapScanContiguousNumaEviction = pmaAddrtreeScanContiguousNumaEviction;
259             pMapInfo->pmaMapGetEvictingFrames = pmaAddrtreeGetEvictingFrames;
260             pMapInfo->pmaMapSetEvictingFrames = pmaAddrtreeSetEvictingFrames;
261             NV_PRINTF(LEVEL_WARNING, "Going to use addrtree for PMA init!!\n");
262         }
263     }
264     pPma->pMapInfo = pMapInfo;
265 
266     pPma->pmaStats.numFreeFrames = 0;
267     pPma->pmaStats.num2mbPages = 0;
268     pPma->pmaStats.numFree2mbPages = 0;
269     pPma->pmaStats.numFreeFramesProtected = 0;
270     pPma->pmaStats.num2mbPagesProtected = 0;
271     pPma->pmaStats.numFree2mbPagesProtected = 0;
272     pPma->regSize = 0;
273     portAtomicSetSize(&pPma->initScrubbing, PMA_SCRUB_INITIALIZE);
274 
275     // OK not to take lock since it's initialization
276     NV_ASSERT(pmaStateCheck(pPma));
277 
278     return NV_OK;
279 
280 error:
281     pmaDestroy(pPma);
282     return status;
283 }
284 
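//
// pConfig is an in/out mask: the caller passes the PMA_QUERY_* bits it is
// interested in and receives back the subset that is currently set. An
// illustrative query for scrubber state:
//
//     NvU32 cfg = PMA_QUERY_SCRUB_ENABLED | PMA_QUERY_SCRUB_VALID;
//     NV_STATUS status = pmaQueryConfigs(pPma, &cfg);
//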
285 NV_STATUS
286 pmaQueryConfigs(PMA *pPma, NvU32 *pConfig)
287 {
288     NvU32 config = 0;
289 
    if (pPma == NULL || pConfig == NULL)
    {
        return NV_ERR_INVALID_ARGUMENT;
    }
294 
295     if (pPma->bScrubOnFree)
296     {
297         config |= PMA_QUERY_SCRUB_ENABLED;
298 
299         portSyncRwLockAcquireRead(pPma->pScrubberValidLock);
300         if (pmaPortAtomicGet(&pPma->scrubberValid) == PMA_SCRUBBER_VALID)
301         {
302             config |= PMA_QUERY_SCRUB_VALID;
303         }
304         portSyncRwLockReleaseRead(pPma->pScrubberValidLock);
305     }
306     config |= pPma->bNuma ? (PMA_QUERY_NUMA_ENABLED) : 0;
307 
308     portSyncSpinlockAcquire(pPma->pPmaLock);
309     config |= pPma->nodeOnlined ? (PMA_QUERY_NUMA_ONLINED) : 0;
310     portSyncSpinlockRelease(pPma->pPmaLock);
311 
312     // Only expose the states the clients asked for
313     *pConfig = (*pConfig) & config;
314     return NV_OK;
315 }
316 
317 NV_STATUS
318 pmaRegMemScrub(PMA *pPma, OBJMEMSCRUB *pScrubObj)
319 {
320     NV_ASSERT(pPma && pPma->bScrubOnFree);
321     portSyncRwLockAcquireWrite(pPma->pScrubberValidLock);
322     pPma->pScrubObj = pScrubObj;
323     portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_VALID);
324     portSyncRwLockReleaseWrite(pPma->pScrubberValidLock);
325 
326     return NV_OK;
327 }
328 
329 void
330 pmaUnregMemScrub(PMA *pPma)
331 {
332     NV_ASSERT(pPma && pPma->bScrubOnFree);
333     portSyncRwLockAcquireWrite(pPma->pScrubberValidLock);
334     portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_INVALID);
335     pPma->pScrubObj = NULL;
336     portSyncRwLockReleaseWrite(pPma->pScrubberValidLock);
337 }
338 
339 NV_STATUS
340 pmaNumaOnlined(PMA *pPma, NvS32 numaNodeId,
341                NvU64 coherentCpuFbBase, NvU64 coherentCpuFbSize)
342 {
343     if ((pPma == NULL) || (!pPma->bNuma) ||
344         (numaNodeId == PMA_NUMA_NO_NODE))
345     {
346         NV_ASSERT(0);
347         return NV_ERR_INVALID_STATE;
348     }
349 
350     portSyncSpinlockAcquire(pPma->pPmaLock);
351     pPma->nodeOnlined = NV_TRUE;
352     pPma->numaNodeId = numaNodeId;
353     pPma->coherentCpuFbBase = coherentCpuFbBase;
354     pPma->coherentCpuFbSize = coherentCpuFbSize;
355     portSyncSpinlockRelease(pPma->pPmaLock);
356 
357     return NV_OK;
358 }
359 
360 
361 void
362 pmaNumaOfflined(PMA *pPma)
363 {
364     if ((pPma == NULL) || (!pPma->bNuma))
365     {
366         NV_ASSERT(0);
367         return;
368     }
369 
370     portSyncSpinlockAcquire(pPma->pPmaLock);
371     pPma->nodeOnlined = NV_FALSE;
372     pPma->numaNodeId = PMA_NUMA_NO_NODE;
373     portSyncSpinlockRelease(pPma->pPmaLock);
374 }
375 
376 
377 void
378 pmaDestroy(PMA *pPma)
379 {
380     NvU32 i;
381 
382     NV_ASSERT(pPma != NULL);
383 
384     NV_ASSERT(pmaStateCheck(pPma));
385 
386     if (pmaPortAtomicGet(&pPma->initScrubbing) == PMA_SCRUB_IN_PROGRESS)
387     {
388         pmaScrubComplete(pPma);
389     }
390 
391     if (pPma->bNuma)
392     {
393         if (pPma->nodeOnlined != NV_FALSE)
394         {
395             //
396             // Not really an error right now but it will be later, when we are able
397             // to offline memory.
398             //
399             NV_PRINTF(LEVEL_WARNING, "Destroying PMA before node %d is offlined\n",
400                                      pPma->numaNodeId);
401         }
402     }
403 
404     for (i = 0; i < pPma->regSize; i++)
405     {
406         pPma->pMapInfo->pmaMapDestroy((void *)pPma->pRegions[i]);
407         portMemFree(pPma->pRegDescriptors[i]);
408     }
409     pPma->regSize = 0;
410 
411     if (pPma->blacklistCount != 0)
412     {
413         portMemFree(pPma->pBlacklistChunks);
414     }
415 
416     portMemFree(pPma->pMapInfo);
417 
418     if (pPma->pAllocLock != NULL)
419     {
420         portSyncMutexDestroy(pPma->pAllocLock);
421         portMemFree(pPma->pAllocLock);
422     }
423 
424     if (pPma->pScrubberValidLock != NULL)
425     {
426         portSyncRwLockDestroy(pPma->pScrubberValidLock);
427         portMemFree(pPma->pScrubberValidLock);
428     }
429 
430     if (pPma->pEvictionCallbacksLock != NULL)
431     {
432         portSyncMutexDestroy(pPma->pEvictionCallbacksLock);
433         portMemFree(pPma->pEvictionCallbacksLock);
434     }
435 
436     if (pPma->pPmaLock != NULL)
437     {
438         portSyncSpinlockDestroy(pPma->pPmaLock);
439         portMemFree(pPma->pPmaLock);
440     }
441 }
442 
443 
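//
// Illustrative registration of a single region (a sketch only; descriptor
// fields other than base/limit/bProtected are assumed fine left zeroed).
// Region IDs must be consecutive starting at 0, and both base and limit + 1
// must be aligned to the 64KB PMA granularity:
//
//     PMA_REGION_DESCRIPTOR desc = {0};
//     desc.base  = 0;
//     desc.limit = (1ULL << 30) - 1;
//     NV_STATUS status = pmaRegisterRegion(pPma, 0, NV_FALSE, &desc, 0, NULL);
//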
444 NV_STATUS
445 pmaRegisterRegion
446 (
447     PMA                   *pPma,
448     NvU32                  id,
449     NvBool                 bAsyncEccScrub,
450     PMA_REGION_DESCRIPTOR *pRegionDesc,
451     NvU32                  blacklistCount,
452     PPMA_BLACKLIST_ADDRESS pBlacklistPageBase
453 )
454 {
455     NvU64 numFrames;
456     void *pMap;
457     NvU64 physBase, physLimit;
458     NV_STATUS status = NV_OK;
459 
460     if (pPma == NULL || pRegionDesc == NULL || id != pPma->regSize
461         || (pBlacklistPageBase == NULL && blacklistCount != 0))
462     {
463         if (pPma == NULL)
464         {
465             NV_PRINTF(LEVEL_ERROR, "ERROR: NULL PMA object\n");
466         }
467         else if (id != pPma->regSize)
468         {
469             NV_PRINTF(LEVEL_ERROR, "ERROR: Non-consecutive region ID %d (should be %d)\n",
470                 id, pPma->regSize);
471         }
472         if (pRegionDesc == NULL)
473             NV_PRINTF(LEVEL_ERROR, "ERROR: NULL region descriptor\n");
474         if (pBlacklistPageBase == NULL && blacklistCount != 0)
475             NV_PRINTF(LEVEL_ERROR, "ERROR: Blacklist failure.  List is NULL but count = %d\n",
476                 blacklistCount);
477 
478         return NV_ERR_INVALID_ARGUMENT;
479     }
480 
481     if (pPma->bNuma)
482     {
483         NV_PRINTF(LEVEL_WARNING, "WARNING: registering regions on NUMA system.\n");
484     }
485 
486     physBase = pRegionDesc->base;
487     physLimit = pRegionDesc->limit;
488 
489     if (!NV_IS_ALIGNED(physBase, PMA_GRANULARITY) ||
490         !NV_IS_ALIGNED((physLimit + 1), PMA_GRANULARITY))
491     {
        // Alignment is only checked at the 64KB PMA granularity
        NV_PRINTF(LEVEL_ERROR, "ERROR: Region range %llx..%llx unaligned\n",
            physBase, physLimit);
        return NV_ERR_INVALID_ARGUMENT;
496     }
497 
498     NV_ASSERT(pmaStateCheck(pPma));
499 
500     numFrames = (physLimit - physBase + 1) >> PMA_PAGE_SHIFT;
501 
502     pMap = pPma->pMapInfo->pmaMapInit(numFrames, physBase, &pPma->pmaStats,
503                                       pRegionDesc->bProtected);
504     if (pMap == NULL)
505     {
506         return NV_ERR_NO_MEMORY;
507     }
508 
509     pPma->pRegions[id] = pMap;
510 
    // Deep copy of descriptor
    pPma->pRegDescriptors[id] =
      (PMA_REGION_DESCRIPTOR *) portMemAllocNonPaged(sizeof(PMA_REGION_DESCRIPTOR));
    if (pPma->pRegDescriptors[id] == NULL)
    {
        pPma->pMapInfo->pmaMapDestroy(pMap);
        return NV_ERR_NO_MEMORY;
    }
    portMemCopy(pPma->pRegDescriptors[id], sizeof(PMA_REGION_DESCRIPTOR),
        pRegionDesc, sizeof(PMA_REGION_DESCRIPTOR));
516 
517     pPma->regSize++;
518 
519     if (bAsyncEccScrub)
520     {
521         //
522         // Scrubbing cannot be done before we start. This is to protect against spurious pmaScrubComplete
523         // calls from RM
524         //
525         NV_ASSERT(pmaPortAtomicGet(&pPma->initScrubbing) != PMA_SCRUB_DONE);
526 
527         // Mark region as "scrubbing" until background scrubbing completes
528         pmaSetBlockStateAttrib(pPma, physBase, physLimit - physBase + 1, ATTRIB_SCRUBBING, ATTRIB_SCRUBBING);
529 
530         //
531         // This depends on RM initialization order: RM will only call pmaScrubComplete
532         // once after all regions are registered and finished scrubbing.
533         // The return value cannot be asserted. For example, when we are registering
534         // the second region, the old returned value is _IN_PROGRESS and that is expected.
535         //
536         portAtomicCompareAndSwapSize(&pPma->initScrubbing, PMA_SCRUB_IN_PROGRESS,
537                                                            PMA_SCRUB_INITIALIZE);
538     }
539 
540     status = pmaRegisterBlacklistInfo(pPma, physBase, pBlacklistPageBase, blacklistCount);
541     if (status != NV_OK)
542     {
543         pPma->pMapInfo->pmaMapDestroy(pMap);
544         portMemFree(pPma->pRegDescriptors[id]);
545         return status;
546     }
547 
548     NV_PRINTF(LEVEL_INFO, "Registered region:\n");
549     pmaRegionPrint(pPma, pPma->pRegDescriptors[id], pPma->pRegions[id]);
550     NV_PRINTF(LEVEL_INFO, "%d region(s) now registered\n", pPma->regSize);
551 
552     return status;
553 }
554 
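//
// Illustrative call (a sketch only; flags, counts, and sizes are
// caller-specific):
//
//     PMA_ALLOCATION_OPTIONS options = {0};
//     NvU64 pages[4];
//     options.flags = PMA_ALLOCATE_PINNED;
//     NV_STATUS status = pmaAllocatePages(pPma, 4, _PMA_64KB, &options, pages);
//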
555 NV_STATUS
556 pmaAllocatePages
557 (
558     PMA                    *pPma,
559     NvLength                allocationCount,
560     NvU64                   pageSize,
561     PMA_ALLOCATION_OPTIONS *allocationOptions,
562     NvU64                  *pPages
563 )
564 {
565     NvS32 regionList[PMA_REGION_SIZE];
566     NV_STATUS status, prediction;
567     NvU32 flags, evictFlag, contigFlag, persistFlag, alignFlag, pinFlag, rangeFlag, blacklistOffFlag, partialFlag, skipScrubFlag, reverseFlag;
568     NvU32 regId, regionIdx;
569     NvU64 numPagesAllocatedThisTime, numPagesLeftToAllocate, numPagesAllocatedSoFar;
570     NvU64 addrBase, addrLimit;
571     NvU64 rangeStart, rangeEnd;
572     NvU64 *curPages;
573     NvBool blacklistOffPerRegion[PMA_REGION_SIZE]={NV_FALSE};
574     NvU64 blacklistOffAddrStart[PMA_REGION_SIZE] = {0}, blacklistOffRangeSize[PMA_REGION_SIZE] = {0};
575     NvBool bScrubOnFree = NV_FALSE;
576 
577     void *pMap = NULL;
578     scanFunc useFunc;
579     PMA_PAGESTATUS pinOption;
580     NvU64 alignment = pageSize;
581     NvU32 framesPerPage  = (NvU32)(pageSize >> PMA_PAGE_SHIFT);
582 
583     //
584     // A boolean indicating if we should try to evict. We at most try eviction once per call
585     // to pmaAllocatePages.
586     //
587     NvBool tryEvict = NV_TRUE;
588     NvBool tryAlloc = NV_TRUE;
589 
590     const NvU64 numFramesToAllocateTotal = framesPerPage * allocationCount;
591 
592     if (pPma == NULL || pPages == NULL || allocationCount == 0
593         || (pageSize != _PMA_64KB && pageSize != _PMA_128KB && pageSize != _PMA_2MB && pageSize != _PMA_512MB)
594         || allocationOptions == NULL)
595     {
596         if (pPma == NULL)
597             NV_PRINTF(LEVEL_ERROR, "NULL PMA object\n");
598         if (pPages == NULL)
599             NV_PRINTF(LEVEL_ERROR, "NULL page list pointer\n");
600         if (allocationCount == 0)
601             NV_PRINTF(LEVEL_ERROR, "count == 0\n");
602         if (pageSize != _PMA_64KB && pageSize != _PMA_128KB && pageSize != _PMA_2MB && pageSize != _PMA_512MB)
603             NV_PRINTF(LEVEL_ERROR, "pageSize=0x%llx (not 64K, 128K, 2M, or 512M)\n", pageSize);
604         if (allocationOptions == NULL)
605             NV_PRINTF(LEVEL_ERROR, "NULL allocationOptions\n");
606         return NV_ERR_INVALID_ARGUMENT;
607     }
608 
609     flags = allocationOptions->flags;
610     evictFlag   = !(flags & PMA_ALLOCATE_DONT_EVICT);
611     contigFlag  = !!(flags & PMA_ALLOCATE_CONTIGUOUS);
612     pinFlag     = !!(flags & PMA_ALLOCATE_PINNED);
613     rangeFlag   = !!(flags & PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE);
614     persistFlag = pPma->bForcePersistence || !!(flags & PMA_ALLOCATE_PERSISTENT);
615     alignFlag   = !!(flags & PMA_ALLOCATE_FORCE_ALIGNMENT);
616     blacklistOffFlag = !!(flags & PMA_ALLOCATE_TURN_BLACKLIST_OFF);
617     partialFlag = !!(flags & PMA_ALLOCATE_ALLOW_PARTIAL);
618     skipScrubFlag = !!(flags & PMA_ALLOCATE_NO_ZERO);
619     reverseFlag = !!(flags & PMA_ALLOCATE_REVERSE_ALLOC);
620 
621     // Fork out new code path for NUMA sub-allocation from OS
622     if (pPma->bNuma)
623     {
624         if (reverseFlag)
625         {
626             NV_PRINTF(LEVEL_ERROR, "Reverse allocation not supported on NUMA.\n");
627             return NV_ERR_INVALID_ARGUMENT;
628         }
629         return pmaNumaAllocate(pPma, allocationCount, pageSize, allocationOptions, pPages);
630     }
631 
632     //
633     // Scrub on free is enabled for this allocation request if the feature is enabled and the
634     // caller does not want to skip scrubber.
635     // Caller may want to skip scrubber when it knows the memory is zero'ed or when we are
636     // initializing RM structures needed by the scrubber itself.
637     //
638     bScrubOnFree = pPma->bScrubOnFree && (!skipScrubFlag);
639 
640     //
641     // PMA only knows the page is zero'ed if PMA scrubbed it.
642     // For example, if something else scrubbed the page, called PMA with ALLOCATE_NO_ZERO,
643     // the _RESULT_IS_ZERO flag is not set because PMA did not scrub that page.
644     //
645     allocationOptions->resultFlags = bScrubOnFree ? PMA_ALLOCATE_RESULT_IS_ZERO : 0;
646 
647     if (blacklistOffFlag && !contigFlag)
648     {
649         NV_PRINTF(LEVEL_ERROR, "Blacklist can only be turned off for contiguous allocations\n");
650         return NV_ERR_INVALID_ARGUMENT;
651     }
652 
653     if (bScrubOnFree && blacklistOffFlag)
654     {
655         NV_PRINTF(LEVEL_ERROR, "Blacklist cannot be turned off when scrub on free is enabled\n");
656         return NV_ERR_INVALID_ARGUMENT;
657     }
658 
659     if (rangeFlag && (!NV_IS_ALIGNED(allocationOptions->physBegin, pageSize)
660           || !NV_IS_ALIGNED((allocationOptions->physEnd + 1), pageSize)))
661     {
662         NV_PRINTF(LEVEL_WARNING,
663                 "base [0x%llx] or limit [0x%llx] not aligned to page size 0x%llx\n",
664                 allocationOptions->physBegin,
665                 allocationOptions->physEnd + 1,
666                 pageSize);
667         return NV_ERR_INVALID_ARGUMENT;
668     }
669 
670     //
671     // Minimum alignment is requested page size. Alignment granularity is 64K.
672     // Alignment must be power of two for PMA math
673     //
674     if (alignFlag)
675     {
676         if (!NV_IS_ALIGNED(allocationOptions->alignment, _PMA_64KB) ||
677             !portUtilIsPowerOfTwo(allocationOptions->alignment))
678         {
679             NV_PRINTF(LEVEL_WARNING,
680                 "alignment [%llx] is not aligned to 64KB or is not power of two.",
681                 alignment);
682             return NV_ERR_INVALID_ARGUMENT;
683         }
684 
685         alignment = NV_MAX(pageSize, allocationOptions->alignment);
686         if (!contigFlag && alignment > pageSize)
687         {
688             NV_PRINTF(LEVEL_WARNING,
689                 "alignment [%llx] larger than the pageSize [%llx] not supported for non-contiguous allocs\n",
690                 alignment, pageSize);
691             return NV_ERR_INVALID_ARGUMENT;
692         }
693     }
694 
695     pinOption = pinFlag ? STATE_PIN : STATE_UNPIN;
696     pinOption |= persistFlag ? ATTRIB_PERSISTENT : 0;
697 
698     useFunc = contigFlag ? (pPma->pMapInfo->pmaMapScanContiguous) :
699                            (pPma->pMapInfo->pmaMapScanDiscontiguous);
700 
701     // No locking required because the states don't change
702     status = pmaSelector(pPma, allocationOptions, regionList);
703     if (status != NV_OK)
704     {
705         NV_PRINTF(LEVEL_FATAL, "Region selector failed\n");
706         return status;
707     }
708 
709     if (bScrubOnFree)
710     {
711         portSyncMutexAcquire(pPma->pAllocLock);
712         portSyncRwLockAcquireRead(pPma->pScrubberValidLock);
713         if (pmaPortAtomicGet(&pPma->scrubberValid) != PMA_SCRUBBER_VALID)
714         {
            NV_PRINTF(LEVEL_WARNING, "Scrubber object is not valid\n");
716             portSyncRwLockReleaseRead(pPma->pScrubberValidLock);
717             portSyncMutexRelease(pPma->pAllocLock);
718             return NV_ERR_INVALID_STATE;
719         }
720     }
721 
722     tryEvict = (evictFlag == 1);
723 
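    //
    // Retry entry point. We come back here from two places below: after
    // spinning for background init scrubbing to finish, and (at most once,
    // gated by tryAlloc) after waiting for the scrub-on-free scrubber to
    // produce enough pages.
    //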
724 pmaAllocatePages_retry:
725     //
726     // Retry implies that the PMA lock has been released and will be re-acquired
727     // after checking the scrubber so any pages allocated so far are not guaranteed
728     // to be there any more. Restart from scratch.
729     //
730     NV_PRINTF(LEVEL_INFO, "Attempt %s allocation of 0x%llx pages of size 0x%llx "
731                           "(0x%x frames per page)\n",
732                           contigFlag ? "contiguous" : "discontiguous",
733                           (NvU64)allocationCount, pageSize, framesPerPage);
734 
    // Check for completed scrub work each time, before we try (or retry) allocating
736     if (bScrubOnFree)
737     {
738         if ((status = _pmaCheckScrubbedPages(pPma, 0, NULL, 0)) != NV_OK)
739             goto scrub_fatal;
740     }
741 
742     // Attempting to allocate starts here
743     numPagesLeftToAllocate = allocationCount;
744     numPagesAllocatedSoFar = 0;
745     curPages = pPages;
746 
747     portSyncSpinlockAcquire(pPma->pPmaLock);
748 
749     NV_ASSERT(pmaStateCheck(pPma));
750 
751     prediction = _pmaPredictOutOfMemory(pPma, allocationCount, pageSize, allocationOptions);
752     if (!tryEvict && (prediction == NV_ERR_NO_MEMORY))
753     {
754         NV_PRINTF(LEVEL_INFO, "Returning OOM from prediction path.\n");
755         status = NV_ERR_NO_MEMORY;
756         goto normal_exit;
757     }
758 
759     for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++)
760     {
761         MEMORY_PROTECTION prot;
762 
763         if (regionList[regionIdx] == -1)
764         {
765             status = NV_ERR_NO_MEMORY;
766             goto normal_exit;
767         }
768         NV_ASSERT(regionList[regionIdx] < PMA_REGION_SIZE);
769 
770         regId = (NvU32)regionList[regionIdx];
771         pMap  = pPma->pRegions[regId];
772 
773         addrBase = pPma->pRegDescriptors[regId]->base;
774         addrLimit = pPma->pRegDescriptors[regId]->limit;
775         prot = pPma->pRegDescriptors[regId]->bProtected ? MEMORY_PROTECTION_PROTECTED :
776                                                           MEMORY_PROTECTION_UNPROTECTED;
777 
778         //
779         // If the start address of the range is less than the region's base
780         // address, start from the base itself.
781         //
782         rangeStart = rangeFlag ? ((allocationOptions->physBegin >= addrBase) ?
783                                   (allocationOptions->physBegin - addrBase) : 0) : 0;
784         rangeEnd   = rangeFlag ? ((allocationOptions->physEnd >= addrBase) ?
785                                   (allocationOptions->physEnd - addrBase) : 0) : 0;
786 
787         if (rangeStart > rangeEnd)
788         {
789             status = NV_ERR_INVALID_ARGUMENT;
790             goto normal_exit;
791         }
792 
793         //
794         // Before continuing with allocation, lets check if we need to turn-off
795         // blacklisting. During retry, we don't have to free the blacklisted pages again
796         //
797         if (blacklistOffFlag && !blacklistOffPerRegion[regId])
798         {
799             if (allocationOptions->physBegin > addrLimit)
800             {
801                 blacklistOffAddrStart[regId] = 0;
802                 blacklistOffRangeSize[regId] = 0;
803             }
804             else
805             {
                // The range reaches into this region, so free the blacklisted pages in the entire region
807                 blacklistOffAddrStart[regId] = addrBase;
808                 blacklistOffRangeSize[regId] = (addrLimit - addrBase + 1);
809                 _pmaFreeBlacklistPages(pPma, regId, blacklistOffAddrStart[regId], blacklistOffRangeSize[regId]);
810                 blacklistOffPerRegion[regId] = NV_TRUE;
811             }
812         }
813 
814         NV_ASSERT(numPagesLeftToAllocate + numPagesAllocatedSoFar == allocationCount);
815         NV_ASSERT(numPagesLeftToAllocate > 0);
816 
817         numPagesAllocatedThisTime = 0;
818         status = (*useFunc)(pMap, addrBase, rangeStart, rangeEnd, numPagesLeftToAllocate,
819             curPages, pageSize, alignment, &numPagesAllocatedThisTime, !tryEvict, (NvBool)reverseFlag);
820 
821         NV_ASSERT(numPagesAllocatedThisTime <= numPagesLeftToAllocate);
822 
823         if (contigFlag)
824         {
825             // Contiguous allocations are all or nothing
826             NV_ASSERT(numPagesAllocatedThisTime == 0 ||
827                       numPagesAllocatedThisTime == numPagesLeftToAllocate);
828         }
829 
830         //
831         // Adjust the counts and the pointer within the array of pages for the
832         // discontiguous case where only some pages might have been successfully
833         // allocated.
834         //
835         numPagesAllocatedSoFar += numPagesAllocatedThisTime;
836         curPages += numPagesAllocatedThisTime;
837         numPagesLeftToAllocate -= numPagesAllocatedThisTime;
838 
839         //
840         // PMA must currently catch addrtree shortcomings and fail the request
841         // Just follow the no memory path for now to properly release locks
842         //
843         if (status == NV_ERR_INVALID_ARGUMENT)
844         {
845             status = NV_ERR_NO_MEMORY;
846         }
847 
848         if (status == NV_ERR_IN_USE && !tryEvict)
849         {
850             //
851             // If memory is evictable, but eviction is not allowed by the
852             // caller, just return the no memory error.
853             //
854             NV_PRINTF(LEVEL_WARNING, "Memory evictable, but eviction not allowed, returning\n");
855             status = NV_ERR_NO_MEMORY;
856         }
857 
858         if (status == NV_OK)
859         {
860             NV_ASSERT(numPagesLeftToAllocate == 0);
861             NV_ASSERT(numPagesAllocatedSoFar == allocationCount);
862             break;
863         }
864         else if (status == NV_ERR_NO_MEMORY)
865         {
866             //
867             // Print an "out of memory" mssg only after we've scanned through
868             // all the regions. Printing an OOM message on per region basis may
869             // confuse someone debugging that we've actually run out of memory.
870             //
871             if ((regionIdx < (pPma->regSize - 1)) && (regionList[regionIdx + 1] == -1))
872             {
873                 NV_PRINTF(LEVEL_ERROR, "Status no_memory\n");
874             }
875             if (contigFlag)
876             {
877                 // Contiguous allocations are all or nothing.
878                 NV_ASSERT(numPagesAllocatedThisTime == 0);
879             }
880         }
881         else if (tryEvict)
882         {
883             NV_PRINTF(LEVEL_INFO, "Status evictable, region before eviction:\n");
884             pmaRegionPrint(pPma, pPma->pRegDescriptors[regId], pMap);
885 
886             NV_ASSERT(numPagesLeftToAllocate > 0);
887 
888             if (contigFlag)
889             {
890                 NV_ASSERT(numPagesLeftToAllocate == allocationCount);
891                 NV_ASSERT(numPagesAllocatedThisTime == 0);
892                 NV_ASSERT(numPagesAllocatedSoFar == 0);
893 
894                 NvU64 evictStart  = *curPages;
895                 NvU64 evictEnd    = *curPages + (numFramesToAllocateTotal << PMA_PAGE_SHIFT) - 1;
896 
897                 NV_PRINTF(LEVEL_INFO, "Attempt %s eviction of 0x%llx pages of size 0x%llx, "
898                                       "(0x%x frames per page) in the frame range 0x%llx..0x%llx\n",
899                                       contigFlag ? "contiguous" : "discontiguous",
900                                       numPagesLeftToAllocate,
901                                       pageSize,
902                                       framesPerPage,
903                                       (evictStart - addrBase) >> PMA_PAGE_SHIFT,
904                                       (evictEnd - addrBase) >> PMA_PAGE_SHIFT);
905 
906                 status = _pmaEvictContiguous(pPma, pMap, evictStart, evictEnd, prot);
907             }
908             else
909             {
910                 // Default to allowing the whole region to be evicted
911                 NvU64 evictPhysBegin = addrBase;
912                 NvU64 evictPhysEnd = addrLimit;
913 
914                 if (rangeFlag)
915                 {
916                     //
917                     // And if a specific physical range was requested, intersect
918                     // it with the region.
919                     //
920                     evictPhysBegin = NV_MAX(allocationOptions->physBegin, evictPhysBegin);
921                     evictPhysEnd   = NV_MIN(allocationOptions->physEnd, evictPhysEnd);
922 
923                     // Regions that would cause the intersection to be empty are skipped.
924                     NV_ASSERT(evictPhysBegin <= evictPhysEnd);
925                 }
926 
927                 NV_PRINTF(LEVEL_INFO, "Attempt %s eviction of 0x%llx pages of size 0x%llx, "
928                                       "(0x%x frames per page), in the frame range 0x%llx..0x%llx\n",
929                                       contigFlag ? "contiguous" : "discontiguous",
930                                       numPagesLeftToAllocate,
931                                       pageSize,
932                                       framesPerPage,
933                                       (evictPhysBegin - addrBase) >> PMA_PAGE_SHIFT,
934                                       (evictPhysEnd - addrBase) >> PMA_PAGE_SHIFT);
935 
936                 status = _pmaEvictPages(pPma, pMap, curPages, numPagesLeftToAllocate,
937                                         pPages, numPagesAllocatedSoFar, pageSize,
938                                         evictPhysBegin, evictPhysEnd, prot);
939             }
940 
941             if (status == NV_OK)
942             {
943                 numPagesAllocatedSoFar = allocationCount;
944             }
945             else
946             {
947                 NV_PRINTF(LEVEL_INFO, "Eviction/scrubbing failed, region after:\n");
948                 pmaRegionPrint(pPma, pPma->pRegDescriptors[regId], pMap);
949             }
950 
951             if (status == NV_ERR_INSUFFICIENT_RESOURCES)
952             {
953                 NV_PRINTF(LEVEL_ERROR, "ERROR: scrubber OOM\n");
954 
955                 // Scrubber is out of memory
956                 goto scrub_fatal;
957             }
958         }
959     }
960 
961     //
962     // if scrubbing is active in the background, release lock and spin until it
963     // completes, then re-try.
964     //
965     if ((status == NV_ERR_NO_MEMORY) &&
966         (pmaPortAtomicGet(&pPma->initScrubbing) == PMA_SCRUB_IN_PROGRESS))
967     {
968         // Release the spinlock before attempting a semaphore acquire.
969         portSyncSpinlockRelease(pPma->pPmaLock);
970 
971         // Wait until scrubbing is complete.
972         while (pmaPortAtomicGet(&pPma->initScrubbing) != PMA_SCRUB_DONE)
973         {
974             // Deschedule without PMA lock
975             pmaOsSchedule();
976         }
977         NV_PRINTF(LEVEL_INFO, "Retrying after eviction/scrub\n");
978         goto pmaAllocatePages_retry;
979     }
980 
981     if ((status == NV_ERR_NO_MEMORY) && partialFlag && (numPagesAllocatedSoFar > 0))
982     {
983         //
984         // If scrub on free is enabled, continue to scrubWaitForAll if we haven't already,
985         // otherwise succeed the partial allocation.
986         // If scrub on free is not enabled, there is no waiting to try, so succeed the
987         // partial allocation immediately.
988         //
989         if (!bScrubOnFree  || !tryAlloc)
990         {
991             NV_PRINTF(LEVEL_INFO, "Succeed partial allocation\n");
992             status = NV_OK;
993         }
994     }
995 
996     if (status == NV_ERR_NO_MEMORY && bScrubOnFree)
997     {
998         PSCRUB_NODE pPmaScrubList = NULL;
999         NvU64       count;
1000         portSyncSpinlockRelease(pPma->pPmaLock);
1001 
1002         NV_PRINTF(LEVEL_INFO, "Waiting for scrubber\n");
1003 
1004         status = scrubCheckAndWaitForSize(pPma->pScrubObj, numPagesLeftToAllocate,
1005                                           pageSize, &pPmaScrubList, &count);
1006 
1007         if (status == NV_OK)
1008         {
1009             if (count > 0)
1010             {
1011                 _pmaClearScrubBit(pPma, pPmaScrubList, count);
1012             }
1013 
1014             //
1015             // Free the actual list, although allocated by objscrub
1016             // there is no need for failure case handling to free the list,  because the call
1017             // returns error for 1)memory allocation failure or 2)nothing remaining to scrub.
1018             //
1019             portMemFree(pPmaScrubList);
1020         }
1021 
1022         //
1023         // Set tryEvict to NV_FALSE because we know UVM already failed eviction and any
1024         // available memory that comes after we tried eviction will not be counted towards
1025         // this allocation.
1026         //
1027         if (tryAlloc)
1028         {
1029             tryAlloc = NV_FALSE;
1030             tryEvict = NV_FALSE;
1031             NV_PRINTF(LEVEL_INFO, "Retrying after waiting for scrubber\n");
1032             goto pmaAllocatePages_retry;
1033         }
1034 
1035         if (blacklistOffFlag)
1036         {
1037             for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++)
1038             {
1039                 if (blacklistOffPerRegion[regionIdx] == NV_FALSE)
1040                     continue;
1041                 _pmaReallocBlacklistPages(pPma, regionIdx, blacklistOffAddrStart[regionIdx], blacklistOffRangeSize[regionIdx]);
1042             }
1043         }
1044         if (bScrubOnFree)
1045         {
1046             portSyncRwLockReleaseRead(pPma->pScrubberValidLock);
1047             portSyncMutexRelease(pPma->pAllocLock);
1048         }
1049         NV_PRINTF(LEVEL_INFO, "Returning OOM after waiting for scrubber\n");
1050         return NV_ERR_NO_MEMORY;
1051     }
1052 
1053     if (status == NV_OK)
1054     {
1055         NvU32 i;
1056 
1057         //
1058         // Here we need to double check if the scrubber was valid because the contiguous eviction
1059         // which called pmaFreePages could have had a fatal failure that resulted in some
1060         // pages being unscrubbed.
1061         //
1062         if (bScrubOnFree && (pmaPortAtomicGet(&pPma->scrubberValid) != PMA_SCRUBBER_VALID))
1063         {
1064             portSyncSpinlockRelease(pPma->pPmaLock);
1065             NV_PRINTF(LEVEL_FATAL, "Failing allocation because the scrubber is not valid.\n");
1066             status = NV_ERR_INSUFFICIENT_RESOURCES;
1067             goto scrub_fatal;
1068         }
1069 
1070         // Commit
1071         allocationOptions->numPagesAllocated = (NvLength)numPagesAllocatedSoFar;
1072 
1073         if (contigFlag)
1074         {
1075             NvU64 frameBase;
1076             const NvU64 numFramesAllocated = framesPerPage * numPagesAllocatedSoFar;
1077 
1078             regId = findRegionID(pPma, pPages[0]);
1079             pMap  = pPma->pRegions[regId];
1080             addrBase = pPma->pRegDescriptors[regId]->base;
1081             frameBase = PMA_ADDR2FRAME(pPages[0], addrBase);
1082 
1083             NV_PRINTF(LEVEL_INFO, "Successfully allocated frames 0x%llx through 0x%llx\n",
1084                                   frameBase,
1085                                   frameBase + numFramesAllocated - 1);
1086 
1087             for (i = 0; i < numPagesAllocatedSoFar; i++)
1088             {
1089                 pPma->pMapInfo->pmaMapChangePageStateAttrib(pMap, frameBase + (i * framesPerPage),
1090                                                             pageSize, pinOption, NV_TRUE);
1091             }
1092 
1093             if (blacklistOffFlag && blacklistOffPerRegion[regId])
1094             {
1095                 NvU64 allocatedRegionEnd = PMA_FRAME2ADDR(frameBase + numFramesAllocated - 1, addrBase) + PMA_GRANULARITY - 1;
1096                 NvU64 blacklistOffAddrEnd = blacklistOffAddrStart[regId] + blacklistOffRangeSize[regId] - 1;
1097                 blacklistOffPerRegion[regId] = NV_FALSE;
1098                 _pmaReallocBlacklistPages(pPma, regId, blacklistOffAddrStart[regId],  (pPages[0] - blacklistOffAddrStart[regId] + 1));
1099                 if (allocatedRegionEnd < blacklistOffAddrEnd)
1100                     _pmaReallocBlacklistPages(pPma, regId, allocatedRegionEnd, (blacklistOffAddrEnd - allocatedRegionEnd));
1101             }
1102         }
1103         else
1104         {
1105             NvU64 frameRangeStart = 0;
1106             NvU64 lastFrameRangeEnd = 0;
1107             NvU64 frameBase = 0;
1108 
            (void)frameRangeStart;   // Silence the compiler
            (void)lastFrameRangeEnd;
1111 
1112             NV_PRINTF(LEVEL_INFO, "Successfully allocated frames:\n");
1113 
1114             for (i = 0; i < numPagesAllocatedSoFar; i++)
1115             {
1116                 regId = findRegionID(pPma, pPages[i]);
1117                 pMap  = pPma->pRegions[regId];
1118                 addrBase = pPma->pRegDescriptors[regId]->base;
1119                 frameBase = PMA_ADDR2FRAME(pPages[i], addrBase);
1120 
1121                 // Print out contiguous frames in the same NV_PRINTF
1122                 if (i == 0)
1123                 {
1124                     frameRangeStart = frameBase;
1125                 }
1126                 else if ((lastFrameRangeEnd + 1) != frameBase)
1127                 {
1128                     // Break in frame range detected
1129                     NV_PRINTF(LEVEL_INFO, "0x%llx through 0x%llx \n",
1130                                           frameRangeStart,
1131                                           lastFrameRangeEnd);
1132 
1133                     frameRangeStart = frameBase;
1134                 }
1135                 lastFrameRangeEnd = frameBase + framesPerPage - 1;
1136 
1137                 pPma->pMapInfo->pmaMapChangePageStateAttrib(pMap, PMA_ADDR2FRAME(pPages[i], addrBase),
1138                                                             pageSize, pinOption, NV_TRUE);
1139 
1140             }
1141             NV_PRINTF(LEVEL_INFO, "0x%llx through 0x%llx \n",
1142                                   frameRangeStart,
1143                                   frameBase + framesPerPage - 1);
1144         }
1145     }
1146 
1147 normal_exit:
1148     if (blacklistOffFlag)
1149     {
1150         for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++)
1151         {
1152             if (blacklistOffPerRegion[regionIdx] == NV_FALSE)
1153                 continue;
1154             _pmaReallocBlacklistPages(pPma, regionIdx, blacklistOffAddrStart[regionIdx], blacklistOffRangeSize[regionIdx]);
1155         }
1156     }
1157 
1158     portSyncSpinlockRelease(pPma->pPmaLock);
1159     if (bScrubOnFree)
1160     {
1161         portSyncRwLockReleaseRead(pPma->pScrubberValidLock);
1162         portSyncMutexRelease(pPma->pAllocLock);
1163     }
1164     return status;
1165 
1166 scrub_fatal:
1167     if (blacklistOffFlag)
1168     {
1169         for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++)
1170         {
1171             if (blacklistOffPerRegion[regionIdx] == NV_FALSE)
1172                 continue;
1173             _pmaReallocBlacklistPages(pPma, regionIdx, blacklistOffAddrStart[regionIdx], blacklistOffRangeSize[regionIdx]);
1174         }
1175     }
1176     // Note we do not have the PMA lock.
1177     portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_INVALID);
1178     portSyncRwLockReleaseRead(pPma->pScrubberValidLock);
1179     portSyncMutexRelease(pPma->pAllocLock);
1180     return status;
1181 
1182 }
1183 
1184 NV_STATUS
1185 pmaAllocatePagesBroadcast
1186 (
1187     PMA                   **pPma,
1188     NvU32                   pmaCount,
1189     NvLength                allocationCount,
1190     NvU64                   pageSize,
1191     PMA_ALLOCATION_OPTIONS *allocationOptions,
1192     NvU64                  *pPages
1193 )
1194 {
1195 
1196     if (pPma == NULL || pmaCount == 0 || allocationCount == 0
1197         || (pageSize != _PMA_64KB && pageSize != _PMA_128KB && pageSize != _PMA_2MB && pageSize != _PMA_512MB)
1198         || pPages == NULL)
1199     {
1200         return NV_ERR_INVALID_ARGUMENT;
1201     }
1202 
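    // SLI broadcast allocation is not implemented yet; see the @bug note at
    // the top of this file.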
1203     return NV_ERR_GENERIC;
1204 }
1205 
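//
// Transitions every frame of the given pages from STATE_UNPIN to STATE_PIN.
// Fails with NV_ERR_INVALID_STATE if a frame is not allocated unpinned, or
// with NV_ERR_IN_USE if it is being evicted; in both cases the frames pinned
// so far by this call are rolled back to STATE_UNPIN.
//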
1206 NV_STATUS
1207 pmaPinPages
1208 (
1209     PMA      *pPma,
1210     NvU64    *pPages,
1211     NvLength  pageCount,
1212     NvU64     pageSize
1213 )
1214 {
1215     NV_STATUS status = NV_OK;
1216     NvU32          framesPerPage, regId, i, j;
1217     NvU64          frameNum, addrBase;
1218     PMA_PAGESTATUS state;
1219     framesPerPage  = (NvU32)(pageSize >> PMA_PAGE_SHIFT);
1220 
1221     if (pPma == NULL || pageCount == 0 || pPages == NULL
1222         || (pageSize != _PMA_64KB && pageSize != _PMA_128KB && pageSize != _PMA_2MB && pageSize != _PMA_512MB))
1223     {
1224         return NV_ERR_INVALID_ARGUMENT;
1225     }
1226 
1227     portSyncSpinlockAcquire(pPma->pPmaLock);
1228 
    for (i = 0; i < pageCount; i++)
1230     {
1231         regId = findRegionID(pPma, pPages[i]);
1232         addrBase = pPma->pRegDescriptors[regId]->base;
1233         frameNum = PMA_ADDR2FRAME(pPages[i], addrBase);
1234 
1235         for (j = 0; j < framesPerPage; j++)
1236         {
1237             state = pPma->pMapInfo->pmaMapRead(pPma->pRegions[regId], (frameNum + j), NV_TRUE);
1238 
1239             //
1240             // Check for incorrect usage of the API where the caller requests to
1241             // pin pages that are not allocated unpinned.
1242             //
1243             if ((state & STATE_MASK) != STATE_UNPIN)
1244                 status = NV_ERR_INVALID_STATE;
1245 
1246             //
1247             // Check for pages being evicted. Notably this is expected if the
1248             // call races with eviction.
1249             //
1250             if (state & ATTRIB_EVICTING)
1251                 status = NV_ERR_IN_USE;
1252 
1253             if (status != NV_OK)
1254             {
1255                 //
1256                 // Don't print the error for the eviction case as that's
1257                 // expected to happen.
1258                 //
1259                 if (status != NV_ERR_IN_USE)
1260                 {
1261                     NV_PRINTF(LEVEL_ERROR,
1262                         "Pin failed at page %d frame %d in region %d state %d\n",
1263                         i, j, regId, state);
1264                 }
1265                 _pmaRollback(pPma, pPages, i, j, pageSize, STATE_UNPIN);
1266                 goto done;
1267             }
1268             else
1269             {
1270                 pPma->pMapInfo->pmaMapChangeState(pPma->pRegions[regId], (frameNum + j), STATE_PIN);
1271             }
1272         }
1273     }
1274 
1275 done:
1276     portSyncSpinlockRelease(pPma->pPmaLock);
1277 
1278     return status;
1279 }
1280 
1281 
1282 NV_STATUS
1283 pmaUnpinPages
1284 (
1285     PMA      *pPma,
1286     NvU64    *pPages,
1287     NvLength  pageCount,
1288     NvU64     pageSize
1289 )
1290 {
1291     NvU32          framesPerPage, regId, i, j;
1292     NvU64          frameNum, addrBase;
1293     PMA_PAGESTATUS state;
1294     framesPerPage  = (NvU32)(pageSize >> PMA_PAGE_SHIFT);
1295 
1296     if (pPma == NULL || pageCount == 0 || pPages == NULL
1297         || (pageSize != _PMA_64KB && pageSize != _PMA_128KB && pageSize != _PMA_2MB && pageSize != _PMA_512MB))
1298     {
1299         return NV_ERR_INVALID_ARGUMENT;
1300     }
1301 
1302     portSyncSpinlockAcquire(pPma->pPmaLock);
1303 
    for (i = 0; i < pageCount; i++)
1305     {
1306         regId = findRegionID(pPma, pPages[i]);
1307         addrBase = pPma->pRegDescriptors[regId]->base;
1308         frameNum = PMA_ADDR2FRAME(pPages[i], addrBase);
1309 
1310         for (j = 0; j < framesPerPage; j++)
1311         {
1312             state = pPma->pMapInfo->pmaMapRead(pPma->pRegions[regId], (frameNum + j), NV_FALSE);
1313             if (state != STATE_PIN)
1314             {
1315                 NV_PRINTF(LEVEL_ERROR, "Unpin failed at %dth page %dth frame\n",
1316                                         i, j);
1317                 _pmaRollback(pPma, pPages, i, j, pageSize, STATE_PIN);
1318                 portSyncSpinlockRelease(pPma->pPmaLock);
1319                 return NV_ERR_INVALID_STATE;
1320             }
1321             else
1322             {
1323                 pPma->pMapInfo->pmaMapChangeState(pPma->pRegions[regId], (frameNum + j), STATE_UNPIN);
1324             }
1325         }
1326     }
1327 
1328     portSyncSpinlockRelease(pPma->pPmaLock);
1329 
1330     return NV_OK;
1331 }
1332 
1333 
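//
// Note on the (pageCount, size) convention, inferred from the assert below:
// a contiguous allocation may apparently be freed as a single "page"
// (pageCount == 1) whose size is the whole allocation; otherwise size must
// be one of the supported page sizes. An illustrative discontiguous free of
// four 64KB pages, letting scrub-on-free run:
//
//     pmaFreePages(pPma, pages, 4, _PMA_64KB, 0);
//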
1334 void
1335 pmaFreePages
1336 (
1337     PMA   *pPma,
1338     NvU64 *pPages,
1339     NvU64  pageCount,
1340     NvU64  size,
1341     NvU32  flag
1342 )
1343 {
1344     // TODO Support free of multiple regions in one call??
1345     NvU64 i, j, frameNum, framesPerPage, addrBase;
1346     NvU32 regId;
1347     NvBool bScrubValid = NV_TRUE;
1348     NvBool bNeedScrub = pPma->bScrubOnFree && !(flag & PMA_FREE_SKIP_SCRUB);
1349 
1350     NV_ASSERT(pPma != NULL);
1351     NV_ASSERT(pageCount != 0);
1352     NV_ASSERT(pPages != NULL);
1353 
1354     if (pageCount != 1)
1355     {
1356         NV_ASSERT((size == _PMA_64KB)  ||
1357                   (size == _PMA_128KB) ||
1358                   (size == _PMA_2MB)   ||
1359                   (size == _PMA_512MB));
1360     }
1361 
1362     // Fork out new code path for NUMA sub-allocation from OS
1363     if (pPma->bNuma)
1364     {
1365         portSyncSpinlockAcquire(pPma->pPmaLock);
1366         pmaNumaFreeInternal(pPma, pPages, pageCount, size, flag);
1367         portSyncSpinlockRelease(pPma->pPmaLock);
1368 
1369         return;
1370     }
1371 
1372     framesPerPage = size >> PMA_PAGE_SHIFT;
1373 
1374     // Check if any scrubbing is done before we actually free
1375     if (bNeedScrub)
1376     {
1377         portSyncRwLockAcquireRead(pPma->pScrubberValidLock);
1378         if (pmaPortAtomicGet(&pPma->scrubberValid) == PMA_SCRUBBER_VALID)
1379         {
1380             if (_pmaCheckScrubbedPages(pPma, 0, NULL, 0) != NV_OK)
1381             {
1382                 portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_INVALID);
1383                 portSyncRwLockReleaseRead(pPma->pScrubberValidLock);
1384                 bScrubValid = NV_FALSE;
1385                 NV_PRINTF(LEVEL_WARNING, "Scrubber object is not valid\n");
1386             }
1387         }
1388         else
1389         {
1390             // We allow free with invalid scrubber object
1391             portSyncRwLockReleaseRead(pPma->pScrubberValidLock);
1392             bScrubValid = NV_FALSE;
1393             NV_PRINTF(LEVEL_WARNING, "Scrubber object is not valid\n");
1394         }
1395     }
    // The scrubber reader lock is still held here only if (bScrubValid && bNeedScrub)
1397 
1398     portSyncSpinlockAcquire(pPma->pPmaLock);
1399 
1400     for (i = 0; i < pageCount; i++)
1401     {
1402         regId    = findRegionID(pPma, pPages[i]);
1403         addrBase = pPma->pRegDescriptors[regId]->base;
1404         frameNum = PMA_ADDR2FRAME(pPages[i], addrBase);
1405 
1406         _pmaReallocBlacklistPages(pPma, regId, pPages[i], pageCount * size);
1407 
1408         for (j = 0; j < framesPerPage; j++)
1409         {
1410             PMA_PAGESTATUS newStatus = (bScrubValid && bNeedScrub) ? ATTRIB_SCRUBBING : STATE_FREE;
1411             //
1412             // Reset everything except for the (ATTRIB_EVICTING and ATTRIB_BLACKLIST) state to support memory being freed
1413             // after being picked for eviction.
1414             //
1415             pPma->pMapInfo->pmaMapChangeStateAttribEx(pPma->pRegions[regId], (frameNum + j), newStatus, ~(ATTRIB_EVICTING | ATTRIB_BLACKLIST));
1416         }
1417     }
1418 
1419     portSyncSpinlockRelease(pPma->pPmaLock);
1420 
    // Scrub the pages on free, if needed
1422     if (bScrubValid && bNeedScrub)
1423     {
1424         PSCRUB_NODE pPmaScrubList = NULL;
1425         NvU64 count;
1426         if (scrubSubmitPages(pPma->pScrubObj, size, pPages, pageCount,
1427                              &pPmaScrubList, &count) != NV_OK)
1428         {
1429             portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_INVALID);
1430             goto exit;
1431         }
1432 
1433         if (count > 0)
1434         {
1435             _pmaClearScrubBit(pPma, pPmaScrubList, count);
1436         }
1437 exit:
1438         // Free the actual list, although allocated by objscrub
1439         portMemFree(pPmaScrubList);
1440 
1441         portSyncRwLockReleaseRead(pPma->pScrubberValidLock);
1442     }
1443 }
1444 
1445 
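//
// Clears the ATTRIB_SCRUBBING bit for every region that is fully contained
// in [rangeBase, rangeLimit]; regions that only partially overlap the range
// are left untouched.
//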
1446 void
1447 pmaClearScrubRange
1448 (
1449     PMA *pPma,
1450     NvU64 rangeBase,
1451     NvU64 rangeLimit
1452 )
1453 {
1454     NvU32 regionIdx;
1455     NvU64 physBase, physLimit;
1456 
1457     for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++)
1458     {
1459         physBase = pPma->pRegDescriptors[regionIdx]->base;
1460         physLimit = pPma->pRegDescriptors[regionIdx]->limit;
1461 
1462         if ((physBase >= rangeBase) && (physLimit <= rangeLimit))
1463         {
1464             pmaSetBlockStateAttrib(pPma, physBase, physLimit - physBase + 1, 0, ATTRIB_SCRUBBING);
1465         }
1466     }
1467 }
1468 
1469 
1470 NV_STATUS
1471 pmaScrubComplete
1472 (
1473     PMA *pPma
1474 )
1475 {
1476     NvU32 regionIdx;
1477     NvU64 physBase, physLimit;
1478 
1479 
1480     if (pPma == NULL)
1481     {
1482         return NV_ERR_INVALID_ARGUMENT;
1483     }
1484 
1485     if (pmaPortAtomicGet(&pPma->initScrubbing) != PMA_SCRUB_IN_PROGRESS)
1486     {
1487         return NV_ERR_GENERIC;
1488     }
1489 
1490     // Clear the scrubbing bit for all regions
1491     for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++)
1492     {
1493         physBase = pPma->pRegDescriptors[regionIdx]->base;
1494         physLimit = pPma->pRegDescriptors[regionIdx]->limit;
1495 
1496         pmaSetBlockStateAttrib(pPma, physBase, physLimit - physBase + 1, 0, ATTRIB_SCRUBBING);
1497     }
1498 
    NV_PRINTF(LEVEL_INFO, "Regions after initial scrub:\n");
1500     for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++)
1501     {
1502         pmaRegionPrint(pPma, pPma->pRegDescriptors[regionIdx], pPma->pRegions[regionIdx]);
1503     }
1504 
1505     portAtomicSetSize(&pPma->initScrubbing, PMA_SCRUB_DONE);
1506 
1507 
1508     return NV_OK;
1509 }


NV_STATUS
pmaRegisterEvictionCb
(
    PMA              *pPma,
    pmaEvictPagesCb_t pEvictPagesCb,
    pmaEvictRangeCb_t pEvictRangeCb,
    void             *ctxPtr
)
{
    NV_STATUS status = NV_OK;

    if (pPma == NULL || pEvictPagesCb == NULL || pEvictRangeCb == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    //
    // Lock the eviction callback mutex to guarantee that all the previously
    // registered callbacks have been flushed before registering new ones.
    //
    portSyncMutexAcquire(pPma->pEvictionCallbacksLock);

    //
    // Take the spin lock to make setting the callbacks atomic with allocations
    // using the callbacks.
    //
    portSyncSpinlockAcquire(pPma->pPmaLock);

    //
    // Both callbacks are always set together to a non-NULL value, so checking
    // one of them is enough to know whether they are unset.
    //
    if (pPma->evictPagesCb == NULL)
    {
        pPma->evictPagesCb = pEvictPagesCb;
        pPma->evictRangeCb = pEvictRangeCb;
        pPma->evictCtxPtr  = ctxPtr;
    }
    else
    {
        status = NV_ERR_INVALID_STATE;
    }

    portSyncSpinlockRelease(pPma->pPmaLock);

    portSyncMutexRelease(pPma->pEvictionCallbacksLock);

    return status;
}
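
/*
 * Usage sketch (illustrative only): registering eviction callbacks.
 * "clientEvictPages", "clientEvictRange" and "pClientCtx" are hypothetical
 * client-side names matching the pmaEvictPagesCb_t / pmaEvictRangeCb_t
 * typedefs. A second registration without an intervening unregister fails
 * with NV_ERR_INVALID_STATE.
 *
 *     NV_STATUS status = pmaRegisterEvictionCb(pPma, clientEvictPages,
 *                                              clientEvictRange, pClientCtx);
 *     if (status == NV_ERR_INVALID_STATE)
 *     {
 *         // Callbacks already registered; unregister them first.
 *     }
 */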


void
pmaUnregisterEvictionCb
(
    PMA *pPma
)
{
    NvBool evictionPending;

    if (pPma == NULL)
        return;

    //
    // Lock the eviction callbacks mutex to prevent new callbacks from being
    // registered while the old ones are being unregistered and flushed.
    //
    portSyncMutexAcquire(pPma->pEvictionCallbacksLock);

    //
    // Take the spin lock to make removing the callbacks atomic with allocations
    // using the callbacks.
    //
    portSyncSpinlockAcquire(pPma->pPmaLock);

    // TODO: Assert that no unpinned allocations are left.

    pPma->evictPagesCb = NULL;
    pPma->evictRangeCb = NULL;
    pPma->evictCtxPtr  = NULL;

    evictionPending = pmaIsEvictionPending(pPma);

    portSyncSpinlockRelease(pPma->pPmaLock);

    //
    // Even though no unpinned allocations should be present, there still could
    // be pending eviction callbacks that picked some unpinned pages for
    // eviction before they were freed. Wait for all of them to finish.
    //
    while (evictionPending)
    {
        // TODO: Consider adding a better wait mechanism.
        pmaOsSchedule();

        portSyncSpinlockAcquire(pPma->pPmaLock);

        evictionPending = pmaIsEvictionPending(pPma);

        portSyncSpinlockRelease(pPma->pPmaLock);
    }

    portSyncMutexRelease(pPma->pEvictionCallbacksLock);
}
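
/*
 * Teardown sketch (illustrative only): free all unpinned client allocations
 * before unregistering, because this call only waits out evictions that are
 * already in flight.
 *
 *     // ... free all unpinned client allocations first ...
 *     pmaUnregisterEvictionCb(pPma);  // blocks until pending evictions drain
 */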

void
pmaGetFreeMemory
(
    PMA             *pPma,
    NvU64           *pBytesFree
)
{
    portSyncSpinlockAcquire(pPma->pPmaLock);

    *pBytesFree = pPma->pmaStats.numFreeFrames << PMA_PAGE_SHIFT;

    portSyncSpinlockRelease(pPma->pPmaLock);
}

void
pmaGetTotalMemory
(
    PMA             *pPma,
    NvU64           *pBytesTotal
)
{
    void *pMap;
    NvU64 totalBytesInRegion;
    NvU32 i;

    *pBytesTotal = 0;

    for (i = 0; i < pPma->regSize; i++)
    {
        pMap = pPma->pRegions[i];
        pPma->pMapInfo->pmaMapGetSize(pMap, &totalBytesInRegion);

        *pBytesTotal += totalBytesInRegion;
    }
}
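
/*
 * Usage sketch (illustrative only): combining the two queries above to
 * compute utilization. Both results are in bytes; free memory is tracked in
 * PMA frames of 2^PMA_PAGE_SHIFT bytes.
 *
 *     NvU64 bytesFree, bytesTotal, bytesUsed;
 *     pmaGetFreeMemory(pPma, &bytesFree);
 *     pmaGetTotalMemory(pPma, &bytesTotal);
 *     bytesUsed = bytesTotal - bytesFree;
 */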

NV_STATUS
pmaGetRegionInfo
(
    PMA                     *pPma,
    NvU32                   *pRegSize,
    PMA_REGION_DESCRIPTOR  **ppRegionDesc
)
{
    if (pPma == NULL || pRegSize == NULL || ppRegionDesc == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    *pRegSize = pPma->regSize;
    *ppRegionDesc = pPma->pRegDescriptors[0];
    return NV_OK;
}
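
/*
 * Usage sketch (illustrative only): reading the region count and the first
 * region descriptor.
 *
 *     NvU32 regCount;
 *     PMA_REGION_DESCRIPTOR *pDesc;
 *     if (pmaGetRegionInfo(pPma, &regCount, &pDesc) == NV_OK)
 *     {
 *         NV_PRINTF(LEVEL_INFO, "%u regions, first base = 0x%llx\n",
 *                   regCount, pDesc->base);
 *     }
 */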

void
pmaGetLargestFree
(
    PMA             *pPma,
    NvU64           *pLargestFree,
    NvU64           *pRegionBase,
    NvU64           *pLargestOffset
)
{
    void *pMap;
    NvU64 largestFreeInRegion;
    NvU32 i;

    *pLargestFree = 0;
    *pRegionBase = 0;

    //
    // FIXME: No RM client consumes this field yet, so set it to a sentinel
    // value for now. It should eventually hold the offset of the largest
    // free chunk.
    //
    *pLargestOffset = ~0ULL;

    portSyncSpinlockAcquire(pPma->pPmaLock);

    for (i = 0; i < pPma->regSize; i++)
    {
        pMap = pPma->pRegions[i];
        pPma->pMapInfo->pmaMapGetLargestFree(pMap, &largestFreeInRegion);

        if (*pLargestFree < largestFreeInRegion)
        {
            *pLargestFree = largestFreeInRegion;
            *pRegionBase = pPma->pRegDescriptors[i]->base;
        }
    }

    portSyncSpinlockRelease(pPma->pPmaLock);

    NV_PRINTF(LEVEL_INFO, "Largest Free Bytes = 0x%llx, base = 0x%llx, largestOffset = 0x%llx.\n",
        *pLargestFree, *pRegionBase, *pLargestOffset);
}
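
/*
 * Usage sketch (illustrative only): per the FIXME above, only the size and
 * region base are meaningful today; the offset is always the ~0ULL sentinel.
 *
 *     NvU64 largestFree, regionBase, largestOffset;
 *     pmaGetLargestFree(pPma, &largestFree, &regionBase, &largestOffset);
 */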

/*!
 * @brief Returns a list of PMA-allocated blocks that have the
 *        ATTRIB_PERSISTENT attribute set. It is used by the FBSR module to
 *        save/restore clients' PMA allocations across system suspend/resume.
 *
 * @param[in]     pPma              PMA pointer
 * @param[in/out] ppPersistList     Pointer to list of persistent segments
 *
 * @return
 *      NV_OK                   Success
 *      NV_ERR_NO_MEMORY        Failure to allocate list
 */
NV_STATUS
pmaBuildPersistentList
(
    PMA             *pPma,
    PRANGELISTTYPE  *ppPersistList
)
{
    return pmaBuildList(pPma, ppPersistList, ATTRIB_PERSISTENT);
}

/*!
 * @brief Returns a list of all PMA-allocated blocks. Every PMA-allocated
 *        block has either the STATE_PIN or STATE_UNPIN attribute set. The
 *        list is used by the FBSR module to save/restore clients' PMA
 *        allocations for Unix GC-OFF based power management.
 *
 * @param[in]     pPma      PMA pointer
 * @param[in/out] ppList    Pointer to list of all the PMA allocated blocks.
 *
 * @return
 *      NV_OK                   Success
 *      NV_ERR_NO_MEMORY        Failure to allocate list
 */
NV_STATUS
pmaBuildAllocatedBlocksList
(
    PMA             *pPma,
    PRANGELISTTYPE  *ppList
)
{
    return pmaBuildList(pPma, ppList, STATE_PIN | STATE_UNPIN);
}

/*!
 * @brief Frees a list previously generated by pmaBuildPersistentList().
 *
 * @param[in]       pPma            PMA pointer
 * @param[in/out]   ppPersistList   Pointer to list of persistent segments
 *
 * @return
 *      void
 */
void
pmaFreePersistentList
(
    PMA             *pPma,
    PRANGELISTTYPE  *ppPersistList
)
{
    pmaFreeList(pPma, ppPersistList);
}

/*!
 * @brief Frees a list previously generated by pmaBuildAllocatedBlocksList().
 *
 * @param[in]     pPma      PMA pointer
 * @param[in/out] ppList    Pointer to list of all the PMA allocated blocks.
 *
 * @return
 *      void
 */
void
pmaFreeAllocatedBlocksList
(
    PMA             *pPma,
    PRANGELISTTYPE  *ppList
)
{
    pmaFreeList(pPma, ppList);
}
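
/*
 * Usage sketch (illustrative only): the build/free pairing an FBSR-style
 * caller would use around suspend/resume, walking the returned ranges in
 * between.
 *
 *     PRANGELISTTYPE pPersistList = NULL;
 *     if (pmaBuildPersistentList(pPma, &pPersistList) == NV_OK)
 *     {
 *         // ... save/restore the ranges described by pPersistList ...
 *         pmaFreePersistentList(pPma, &pPersistList);
 *     }
 */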

/*!
 * @brief Returns the client-managed blacklisted pages in the PMA region
 *
 * @param[in]  pPma             PMA pointer
 * @param[out] pChunks          pointer to an array that receives the blacklisted addresses
 * @param[out] pPageSize        pointer to the size, in bytes, of each blacklisted page
 * @param[out] pNumChunks       pointer to the count of valid client-managed blacklist pages
 *
 * @return
 *     void
 */
void
pmaGetClientBlacklistedPages
(
    PMA   *pPma,
    NvU64 *pChunks,
    NvU64 *pPageSize,
    NvU32 *pNumChunks
)
{
    NvU32  region = 0;
    NvU32  validEntries = 0;
    NvU32  chunk  = 0;

    NvU32 blacklistCount = 0;
    NvBool bClientManagedBlacklist = NV_FALSE;
    PMA_BLACKLIST_CHUNK *pBlacklistChunks, *pBlacklistChunk;

    for (region = 0; region < pPma->regSize; region++)
    {
        pmaQueryBlacklistInfo(pPma, &blacklistCount,
                              &bClientManagedBlacklist, &pBlacklistChunks);
        if (blacklistCount && bClientManagedBlacklist)
        {
            for (chunk = 0; chunk < blacklistCount; chunk++)
            {
                pBlacklistChunk = &pBlacklistChunks[chunk];
                if (!pBlacklistChunk->bIsValid)
                {
                    pChunks[validEntries++] = pBlacklistChunk->physOffset;
                }
            }
        }
    }

    *pPageSize  = _PMA_64KB;
    *pNumChunks = validEntries;
}
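
/*
 * Usage sketch (illustrative only): the caller owns the pChunks array and
 * must size it for the worst case. MAX_BLACKLIST_ENTRIES is a hypothetical
 * client-chosen bound, not a PMA constant.
 *
 *     NvU64 chunks[MAX_BLACKLIST_ENTRIES];
 *     NvU64 pageSize;
 *     NvU32 numChunks;
 *     pmaGetClientBlacklistedPages(pPma, chunks, &pageSize, &numChunks);
 *     // numChunks == 0 means no client-managed blacklist entries exist
 */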

/*!
 * @brief Returns the total blacklist size in bytes for
 *        both statically and dynamically blacklisted pages.
 *        pDynamicBlacklistSize and pStaticBlacklistSize are only copied-out if non-NULL.
 *
 * @param[in]  pPma                     PMA pointer
 * @param[out] pDynamicBlacklistSize    pointer to dynamic blacklist size (bytes)
 * @param[out] pStaticBlacklistSize     pointer to static blacklist size  (bytes)
 *
 * @return
 *     void
 */
void
pmaGetBlacklistSize
(
    PMA   *pPma,
    NvU32 *pDynamicBlacklistSize,
    NvU32 *pStaticBlacklistSize
)
{
    NvU32 dynamicBlacklistCount = 0;
    NvU32 staticBlacklistCount = 0;
    NvU32 blacklistCount = 0;
    NvU32 region, chunk;

    PMA_BLACKLIST_CHUNK *pBlacklistChunks, *pChunk;

    for (region = 0; region < pPma->regSize; region++)
    {
        pmaQueryBlacklistInfo(pPma, &blacklistCount,
                              NULL, &pBlacklistChunks);
        for (chunk = 0; chunk < blacklistCount; chunk++)
        {
            pChunk = &pBlacklistChunks[chunk];

            if (pChunk->bIsDynamic)
                dynamicBlacklistCount++;
            else
                staticBlacklistCount++;
        }
    }

    if (pDynamicBlacklistSize != NULL)
        *pDynamicBlacklistSize = dynamicBlacklistCount << PMA_PAGE_SHIFT;

    if (pStaticBlacklistSize != NULL)
        *pStaticBlacklistSize = staticBlacklistCount << PMA_PAGE_SHIFT;
}
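
/*
 * Usage sketch (illustrative only): either output may be NULL when only one
 * of the totals is needed.
 *
 *     NvU32 dynamicBytes;
 *     pmaGetBlacklistSize(pPma, &dynamicBytes, NULL);
 */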

void
pmaClearScrubbedPages
(
    PMA *pPma,
    PSCRUB_NODE pPmaScrubList,
    NvU64 count
)
{
    _pmaClearScrubBit(pPma, pPmaScrubList, count);
}

void
pmaPrintMapState
(
    PMA *pPma
)
{
    NvU32 regionIdx;
    for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++)
    {
        pmaRegionPrint(pPma, pPma->pRegDescriptors[regionIdx], pPma->pRegions[regionIdx]);
    }
}

NV_STATUS
pmaAddToBlacklistTracking
(
    PMA   *pPma,
    NvU64  physAddr
)
{
    PMA_BLACKLIST_ADDRESS blacklistPages = {0};
    NV_STATUS status = NV_OK;
    if (pmaIsBlacklistingAddrUnique(pPma, physAddr))
    {
        blacklistPages.physOffset  = physAddr;
        blacklistPages.bIsDynamic  = NV_TRUE;
        status = pmaRegisterBlacklistInfo(pPma, 0, &blacklistPages, 1);
    }
    return status;
}
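
/*
 * Usage sketch (illustrative only): recording a page that produced an
 * uncorrectable error. "faultingPhysAddr" is a hypothetical caller-supplied
 * address; duplicates are filtered by the uniqueness check, so callers need
 * not de-duplicate.
 *
 *     NV_STATUS status = pmaAddToBlacklistTracking(pPma, faultingPhysAddr);
 */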

void
pmaGetTotalProtectedMemory
(
    PMA   *pPma,
    NvU64 *pBytesTotal
)
{
    void *pMap;
    NvU64 totalBytesInRegion;
    NvU32 i;

    *pBytesTotal = 0;

    for (i = 0; i < pPma->regSize; i++)
    {
        if (pPma->pRegDescriptors[i]->bProtected)
        {
            pMap = pPma->pRegions[i];
            pPma->pMapInfo->pmaMapGetSize(pMap, &totalBytesInRegion);
            *pBytesTotal += totalBytesInRegion;
        }
    }
}

void
pmaGetTotalUnprotectedMemory
(
    PMA   *pPma,
    NvU64 *pBytesTotal
)
{
    NvU64 totalBytesInProtectedRegion = 0;
    NvU64 totalBytesOverall = 0;

    *pBytesTotal = 0;

    pmaGetTotalMemory(pPma, &totalBytesOverall);
    pmaGetTotalProtectedMemory(pPma, &totalBytesInProtectedRegion);

    NV_ASSERT_OR_RETURN_VOID(totalBytesOverall >= totalBytesInProtectedRegion);

    *pBytesTotal = totalBytesOverall - totalBytesInProtectedRegion;
}

void
pmaGetFreeProtectedMemory
(
    PMA   *pPma,
    NvU64 *pBytesFree
)
{
    portSyncSpinlockAcquire(pPma->pPmaLock);

    *pBytesFree = (pPma->pmaStats.numFreeFramesProtected) << PMA_PAGE_SHIFT;

    portSyncSpinlockRelease(pPma->pPmaLock);
}

void
pmaGetFreeUnprotectedMemory
(
    PMA   *pPma,
    NvU64 *pBytesFree
)
{
    portSyncSpinlockAcquire(pPma->pPmaLock);

    *pBytesFree = (pPma->pmaStats.numFreeFrames -
                   pPma->pmaStats.numFreeFramesProtected) << PMA_PAGE_SHIFT;

    portSyncSpinlockRelease(pPma->pPmaLock);
}
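
/*
 * Accounting sketch (illustrative only): the protected and unprotected free
 * counts partition the total free count, so the three queries below should
 * agree, modulo any alloc/free that lands between the calls (each call takes
 * the PMA lock independently).
 *
 *     NvU64 freeProt, freeUnprot, freeAll;
 *     pmaGetFreeProtectedMemory(pPma, &freeProt);
 *     pmaGetFreeUnprotectedMemory(pPma, &freeUnprot);
 *     pmaGetFreeMemory(pPma, &freeAll);
 *     // freeProt + freeUnprot == freeAll when nothing intervenes
 */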