/*
 * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.h"
#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h"
#include "gpu/mem_mgr/mem_scrub.h"
#include "utils/nvprintf.h"
#include "utils/nvassert.h"

#if !defined(SRT_BUILD)
// These files are not found on SRT builds
#include "os/os.h"
#else
static NvU64 osGetPageRefcount(NvU64 sysPagePhysAddr)
{
    return 0;
}

static NvU64 osCountTailPages(NvU64 sysPagePhysAddr)
{
    return 0;
}

static void osAllocReleasePage(NvU64 sysPagePhysAddr, NvU32 pageCount)
{
    return;
}

static NV_STATUS osOfflinePageAtAddress(NvU64 address)
{
    return NV_ERR_GENERIC;
}

static NvU8 osGetPageShift(void)
{
    return 0;
}

NV_STATUS scrubCheck(OBJMEMSCRUB *pScrubber, PSCRUB_NODE *ppList, NvU64 *size)
{
    return NV_ERR_GENERIC;
}

NV_STATUS scrubSubmitPages(OBJMEMSCRUB *pScrubber, NvU64 chunkSize, NvU64* pages,
                           NvU64 pageCount, PSCRUB_NODE *ppList, NvU64 *size)
{
    return NV_ERR_GENERIC;
}

NV_STATUS scrubWaitPages(OBJMEMSCRUB *pScrubber, NvU64 chunkSize, NvU64* pages, NvU32 pageCount)
{
    return NV_ERR_GENERIC;
}

NV_STATUS scrubCheckAndWaitForSize(OBJMEMSCRUB *pScrubber, NvU64 numPages,
                                   NvU64 pageSize, PSCRUB_NODE *ppList, NvU64 *pSize)
{
    return NV_ERR_GENERIC;
}
#endif

// Local helpers
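/*!
 * @brief Returns the index of the region that contains the given address.
 *
 * @param[in] pPma      PMA object
 * @param[in] address   Physical address to look up
 *
 * The address is expected to fall within a registered region; if it does
 * not, this function asserts and falls back to region 0.
 */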
NvU32
findRegionID(PMA *pPma, NvU64 address)
{
    NvU32 i;

    for (i = 0; i < pPma->regSize; i++)
    {
        NvU64 start, limit;
        start = pPma->pRegDescriptors[i]->base;
        limit = pPma->pRegDescriptors[i]->limit;
        if (address >= start && address <= limit)
        {
            return i;
        }
    }

    // Should never get here
    NV_ASSERT(0);
    return 0;
}
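
/*!
 * @brief Prints a human-readable description of a block's state
 * (free/unpin/pin) and attribute bits (persistent, scrubbing, evicting,
 * blacklist) at LEVEL_INFO.
 */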
void
pmaPrintBlockStatus(PMA_PAGESTATUS blockStatus)
{
    // Use NV_PRINTF_EX so as not to prepend "NVRM:" every time, as NV_PRINTF does
    if ((blockStatus & STATE_MASK) == STATE_FREE) {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "STATE_FREE         ");
    }
    else if ((blockStatus & STATE_MASK) == STATE_UNPIN) {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "STATE_UNPIN  ");
    }
    else if ((blockStatus & STATE_MASK) == STATE_PIN) {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "STATE_PIN    ");
    }
    else {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "UNKNOWN STATE");
    }

    if (blockStatus & ATTRIB_PERSISTENT) {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " | ATTRIB_PERSISTENT");
    }
    else {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "             ");
    }

    if (blockStatus & ATTRIB_SCRUBBING) {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " | ATTRIB_SCRUBBING ");
    }
    else {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "             ");
    }

    if (blockStatus & ATTRIB_EVICTING) {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " | ATTRIB_EVICTING  ");
    }
    else {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "             ");
    }

    if (blockStatus & ATTRIB_BLACKLIST) {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " | ATTRIB_BLACKLIST ");
    }
    else {
        NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "             ");
    }

    NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\n");
}
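
/*!
 * @brief Dumps the per-frame state of a region at LEVEL_INFO, coalescing runs
 * of frames that share the same state and attributes into single lines.
 *
 * @param[in] pPma      PMA object
 * @param[in] pRegion   Descriptor of the region to print
 * @param[in] pMap      Map backing the region
 */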
void
pmaRegionPrint(PMA *pPma, PMA_REGION_DESCRIPTOR *pRegion, void *pMap)
{
    NvU32 i;
    PMA_PAGESTATUS currStatus, blockStatus = STATE_FREE;
    NvU64 addrBase, addrLimit, numFrames, blockStart = 0;

    NV_ASSERT(pRegion != NULL);
    NV_ASSERT(pMap != NULL);

    (void)blockStart; // Silence the compiler

    addrBase = pRegion->base;
    addrLimit = pRegion->limit;
    numFrames = (addrLimit - addrBase + 1) >> PMA_PAGE_SHIFT;

    NV_PRINTF(LEVEL_INFO, "Region: 0x%llx..0x%llx\n", addrBase, addrLimit);
    NV_PRINTF(LEVEL_INFO, "Total frames: 0x%llx\n", numFrames);

    for (i = 0; i < numFrames; i++)
    {
        currStatus = pPma->pMapInfo->pmaMapRead(pMap, i, NV_TRUE);
        if (i == 0)
        {
            blockStatus = currStatus;
            blockStart  = i;
        }

        if (blockStatus != currStatus)
        {
            NV_PRINTF(LEVEL_INFO, "%8llx..%8x: ", blockStart, i-1);
            pmaPrintBlockStatus(blockStatus);

            blockStatus = currStatus;
            blockStart  = i;
        }
    }
    NV_PRINTF(LEVEL_INFO, "%8llx..%8x: ", blockStart, i-1);
    pmaPrintBlockStatus(blockStatus);
}
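
/*!
 * @brief Sanity-checks the PMA object: the region count, the NUMA onlining
 * state, and that every registered region has both a map and a descriptor.
 *
 * @return NV_TRUE if the state looks consistent, NV_FALSE otherwise.
 */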
NvBool
pmaStateCheck(PMA *pPma)
{
    NvU32 size, i;
    PMA_REGION_DESCRIPTOR *pState;
    void *pMap = NULL;

    if (pPma == NULL) return NV_FALSE;

    size = pPma->regSize;
    if (size >= PMA_REGION_SIZE) return NV_FALSE;

    if (pPma->bNuma)
    {
        if (!pPma->nodeOnlined)
        {
            NV_PRINTF(LEVEL_INFO, "Warning: NUMA state not onlined.\n");
            return NV_TRUE;
        }
        else if (pPma->numaNodeId == PMA_NUMA_NO_NODE)
        {
            NV_PRINTF(LEVEL_INFO, "NUMA node ID invalid.\n");
            return NV_FALSE;
        }
    }

    for (i = 0; i < size; i++)
    {
        pMap = pPma->pRegions[i];
        pState = pPma->pRegDescriptors[i];

        if (pMap == NULL || pState == NULL) return NV_FALSE;
    }

    return NV_TRUE;
}
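
/*!
 * @brief Applies the given state/attribute bits, under the given write mask,
 * to every frame in the range [base, base + size). The whole range is assumed
 * to fall within a single region (see the TODO below).
 *
 * Locking:
 * - This must be called with the PMA lock held!
 */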
void
pmaSetBlockStateAttribUnderPmaLock
(
    PMA           *pPma,
    NvU64          base,
    NvU64          size,
    PMA_PAGESTATUS pmaState,
    PMA_PAGESTATUS pmaStateWriteMask
)
{
    NvU64 numFrames, baseFrame, i;
    NvS32 regId;
    void *pMap;

    NV_ASSERT(pPma != NULL);
    NV_ASSERT(NV_IS_ALIGNED(base, PMA_GRANULARITY));
    NV_ASSERT(NV_IS_ALIGNED(size, PMA_GRANULARITY));

    regId = findRegionID(pPma, base); // Assume the same region ID for base+size. TODO: check this
    pMap = pPma->pRegions[regId];

    numFrames = size >> PMA_PAGE_SHIFT;
    baseFrame = (base - pPma->pRegDescriptors[regId]->base) >> PMA_PAGE_SHIFT;

    for (i = 0; i < numFrames; i++)
    {
        pPma->pMapInfo->pmaMapChangeStateAttribEx(pMap, (baseFrame + i), pmaState, pmaStateWriteMask);
    }
}
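
/*!
 * @brief Same as pmaSetBlockStateAttribUnderPmaLock(), but acquires and
 * releases the PMA lock internally.
 */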
void
pmaSetBlockStateAttrib
(
    PMA           *pPma,
    NvU64          base,
    NvU64          size,
    PMA_PAGESTATUS pmaState,
    PMA_PAGESTATUS pmaStateWriteMask
)
{
    NV_ASSERT(pPma != NULL);

    portSyncSpinlockAcquire(pPma->pPmaLock);

    pmaSetBlockStateAttribUnderPmaLock(pPma, base, size, pmaState, pmaStateWriteMask);

    portSyncSpinlockRelease(pPma->pPmaLock);
}

// This must be called with the PMA lock held!
void
pmaStatsUpdateState
(
    NvU64 *pNumFree,
    NvU64 numPages,
    PMA_PAGESTATUS oldState,
    PMA_PAGESTATUS newState
)
{
    NV_ASSERT(pNumFree != NULL);

    oldState &= STATE_MASK;
    newState &= STATE_MASK;

    if ((oldState == STATE_FREE) && (newState != STATE_FREE))
    {
        (*pNumFree) -= numPages;
      //  NV_PRINTF(LEVEL_INFO, "Decrease to 0x%llx \n", *pNumFree);
    }
    else if ((oldState != STATE_FREE) && (newState == STATE_FREE))
    {
        (*pNumFree) += numPages;
      //  NV_PRINTF(LEVEL_INFO, "Increase to 0x%llx \n", *pNumFree);
    }
}
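
/*!
 * @brief Returns NV_TRUE if any region currently has frames marked as
 * evicting, NV_FALSE otherwise.
 */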
NvBool pmaIsEvictionPending(PMA *pPma)
{
    NvU32 i;
    void *pMap = NULL;

    for (i = 0; i < pPma->regSize; ++i)
    {
        pMap = pPma->pRegions[i];
        if (pPma->pMapInfo->pmaMapGetEvictingFrames(pMap) > 0)
            return NV_TRUE;
    }

    return NV_FALSE;
}
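
/*!
 * @brief Thin wrapper around osSchedule(); compiled to a no-op on SRT builds
 * where the OS interface is unavailable.
 */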
void pmaOsSchedule(void)
{
    // TODO Move osSchedule() to nvport?
#if !defined(SRT_BUILD)
    osSchedule();
#endif
}

/*!
 * @brief Handle eviction results from UVM and free the reused pages back to
 * the OS if eviction failed half-way.
 * If eviction was successful, we have to double-check the refcount and
 * decide whether it's OK to reuse the pages for this eviction.
 * See bug 2019754.
 */
static NV_STATUS
_pmaCleanupNumaReusePages
(
    PMA         *pPma,
    NvU64        evictStart,
    NvU64        numFrames,
    NvBool       bEvictionSucceeded
)
{
    NvU32 regId;
    NvU64 sysPhysAddr = 0, sysPagePhysAddr = 0;
    NvU64 frameNum, addrBase, i;
    PMA_PAGESTATUS currentStatus;
    NvBool bRaisedRefcount = NV_FALSE;

    regId       = findRegionID(pPma, evictStart);
    addrBase    = pPma->pRegDescriptors[regId]->base;
    frameNum    = PMA_ADDR2FRAME(evictStart, addrBase);
    sysPhysAddr = evictStart + pPma->coherentCpuFbBase;

    if (bEvictionSucceeded == NV_TRUE)
    {
        //
        // If eviction from UVM succeeded, we double check the refcount and
        // update whether we should reuse these pages or not. If refcount is
        // greater than the appropriate number (1 for non-compound pages; for
        // compound pages, refcount should be equal to the number of pages
        // in this compound page), that means someone called get_user_pages
        // on those pages and we need to fail this eviction.
        //
        for (i = 0; i < numFrames; i++)
        {
            sysPagePhysAddr = sysPhysAddr + (i << PMA_PAGE_SHIFT);

            if (osGetPageRefcount(sysPagePhysAddr) > osCountTailPages(sysPagePhysAddr))
            {
                bRaisedRefcount = NV_TRUE;
                break;
            }
        }
    }

    if (!bEvictionSucceeded || bRaisedRefcount)
    {
        //
        // Eviction failed. Need to clean up.
        // Since we set the NUMA_REUSE bit when we decide to reuse the pages,
        // we know exactly which pages to free both to the OS and in the PMA bitmap.
        //
        NvU8 osPageShift = osGetPageShift();

        NV_ASSERT_OR_RETURN(PMA_PAGE_SHIFT >= osPageShift, NV_ERR_INVALID_STATE);

        for (i = 0; i < numFrames; i++)
        {
            currentStatus = pPma->pMapInfo->pmaMapRead(pPma->pRegions[regId], (frameNum + i), NV_TRUE);
            sysPagePhysAddr = sysPhysAddr + (i << PMA_PAGE_SHIFT);

            if (currentStatus & ATTRIB_NUMA_REUSE)
            {
                osAllocReleasePage(sysPagePhysAddr, 1 << (PMA_PAGE_SHIFT - osPageShift));
                pPma->pMapInfo->pmaMapChangeStateAttribEx(pPma->pRegions[regId], (frameNum + i),
                                                          STATE_FREE, (STATE_MASK | ATTRIB_NUMA_REUSE));
            }
        }

        return NV_ERR_NO_MEMORY;
    }

    return NV_OK;
}

/*!
 * @brief Eviction for a contiguous allocation always evicts the full range to
 * be allocated; the pmaMapScanContiguous() function sets the address to start
 * eviction at as the first entry in the array of pages.
 */
NV_STATUS
_pmaEvictContiguous
(
    PMA              *pPma,
    void             *pMap,
    NvU64             evictStart,
    NvU64             evictEnd,
    MEMORY_PROTECTION prot
)
{
    NV_STATUS status;
    NvU64 numFramesToEvict;
    NvU64 evictSize;
    NvU64 frameEvictionsInProcess = pPma->pMapInfo->pmaMapGetEvictingFrames(pMap);
    NvBool pmaNumaEnabled = pPma->bNuma;

    evictSize = evictEnd - evictStart + 1;
    numFramesToEvict = evictSize >> PMA_PAGE_SHIFT;
    frameEvictionsInProcess += numFramesToEvict;
    pPma->pMapInfo->pmaMapSetEvictingFrames(pMap, frameEvictionsInProcess);

    pmaSetBlockStateAttribUnderPmaLock(pPma, evictStart, evictSize, ATTRIB_EVICTING, ATTRIB_EVICTING);

    // Release PMA lock before calling into UVM for eviction.
    portSyncSpinlockRelease(pPma->pPmaLock);

    if (pPma->bScrubOnFree)
    {
        PSCRUB_NODE pPmaScrubList = NULL;
        portSyncMutexRelease(pPma->pAllocLock);

        status = pPma->evictRangeCb(pPma->evictCtxPtr, evictStart, evictEnd, prot);

        portSyncMutexAcquire(pPma->pAllocLock);

        NV_PRINTF(LEVEL_INFO, "evictRangeCb returned with status %llx\n", (NvU64)status);

        if (status != NV_OK)
        {
            goto evict_cleanup;
        }
        // For NUMA we will scrub only what's being evicted and returned to the client.
        if (pmaNumaEnabled)
        {
            //
            // The evicting contiguous range is marked as ATTRIB_EVICTING
            // and hence there will be no page stealing.
            //
            NvU64 count;

            if ((status = scrubSubmitPages(pPma->pScrubObj, (NvU32)evictSize, &evictStart,
                                           1, &pPmaScrubList, &count)) != NV_OK)
            {
                status = NV_ERR_INSUFFICIENT_RESOURCES;
                goto scrub_exit;
            }

            if (count > 0)
                _pmaClearScrubBit(pPma, pPmaScrubList, count);
        }

        if ((status = _pmaCheckScrubbedPages(pPma, evictSize, &evictStart, 1)) != NV_OK)
        {
            status = NV_ERR_INSUFFICIENT_RESOURCES;
            goto scrub_exit; // just in case someone adds anything below.
        }

scrub_exit:
        portMemFree(pPmaScrubList);

        if (!pmaNumaEnabled &&
            (status == NV_ERR_INSUFFICIENT_RESOURCES))
        {
            NV_PRINTF(LEVEL_INFO, "ERROR: scrubber OOM!\n");
            goto exit; // FIXME: never exit early violating lock semantics
        }
    }
    else
    {
        status = pPma->evictRangeCb(pPma->evictCtxPtr, evictStart, evictEnd, prot);
        NV_PRINTF(LEVEL_INFO, "evictRangeCb returned with status %llx\n", (NvU64)status);
    }

evict_cleanup:
    // Reacquire PMA lock after returning from UVM and scrubber.
    portSyncSpinlockAcquire(pPma->pPmaLock);

    //
    // When we are in NUMA mode, we need to double check the NUMA_REUSE page attribute
    // to possibly return these pages to the OS.
    //
    if (pmaNumaEnabled)
    {
        status = _pmaCleanupNumaReusePages(pPma, evictStart, numFramesToEvict, (status == NV_OK));
    }

    pmaSetBlockStateAttribUnderPmaLock(pPma, evictStart, evictSize, 0, ATTRIB_EVICTING | ATTRIB_NUMA_REUSE);

    frameEvictionsInProcess = pPma->pMapInfo->pmaMapGetEvictingFrames(pMap);
    NV_ASSERT(frameEvictionsInProcess >= numFramesToEvict);
    pPma->pMapInfo->pmaMapSetEvictingFrames(pMap, (frameEvictionsInProcess - numFramesToEvict));

exit:
    return status;
}

/*!
 * @brief Eviction for a non-contiguous range allows the UVM driver to pick
 * the specific pages to be evicted. The UVM driver is required to hand back
 * pages to PMA in STATE_PIN state to prevent page stealing.
 */
NV_STATUS
_pmaEvictPages
(
    PMA              *pPma,
    void             *pMap,
    NvU64            *evictPages,
    NvU64             evictPageCount,
    NvU64            *allocPages,
    NvU64             allocPageCount,
    NvU64             pageSize,
    NvU64             physBegin,
    NvU64             physEnd,
    MEMORY_PROTECTION prot
)
{
    NvU64 i;
    NV_STATUS status;
    NvU64 numFramesToEvict = evictPageCount * (pageSize >> PMA_PAGE_SHIFT);
    NvU64 frameEvictionsInProcess = pPma->pMapInfo->pmaMapGetEvictingFrames(pMap);
    NvBool pmaNumaEnabled = pPma->bNuma;

    frameEvictionsInProcess += numFramesToEvict;
    pPma->pMapInfo->pmaMapSetEvictingFrames(pMap, frameEvictionsInProcess);

    //
    // Pin all the already allocated pages before unlocking the PMA
    // lock to prevent them from being allocated while eviction is
    // happening.
    //
    for (i = 0; i < allocPageCount; i++)
        pmaSetBlockStateAttribUnderPmaLock(pPma, allocPages[i], pageSize, STATE_PIN, STATE_PIN);

    // Release PMA lock before calling into UVM for eviction.
    portSyncSpinlockRelease(pPma->pPmaLock);

    if (pPma->bScrubOnFree)
    {
        PSCRUB_NODE pPmaScrubList = NULL;
        NvU64 count = 0;

        portSyncMutexRelease(pPma->pAllocLock);
        status = pPma->evictPagesCb(pPma->evictCtxPtr, pageSize, evictPages,
                            (NvU32)evictPageCount, physBegin, physEnd, prot);
        portSyncMutexAcquire(pPma->pAllocLock);

        NV_PRINTF(LEVEL_INFO, "evictPagesCb returned with status %llx\n", (NvU64)status);

        if (status != NV_OK)
        {
            goto evict_cleanup;
        }

        // Don't need to mark ATTRIB_SCRUBBING to protect the pages because they are already pinned
        status = scrubSubmitPages(pPma->pScrubObj, pageSize, evictPages,
                                  (NvU32)evictPageCount, &pPmaScrubList, &count);
        NV_ASSERT_OR_GOTO((status == NV_OK), scrub_exit);

        if (count > 0)
            _pmaClearScrubBit(pPma, pPmaScrubList, count);

        // Wait for our scrubbing to complete
        status = _pmaCheckScrubbedPages(pPma, pageSize, evictPages, (NvU32)evictPageCount);

scrub_exit:
        // Free the actual list, although allocated by objscrub
        portMemFree(pPmaScrubList);

        if ((status != NV_OK) && !pmaNumaEnabled)
        {
            status = NV_ERR_INSUFFICIENT_RESOURCES; // Caller expects this status.
            NV_PRINTF(LEVEL_ERROR, "ERROR: scrubber OOM!\n");
            NV_ASSERT_OK_OR_RETURN(status);
        }
    }
    else
    {
        status = pPma->evictPagesCb(pPma->evictCtxPtr, pageSize, evictPages,
                                (NvU32)evictPageCount, physBegin, physEnd, prot);
        NV_PRINTF(LEVEL_INFO, "evictPagesCb returned with status %llx\n", (NvU64)status);
    }

evict_cleanup:
    // Reacquire PMA lock after returning from UVM.
    portSyncSpinlockAcquire(pPma->pPmaLock);

    // Unpin the allocations now that we reacquired the PMA lock.
    for (i = 0; i < allocPageCount; i++)
        pmaSetBlockStateAttribUnderPmaLock(pPma, allocPages[i], pageSize, 0, STATE_PIN);

    frameEvictionsInProcess = pPma->pMapInfo->pmaMapGetEvictingFrames(pMap);
    NV_ASSERT(frameEvictionsInProcess >= numFramesToEvict);
    pPma->pMapInfo->pmaMapSetEvictingFrames(pMap, (frameEvictionsInProcess - numFramesToEvict));

    return status;
}

//
// Region selector
// Given specific PMA_ALLOCATE_* requirements, generate a list of possible intersecting regions.
// Invalid regionList IDs are set to -1.
//
NV_STATUS
pmaSelector
(
    PMA                     *pPma,
    PMA_ALLOCATION_OPTIONS  *allocationOptions,
    NvS32                   *regionList
)
{
    // regSize never decreases and registered states don't change, so this is lock-free
    NvU32     i;
    NvU32     flags = allocationOptions->flags;
    NvU32     regionCount = 0;
    NV_STATUS status = NV_OK;

    NV_ASSERT(regionList != NULL);
    NV_ASSERT(allocationOptions != NULL);

    for (i = 0; i < pPma->regSize; i++)
    {
        if (flags & PMA_ALLOCATE_SPECIFY_REGION_ID)
        {
            if (i != allocationOptions->regionId)
            {
                // Skip: wrong region ID
                continue;
            }
        }

        if (!!(flags & PMA_ALLOCATE_PROTECTED_REGION) ^
            (pPma->pRegDescriptors[i]->bProtected))
        {
            // Don't allow unprotected allocations in a protected region
            // OR protected allocations in an unprotected region.
            continue;
        }

        if (flags & PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE)
        {
            NvU64 regionBegin, regionEnd;
            NvU64 rangeBegin, rangeEnd;
            PMA_REGION_DESCRIPTOR *regionDes;

            rangeBegin = allocationOptions->physBegin;
            rangeEnd = allocationOptions->physEnd;

            regionDes = pPma->pRegDescriptors[i];
            regionBegin = regionDes->base;
            regionEnd = regionDes->limit;

            if ((rangeEnd < regionBegin) || (rangeBegin > regionEnd))
            {
                // Skip: requested range doesn't intersect this region
                continue;
            }
        }

        if (flags & PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED)
        {
            if (pPma->pRegDescriptors[i]->performance < allocationOptions->minimumSpeed)
            {
                // Skip: region perf is below the minimum threshold
                continue;
            }
        }

        if (regionCount > 0)
        {
            NvU32 j = regionCount;

            if (flags & PMA_ALLOCATE_REVERSE_ALLOC)
            {
                // Find insertion point (highest memory address to lowest)
                while ((j > 0) &&
                    (pPma->pRegDescriptors[i]->limit > pPma->pRegDescriptors[regionList[j-1]]->limit))
                {
                    regionList[j] = regionList[j-1];
                    j--;
                }
            }
            else if (flags & PMA_ALLOCATE_PREFER_SLOWEST)
            {
                // Find insertion point (slowest to fastest)
                while ((j > 0) &&
                    (pPma->pRegDescriptors[i]->performance < pPma->pRegDescriptors[regionList[j-1]]->performance))
                {
                    regionList[j] = regionList[j-1];
                    j--;
                }
            }
            else
            {
                // Find insertion point (fastest to slowest)
                while ((j > 0) &&
                    (pPma->pRegDescriptors[i]->performance > pPma->pRegDescriptors[regionList[j-1]]->performance))
                {
                    regionList[j] = regionList[j-1];
                    j--;
                }
            }

            // Insert in sorted order
            regionList[j] = i;
            regionCount++;
        }
        else
        {
            regionList[regionCount++] = i;
        }
    }

    // Invalidate the unused slots
    for (i = regionCount; i < pPma->regSize; i++)
    {
        regionList[i] = -1;
    }

    if (regionCount == 0)
    {
        status = NV_ERR_NO_MEMORY;
    }

    return status;
}

/*!
 * @brief Takes a list of (base, size) ranges and clears the scrubbing bit on
 * all pages in those ranges. It is only called once we know those ranges have
 * finished scrubbing.
 *
 * @param[in] pPma              PMA object
 * @param[in] pPmaScrubList     The list of ranges that need to be cleared
 * @param[in] count             Length of the list
 *
 * Note:
 *  - This call takes the PMA lock! Do not call this with the PMA lock held.
 */
void
_pmaClearScrubBit
(
    PMA         *pPma,
    PSCRUB_NODE pPmaScrubList,
    NvU64       count
)
{
    NvU32 i;
    NvU64 base;
    NvU64 size;

    NV_ASSERT(count > 0);
    portSyncSpinlockAcquire(pPma->pPmaLock);

    for (i = 0; i < count; i++)
    {
        base = pPmaScrubList[i].base;
        size = pPmaScrubList[i].size;
        NV_ASSERT(size > 0);
        pmaSetBlockStateAttribUnderPmaLock(pPma, base, size, 0, ATTRIB_SCRUBBING);
    }
    portSyncSpinlockRelease(pPma->pPmaLock);
}

/*!
 * @brief Optionally waits for scrubbing to finish for a list of pages, then
 * checks the scrubber status and clears the ATTRIB_SCRUBBING page attribute
 * on any pages that have completed scrubbing.
 *
 * @param[in] chunkSize The size of each page being waited on
 * @param[in] pPages    The list of pages being waited on
 * @param[in] pageCount The number of pages we are waiting for
 *                      If pageCount == 0, then we don't wait for any pages
 *
 * Locking:
 * - This needs to be called without the PMA lock!
 * - This call will take the PMA lock internally to modify page attributes.
 */
NV_STATUS
_pmaCheckScrubbedPages
(
    PMA     *pPma,
    NvU64   chunkSize,
    NvU64   *pPages,
    NvU32   pageCount
)
{
    PSCRUB_NODE pPmaScrubList = NULL;
    NvU64 count = 0;
    NV_STATUS status = NV_OK;

    // If the caller wants to wait for something, we wait first before checking
    if (pageCount != 0)
    {
        if ((status = scrubWaitPages(pPma->pScrubObj, chunkSize, pPages, pageCount)) != NV_OK)
            return status;
    }

    status = scrubCheck(pPma->pScrubObj, &pPmaScrubList, &count);
    NV_ASSERT_OR_GOTO((status == NV_OK), exit);

    // This call takes the PMA lock!
    if (count > 0)
        _pmaClearScrubBit(pPma, pPmaScrubList, count);

exit:
    // Free the actual list, although allocated by objscrub
    portMemFree(pPmaScrubList);

    return status;
}
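
/*!
 * @brief Quick check of the free-frame and free-2MB-page counters to predict
 * whether an allocation request can possibly be satisfied, so obvious
 * out-of-memory cases can fail early without scanning the region maps.
 *
 * @return NV_OK if the request might succeed, NV_ERR_NO_MEMORY if it cannot.
 */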
NV_STATUS
_pmaPredictOutOfMemory
(
    PMA                    *pPma,
    NvLength                allocationCount,
    NvU64                   pageSize,
    PMA_ALLOCATION_OPTIONS *allocationOptions
)
{
    NvU32 alignFlag, partialFlag;
    NvU64 alignment;
    NvU64 free2mbPages = 0;
    NvU64 bytesFree    = 0;

    alignFlag   = !!((allocationOptions->flags) & PMA_ALLOCATE_FORCE_ALIGNMENT);
    partialFlag = !!((allocationOptions->flags) & PMA_ALLOCATE_ALLOW_PARTIAL);
    alignment   = allocationOptions->alignment;

    if ((alignFlag && (alignment == _PMA_2MB)) || pageSize == _PMA_2MB)
    {
        if (allocationOptions->flags & PMA_ALLOCATE_PROTECTED_REGION)
        {
            free2mbPages = pPma->pmaStats.numFree2mbPagesProtected;
        }
        else
        {
            free2mbPages = pPma->pmaStats.numFree2mbPages -
                           pPma->pmaStats.numFree2mbPagesProtected;
        }

        // If we have at least one page free, don't fail a partial allocation
        if (partialFlag && (free2mbPages > 0))
        {
            return NV_OK;
        }

        if (free2mbPages < allocationCount)
        {
            return NV_ERR_NO_MEMORY;
        }
    }

    // Do a quick check and exit early if we are in the OOM case
    if (allocationOptions->flags & PMA_ALLOCATE_PROTECTED_REGION)
    {
        bytesFree = pPma->pmaStats.numFreeFramesProtected << PMA_PAGE_SHIFT;
    }
    else
    {
        bytesFree = (pPma->pmaStats.numFreeFrames -
                     pPma->pmaStats.numFreeFramesProtected) << PMA_PAGE_SHIFT;
    }

    // If we have at least one page free, don't fail a partial allocation
    if (partialFlag && (bytesFree >= pageSize))
    {
        return NV_OK;
    }

    if (bytesFree < (pageSize * allocationCount))
    {
        return NV_ERR_NO_MEMORY;
    }

    return NV_OK;
}

/*!
 * @brief Internal function to free the blacklisted pages that fall within the
 * range of an allocation request. This enables PMA to let the OS manage those
 * blacklisted pages after they have been allocated.
 *
 * @param[in] pPma       PMA object
 * @param[in] regId      PMA region ID the allocation falls into
 * @param[in] rangeBegin Start address of the allocation range
 * @param[in] rangeSize  Size of the allocation range
 *
 * Locking:
 * - This needs to be called with the PMA lock!
 */
void
_pmaFreeBlacklistPages
(
    PMA   *pPma,
    NvU32  regId,
    NvU64  rangeBegin,
    NvU64  rangeSize
)
{
    NvU32 blacklistCount = 0;
    NvU32 chunk;
    NvU64 alignedBlacklistAddr;
    NvBool bClientManagedBlacklist = NV_FALSE;
    PMA_BLACKLIST_CHUNK *pBlacklistChunks, *pBlacklistChunk;

    pmaQueryBlacklistInfo(pPma, &blacklistCount, &bClientManagedBlacklist, &pBlacklistChunks);

    if (blacklistCount == 0)
        return; // Return early, nothing to do.

    for (chunk = 0; chunk < blacklistCount; chunk++)
    {
        pBlacklistChunk = &pBlacklistChunks[chunk];
        if (pBlacklistChunk->bIsValid && (pBlacklistChunk->physOffset >= rangeBegin &&
               pBlacklistChunk->physOffset <= (rangeBegin + rangeSize - 1)))
        {
            //
            // Clear the blacklist attribute of the pages.
            // Since physOffset here is the absolute address, make sure we align it to 64K.
            //
            alignedBlacklistAddr = NV_ALIGN_DOWN64(pBlacklistChunk->physOffset, PMA_GRANULARITY);
            pmaSetBlockStateAttribUnderPmaLock(pPma, alignedBlacklistAddr, PMA_GRANULARITY, 0, ATTRIB_BLACKLIST);
            pBlacklistChunk->bIsValid = NV_FALSE;
            bClientManagedBlacklist = NV_TRUE;
        }
    }

    pmaSetClientManagedBlacklist(pPma, bClientManagedBlacklist);

    return;
}

/*!
 * @brief Internal function to reallocate blacklisted pages in the range of an
 * allocation request. This is called when an allocation that requested
 * blacklisting OFF fails, or when an allocation with blacklisting OFF is freed.
 *
 * @param[in] pPma       PMA object
 * @param[in] regId      PMA region ID the allocation falls into
 * @param[in] rangeBegin Start address of the allocation range
 * @param[in] rangeSize  Size of the allocation range
 *
 * Locking:
 * - This needs to be called with the PMA lock!
 */
void _pmaReallocBlacklistPages
(
    PMA  *pPma,
    NvU32 regId,
    NvU64 rangeBegin,
    NvU64 rangeSize
)
{
    NvU32 blacklistCount = 0;
    NvU32 chunk;
    NvU64 alignedBlacklistAddr;
    NvU32 reallocatedBlacklistCount = 0;

    NvBool bClientManagedBlacklist = NV_FALSE;
    PMA_BLACKLIST_CHUNK *pBlacklistChunks, *pBlacklistChunk;
    pmaQueryBlacklistInfo(pPma, &blacklistCount, &bClientManagedBlacklist, &pBlacklistChunks);

    if (blacklistCount == 0 || !bClientManagedBlacklist)
    {
        return;
    }

    // Assert if scrub on free is enabled for client managed blacklist
    NV_ASSERT(pPma->bScrubOnFree == NV_FALSE);

    for (chunk = 0; chunk < blacklistCount; chunk++)
    {
        pBlacklistChunk = &pBlacklistChunks[chunk];
        if (!pBlacklistChunk->bIsValid &&
               (pBlacklistChunk->physOffset >= rangeBegin &&
                pBlacklistChunk->physOffset <= (rangeBegin + rangeSize - 1)))
        {
            // Since physOffset here is the absolute address, make sure we align it to 64K
            alignedBlacklistAddr = NV_ALIGN_DOWN64(pBlacklistChunk->physOffset, PMA_GRANULARITY);
            pmaSetBlockStateAttribUnderPmaLock(pPma, alignedBlacklistAddr, PMA_GRANULARITY, ATTRIB_BLACKLIST, ATTRIB_BLACKLIST);
            pBlacklistChunk->bIsValid = NV_TRUE;
        }
        reallocatedBlacklistCount = (pBlacklistChunk->bIsValid == NV_TRUE) ?
                                    (reallocatedBlacklistCount + 1) : reallocatedBlacklistCount;
    }

    // Reset the flag if the client handed over the blacklisted pages in their region to RM.
    if (chunk == reallocatedBlacklistCount)
    {
        pmaSetClientManagedBlacklist(pPma, NV_FALSE);
    }
}

/*!
 * @brief Internal function to look up whether the given frame is already
 * blacklisted. Returns NV_TRUE if so, NV_FALSE otherwise.
 *
 * @param[in] pPma       PMA object
 * @param[in] regId      PMA region ID the frame falls into
 * @param[in] frameNum   Frame number which needs to be checked
 *
 * Locking:
 * - This needs to be called with the PMA lock!
 */
NvBool
_pmaLookupBlacklistFrame
(
    PMA   *pPma,
    NvU32  regId,
    NvU64  frameNum
)
{
    NvU32 blacklistCount;
    NvU64 addrBase;
    NvU32 chunk;
    NvU64 cliManagedBlackFrame = 0;

    NvBool bClientManagedBlacklist = NV_FALSE;
    PMA_BLACKLIST_CHUNK *pBlacklistChunks, *pBlacklistChunk;

    pmaQueryBlacklistInfo(pPma, &blacklistCount, &bClientManagedBlacklist, &pBlacklistChunks);

    if (blacklistCount == 0 || !bClientManagedBlacklist)
        return NV_FALSE;

    addrBase = pPma->pRegDescriptors[regId]->base;
    for (chunk = 0; chunk < blacklistCount; chunk++)
    {
        pBlacklistChunk = &pBlacklistChunks[chunk];
        if (pBlacklistChunk->bIsValid)
        {
            // Calculate the frame address
            cliManagedBlackFrame = PMA_ADDR2FRAME(pBlacklistChunk->physOffset, addrBase);
            if (cliManagedBlackFrame == frameNum)
            {
                return NV_TRUE;
            }
        }
    }
    return NV_FALSE;
}

/*!
 * @brief Returns a list of PMA-managed blocks with the specified state and
 *        attributes.
 *
 * @param[in]     pPma          PMA pointer
 * @param[in/out] ppList        Pointer to list of segments having specified
 *                              state and attributes
 * @param[in]     pageStatus    PMA page state and attribute
 *
 * @return
 *      NV_OK                   Success
 *      NV_ERR_NO_MEMORY        Failure to allocate list
 */
NV_STATUS
pmaBuildList
(
    PMA             *pPma,
    PRANGELISTTYPE  *ppList,
    PMA_PAGESTATUS   pageStatus
)
{
    NvU32 regionIdx, frameNum;
    NvU64 addrBase, addrLimit, numFrames;
    NvU64 blockStart = 0, blockEnd = 0;
    NvBool bBlockValid;
    PMA_PAGESTATUS pageState;
    PRANGELISTTYPE pRangeCurr, pRangeList = NULL;
    NV_STATUS status = NV_OK;
    void *pMap = NULL;

    for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++)
    {
        pMap = pPma->pRegions[regionIdx];
        addrBase = pPma->pRegDescriptors[regionIdx]->base;
        addrLimit = pPma->pRegDescriptors[regionIdx]->limit;
        numFrames = (addrLimit - addrBase + 1) >> PMA_PAGE_SHIFT;
        bBlockValid = NV_FALSE;

        for (frameNum = 0; frameNum < numFrames; frameNum++)
        {
            pageState = pPma->pMapInfo->pmaMapRead(pMap, frameNum, NV_TRUE);
            if (pageState & pageStatus)
            {
                if (bBlockValid)
                {
                    // Block start already found.  Find the end
                    blockEnd = frameNum;
                }
                else
                {
                    // Block start found.  Now find the end
                    blockStart = frameNum;
                    blockEnd = frameNum;
                    bBlockValid = NV_TRUE;
                }
            }
            else if (bBlockValid)
            {
                // Block found having required PMA page state. Store it in the list
                pRangeCurr = (PRANGELISTTYPE) portMemAllocNonPaged(sizeof(RANGELISTTYPE));
                if (pRangeCurr)
                {
                    pRangeCurr->base  = addrBase + blockStart * PMA_GRANULARITY;
                    pRangeCurr->limit = addrBase + blockEnd * PMA_GRANULARITY + PMA_GRANULARITY - 1;
                    pRangeCurr->pNext = pRangeList;
                    pRangeList = pRangeCurr;
                }
                else
                {
                    // Allocation failed
                    pmaFreeList(pPma, &pRangeList);
                    pRangeList = NULL;
                    status = NV_ERR_NO_MEMORY;
                    break;
                }

                bBlockValid = NV_FALSE;
            }
        }

        // No point checking further if we are already out of memory
        if (status == NV_ERR_NO_MEMORY)
            break;

        // Check if last frame was part of a block.
        if (bBlockValid)
        {
            // Block found having required PMA page state. Store it in the list
            pRangeCurr = (PRANGELISTTYPE) portMemAllocNonPaged(sizeof(RANGELISTTYPE));
            if (pRangeCurr)
            {
                pRangeCurr->base  = addrBase + blockStart * PMA_GRANULARITY;
                pRangeCurr->limit = addrBase + blockEnd * PMA_GRANULARITY + PMA_GRANULARITY - 1;
                pRangeCurr->pNext = pRangeList;
                pRangeList = pRangeCurr;
            }
            else
            {
                // Allocation failed
                pmaFreeList(pPma, &pRangeList);
                pRangeList = NULL;
                status = NV_ERR_NO_MEMORY;
                break;
            }
        }
    }

    *ppList = pRangeList;

    return status;
}

/*!
 * @brief Frees a list of PMA-managed blocks previously generated with
 *        pmaBuildList().
 *
 * @param[in]     pPma      PMA pointer
 * @param[in/out] ppList    Pointer to list of PMA segments
 *
 * @return
 *      None
 */
void
pmaFreeList
(
    PMA             *pPma,
    PRANGELISTTYPE  *ppList
)
{
    PRANGELISTTYPE pRangeCurr = *ppList;
    PRANGELISTTYPE pRangeNext;

    while (pRangeCurr)
    {
        pRangeNext = pRangeCurr->pNext;
        portMemFree(pRangeCurr);
        pRangeCurr = pRangeNext;
    }

    *ppList = NULL;
}
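
/*!
 * @brief Records the given blacklisted pages in PMA's tracking structure and
 * marks the corresponding 64K frames with ATTRIB_BLACKLIST. On NUMA systems
 * with auto-onlined memory, the pages are additionally offlined through the
 * kernel.
 *
 * @param[in] pPma                PMA object
 * @param[in] physAddrBase        Base address of the region (unused here)
 * @param[in] pBlacklistPageBase  List of blacklisted page addresses
 * @param[in] blacklistCount      Number of entries in the list
 *
 * @return NV_OK on success, NV_ERR_NO_MEMORY if the tracking structure could
 *         not be allocated.
 */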
NV_STATUS
pmaRegisterBlacklistInfo
(
    PMA                    *pPma,
    NvU64                   physAddrBase,
    PMA_BLACKLIST_ADDRESS  *pBlacklistPageBase,
    NvU32                   blacklistCount
)
{
    NvU32 i;
    NvU64 alignedBlacklistAddr;
    PMA_BLACKLIST_CHUNK *pBlacklistChunk = NULL;
    NvU32 nextBlacklistEntry = 0;
    NvU32 blacklistEntryIn = 0;

    if (blacklistCount == 0 || pBlacklistPageBase == NULL)
    {
        return NV_OK;
    }

    if (pPma->pBlacklistChunks == NULL)
    {
        pPma->pBlacklistChunks = (PMA_BLACKLIST_CHUNK *)
            portMemAllocNonPaged(PMA_MAX_BLACKLIST_ENTRIES * sizeof(PMA_BLACKLIST_CHUNK));
        if (pPma->pBlacklistChunks == NULL)
        {
            pPma->blacklistCount = 0;
            NV_PRINTF(LEVEL_ERROR, "ERROR: Insufficient memory to allocate blacklisting tracking structure.\n");
            return NV_ERR_NO_MEMORY;
        }
        portMemSet(pPma->pBlacklistChunks, 0, PMA_MAX_BLACKLIST_ENTRIES * sizeof(PMA_BLACKLIST_CHUNK));
    }

    nextBlacklistEntry = pPma->blacklistCount;

    for (i = nextBlacklistEntry; i < (nextBlacklistEntry + blacklistCount); i++)
    {
        pBlacklistChunk = &pPma->pBlacklistChunks[i];
        pBlacklistChunk->physOffset = pBlacklistPageBase[blacklistEntryIn].physOffset;
        pBlacklistChunk->bIsDynamic = pBlacklistPageBase[blacklistEntryIn].bIsDynamic;

        // Since physOffset here is the absolute address, make sure we align it to 64K
        alignedBlacklistAddr = NV_ALIGN_DOWN64(pBlacklistPageBase[blacklistEntryIn].physOffset, PMA_GRANULARITY);
        pmaSetBlockStateAttrib(pPma, alignedBlacklistAddr, PMA_GRANULARITY, ATTRIB_BLACKLIST, ATTRIB_BLACKLIST);
        pBlacklistChunk->bIsValid = NV_TRUE;

        //
        // In NUMA systems, memory allocation comes directly from the kernel, which
        // won't check for ATTRIB_BLACKLIST. So pages need to be blacklisted
        // directly through the kernel.
        //
        // Use physOffset without 64K alignment, because the kernel may use a different
        // page size.
        //
        // This is only needed for NUMA systems that auto online NUMA memory.
        // Other systems (e.g., P9) already do blacklisting in nvidia-persistenced.
        //
        if (pPma->bNuma && pPma->bNumaAutoOnline)
        {
            NV_STATUS status;

            NV_PRINTF(LEVEL_INFO,
                      "NUMA enabled - blacklisting page through kernel at address 0x%llx (GPA) 0x%llx (SPA)\n",
                      pBlacklistPageBase[blacklistEntryIn].physOffset,
                      pBlacklistPageBase[blacklistEntryIn].physOffset + pPma->coherentCpuFbBase);
            status = osOfflinePageAtAddress(pBlacklistPageBase[blacklistEntryIn].physOffset + pPma->coherentCpuFbBase);
            if (status != NV_OK)
            {
                NV_PRINTF(LEVEL_ERROR, "osOfflinePageAtAddress() failed with status: %d\n", status);
            }
        }

        blacklistEntryIn++;
    }

    pPma->blacklistCount += blacklistCount;

    return NV_OK;
}
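
/*!
 * @brief Sets whether the blacklisted pages are currently managed by the
 * client rather than by RM.
 */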
void
pmaSetClientManagedBlacklist
(
    PMA    *pPma,
    NvBool bClientManagedBlacklist
)
{
    pPma->bClientManagedBlacklist = bClientManagedBlacklist;
}
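
/*!
 * @brief Returns the blacklist count, the client-managed flag, and the
 * blacklist chunk array. Any output pointer may be NULL if the caller does
 * not need that value.
 */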
void
pmaQueryBlacklistInfo
(
    PMA     *pPma,
    NvU32   *pBlacklistCount,
    NvBool  *pbClientManagedBlacklist,
    PMA_BLACKLIST_CHUNK **ppBlacklistChunks
)
{
    if (pBlacklistCount)
    {
        *pBlacklistCount = pPma->blacklistCount;
    }

    if (pbClientManagedBlacklist)
    {
        *pbClientManagedBlacklist = pPma->bClientManagedBlacklist;
    }

    if (ppBlacklistChunks)
    {
        *ppBlacklistChunks = pPma->pBlacklistChunks;
    }
}
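
/*!
 * @brief Returns NV_TRUE if the given physical address is not already present
 * in the blacklist, NV_FALSE if it is.
 */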
NvBool
pmaIsBlacklistingAddrUnique
(
    PMA   *pPma,
    NvU64 physAddr
)
{
    NvU32 count = 0;
    PMA_BLACKLIST_CHUNK *pBlacklistChunk = NULL;

    if (pPma->blacklistCount == 0)
    {
        return NV_TRUE;
    }

    for (count = 0; count < pPma->blacklistCount; count++)
    {
        pBlacklistChunk = &pPma->pBlacklistChunks[count];
        if (pBlacklistChunk->physOffset == physAddr)
        {
            return NV_FALSE;
        }
    }

    return NV_TRUE;
}