1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /*!
25  * @file ctx_buf_pool.c
26  * @brief This file defines interfaces that act as wrappers around the RM memory
27  *        pool interfaces. These interfaces are used for creating RM memory pools for
28  *        RM internal allocations like global (engine-specific) and local (context-specific)
29  *        context buffers. While client page tables are also RM internal allocations
30  *        and use RM memory pools, they DO NOT use interfaces defined in this file.
31  */
32 
33 #include "core/core.h"
34 #include "core/locks.h"
35 #include "mem_mgr/ctx_buf_pool.h"
36 #include "class/cl90f1.h"
37 #include "virtualization/hypervisor/hypervisor.h"
38 #include "vgpu/vgpu_events.h"
39 #include "gpu/mem_mgr/mem_mgr.h"
40 #include "gpu/bus/kern_bus.h"
41 #include "kernel/gpu/fifo/kernel_fifo.h"
42 #include "kernel/gpu/gr/kernel_graphics.h"
43 #include "gpu/mem_mgr/heap.h"
44 
45 /*
 * @brief Are memory pools supported for context buffers?
47  *
48  * @param[in] pGpu OBJGPU pointer
49  *
50  * @return NvBool
51  */
52 NvBool
53 ctxBufPoolIsSupported
54 (
55     OBJGPU *pGpu
56 )
57 {
58     MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
59     NvBool bCallingContextPlugin;
60     NvU32 gfid = GPU_GFID_PF;
61 
62     if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA))
63     {
64         NV_PRINTF(LEVEL_INFO, "Ctx buffers not supported in PMA\n");
65         return NV_FALSE;
66     }
67 
68     if (!memmgrIsPmaInitialized(pMemoryManager))
69     {
70         NV_PRINTF(LEVEL_INFO, "PMA is disabled. Ctx buffers will be allocated in RM reserved heap\n");
71         return NV_FALSE;
72     }
73 
74     if (IS_VIRTUAL(pGpu) || RMCFG_FEATURE_PLATFORM_GSP)
75     {
76         NV_PRINTF(LEVEL_INFO, "Guest RM/GSP don't support ctx buffers in PMA\n");
77         return NV_FALSE;
78     }
79 
80     //
81     // In virtualized env, host RM should use CtxBuffer for all allocations made
82     // on behalf of plugin or for PF usages
83     //
84     NV_ASSERT_OR_RETURN(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin) == NV_OK, NV_FALSE);
85     NV_ASSERT_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK, NV_FALSE);
86     if (hypervisorIsVgxHyper() && !bCallingContextPlugin && IS_GFID_VF(gfid))
87     {
88         NV_PRINTF(LEVEL_INFO, "ctx buffers in PMA not supported for allocations host RM makes on behalf of guest\n");
89         return NV_FALSE;
90     }
91 
92     NV_PRINTF(LEVEL_INFO, "Ctx buffer pool enabled. Ctx buffers will be allocated from PMA\n");
93     return NV_TRUE;
94 }
95 
96 /*
97  * @brief Initializes all context buffer pools for a VA space
98  *
99  * @param[in]  pGpu         OBJGPU pointer
 * @param[in]  pHeap        Pointer to the Heap object whose PMA backs this pool
101  * @param[out] ppCtxBufPool Pointer to context buffer pool
102  *
103  * @return NV_STATUS
104  */
105 NV_STATUS
106 ctxBufPoolInit
107 (
108     OBJGPU             *pGpu,
109     Heap               *pHeap,
110     CTX_BUF_POOL_INFO **ppCtxBufPool
111 )
112 {
113     NV_STATUS status = NV_OK;
114     CTX_BUF_POOL_INFO *pCtxBufPool = NULL;
115     NvU32 i, poolConfig;
116 
    NV_ASSERT_OR_RETURN(ppCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT);
    *ppCtxBufPool = NULL;

119     if (!ctxBufPoolIsSupported(pGpu))
120     {
121         return NV_OK;
122     }
123 
124     pCtxBufPool = portMemAllocNonPaged(sizeof(CTX_BUF_POOL_INFO));
125     NV_ASSERT_OR_RETURN((pCtxBufPool != NULL), NV_ERR_NO_MEMORY);
126     portMemSet(pCtxBufPool, 0, sizeof(CTX_BUF_POOL_INFO));
127 
128     //
129     // create a mem pool for each page size supported by RM
130     // pool corresponding to RM_ATTR_PAGE_SIZE_DEFAULT remains unused
131     //
132     for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++)
133     {
134         switch (i)
135         {
136             case RM_ATTR_PAGE_SIZE_DEFAULT:
137             case RM_ATTR_PAGE_SIZE_4KB:
138                 poolConfig = POOL_CONFIG_CTXBUF_4K;
139                 break;
140             case RM_ATTR_PAGE_SIZE_BIG:
141                 poolConfig = POOL_CONFIG_CTXBUF_64K;
142                 break;
143             case RM_ATTR_PAGE_SIZE_HUGE:
144                 poolConfig = POOL_CONFIG_CTXBUF_2M;
145                 break;
146             case RM_ATTR_PAGE_SIZE_512MB:
147                 poolConfig = POOL_CONFIG_CTXBUF_512M;
148                 break;
            default:
                NV_PRINTF(LEVEL_ERROR, "Unsupported page size attr %d\n", i);
                status = NV_ERR_INVALID_STATE;
                goto cleanup;
152         }
153         NV_ASSERT_OK_OR_GOTO(status,
154             rmMemPoolSetup((void*)&pHeap->pmaObject, &pCtxBufPool->pMemPool[i],
155                            poolConfig),
            cleanup);
    }
159     NV_PRINTF(LEVEL_INFO, "Ctx buf pool successfully initialized\n");
160 
161 cleanup:
162     if (status != NV_OK)
163     {
164         if (pCtxBufPool != NULL)
165         {
166             ctxBufPoolDestroy(&pCtxBufPool);
167         }
168     }
169     *ppCtxBufPool = pCtxBufPool;
170     return status;
171 }
172 
173 /*
174  * @brief Destroys all context buffer pools for a VA space
175  *
 * @param[in/out] ppCtxBufPool Pointer to context buffer pool (cleared on return)
177  *
178  * @return
179  */
180 void
181 ctxBufPoolDestroy
182 (
183     CTX_BUF_POOL_INFO **ppCtxBufPool
184 )
185 {
186     NvU32 i;
187     CTX_BUF_POOL_INFO *pCtxBufPool;
188     NV_ASSERT((ppCtxBufPool != NULL) && (*ppCtxBufPool != NULL));
189     if ((ppCtxBufPool == NULL) || (*ppCtxBufPool == NULL))
190     {
191         NV_PRINTF(LEVEL_ERROR, "Ctx buf pool doesn't exist\n");
192         return;
193     }
194 
195     pCtxBufPool = *ppCtxBufPool;
196 
197     for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++)
198     {
199         if (pCtxBufPool->pMemPool[i] != NULL)
200         {
201             rmMemPoolDestroy(pCtxBufPool->pMemPool[i]);
202             pCtxBufPool->pMemPool[i] = NULL;
203         }
204     }
205     portMemFree(pCtxBufPool);
206     *ppCtxBufPool = NULL;
207     NV_PRINTF(LEVEL_INFO, "Ctx buf pool destroyed\n");
208 }
209 
210 /*
 * @brief Calculates the total memory required for all buffers in each pool and reserves it
 *
 * Q. Why do we need a separate function to calculate the memory required when we already know the size of every buffer?
 * A. The memory required to allocate a buffer depends on three things: size, page size and alignment.
 *    Context buffers have no alignment requirements of their own, so alignment follows directly from page size.
 *    Page size is determined from the buffer's size and its RM_ATTR_PAGE_SIZE parameter.
 *    Once we have the page size, we can align the buffer size accordingly and route the buffer to the correct pool.
 *    Buffers differ in size and attr, so they get different page sizes and accordingly land in different pools.
 *    Today, alignment is determined at alloc time (inside heapAlloc) and page size usually at map time.
 *    We use the same algorithm (memmgrDeterminePageSize) below to determine the alignment and page size of each buffer.
 *
 * Q. Why do we need a list of context buffers?
 * A. Reserving memory from PMA requires dropping the GPU lock, while determining
 *    page size requires holding it, since global state is accessed there.
 *    So we first calculate, under the GPU lock, the memory requirement of each pool
 *    based on which buffers will eventually end up in it, then drop the lock and
 *    reserve memory for each pool.
 *    This avoids acquiring and dropping the lock, and calling into PMA, once per buffer.
 *
 * @param[in] pGpu             OBJGPU pointer
 * @param[in] pCtxBufPool      Pointer to context buffer pool
 * @param[in] pBufInfoList     List of context buffers to reserve memory for
 * @param[in] bufCount         Number of buffers to reserve memory for
234  *
235  * @return NV_STATUS
236  */
237 NV_STATUS
238 ctxBufPoolReserve
239 (
240     OBJGPU            *pGpu,
241     CTX_BUF_POOL_INFO *pCtxBufPool,
242     CTX_BUF_INFO      *pBufInfoList,
243     NvU32              bufCount
244 )
245 {
246     NV_STATUS status = NV_OK;
247     NvU64 pageSize;
248     NvU32 i;
249     NvU64 totalSize[RM_ATTR_PAGE_SIZE_INVALID] = {0};
250     NvU64 size;
251 
252     NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT);
253     NV_ASSERT_OR_RETURN(pBufInfoList != NULL, NV_ERR_INVALID_ARGUMENT);
254     NV_ASSERT_OR_RETURN(bufCount > 0, NV_ERR_INVALID_ARGUMENT);
255 
256     for (i = 0; i < bufCount; i++)
257     {
258         size = pBufInfoList[i].size;
259 
260         // update size and pageSize based on buffer alignment requirement and buffer size
261         NV_ASSERT_OK_OR_RETURN(ctxBufPoolGetSizeAndPageSize(pCtxBufPool, pGpu,
262             pBufInfoList[i].align, pBufInfoList[i].attr, pBufInfoList[i].bContig, &size, &pageSize));
263 
264         //
265         // Determine the pool(4K/64K/2M) from where this buffer will eventually
266         // get allocated and mark that pool to reserve this memory.
267         //
268         switch(pageSize)
269         {
270             case RM_PAGE_SIZE:
271                 totalSize[RM_ATTR_PAGE_SIZE_4KB] += size;
272                 break;
273             case RM_PAGE_SIZE_64K:
274             case RM_PAGE_SIZE_128K:
275                 totalSize[RM_ATTR_PAGE_SIZE_BIG] += size;
276                 break;
277             case RM_PAGE_SIZE_HUGE:
278                 totalSize[RM_ATTR_PAGE_SIZE_HUGE] += size;
279                 break;
280             case RM_PAGE_SIZE_512M:
281                 totalSize[RM_ATTR_PAGE_SIZE_512MB] += size;
282                 break;
283             default:
284                 NV_PRINTF(LEVEL_ERROR, "Unrecognized/unsupported page size = 0x%llx\n", pageSize);
285                 NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT);
286         }
287         NV_PRINTF(LEVEL_INFO, "Reserving 0x%llx bytes for buf Id = 0x%x in pool with page size = 0x%llx\n", size, i, pageSize);
288     }
289 
290     for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++)
291     {
292         if (totalSize[i] > 0)
293         {
294             NV_ASSERT_OK_OR_GOTO(status, rmMemPoolReserve(pCtxBufPool->pMemPool[i], totalSize[i], 0), done);
295             NV_PRINTF(LEVEL_INFO, "Reserved 0x%llx bytes in pool with RM_ATTR_PAGE_SIZE_* = 0x%x\n", totalSize[i], i);
296         }
297     }
298 
299 done:
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "Failed to reserve memory. Trimming all pools\n");
        NV_ASSERT_OK(ctxBufPoolTrim(pCtxBufPool));
    }
    return status;
}
308 
309 /*
 * @brief Trims excess memory from context buffer pools
311  *
312  * @param[in] pCtxBufPool Pointer to context buffer pool
313  *
314  * @return NV_STATUS
315  */
316 NV_STATUS
317 ctxBufPoolTrim
318 (
319     CTX_BUF_POOL_INFO *pCtxBufPool
320 )
321 {
322     NvU32 i;
323     NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT);
324 
325     for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++)
326     {
327         rmMemPoolTrim(pCtxBufPool->pMemPool[i], 0, 0);
328         NV_PRINTF(LEVEL_INFO, "Trimmed pool with RM_ATTR_PAGE_SIZE_* = 0x%x\n", i);
329     }
330     return NV_OK;
331 }
332 
333 /*
334  * @brief Releases all memory from context buffer pools
335  *
 *        If a pool still has pending allocations, its memory is not
 *        released.
338  *
339  * @param[in] pCtxBufPool Pointer to context buffer pool
340  *
341  * @return
342  */
343 void
344 ctxBufPoolRelease
345 (
346     CTX_BUF_POOL_INFO *pCtxBufPool
347 )
348 {
349     NvU32 i;
    NV_ASSERT_OR_RETURN_VOID(pCtxBufPool != NULL);
351 
352     for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++)
353     {
354         rmMemPoolRelease(pCtxBufPool->pMemPool[i], 0);
355     }
356 }
357 
358 /*
359  * @brief Allocates memory from context buffer pools
360  *
361  * @param[in] pCtxBufPool Pointer to context buffer pool
362  * @param[in] pMemDesc    Pointer to context buffer memory descriptor
363  *
364  * @return NV_STATUS
365  */
366 NV_STATUS
367 ctxBufPoolAllocate
368 (
369     CTX_BUF_POOL_INFO *pCtxBufPool,
370     PMEMORY_DESCRIPTOR pMemDesc
371 )
372 {
373     RM_POOL_ALLOC_MEM_RESERVE_INFO *pPool = NULL;
374     NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT);
375     NV_ASSERT_OR_RETURN(pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT);
376 
377     NV_ADDRESS_SPACE addrSpace = memdescGetAddressSpace(pMemDesc);
378     if (addrSpace != ADDR_FBMEM)
379     {
380         NV_PRINTF(LEVEL_ERROR, "ctx buf pool is only used for buffers to be allocated in FB\n"
381                                "SYSMEM buffers don't need memory to be pre-reserved in pool\n");
382         return NV_ERR_INVALID_ARGUMENT;
383     }
384 
    //
    // If no page size is set, or the buffer is contiguous, recompute size and
    // page size from the memdesc's actual size and alignment
    //
386     NvU64 pageSize = memdescGetPageSize(pMemDesc, AT_GPU);
387     if ((pageSize == 0) || (memdescGetContiguity(pMemDesc, AT_GPU)))
388     {
389         NvU64 newPageSize;
390         NV_ASSERT_OK_OR_RETURN(ctxBufPoolGetSizeAndPageSize(pCtxBufPool, pMemDesc->pGpu,
391             pMemDesc->Alignment, RM_ATTR_PAGE_SIZE_DEFAULT, memdescGetContiguity(pMemDesc, AT_GPU),
392            &pMemDesc->ActualSize, &newPageSize));
393 
394         //
395         // Update the page size in the memdesc only if it isn't set already.
396         // This is ok as we get new page size only if no page size was set or if the buffer is contiguous or both.
397         // For physically contig buffers, PA address in the memdesc remains the same irrespective of page size.
398         // For such buffers, if pageSize was already set then we don't want to change it as it can change the way
399         // buffers are mapped by RM vs HW(this is specifically applicable to main GR ctx buffer)
400         //
401         if (pageSize == 0)
402         {
403             memdescSetPageSize(pMemDesc, AT_GPU, newPageSize);
404             NV_PRINTF(LEVEL_INFO, "Ctx buffer page size set to 0x%llx\n", newPageSize);
405         }
406         pageSize = newPageSize;
407     }
408 
    // Determine the pool (4K/64K/2M/512M) from which this buffer is to be allocated
    switch (pageSize)
411     {
412         case RM_PAGE_SIZE:
413             pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_4KB];
414             break;
415         case RM_PAGE_SIZE_64K:
416         case RM_PAGE_SIZE_128K:
417             pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_BIG];
418             break;
419         case RM_PAGE_SIZE_HUGE:
420             pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_HUGE];
421             break;
422         case RM_PAGE_SIZE_512M:
423             pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_512MB];
424             break;
425         default:
426             NV_PRINTF(LEVEL_ERROR, "Unsupported page size = 0x%llx set for context buffer\n", pageSize);
427             NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT);
428     }
429     NV_ASSERT_OK_OR_RETURN(rmMemPoolAllocate(pPool, (RM_POOL_ALLOC_MEMDESC*)pMemDesc));
430     NV_PRINTF(LEVEL_INFO, "Buffer allocated from ctx buf pool with page size = 0x%llx\n", pageSize);
431     return NV_OK;
432 }
433 
434 /*
435  * @brief Frees memory from context buffer pools
436  *
437  * @param[in] pCtxBufPool Pointer to context buffer pool
438  * @param[in] pMemDesc    Pointer to context buffer memory descriptor
439  *
440  * @return NV_STATUS
441  */
442 NV_STATUS
443 ctxBufPoolFree
444 (
445     CTX_BUF_POOL_INFO *pCtxBufPool,
446     PMEMORY_DESCRIPTOR pMemDesc
447 )
448 {
449     RM_POOL_ALLOC_MEM_RESERVE_INFO *pPool = NULL;
450     NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT);
451     NV_ASSERT_OR_RETURN(pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT);
452 
453     NvU64 pageSize = memdescGetPageSize(pMemDesc, AT_GPU);
454 
455     //
456     // If buffer is contiguous, then it may or may not be allocated from the same pool
457     // as its page size. (see ctxBufPoolAllocate)
458     // In such case, determine the size of buffer as done during allocation to route the
459     // free to correct pool.
460     //
461     if (memdescGetContiguity(pMemDesc, AT_GPU))
462     {
463         NvU64 size = pMemDesc->ActualSize;
464         NV_ASSERT_OK_OR_RETURN(ctxBufPoolGetSizeAndPageSize(pCtxBufPool, pMemDesc->pGpu,
465             pMemDesc->Alignment, RM_ATTR_PAGE_SIZE_DEFAULT, NV_TRUE, &size, &pageSize));
466     }
467 
    switch (pageSize)
469     {
470         case RM_PAGE_SIZE:
471             pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_4KB];
472             break;
473         case RM_PAGE_SIZE_64K:
474         case RM_PAGE_SIZE_128K:
475             pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_BIG];
476             break;
477         case RM_PAGE_SIZE_HUGE:
478             pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_HUGE];
479             break;
480         case RM_PAGE_SIZE_512M:
481             pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_512MB];
482             break;
483         default:
484             NV_PRINTF(LEVEL_ERROR, "Unsupported page size detected for context buffer\n");
485             NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE);
486     }
487 
    // If the PMA scrubber is being skipped, we must scrub this memory manually
489     if (rmMemPoolIsScrubSkipped(pPool))
490     {
491         OBJGPU *pGpu = pMemDesc->pGpu;
492         NvU8   *pMem = kbusMapRmAperture_HAL(pGpu, pMemDesc);
493         if (pMem == NULL)
494         {
495             NV_PRINTF(LEVEL_ERROR, "Failed to BAR2 map memdesc. memory won't be scrubbed\n");
496             NV_ASSERT(pMem != NULL);
497         }
498         else
499         {
500             portMemSet(pMem, 0, (pMemDesc->PageCount * RM_PAGE_SIZE));
501             kbusUnmapRmAperture_HAL(pGpu, pMemDesc, &pMem, NV_TRUE);
502         }
503     }
504     rmMemPoolFree(pPool, (RM_POOL_ALLOC_MEMDESC*)pMemDesc, 0);
505 
506     NV_PRINTF(LEVEL_INFO, "Buffer freed from ctx buf pool with page size = 0x%llx\n", pageSize);
507     return NV_OK;
508 }
509 
510 /*
 * @brief Returns the memory pool for global buffers (runlists, GR global buffers, etc.)
512  *
513  * @param[in]  pGpu          OBJGPU pointer
514  * @param[in]  bufId         Id to identify the buffer
515  * @param[in]  rmEngineType  RM Engine Type
516  * @param[out] ppCtxBufPool  Pointer to context buffer pool
517  *
518  * @return NV_STATUS
519  */
520 NV_STATUS
521 ctxBufPoolGetGlobalPool
522 (
523     OBJGPU *pGpu,
524     CTX_BUF_ID bufId,
525     RM_ENGINE_TYPE rmEngineType,
526     CTX_BUF_POOL_INFO **ppCtxBufPool
527 )
528 {
529     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
530     CTX_BUF_POOL_INFO *pCtxBufPool = NULL;
531 
532     NV_ASSERT_OR_RETURN(ppCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT);
533     NV_ASSERT_OR_RETURN(RM_ENGINE_TYPE_IS_VALID(rmEngineType), NV_ERR_INVALID_ARGUMENT);
534 
535     switch (bufId)
536     {
537         case CTX_BUF_ID_RUNLIST:
538             pCtxBufPool = kfifoGetRunlistBufPool(pGpu, pKernelFifo, rmEngineType);
539             break;
        case CTX_BUF_ID_GR_GLOBAL:
        {
            // Validate the engine type before using it to derive a GR index
            NV_ASSERT_OR_RETURN(RM_ENGINE_TYPE_IS_GR(rmEngineType), NV_ERR_INVALID_ARGUMENT);
            KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, RM_ENGINE_TYPE_GR_IDX(rmEngineType));
            pCtxBufPool = kgraphicsGetCtxBufPool(pGpu, pKernelGraphics);
            break;
        }
547         default:
548             NV_PRINTF(LEVEL_ERROR, "Invalid buf Id = 0x%x requested\n", bufId);
549             return NV_ERR_INVALID_ARGUMENT;
550     }
551     *ppCtxBufPool = pCtxBufPool;
552     return NV_OK;
553 }
554 
555 /*
 * @brief Get updated buffer size and page size for a context buffer
 *
 * @param[in]      pCtxBufPool  Pointer to context buffer pool
 * @param[in]      pGpu         OBJGPU pointer
 * @param[in]      alignment    Expected buffer alignment
 * @param[in]      attr         Page size attribute for buffer
 * @param[in]      bContig      Is buffer physically contiguous
 * @param[in/out]  pSize        Size of buffer
 * @param[out]     pPageSize    Page size for buffer
564  *
565  * @return NV_STATUS
566  */
567 NV_STATUS
568 ctxBufPoolGetSizeAndPageSize
569 (
570     CTX_BUF_POOL_INFO *pCtxBufPool,
571     OBJGPU            *pGpu,
572     NvU64              alignment,
573     RM_ATTR_PAGE_SIZE  attr,
574     NvBool             bContig,
575     NvU64             *pSize,
576     NvU64             *pPageSize
577 )
578 {
579     MemoryManager          *pMemoryManager      = GPU_GET_MEMORY_MANAGER(pGpu);
580     NV_STATUS               status              = NV_OK;
581     NvU64                   pageSize            = 0;
582     NvU32                   allocFlags          = 0;
583     NvU32                   retAttr             = 0;
584     NvU32                   retAttr2            = 0;
585     NvU64                   size                = 0;
586 
587     NV_ASSERT_OR_RETURN(pSize != NULL, NV_ERR_INVALID_ARGUMENT);
588     NV_ASSERT_OR_RETURN(pPageSize != NULL, NV_ERR_INVALID_ARGUMENT);
589 
590     size = *pSize;
591     retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, retAttr);
592 
593     switch (attr)
594     {
595         case RM_ATTR_PAGE_SIZE_DEFAULT:
596             retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT, retAttr);
597             break;
598         case RM_ATTR_PAGE_SIZE_4KB:
599             retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, retAttr);
600             break;
601         case RM_ATTR_PAGE_SIZE_BIG:
602             retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _BIG, retAttr);
603             break;
604         case RM_ATTR_PAGE_SIZE_HUGE:
605             retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, retAttr);
606             retAttr2 = FLD_SET_DRF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _2MB, retAttr2);
607             break;
608         case RM_ATTR_PAGE_SIZE_512MB:
609             retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, retAttr);
610             retAttr2 = FLD_SET_DRF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _512MB, retAttr2);
611             break;
612         default:
613             NV_PRINTF(LEVEL_ERROR, "unsupported page size attr\n");
614             return NV_ERR_NOT_SUPPORTED;
615     }
616 
617     // Update the size of buffer based on requested alignment
618     {
619         NvU32 tempAttr = 0;
620         NvU64 tempAlign = alignment;
621 
622         if (attr == RM_ATTR_PAGE_SIZE_DEFAULT)
623             tempAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, retAttr);
624         else
625             tempAttr = retAttr;
626 
627         status = memmgrAllocDetermineAlignment_HAL(pGpu, pMemoryManager, &size, &tempAlign, 0,
628                                                    allocFlags, tempAttr, retAttr2, 0);
629         if (status != NV_OK)
630         {
631             NV_PRINTF(LEVEL_ERROR, "Couldn't determine buffer alignment\n");
632             DBG_BREAKPOINT();
633             return status;
634         }
635     }
636 
637     //
638     // If buffer needs to be allocated contiguously then we need to route it to a pool
639     // whose chunk size >= buffer size
640     //
641     if (bContig)
642     {
643         NvU64 chunkSize = 0;
644         NvU32 i;
645         for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++)
646         {
647             NV_ASSERT_OK_OR_RETURN(rmMemPoolGetChunkAndPageSize(pCtxBufPool->pMemPool[i], &chunkSize, &pageSize));
648             if (chunkSize >= size)
649             {
650                 size = chunkSize;
651                 break;
652             }
653         }
654         if (i == RM_ATTR_PAGE_SIZE_INVALID)
655         {
656             NV_PRINTF(LEVEL_ERROR, "couldn't find pool with chunksize >= 0x%llx\n", size);
657             DBG_BREAKPOINT();
658             status = NV_ERR_NO_MEMORY;
659             return status;
660         }
661     }
662     else
663     {
664         // Determine page size based on updated buffer size
665         pageSize = memmgrDeterminePageSize(pMemoryManager, 0, size, NVOS32_ATTR_FORMAT_PITCH,
666                                            allocFlags, &retAttr, &retAttr2);
667     }
668 
669     // make sure we get a valid page size and alignment is taken care of
    if ((pageSize == 0) || (pageSize < alignment))
671     {
672         NV_PRINTF(LEVEL_ERROR, "Incorrect page size determination\n");
673         DBG_BREAKPOINT();
674         status = NV_ERR_INVALID_STATE;
675         return status;
676     }
677 
678     // Align up buffer size based on page size
679     size = NV_ALIGN_UP64(size, pageSize);
680 
681     *pPageSize = pageSize;
682     *pSize = size;
683     NV_PRINTF(LEVEL_INFO, "Buffer updated size = 0x%llx with page size = 0x%llx\n", size, pageSize);
684     return status;
685 }
686 
687 /*
 * @brief Is scrubbing skipped for allocations in this ctx buf pool?
689  *
690  * @param[in] pCtxBufPool  Pointer to context buffer pool
691  *
692  * @return NvBool
693  */
694 NvBool
695 ctxBufPoolIsScrubSkipped
696 (
697     CTX_BUF_POOL_INFO *pCtxBufPool
698 )
699 {
700     NvU32 i;
    // This function returns NvBool, so it must not return an NV_STATUS code
    NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_FALSE);
702     for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++)
703     {
704         if (!rmMemPoolIsScrubSkipped(pCtxBufPool->pMemPool[i]))
705             return NV_FALSE;
706     }
707 
708     return NV_TRUE;
709 }
710 
711 /*
712  * @brief Set ctx buf pool to skip scrub for all its allocations
713  *
714  * @param[in] pCtxBufPool  Pointer to context buffer pool
715  * @param[in] bSkipScrub   Should scrubbing be skipped
716  *
717  */
718 void
719 ctxBufPoolSetScrubSkip
720 (
721     CTX_BUF_POOL_INFO *pCtxBufPool,
722     NvBool             bSkipScrub
723 )
724 {
725     NvU32 i;
726     NV_ASSERT_OR_RETURN_VOID(pCtxBufPool != NULL);
727     for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++)
728     {
729         rmMemPoolSkipScrub(pCtxBufPool->pMemPool[i], bSkipScrub);
730     }
731 }
732