1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /********************************* VMA Manager *****************************\
25 *                                                                           *
*   The VirtMemAllocator is managed in this module.  All privileged         *
27 *   state and object interaction is handled here.                           *
28 *                                                                           *
29 ****************************************************************************/
30 
31 #include "core/core.h"
32 #include "gpu/gpu.h"
33 #include "gpu/mem_mgr/virt_mem_allocator.h"
34 #include "rmapi/control.h"
35 #include "gpu/mem_mgr/virt_mem_allocator_common.h"
36 #include "os/os.h"
37 #include "core/system.h"
38 #include "gpu/mem_mgr/mem_mgr.h"
39 #include "gpu/mem_sys/kern_mem_sys.h"
40 #include "diagnostics/profiler.h"
41 #include "mem_mgr/vaspace.h"
42 #include "mem_mgr/gpu_vaspace.h"
43 #include "mem_mgr/virtual_mem.h"
44 #include "class/cl0000.h"
45 #include "class/cl90f1.h" // FERMI_VASPACE_A
46 #include "ctrl/ctrl0080/ctrl0080dma.h"
47 #include "ctrl/ctrl208f/ctrl208fdma.h"
48 #include "vgpu/rpc.h"
49 #include "core/locks.h"
50 #include "virtualization/kernel_hostvgpudeviceapi.h"
51 #include "gpu/subdevice/subdevice_diag.h"
52 #include "gpu/device/device.h"
53 #include "gpu/subdevice/subdevice.h"
54 #include "gpu/bus/kern_bus.h"
55 
56 /*!
57  * @brief Allocate mapping.
58  *
59 * @todo Update function comment.
60  * Please update function description and argument comments
61  * if you do understand what function does and arguments mean.
62  * Below is just a template for you.
63  *
64  * @param[in] pGpu              OBJGPU pointer
65  * @param[in] pDma              VirtMemAllocator pointer
66  * @param[in] pVirtualMemory    VirtualMemory pointer
67  * @param[in] pMemory           Memory object to map
68  * @param[in] pDmaMappingInfo   CLI_DMA_MAPPING_INFO pointer
69  *
70  * @returns NV_STATUS status = NV_OK on success, or status upon failure.
71  */
72 NV_STATUS
73 dmaAllocMap_IMPL
74 (
75     OBJGPU               *pGpu,
76     VirtMemAllocator     *pDma,
77     OBJVASPACE           *pVAS,
78     VirtualMemory        *pVirtualMemory,
79     Memory               *pMemory,
80     CLI_DMA_MAPPING_INFO *pDmaMappingInfo
81 )
82 {
83     KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
84     NV_STATUS         status = NV_OK;
85     NvU32             p2p;
86     NvU64             vaddr;
87     NvU32             dmaAllocMapFlag;
88     NvU64             baseVirtAddr;
89     NvU64             virtSize;
90     NvU32             swizzId = KMIGMGR_SWIZZID_INVALID;
91 
92     CLI_DMA_ALLOC_MAP_INFO  mapInfo;
93 
94     NV_ASSERT(pVirtualMemory != NULL);
95 
96     p2p = DRF_VAL(OS46, _FLAGS, _P2P_ENABLE, pDmaMappingInfo->Flags);
97 
98     //
99     // By default any fabric memory should be mapped as peer memory. So, don't honor
100     // any P2P flags.
101     //
102     if (
103         (memdescGetAddressSpace(pDmaMappingInfo->pMemDesc) == ADDR_FABRIC_MC) ||
104         (memdescGetAddressSpace(pDmaMappingInfo->pMemDesc) == ADDR_FABRIC_V2))
105     {
106         p2p  = 0;
107     }
108 
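    //
    // For P2P mappings that target a specific subdevice under SLI,
    // temporarily retarget pGpu to that subdevice and drop to unicast so
    // the mapping is created only there; broadcast is restored below.
    //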
109     if ((p2p == NVOS46_FLAGS_P2P_ENABLE_NOSLI) && IsSLIEnabled(pGpu))
110     {
111         NvU32 deviceInst  = gpuGetDeviceInstance(pGpu);
112         NvU32 subDevIdTgt = DRF_VAL(OS46, _FLAGS, _P2P_SUBDEV_ID_TGT, pDmaMappingInfo->Flags);
113 
114         pGpu = gpumgrGetGpuFromSubDeviceInst(deviceInst, subDevIdTgt);
115         gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
116         pDma = GPU_GET_DMA(pGpu);
117     }
118 
119     //
120     // Temporarily set _DMA_UNICAST_REUSE_ALLOC for NV50_MEMORY_VIRTUAL since that
121     // class has already assigned VA space and allocated PTEs.
122     //
123     dmaAllocMapFlag = pDmaMappingInfo->Flags;
124     if (pVirtualMemory->bReserveVaOnAlloc)
125         dmaAllocMapFlag = FLD_SET_DRF(OS46, _FLAGS, _DMA_UNICAST_REUSE_ALLOC, _TRUE, dmaAllocMapFlag);
126 
127     //
128     // Calculate the virtual address of the mapping.
129     //
130     virtmemGetAddressAndSize(pVirtualMemory, &baseVirtAddr, &virtSize);
131     if (FLD_TEST_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _TRUE, pDmaMappingInfo->Flags))
132     {
133         // Fixed offset indicates an absolute virtual address.
134         vaddr = pDmaMappingInfo->DmaOffset;
135     }
136     else
137     {
138         // Otherwise the offset is relative to the target virtual allocation.
139         vaddr = baseVirtAddr + pDmaMappingInfo->DmaOffset;
140     }
141 
142     //
143     // Check the result is within the bounds of the target virtual allocation.
144     //
145     // Only perform this check for mappings to existing virtual memory.
146     // For CTXDMA case this check is meaningless since the [IN] dmaOffset will be garbage.
147     //
148     if (FLD_TEST_DRF(OS46, _FLAGS, _DMA_UNICAST_REUSE_ALLOC, _TRUE, dmaAllocMapFlag))
149     {
150         NV_ASSERT_OR_RETURN(vaddr >= baseVirtAddr, NV_ERR_INVALID_OFFSET);
151         NV_ASSERT_OR_RETURN(vaddr < (baseVirtAddr + virtSize), NV_ERR_INVALID_OFFSET);
152     }
153 
154     mapInfo.pVirtualMemory  = pVirtualMemory;
155     mapInfo.pMemory         = pMemory;
156     mapInfo.pDmaMappingInfo = pDmaMappingInfo;
157 
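    //
    // Under MIG memory partitioning, tag the mapping with the swizzle ID
    // of the client's GPU instance.
    //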
158     if ((pKernelMIGManager != NULL) && kmigmgrIsMIGMemPartitioningEnabled(pGpu, pKernelMIGManager))
159     {
160         NvHandle hClient = RES_GET_CLIENT_HANDLE(pVirtualMemory);
161         MIG_INSTANCE_REF ref;
162 
163         NV_ASSERT_OK_OR_RETURN(kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref));
164         swizzId = ref.pKernelMIGGpuInstance->swizzId;
165     }
166 
167     status = dmaAllocMapping_HAL(pGpu,
168                                  pDma,
169                                  pVAS,
170                                  pDmaMappingInfo->pMemDesc,
171                                  &vaddr,
172                                  dmaAllocMapFlag,  // Use locally updated dmaAllocMapFlag
173                                  &mapInfo,
174                                  swizzId);
175 
176     if (status == NV_OK)
177     {
178         pDmaMappingInfo->DmaOffset = vaddr;
179     }
180 
181     if ((p2p == NVOS46_FLAGS_P2P_ENABLE_NOSLI) && IsSLIEnabled(pGpu))
182     {
183         pGpu = gpumgrGetParentGPU(pGpu);
184         gpumgrSetBcEnabledStatus(pGpu, NV_TRUE);
185     }
186 
187     return status;
188 }
189 
190 /*!
191  * @brief Free mapping.
192  *
193  * @todo Update function comment.
194  * Please update function description and argument comments
195  * if you do understand what function does and arguments mean.
196  * Below is just a template for you.
197  *
198  * @param[in] pGpu              OBJGPU pointer
199  * @param[in] pDma              VirtMemAllocator pointer
200  * @param[in] pVirtualMemory    VirtualMemory pointer
201  * @param[in] pDmaMappingInfo   CLI_DMA_MAPPING_INFO pointer
202  *
203  * @returns NV_STATUS status = NV_OK on success, or status upon failure.
204  */
205 NV_STATUS
206 dmaFreeMap_IMPL
207 (
208     OBJGPU               *pGpu,
209     VirtMemAllocator     *pDma,
210     OBJVASPACE           *pVAS,
211     VirtualMemory        *pVirtualMemory,
212     CLI_DMA_MAPPING_INFO *pDmaMappingInfo,
213     NvU32                 flags
214 )
215 {
216     NV_STATUS status = NV_OK;
217     NvU32     p2p, subDevIdTgt;
218     NvU32     deviceInst = gpuGetDeviceInstance(pGpu);
219     CLI_DMA_ALLOC_MAP_INFO mapInfo;
220 
    p2p = DRF_VAL(OS46, _FLAGS, _P2P_ENABLE, pDmaMappingInfo->Flags);
222     subDevIdTgt = DRF_VAL(OS46, _FLAGS, _P2P_SUBDEV_ID_TGT, pDmaMappingInfo->Flags);
223 
224     if ((p2p == NVOS46_FLAGS_P2P_ENABLE_NOSLI) && IsSLIEnabled(pGpu))
225     {
226         pGpu = gpumgrGetGpuFromSubDeviceInst(deviceInst, subDevIdTgt);
227         gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
228         pDma = GPU_GET_DMA(pGpu);
229     }
230 
231     mapInfo.pVirtualMemory  = pVirtualMemory;
232     mapInfo.pMemory         = NULL;
233     mapInfo.pDmaMappingInfo = pDmaMappingInfo;
234 
    // Free the mapping via the DMA HAL.
236     status = dmaFreeMapping_HAL(pGpu, pDma, pVAS, pDmaMappingInfo->DmaOffset,
237                                 pDmaMappingInfo->pMemDesc, flags, &mapInfo);
238 
239     if ((p2p == NVOS46_FLAGS_P2P_ENABLE_NOSLI) && IsSLIEnabled(pGpu))
240     {
241         pGpu = gpumgrGetParentGPU(pGpu);
242         gpumgrSetBcEnabledStatus(pGpu, NV_TRUE);
243     }
244 
245     return status;
246 }
247 
248 //
249 // deviceCtrlCmdDmaGetPteInfo_IMPL
250 //
251 // Lock Requirements:
252 //      Assert that API lock and GPUs lock held on entry
253 //
254 NV_STATUS
255 deviceCtrlCmdDmaGetPteInfo_IMPL
256 (
257     Device *pDevice,
258     NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams
259 )
260 {
261     OBJGPU         *pGpu = GPU_RES_GET_GPU(pDevice);
262     OBJVASPACE     *pVAS = NULL;
263     NV_STATUS       status = NV_OK;
264     CALL_CONTEXT   *pCallContext = resservGetTlsCallContext();
265     RmCtrlParams   *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams;
266 
267     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
268 
269     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
270                           vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), pRmCtrlParams->hObject,
271                                                             pParams->hVASpace, &pVAS));
272 
273     status = vaspaceGetPteInfo(pVAS, pGpu, pParams, NULL);
274     if (status != NV_OK)
275     {
276         NV_PRINTF(LEVEL_ERROR, "vaspaceGetPteInfo failed\n");
277     }
278 
279     return status;
280 }
281 
282 //
283 // deviceCtrlCmdDmaUpdatePde2_IMPL
284 //
285 // Lock Requirements:
286 //      Assert that API lock and GPUs lock held on entry
287 //
288 NV_STATUS
289 deviceCtrlCmdDmaUpdatePde2_IMPL
290 (
291     Device *pDevice,
292     NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *pParams
293 )
294 {
    OBJGPU      *pGpu       = GPU_RES_GET_GPU(pDevice);
296     OBJVASPACE  *pVAS       = NULL;
297     NV_STATUS    status     = NV_OK;
298     NvBool       bBcState   = NV_TRUE;
299     CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
300 
301     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
302 
    if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL)
    {
        return NV_ERR_INSUFFICIENT_PERMISSIONS;
    }
307 
308     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
309     {
310         NV_RM_RPC_UPDATE_PDE_2(pGpu, RES_GET_CLIENT_HANDLE(pDevice), RES_GET_HANDLE(pDevice), pParams, status);
311         return status;
312     }
313 
314     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
315                           vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice),
316                                                             pParams->hVASpace, &pVAS));
317 
318     // Force to UC if client passed in sub-device handle.
319     if (0 != pParams->subDeviceId)
320     {
321         bBcState = gpumgrGetBcEnabledStatus(pGpu);
322 
323         pGpu = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu),
324                                              pParams->subDeviceId - 1);
325         gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
326     }
327 
328     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
329     {
330         OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE);
331         if (pGVAS == NULL)
332         {
333             status = NV_ERR_NOT_SUPPORTED;
334             SLI_LOOP_BREAK;
335         }
336         status = gvaspaceUpdatePde2(pGVAS, pGpu, pParams);
337         if (status != NV_OK)
338         {
339             SLI_LOOP_BREAK;
340         }
341     }
342     SLI_LOOP_END
343 
344     // Restore BC if required.
345     if (0 != pParams->subDeviceId)
346     {
347         gpumgrSetBcEnabledStatus(pGpu, bBcState);
348     }
349 
350     return status;
351 }
352 
353 //
354 // deviceCtrlCmdDmaSetVASpaceSize_IMPL
355 //
356 // Lock Requirements:
357 //      Assert that API lock and GPUs lock held on entry
358 //
359 NV_STATUS
360 deviceCtrlCmdDmaSetVASpaceSize_IMPL
361 (
362     Device *pDevice,
363     NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS *pParams
364 )
365 {
366     OBJGPU          *pGpu    = GPU_RES_GET_GPU(pDevice);
367     OBJVASPACE      *pVAS    = NULL;
368 
369     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
370 
371     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
372                           vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice),
373                                                             pParams->hVASpace, &pVAS));
374 
375     //
376     // vGPU:
377     //
378     // Since vGPU does all real hardware management in the
    // host, if we are in non SRIOV (legacy) guest RM, do an
380     // RPC to the host to do the hardware update.
381     //
382     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
383     {
384         CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
385         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams;
386         NV_STATUS     status = NV_OK;
387 
388         NV_RM_RPC_CONTROL(pGpu,
389                           pRmCtrlParams->hClient,
390                           pRmCtrlParams->hObject,
391                           pRmCtrlParams->cmd,
392                           pRmCtrlParams->pParams,
393                           pRmCtrlParams->paramsSize,
394                           status);
395         if (status != NV_OK)
396         {
397             return status;
398         }
399     }
400 
401     OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE);
402     NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_NOT_SUPPORTED);
403     NV_ASSERT_OK_OR_RETURN(gvaspaceResize(pGVAS, pParams));
404 
405     return NV_OK;
406 }
407 
408 //
409 // deviceCtrlCmdDmaSetPageDirectory_IMPL
410 //
411 // Lock Requirements:
412 //      Assert that API lock and GPUs lock held on entry
413 //
414 NV_STATUS
415 deviceCtrlCmdDmaSetPageDirectory_IMPL
416 (
417     Device *pDevice,
418     NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams
419 )
420 {
421     NvHandle        hClient     = RES_GET_CLIENT_HANDLE(pDevice);
422     NvHandle        hDevice     = RES_GET_HANDLE(pDevice);
423     OBJGPU         *pGpu        = GPU_RES_GET_GPU(pDevice);
424     OBJVASPACE     *pVAS;
425     NV_STATUS       status      = NV_OK;
426     NvBool          bBcState    = NV_FALSE;
427 
428     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
429 
430     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
431     {
432         NV_RM_RPC_SET_PAGE_DIRECTORY(pGpu, hClient, hDevice, pParams, status);
433         if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || status != NV_OK)
434         {
435             return status;
436         }
437     }
438 
439     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
440                           vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), hDevice,
441                                                             pParams->hVASpace, &pVAS));
442 
443     // Force to UC if client passed in sub-device handle.
444     if (0 != pParams->subDeviceId)
445     {
446         bBcState = gpumgrGetBcEnabledStatus(pGpu);
447 
448         pGpu = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu),
449                                              pParams->subDeviceId - 1);
450         gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
451     }
452 
453 
454     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
455     {
456         OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE);
457         if (pGVAS == NULL)
458         {
459             status = NV_ERR_NOT_SUPPORTED;
460             SLI_LOOP_BREAK;
461         }
462         status = gvaspaceExternalRootDirCommit(pGVAS, hClient, pGpu, pParams);
463         if (status != NV_OK)
464         {
465             SLI_LOOP_BREAK;
466         }
467     }
468     SLI_LOOP_END
469 
470     // Restore BC if required.
471     if (0 != pParams->subDeviceId)
472     {
473         gpumgrSetBcEnabledStatus(pGpu, bBcState);
474     }
475 
476     if (status != NV_OK && IS_GSP_CLIENT(pGpu))
477     {
478         NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS params = {0};
479 
480         params.hVASpace    = pParams->hVASpace;
481         params.subDeviceId = pParams->subDeviceId;
482 
483         NV_RM_RPC_UNSET_PAGE_DIRECTORY(pGpu, hClient, hDevice, &params, status);
484     }
485 
486     if (status != NV_OK && IS_VIRTUAL_WITH_SRIOV(pGpu))
487     {
488         NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS params = {0};
489 
490         params.hVASpace    = pParams->hVASpace;
491         params.subDeviceId = pParams->subDeviceId;
492 
493         NV_RM_RPC_CONTROL(pGpu,
494                           hClient,
495                           hDevice,
496                           NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY,
497                           &params,
498                           sizeof(NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS),
                          status);
    }
502 
503     return status;
504 }
505 
506 //
507 // deviceCtrlCmdDmaUnsetPageDirectory_IMPL
508 //
509 // Lock Requirements:
510 //      Assert that API lock and GPUs lock held on entry
511 //
512 NV_STATUS
513 deviceCtrlCmdDmaUnsetPageDirectory_IMPL
514 (
515     Device *pDevice,
516     NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams
517 )
518 {
519     OBJGPU         *pGpu          = GPU_RES_GET_GPU(pDevice);
520     OBJVASPACE     *pVAS          = NULL;
521     CALL_CONTEXT   *pCallContext  = resservGetTlsCallContext();
522     RmCtrlParams   *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams;
523     NvBool          bBcState      = NV_FALSE;
524     NV_STATUS       status        = NV_OK;
525 
526     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
527 
528     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
529     {
530         NV_RM_RPC_CONTROL(pGpu,
531                           pRmCtrlParams->hClient,
532                           pRmCtrlParams->hObject,
533                           pRmCtrlParams->cmd,
534                           pRmCtrlParams->pParams,
535                           pRmCtrlParams->paramsSize,
536                           status);
537         return status;
538     }
539 
540 
541     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
542                           vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice),
543                                                             pParams->hVASpace, &pVAS));
544 
545     OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE);
546     NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_NOT_SUPPORTED);
547 
548     // Force to UC if client passed in sub-device handle.
549     if (pParams->subDeviceId != 0)
550     {
551         bBcState = gpumgrGetBcEnabledStatus(pGpu);
552 
553         pGpu = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu),
554                                              pParams->subDeviceId - 1);
555         gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
556     }
557 
558     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
559     {
560         status = gvaspaceExternalRootDirRevoke(pGVAS, pGpu, pParams);
561         if (status != NV_OK)
562         {
563             SLI_LOOP_BREAK;
564         }
565     }
566     SLI_LOOP_END
567 
568     //
569     // Updating the instance block of all channels in the TSGs that's using
570     // the VA space
571     //
572     status = gvaspaceUnregisterAllChanGrps(pGVAS, pGpu);
573 
574     // Restore BC if required.
575     if (pParams->subDeviceId != 0)
576     {
577         gpumgrSetBcEnabledStatus(pGpu, bBcState);
578     }
579 
580     if (IS_GSP_CLIENT(pGpu) || IS_VIRTUAL_WITH_SRIOV(pGpu))
581     {
582         NV_RM_RPC_CONTROL(pGpu,
583                           pRmCtrlParams->hClient,
584                           pRmCtrlParams->hObject,
585                           pRmCtrlParams->cmd,
586                           pRmCtrlParams->pParams,
587                           pRmCtrlParams->paramsSize,
588                           status);
589     }
590 
591     return status;
592 }
593 
594 //
595 // deviceCtrlCmdDmaSetPteInfo_IMPL
596 //
597 // Lock Requirements:
598 //      Assert that API lock and GPUs lock held on entry
599 //
600 NV_STATUS
601 deviceCtrlCmdDmaSetPteInfo_IMPL
602 (
603     Device *pDevice,
604     NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams
605 )
606 {
607     OBJGPU     *pGpu    = GPU_RES_GET_GPU(pDevice);
608     OBJVASPACE *pVAS    = NULL;
609     NV_STATUS   status  = NV_OK;
610 
611     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
612 
613     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
614                           vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice),
615                                                             pParams->hVASpace, &pVAS));
616 
617     status = vaspaceSetPteInfo(pVAS, pGpu, pParams);
618     if (status != NV_OK)
619     {
        NV_PRINTF(LEVEL_ERROR, "vaspaceSetPteInfo failed\n");
621         NV_ASSERT(0);
622     }
623 
624     return status;
625 }
626 
627 //
628 // deviceCtrlCmdDmaFlush_IMPL
629 //
630 // Lock Requirements:
631 //      Assert that API lock and GPUs lock held on entry
632 //
633 NV_STATUS
634 deviceCtrlCmdDmaFlush_IMPL
635 (
636     Device *pDevice,
637     NV0080_CTRL_DMA_FLUSH_PARAMS *flushParams
638 )
639 {
640     OBJGPU             *pGpu        = GPU_RES_GET_GPU(pDevice);
641     KernelBus          *pKernelBus  = GPU_GET_KERNEL_BUS(pGpu);
642     FB_CACHE_MEMTYPE    targetMem   = FB_CACHE_MEM_UNDEFINED;
643     FB_CACHE_OP         cacheOp     = FB_CACHE_OP_UNDEFINED;
644     NV_STATUS           status      = NV_OK;
645 
646     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
647 
648     NV_PRINTF(LEVEL_INFO, "Flush op invoked with target Unit 0x%x\n",
649               flushParams->targetUnit);
650 
    if (FLD_TEST_DRF(0080, _CTRL_DMA_FLUSH_TARGET_UNIT, _L2_INVALIDATE,
                     _SYSMEM, flushParams->targetUnit))
    {
        targetMem = FB_CACHE_SYSTEM_MEMORY;
        cacheOp = FB_CACHE_INVALIDATE;
    }
    if (FLD_TEST_DRF(0080, _CTRL_DMA_FLUSH_TARGET_UNIT, _L2_INVALIDATE,
                     _PEERMEM, flushParams->targetUnit))
    {
        targetMem = FB_CACHE_PEER_MEMORY;
        cacheOp = FB_CACHE_INVALIDATE;
    }
    if (FLD_TEST_DRF(0080, _CTRL_DMA_FLUSH_TARGET_UNIT, _L2, _ENABLE,
                     flushParams->targetUnit))
    {
        targetMem = FB_CACHE_DIRTY;
        cacheOp = FB_CACHE_WRITEBACK;
    }
    if (FLD_TEST_DRF(0080, _CTRL_DMA_FLUSH_TARGET_UNIT, _COMPTAG, _ENABLE,
                     flushParams->targetUnit))
    {
        targetMem = FB_CACHE_COMPTAG_MEMORY;
        cacheOp = FB_CACHE_WRITEBACK;
    }
675 
676     if ((targetMem != FB_CACHE_MEM_UNDEFINED) && (cacheOp != FB_CACHE_OP_UNDEFINED))
677     {
678         SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
679         {
680             KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
681             status = kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, NULL, targetMem, cacheOp);
682             if (status != NV_OK)
683             {
684                 SLI_LOOP_RETURN(status);
685             }
686         }
687         SLI_LOOP_END
688     }
689 
690     if (FLD_TEST_DRF(0080, _CTRL_DMA_FLUSH_TARGET_UNIT, _FB, _ENABLE,
691                      flushParams->targetUnit))
692     {
693         status = kbusSendSysmembar(pGpu, pKernelBus);
694     }
695 
696     return status;
697 }
698 
699 //
700 // deviceCtrlCmdDmaAdvSchedGetVaCaps_IMPL
701 //
702 // Lock Requirements:
703 //      Assert that API lock and GPUs lock held on entry
704 //
705 NV_STATUS
706 deviceCtrlCmdDmaAdvSchedGetVaCaps_IMPL
707 (
708     Device *pDevice,
709     NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams
710 )
711 {
712     OBJGPU        *pGpu           = GPU_RES_GET_GPU(pDevice);
713     OBJVASPACE    *pVAS           = NULL;
714     NV_STATUS      status         = NV_OK;
715     const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig =
716         kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu));
717 
718     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner());
719 
720     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
721         vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice),
722                                           pParams->hVASpace, &pVAS));
723 
724     NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
725         vaspaceGetVasInfo(pVAS, pParams));
726 
727     pParams->compressionPageSize = pMemorySystemConfig->comprPageSize;
728     pParams->vaSpaceId           = pVAS->vaspaceId;
729 
730     return status;
731 }
732 
733 //
734 // deviceCtrlCmdDmaGetPdeInfo_IMPL
735 //
736 // Lock Requirements:
737 //      Assert that API lock and GPUs lock held on entry
738 //
739 NV_STATUS
740 deviceCtrlCmdDmaGetPdeInfo_IMPL
741 (
742     Device *pDevice,
743     NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams
744 )
745 {
746     OBJGPU         *pGpu            = GPU_RES_GET_GPU(pDevice);
747     NV_STATUS       status          = NV_OK;
748     OBJVASPACE     *pVAS            = NULL;
749     CALL_CONTEXT   *pCallContext    = resservGetTlsCallContext();
750     RmCtrlParams   *pRmCtrlParams   = pCallContext->pControlParams->pLegacyParams;
751 
752     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
753 
754     //
755     // vGPU:
756     //
757     // Since vGPU does all real hardware management in the
    // host, if we are in non SRIOV (legacy) guest RM, do an
759     // RPC to the host to do the hardware update.
760     //
761     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
762     {
763         NV_STATUS status = NV_OK;
764         NV_RM_RPC_CONTROL(pGpu,
765                           pRmCtrlParams->hClient,
766                           pRmCtrlParams->hObject,
767                           pRmCtrlParams->cmd,
768                           pRmCtrlParams->pParams,
769                           pRmCtrlParams->paramsSize,
770                           status);
771         return status;
772     }
773 
774     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
775                           vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice),
776                                                             pParams->hVASpace, &pVAS));
777 
    if (vaspaceGetPageTableInfo(pVAS, pParams) != NV_OK)
779     {
780         status = NV_ERR_INVALID_EVENT;
781         NV_PRINTF(LEVEL_ERROR, "vaspaceGetPageTableInfo failed\n");
782         NV_ASSERT(0);
783     }
784 
785     return status;
786 }
787 NV_STATUS
788 deviceCtrlCmdDmaSetDefaultVASpace_IMPL
789 (
790     Device *pDevice,
791     NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS *pParams
792 )
793 {
794     OBJGPU   *pGpu   = GPU_RES_GET_GPU(pDevice);
795     NV_STATUS status = NV_OK;
796 
797     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
798 
799     NV_ASSERT_OK_OR_RETURN(
800         deviceSetDefaultVASpace(
801             pDevice,
802             pParams->hVASpace));
803 
804     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
805     {
806         CALL_CONTEXT   *pCallContext  = resservGetTlsCallContext();
807         RmCtrlParams   *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams;
808 
809         NV_RM_RPC_CONTROL(pRmCtrlParams->pGpu,
810                           pRmCtrlParams->hClient,
811                           pRmCtrlParams->hObject,
812                           pRmCtrlParams->cmd,
813                           pRmCtrlParams->pParams,
814                           pRmCtrlParams->paramsSize,
815                           status);
816     }
817 
818     return status;
819 }
820 
821 //
// subdeviceCtrlCmdDmaInvalidateTLB_IMPL
823 //
824 // Lock Requirements:
825 //      Assert that GPUs lock held on entry
826 //      Called from SW method w/o API lock
827 //
828 NV_STATUS
829 subdeviceCtrlCmdDmaInvalidateTLB_IMPL
830 (
831     Subdevice *pSubdevice,
832     NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS *pParams
833 )
834 {
835     OBJGPU     *pGpu    = GPU_RES_GET_GPU(pSubdevice);
836     OBJVASPACE *pVAS    = NULL;
837 
838     LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner());
839 
840     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
841                           vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pSubdevice), RES_GET_PARENT_HANDLE(pSubdevice),
842                                                             pParams->hVASpace, &pVAS));
843 
844     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) ||
845         (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
846     {
847         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
848         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
849         NV_STATUS status = NV_OK;
850         NV_RM_RPC_CONTROL(pGpu,
851                           pRmCtrlParams->hClient,
852                           pRmCtrlParams->hObject,
853                           pRmCtrlParams->cmd,
854                           pRmCtrlParams->pParams,
855                           pRmCtrlParams->paramsSize,
856                           status);
857         return status;
858     }
859 
860     //
861     // Although this function is used following PTE upgrades most of the time,
862     // we cannot guarantee that, nor can we easily determine the update type.
863     //
864     vaspaceInvalidateTlb(pVAS, pGpu, PTE_DOWNGRADE);
865 
866     return NV_OK;
867 }
868 
869 /*!
870  * @brief subdeviceCtrlCmdDmaGetInfo
871  *
872  * Lock Requirements:
873  *      Assert that both the GPUs lock and API lock are held on entry.
874  */
875 NV_STATUS
876 subdeviceCtrlCmdDmaGetInfo_IMPL
877 (
878     Subdevice *pSubdevice,
879     NV2080_CTRL_DMA_GET_INFO_PARAMS *pDmaInfoParams
880 )
881 {
882     OBJGPU     *pGpu   = GPU_RES_GET_GPU(pSubdevice);
883     NV_STATUS   status = NV_OK;
884     NvU32       i;
885     NvU32       data;
886 
887     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
888 
    // error check
890     if (pDmaInfoParams->dmaInfoTblSize > NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES)
891         return NV_ERR_INVALID_PARAM_STRUCT;
892 
893     // step thru list
894     for (i = 0; i < pDmaInfoParams->dmaInfoTblSize; i++)
895     {
896         switch (pDmaInfoParams->dmaInfoTbl[i].index)
897         {
898             case NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE:
899                 data = gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM);
900                 break;
901             default:
902             {
903                 data = 0;
904                 status = NV_ERR_INVALID_ARGUMENT;
905                 break;
906             }
907 
908         }
909 
910         if (status != NV_OK)
911             break;
912 
913         // save off data value
914         pDmaInfoParams->dmaInfoTbl[i].data = data;
915     }
916 
917     return status;
918 }
919 
920 /*!
921  * @brief New TLB interface control call w/o engine masks.
922  *
923  * Lock Requirements:
924  *      Assert that API lock and GPUs lock held on entry
925  *
926  */
927 NV_STATUS
928 deviceCtrlCmdDmaInvalidateTLB_IMPL
929 (
930     Device *pDevice,
931     NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS *pParams
932 )
933 {
934     OBJGPU      *pGpu    = GPU_RES_GET_GPU(pDevice);
935     OBJVASPACE  *pVAS    = NULL;
936 
937     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
938 
939     //
940     // vGPU:
941     //
942     // Since vGPU does all real hardware management in the
943     // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true),
944     // do an RPC to the host to do the hardware update.
945     //
946     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) ||
947         (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
948     {
949         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
950         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams;
951         NV_STATUS status = NV_OK;
952 
953         NV_RM_RPC_CONTROL(pGpu,
954                           pRmCtrlParams->hClient,
955                           pRmCtrlParams->hObject,
956                           pRmCtrlParams->cmd,
957                           pRmCtrlParams->pParams,
958                           pRmCtrlParams->paramsSize,
959                           status);
960         return status;
961     }
962 
963 
964     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
965                           vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice),
966                                                             pParams->hVASpace, &pVAS));
967 
968 
969     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
970     {
971         //
972         // Although this function is used following PTE upgrades most of the time,
973         // we cannot guarantee that, nor can we easily determine the update type.
974         //
975         vaspaceInvalidateTlb(pVAS, pGpu, PTE_DOWNGRADE);
976     }
977     SLI_LOOP_END
978 
979     return NV_OK;
980 }
981 
982 //
983 // deviceCtrlCmdDmaGetCaps_IMPL
984 //
985 // Lock Requirements:
986 //      Assert that API lock held on entry
987 //
988 NV_STATUS
989 deviceCtrlCmdDmaGetCaps_IMPL
990 (
991     Device *pDevice,
992     NV0080_CTRL_DMA_GET_CAPS_PARAMS *pDmaCapsParams
993 )
994 {
995     NV_STATUS   status = NV_OK;
996     OBJGPU     *pGpu = GPU_RES_GET_GPU(pDevice);
997     VirtMemAllocator *pDma = GPU_GET_DMA(pGpu);
998 
999     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner());
1000 
1001     // sanity check array size
1002     if (pDmaCapsParams->capsTblSize != NV0080_CTRL_DMA_CAPS_TBL_SIZE)
1003     {
1004         NV_PRINTF(LEVEL_ERROR, "size mismatch: client 0x%x rm 0x%x\n",
1005                   pDmaCapsParams->capsTblSize, NV0080_CTRL_DMA_CAPS_TBL_SIZE);
1006         return NV_ERR_INVALID_ARGUMENT;
1007     }
1008 
1009     portMemSet(pDmaCapsParams->capsTbl, 0, NV0080_CTRL_DMA_CAPS_TBL_SIZE);
1010 
1011     // Fill in caps
1012     if (pDma->getProperty(pDma, PDB_PROP_DMA_ENFORCE_32BIT_POINTER))
1013         RMCTRL_SET_CAP(pDmaCapsParams->capsTbl, NV0080_CTRL_DMA_CAPS, _32BIT_POINTER_ENFORCED);
1014 
1015     if (pDma->getProperty(pDma, PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED))
1016         RMCTRL_SET_CAP(pDmaCapsParams->capsTbl, NV0080_CTRL_DMA_CAPS, _SHADER_ACCESS_SUPPORTED);
1017 
1018     if (pDma->getProperty(pDma, PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL))
1019         RMCTRL_SET_CAP(pDmaCapsParams->capsTbl, NV0080_CTRL_DMA_CAPS, _SPARSE_VIRTUAL_SUPPORTED);
1020 
1021     // Supported on all platforms except the Maxwell amodel simulator
1022     if (pDma->getProperty(pDma, PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED))
1023         RMCTRL_SET_CAP(pDmaCapsParams->capsTbl, NV0080_CTRL_DMA_CAPS, _MULTIPLE_VA_SPACES_SUPPORTED);
1024 
1025     return status;
1026 }
1027 
1028 //
1029 // deviceCtrlCmdDmaEnablePrivilegedRange_IMPL
1030 //
1031 // Lock Requirements:
1032 //      Assert that both locks are held on entry
1033 // Enables the privileged range assuming that the vaspace
1034 // has not yet been created. If the vaspace has already been
1035 // created that means we have already made allocations in this
1036 // vaspace(lazy allocation). In this case this ctrl call should fail.
1037 //
1038 NV_STATUS
1039 deviceCtrlCmdDmaEnablePrivilegedRange_IMPL
1040 (
1041     Device *pDevice,
1042     NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS *pParams
1043 )
1044 {
1045     LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
1046 
1047     if (pParams->hVASpace != NV01_NULL_OBJECT)
1048     {
1049         return NV_ERR_NOT_SUPPORTED;
1050     }
1051 
1052     if (pDevice->pVASpace == NULL)
1053     {
1054         pDevice->deviceInternalAllocFlags |=
1055                                   NV_DEVICE_INTERNAL_ALLOCATION_FLAGS_ENABLE_PRIVILEGED_VASPACE;
1056         return NV_OK;
1057     }
1058 
1059     return NV_ERR_NOT_SUPPORTED;
1060 }
1061 
1062 NV_STATUS
1063 diagapiCtrlCmdDmaIsSupportedSparseVirtual_IMPL
1064 (
1065     DiagApi *pDiagApi,
1066     NV208F_CTRL_DMA_IS_SUPPORTED_SPARSE_VIRTUAL_PARAMS *pParams
1067 )
1068 {
1069     OBJGPU *pGpu = GPU_RES_GET_GPU(pDiagApi);
1070     VirtMemAllocator *pDma = GPU_GET_DMA(pGpu);
1071 
1072     pParams->bIsSupported = pDma->getProperty(pDma, PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL);
1073     return NV_OK;
1074 }
1075 
1076 NV_STATUS
1077 diagapiCtrlCmdDmaGetVasBlockDetails_IMPL
1078 (
1079     DiagApi *pDiagApi,
1080     NV208F_CTRL_DMA_GET_VAS_BLOCK_DETAILS_PARAMS *pParams
1081 )
1082 {
1083     OBJGPU             *pGpu        = GPU_RES_GET_GPU(pDiagApi);
1084     RsResourceRef      *pSubdevRef;
1085     Subdevice          *pGpuSubDevInfo;
1086     OBJVASPACE         *pVAS        = NULL;
1087     OBJEHEAP           *pHeap       = NULL;
1088     EMEMBLOCK          *pMemBlock   = NULL;
1089 
1090     if (NV_OK != refFindAncestorOfType(RES_GET_REF(pDiagApi), classId(Subdevice), &pSubdevRef))
1091         return NV_ERR_INVALID_OBJECT_PARENT;
1092 
1093     pGpuSubDevInfo = dynamicCast(pSubdevRef->pResource, Subdevice);
1094 
1095     NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
1096                           vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDiagApi), RES_GET_PARENT_HANDLE(pGpuSubDevInfo),
1097                                                             pParams->hVASpace, &pVAS));
1098 
1099     pHeap = vaspaceGetHeap(pVAS);
1100     NV_ASSERT_OR_RETURN(NULL != pHeap, NV_ERR_INVALID_ARGUMENT);
1101     pMemBlock = pHeap->eheapGetBlock(pHeap, pParams->virtualAddress, 0);
1102     NV_ASSERT_OR_RETURN(NULL != pMemBlock, NV_ERR_INVALID_ARGUMENT);
1103 
1104     pParams->beginAddress   = pMemBlock->begin;
1105     pParams->endAddress     = pMemBlock->end;
1106     pParams->alignedAddress = pMemBlock->align;
1107     pParams->pageSize = vaspaceGetMapPageSize(pVAS, pGpu, pMemBlock);
1108 
1109     NV_ASSERT_OR_RETURN(0 != pParams->pageSize, NV_ERR_INVALID_ARGUMENT);
1110 
1111     return NV_OK;
1112 }
1113 
1114 /*!
1115  * Initialize an abstracted page array with opaque page array data.
1116  *
1117  * By default, the page data is treated as an RmPhysAddr array.
1118  * If the data is an OS-specific format, the bOsFormat field must be
1119  * set to NV_TRUE.
1120  */
1121 void
1122 dmaPageArrayInit
1123 (
1124     DMA_PAGE_ARRAY *pPageArray, //!< [out] Abstracted page array.
1125     void           *pPageData,  //!< [in] Opaque page array data.
1126     NvU32           pageCount   //!< [in] Number of pages represented.
1127 )
1128 {
1129     portMemSet(pPageArray, 0, sizeof(*pPageArray));
1130     pPageArray->pData = pPageData;
1131     pPageArray->count = pageCount;
1132 }
1133 
1134 /*!
1135  * Initialize an abstracted page array from a memory descriptor.
1136  */
1137 void
1138 dmaPageArrayInitFromMemDesc
1139 (
1140     DMA_PAGE_ARRAY     *pPageArray,         //!< [out] Abstracted page array.
1141     MEMORY_DESCRIPTOR  *pMemDesc,           //!< [in] Memory descriptor.
1142     ADDRESS_TRANSLATION addressTranslation  //!< [in] Address translation for page array.
1143 )
1144 {
1145     dmaPageArrayInit(pPageArray,
1146         memdescGetPteArray(pMemDesc, addressTranslation),
1147         memdescGetPteArraySize(pMemDesc, addressTranslation));
1148 }
1149 
1150 /*!
1151  * Extract a physical page address from an abstracted page array.
1152  *
1153  * @returns The physical (byte) address of the requested page.
1154  * @returns ~0 if the index is out of bounds (fatal error).
1155  */
1156 RmPhysAddr
1157 dmaPageArrayGetPhysAddr
1158 (
1159     DMA_PAGE_ARRAY *pPageArray, //!< [in] Abstracted page array.
1160     NvU32           pageIndex   //!< [in] Page index to retrieve.
1161 )
1162 {
1163     RmPhysAddr addr;
1164 
1165     NV_ASSERT_OR_RETURN(pPageArray->pData, ~0ULL);
1166     NV_ASSERT_OR_RETURN(pageIndex < pPageArray->count, ~0ULL);
1167 
1168     if (pPageArray->bDuplicate)
1169     {
1170         pageIndex = 0;
1171     }
1172 
1173     if (pPageArray->bOsFormat)
1174     {
1175         OBJSYS *pSys = SYS_GET_INSTANCE();
1176         OBJOS *pOS = SYS_GET_OS(pSys);
1177         addr = pOS->osPageArrayGetPhysAddr(pPageArray->pOsGpuInfo,
1178             pPageArray->pData, pPageArray->startIndex + pageIndex);
1179     }
1180     else
1181     {
1182         RmPhysAddr *pPteArray = pPageArray->pData;
1183         addr = pPteArray[pPageArray->startIndex + pageIndex];
1184     }
1185 
1186     addr |= pPageArray->orMask;
1187 
1188     return addr;
1189 }
1190