1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #define NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED
25 
26 #include "gpu/mmu/kern_gmmu.h"
27 #include "gpu/gpu.h"
28 #include "kernel/gpu/rc/kernel_rc.h"
29 
30 #include "published/volta/gv100/dev_fb.h"
31 #include "published/volta/gv100/dev_ram.h"
32 #include "published/volta/gv100/dev_fault.h"
33 
34 /**
35  * @brief      Initialize the supported GMMU HW format structures.
36  * @details    GV100+ supports ATS NV4K 64K PTE encoding
37  *
38  * @param      pKernelGmmu  The KernelGmmu
39  * @param      pGpu         The gpu
40  */
41 NV_STATUS
42 kgmmuFmtFamiliesInit_GV100(OBJGPU *pGpu, KernelGmmu *pKernelGmmu)
43 {
44     extern NV_STATUS kgmmuFmtFamiliesInit_GM200(OBJGPU *pGpu, KernelGmmu *pKernelGmmu);
45     NvU32            v;
46     NV_STATUS        result;
47     GMMU_FMT_FAMILY *pFam;
48 
    // Set up the nv4kPte encoding: valid = 0, volatile = 1, privilege = 1
50     for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v)
51     {
52         pFam = pKernelGmmu->pFmtFamilies[v];
53         if (NULL != pFam)
54         {
55             nvFieldSetBool(&pFam->pte.fldValid, NV_FALSE, pFam->nv4kPte.v8);
56             nvFieldSetBool(&pFam->pte.fldVolatile, NV_TRUE, pFam->nv4kPte.v8);
57             nvFieldSetBool(&pFam->pte.fldPrivilege, NV_TRUE, pFam->nv4kPte.v8);
58         }
59     }
60 
    // Defer to the GM200 implementation for the rest of the format-family setup
62     result = kgmmuFmtFamiliesInit_GM200(pGpu, pKernelGmmu);
63 
64     return result;
65 }
66 
67 NV_STATUS
68 kgmmuChangeReplayableFaultOwnership_GV100(OBJGPU *pGpu, KernelGmmu *pKernelGmmu, NvBool bOwnedByRm)
69 {
70     //
71     // Disable the interrupt when RM loses the ownership and enable it back when
72     // RM regains it. At least nvUvmInterfaceOwnPageFaultIntr() relies on that behavior.
73     //
74     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
75         return NV_OK;
76 
77     if (bOwnedByRm)
78         pKernelGmmu->uvmSharedIntrRmOwnsMask |= RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY;
79     else
80         pKernelGmmu->uvmSharedIntrRmOwnsMask &= ~RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY;
81 
82     //
83     // Notably don't set the PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS property as
84     // on Volta that would mean masking out all MMU faults from pending
85     // interrupts.
86     //
87 
88     return NV_OK;
89 }
90 
91 /*!
92  * @brief Creates the shadow fault buffer for client handling of replayable/non-replayable faults
93  *
 * @param[in] pGpu
 * @param[in] pKernelGmmu
 * @param[in] index        Fault buffer type (replayable or non-replayable)
 *
 * @returns NV_STATUS
98  */
99 NV_STATUS
100 kgmmuClientShadowFaultBufferAlloc_GV100
101 (
102     OBJGPU           *pGpu,
103     KernelGmmu       *pKernelGmmu,
104     FAULT_BUFFER_TYPE index
105 )
106 {
107     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
108         return NV_OK;
109 
110     if (pKernelGmmu->getProperty(pKernelGmmu, PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED))
111     {
112         NV_PRINTF(LEVEL_ERROR, "Fault-Buffer is disabled. ShadowBuffer cannot be created\n");
113         NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE);
114     }
115 
116     return kgmmuClientShadowFaultBufferAllocate(pGpu, pKernelGmmu, index);
117 }
118 
119 /*!
 * @brief Frees the shadow fault buffer for client handling of replayable/non-replayable faults
 *
 * @param[in] pGpu
 * @param[in] pKernelGmmu
 * @param[in] index        Fault buffer type (replayable or non-replayable)
 *
 * @returns NV_STATUS
126  */
127 NV_STATUS
128 kgmmuClientShadowFaultBufferFree_GV100
129 (
130     OBJGPU           *pGpu,
131     KernelGmmu       *pKernelGmmu,
132     FAULT_BUFFER_TYPE index
133 )
134 {
135     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
136         return NV_OK;
137 
138     return kgmmuClientShadowFaultBufferDestroy(pGpu, pKernelGmmu, index);
139 }
140 
141 /*!
142  * @brief Writes the ATS properties to the instance block
143  *
 * @param[in]  pKernelGmmu
 * @param[in]  pVAS            OBJVASPACE pointer
 * @param[in]  subctxId        subctxId value
 * @param[out] pOffset         Pointer to the instance block offset to write
 * @param[out] pData           Pointer to the value to write
 *
 * @returns NV_STATUS
150  */
151 NV_STATUS
152 kgmmuInstBlkAtsGet_GV100
153 (
154     KernelGmmu          *pKernelGmmu,
155     OBJVASPACE          *pVAS,
156     NvU32                subctxId,
157     NvU32               *pOffset,
158     NvU32               *pData
159 )
160 {
161     NvU32 pasid = 0;
162 
163     if (subctxId == FIFO_PDB_IDX_BASE)
164     {
        // A channel sets the base PDB only with a valid VAS; otherwise, this should fail.
166         if (pVAS != NULL)
167         {
            // ct_assert is evaluated at compile time, so it must appear at the top of the scope; otherwise the build fails.
169             ct_assert(SF_WIDTH(NV_RAMIN_PASID) <= 32);
170 
171             //
172             // The PASID value is provided by the OS and out of client control
173             // however if the PASID value is invalid the ATS feature will not function
174             // as expected so check sanity and fail early
175             //
176             NV_ASSERT_OR_RETURN(NV_OK == vaspaceGetPasid(pVAS, &pasid),
177                         NV_ERR_INVALID_DATA);
178             if (pasid > MASK_BITS(SF_WIDTH(NV_RAMIN_PASID)))
179             {
180                 NV_PRINTF(LEVEL_ERROR,
181                           "Invalid PASID %d (max width %d bits)\n", pasid,
182                           SF_WIDTH(NV_RAMIN_PASID));
183                 return NV_ERR_OPERATING_SYSTEM;
184             }
185 
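            // Return the base-PDB ATS-enable and PASID fields along with their instance block offset.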
186             *pOffset = SF_OFFSET(NV_RAMIN_ENABLE_ATS);
187             *pData   = SF_NUM(_RAMIN, _ENABLE_ATS, vaspaceIsAtsEnabled(pVAS)) |
188                        SF_NUM(_RAMIN, _PASID, pasid);
189         }
190         else
191         {
192             // We cannot set base PDB without pVAS!
193             NV_ASSERT_OR_RETURN(pVAS != NULL, NV_ERR_INVALID_STATE);
194         }
195     }
196     else
197     {
        // For a subcontext PDB, valid values are written only when the VAS is non-NULL; otherwise, the PDB entry is invalid.
199         if (pVAS != NULL)
200         {
201             ct_assert(SF_WIDTH(NV_RAMIN_SC_PASID(0)) <= 32);
202 
203             //
204             // set ATS for legacy PDB if SubctxId is set to be FIFO_PDB_IDX_BASE
205             // Otherwise, set PDB with given SubctxId.
206             //
207             NV_ASSERT_OR_RETURN(NV_OK == vaspaceGetPasid(pVAS, &pasid),
208                         NV_ERR_INVALID_DATA);
209 
210             if (pasid > MASK_BITS(SF_WIDTH(NV_RAMIN_SC_PASID(subctxId))))
211             {
212                 NV_PRINTF(LEVEL_ERROR,
213                           "Invalid PASID %d (max width %d bits)\n", pasid,
214                           SF_WIDTH(NV_RAMIN_SC_PASID(subctxId)));
215                 return NV_ERR_OPERATING_SYSTEM;
216             }
217 
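            // Pack the per-subcontext ATS-enable and PASID fields for this subctxId.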
218             *pData = SF_IDX_NUM(_RAMIN_SC, _ENABLE_ATS, vaspaceIsAtsEnabled(pVAS), subctxId) |
219                      SF_IDX_NUM(_RAMIN_SC, _PASID, pasid, subctxId);
220         }
221         else
222         {
223             //
224             // If pVAS is NULL, that means the PDB of this SubctxId is set to Invalid.
225             // In this case, ATS should be Disabled.
226             //
227             *pData = NV_RAMIN_ENABLE_ATS_FALSE;
228         }
229 
230         *pOffset = SF_OFFSET(NV_RAMIN_SC_ENABLE_ATS(subctxId));
231     }
232 
233     return NV_OK;
234 }
235 
236 /*!
237  * @brief This gets the offset and data for vaLimit
238  *
239  * @param[in]  pKernelGmmu
240  * @param[in]  pVAS            OBJVASPACE pointer
241  * @param[in]  subctxId        subctxId value
242  * @param[in]  pParams         Pointer to the structure containing parameters passed by the engine
243  * @param[out] pOffset         Pointer to offset of NV_RAMIN_ADR_LIMIT_LO:NV_RAMIN_ADR_LIMIT_HI pair
244  * @param[out] pData           Pointer to value to write
245  *
246  * @returns NV_STATUS
247  */
248 NV_STATUS
249 kgmmuInstBlkVaLimitGet_GV100
250 (
251     KernelGmmu           *pKernelGmmu,
252     OBJVASPACE           *pVAS,
253     NvU32                 subctxId,
254     INST_BLK_INIT_PARAMS *pParams,
255     NvU32                *pOffset,
256     NvU64                *pData
257 )
258 {
259     extern NV_STATUS kgmmuInstBlkVaLimitGet_GP100(KernelGmmu *pKernelGmmu, OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData);
260 
261     if (subctxId == FIFO_PDB_IDX_BASE)
262     {
263         return kgmmuInstBlkVaLimitGet_GP100(pKernelGmmu, pVAS, subctxId, pParams,
264                     pOffset, pData);
265     }
266 
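    // For non-base subcontext entries nothing is programmed here; return zeros.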
267     *pOffset = 0;
268     *pData   = 0;
269 
270     return NV_OK;
271 }
272 
273 /*!
274  * @brief This gets the offsets and data for the PDB limit
275  *
276  * @param[in] pGpu
277  * @param[in] pKernelGmmu
278  * @param[in] pVAS            OBJVASPACE pointer
279  * @param[in] pParams         Pointer to the structure containing parameters passed by the engine
280  * @param[in] subctxId        subctxId value
281  * @param[out] pOffsetLo      Pointer to low offset
282  * @param[out] pDataLo        Pointer to data written at above offset
283  * @param[out] pOffsetHi      Pointer to high offset
284  * @param[out] pDataHi        Pointer to data written at above offset
285  *
 * @returns NV_STATUS
287  */
288 NV_STATUS
289 kgmmuInstBlkPageDirBaseGet_GV100
290 (
291     OBJGPU               *pGpu,
292     KernelGmmu           *pKernelGmmu,
293     OBJVASPACE           *pVAS,
294     INST_BLK_INIT_PARAMS *pParams,
295     NvU32                 subctxId,
296     NvU32                *pOffsetLo,
297     NvU32                *pDataLo,
298     NvU32                *pOffsetHi,
299     NvU32                *pDataHi
300 )
301 {
302     extern NV_STATUS kgmmuInstBlkPageDirBaseGet_GP100(OBJGPU *pGpu, KernelGmmu *pKernelGmmu, OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi);
303 
304     if (subctxId == FIFO_PDB_IDX_BASE)
305     {
306         return kgmmuInstBlkPageDirBaseGet_GP100(pGpu, pKernelGmmu, pVAS,
307                     pParams, subctxId, pOffsetLo, pDataLo, pOffsetHi, pDataHi);
308     }
309     else
310     {
311         KernelFifo        *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
312         MEMORY_DESCRIPTOR *pPDB  = NULL;
313 
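        // Zombie subcontexts are pointed at KernelFifo's dummy page; otherwise use the VAS page directory base.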
314         if (pParams->bIsZombieSubctx)
315         {
            pPDB = kfifoGetDummyPageMemDesc(pKernelFifo);
317 
318             NV_ASSERT_OR_RETURN((pPDB != NULL), NV_ERR_INVALID_STATE);
319         }
320         else if (pVAS != NULL)
321         {
322             pPDB = (pParams->bIsClientAdmin) ?
323                            vaspaceGetKernelPageDirBase(pVAS, pGpu) :
324                            vaspaceGetPageDirBase(pVAS, pGpu);
325         }
326 
327         if (pPDB == NULL)
328         {
329             //
330             // The teardown model for subcontext with UVM + CUDA is as follows:
331             //
332             // Step 1:  Unregister(vas)  -->  UnsetPageDirectory(vas)
333             // Step 2:  FreeSubcontext(vas)
334             //
335             // But new subcontext can be added between step 1 & step 2.
336             // Currently RM doesn't support the notion of a subcontext with NULL PDB.
337             // This results in RM failing subsequent subcontext allocation, causing the UNBOUND instance block failure in bug 1823795.
338             // To fix this, we will allow a subcontext to exist with invalid PDB until it is freed later.
339             // This shouldn't cause any functional issue as no access memory shouldn't happen from this subcontext.
340 
341             *pDataLo = NV_RAMIN_SC_PAGE_DIR_BASE_TARGET_INVALID;
342             *pDataHi = NV_RAMIN_SC_PAGE_DIR_BASE_TARGET_INVALID;
343         }
344         else
345         {
346             RmPhysAddr physAdd  = memdescGetPhysAddr(pPDB, AT_GPU, 0);
347             NvU32      aperture = kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pPDB);
348             NvU32      addrLo   = NvU64_LO32(physAdd >> NV_RAMIN_BASE_SHIFT);
349 
350             //
351             // Volta only supports new page table format and 64KB big page size so
352             // forcing _USE_VER2_PT_FORMAT to _TRUE and _BIG_PAGE_SIZE to 64KB.
353             //
354             *pDataLo =
                SF_IDX_NUM(_RAMIN_SC_PAGE_DIR_BASE, _TARGET, aperture, subctxId) |
356                 ((pParams->bIsFaultReplayable)?
357                     SF_IDX_DEF(_RAMIN_SC_PAGE_DIR_BASE, _FAULT_REPLAY_TEX, _ENABLED,  subctxId) |
358                     SF_IDX_DEF(_RAMIN_SC_PAGE_DIR_BASE, _FAULT_REPLAY_GCC, _ENABLED,  subctxId) :
359                     SF_IDX_DEF(_RAMIN_SC_PAGE_DIR_BASE, _FAULT_REPLAY_TEX, _DISABLED, subctxId) |
360                     SF_IDX_DEF(_RAMIN_SC_PAGE_DIR_BASE, _FAULT_REPLAY_GCC, _DISABLED, subctxId)) |
361                 SF_IDX_DEF(_RAMIN_SC, _USE_VER2_PT_FORMAT, _TRUE, subctxId) |
362                 SF_IDX_DEF(_RAMIN_SC, _BIG_PAGE_SIZE, _64KB, subctxId) |
363                 SF_IDX_NUM(_RAMIN_SC_PAGE_DIR_BASE, _VOL, memdescGetVolatility(pPDB), subctxId) |
364                 SF_IDX_NUM(_RAMIN_SC_PAGE_DIR_BASE, _LO, addrLo, subctxId);
365 
366             *pDataHi = SF_IDX_NUM(_RAMIN_SC_PAGE_DIR_BASE, _HI, NvU64_HI32(physAdd), subctxId);
367         }
368 
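        // The LO/HI instance block offsets are returned for both the invalid-PDB and valid-PDB cases above.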
369         *pOffsetLo = SF_OFFSET(NV_RAMIN_SC_PAGE_DIR_BASE_LO(subctxId));
370         *pOffsetHi = SF_OFFSET(NV_RAMIN_SC_PAGE_DIR_BASE_HI(subctxId));
371      }
372 
373     return NV_OK;
374 }
375 
376 /**
 * @brief Report MMU fault buffer overflow errors. An MMU fault
 *        buffer overflow is a fatal error. Raise an assert and send
 *        any registered client notifications to ensure the
 *        overflow is debugged properly.
 *
 * @param[in]  pGpu
 * @param[in]  pKernelGmmu
 *
 * @returns NV_STATUS
386  */
387 NV_STATUS
388 kgmmuReportFaultBufferOverflow_GV100
389 (
390     OBJGPU      *pGpu,
391     KernelGmmu  *pKernelGmmu
392 )
393 {
394     NV_STATUS rmStatus = NV_OK;
395     NvU32   faultStatus = kgmmuReadMmuFaultStatus_HAL(pGpu, pKernelGmmu, GPU_GFID_PF);
396     NvU32   faultBufferGet;
397     NvU32   faultBufferPut;
398     PEVENTNOTIFICATION *ppEventNotification  = NULL;
399     NvU32   faultBufferSize;
400 
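    // Snapshot the non-replayable fault buffer GET/PUT pointers and size for the diagnostic prints below.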
401     kgmmuReadFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, NON_REPLAYABLE_FAULT_BUFFER,
402                                   &faultBufferGet, NULL);
403     faultBufferGet = DRF_VAL(_PFB_PRI, _MMU_FAULT_BUFFER_GET, _PTR, faultBufferGet);
404 
405     kgmmuReadFaultBufferPutPtr_HAL(pGpu, pKernelGmmu, NON_REPLAYABLE_FAULT_BUFFER,
406                                   &faultBufferPut, NULL);
407     faultBufferPut = DRF_VAL(_PFB_PRI, _MMU_FAULT_BUFFER_PUT, _PTR, faultBufferPut);
408 
409     faultBufferSize = kgmmuReadMmuFaultBufferSize_HAL(pGpu, pKernelGmmu, NON_REPLAYABLE_FAULT_BUFFER, GPU_GFID_PF);
410 
411     if (kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu, NULL))
412     {
413         if (IsVOLTA(pGpu))
414         {
415             //
416             // Check if Non_replayable interrupt is set when overflow is seen.
417             // This shouldn't happen as this can cause a live-lock considering
418             // top-half will kept on coming and will not let overflow interrupt
419             // serviced. HW should disable the FAULT_INTR when overflow is
420             // detected.
421             //
422             NV_PRINTF(LEVEL_ERROR, "MMU Fault: GPU %d: HW-BUG : "
423                 "NON_REPLAYABLE_INTR is high when OVERFLOW is detected\n",
424                 pGpu->gpuInstance);
425             NV_ASSERT(0);
426         }
427         else
428         {
429             //
430             // With message-based MMU interrupts (Turing onwards), it is
431             // possible for us to get here - a real fault can happen while an
432             // overflow happens, and there is no ordering guarantee about the
433             // order of these interrupts in HW. However, if we write GET pointer
434             // with GET != PUT while overflow is detected, the fault interrupt
435             // will not be sent. Instead, the overflow interrupt will be sent,
436             // so this will not cause an interrupt storm with message-based
437             // interrupts. If HW does have a bug though, we'll see the below
438             // print repeatedly which can point to a HW bug where it isn't
439             // behaving the way it is designed to do.
440             //
441             NV_PRINTF(LEVEL_INFO, "MMU Fault: GPU %d: NON_REPLAYABLE_INTR "
442                 "is high when OVERFLOW is detected\n", pGpu->gpuInstance);
443         }
444     }
445 
446     // Check if overflow is due to incorrect fault buffer size or GET > SIZE
447     if (FLD_TEST_DRF(_PFB_PRI, _MMU_FAULT_STATUS, _NON_REPLAYABLE_GETPTR_CORRUPTED, _SET, faultStatus) ||
448         FLD_TEST_DRF(_PFB_PRI, _MMU_FAULT_STATUS, _REPLAYABLE_GETPTR_CORRUPTED, _SET, faultStatus))
449     {
450         NV_PRINTF(LEVEL_ERROR,
451                   "MMU Fault: GPU %d: Buffer overflow detected due to GET > SIZE\n",
452                   pGpu->gpuInstance);
453     }
454     else
455     {
456         NV_PRINTF(LEVEL_ERROR,
457                   "MMU Fault: GPU %d: Buffer overflow detected due to incorrect SIZE\n",
458                   pGpu->gpuInstance);
459 
460         NV_PRINTF(LEVEL_ERROR,
461                   "MMU Fault: GPU %d: Buffer SIZE is expected to handle max faults "
462                   "possible in system\n", pGpu->gpuInstance);
463     }
464 
465     NV_PRINTF(LEVEL_ERROR,
466               "MMU Fault: GPU %d: STATUS - 0x%x GET - 0x%x, PUT - 0x%x  SIZE - 0x%x\n",
467               pGpu->gpuInstance, faultStatus, faultBufferGet, faultBufferPut,
468               faultBufferSize);
469 
    // Raise an event for MODS if registered, as MODS checks for overflow
471     if ((NV_OK == CliGetEventNotificationList(pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hFaultBufferClient,
472                  pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hFaultBufferObject, NULL, &ppEventNotification)) && ppEventNotification)
473     {
474         MODS_ARCH_ERROR_PRINTF("MMU Fault Buffer overflow detected\n");
475         rmStatus = notifyEvents(pGpu, *ppEventNotification, NVC369_NOTIFIER_MMU_FAULT_ERROR,
476                                 0, 0, NV_OK, NV_OS_WRITE_THEN_AWAKEN);
        if (rmStatus != NV_OK)
            return rmStatus;
479     }
480 
481     krcBreakpoint(GPU_GET_KERNEL_RC(pGpu));
482 
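    // Acknowledge the overflow by resetting the NON_REPLAYABLE_OVERFLOW bit in the fault status register.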
483     faultStatus = kgmmuReadMmuFaultStatus_HAL(pGpu, pKernelGmmu, GPU_GFID_PF);
484     faultStatus = FLD_SET_DRF(_PFB_PRI, _MMU_FAULT_STATUS, _NON_REPLAYABLE_OVERFLOW, _RESET,
485                               faultStatus);
486     kgmmuWriteMmuFaultStatus_HAL(pGpu, pKernelGmmu, faultStatus);
487     return rmStatus;
488 }
489 
490 /*!
491  * @brief Get the engine ID associated with the Graphics Engine
492  */
493 NvU32
494 kgmmuGetGraphicsEngineId_GV100
495 (
496     KernelGmmu *pKernelGmmu
497 )
498 {
499     return NV_PFAULT_MMU_ENG_ID_GRAPHICS;
500 }
501 
502 /*!
503  * @brief Reinit GMMU Peer PTE format to handle 47-bit peer addressing.
504  *        This is controlled by NVSWITCH discovery and will not be enabled
505  *        outside of specialized compute configurations.
506  *
 * @param[in]   pKernelGmmu  The valid KernelGmmu
508  */
509 NV_STATUS
510 kgmmuEnableNvlinkComputePeerAddressing_GV100(KernelGmmu *pKernelGmmu)
511 {
512     NvU32       v;
513 
514     //
515     // Recalculate the format structures
516     //
517     for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v)
518     {
519         if (!kgmmuFmtIsVersionSupported_HAL(pKernelGmmu, g_gmmuFmtVersions[v]))
520             continue;
521 
522         kgmmuFmtInitPeerPteFld_HAL(pKernelGmmu, &pKernelGmmu->pFmtFamilies[v]->pte,
523                                   g_gmmuFmtVersions[v]);
524     }
525 
526     return NV_OK;
527 }
528