1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 
25 /* ------------------------- System Includes -------------------------------- */
26 #include "gpu/gpu.h"
27 #include "gpu/bif/kernel_bif.h"
28 #include "platform/chipset/chipset.h"
29 #include "nvdevid.h"
30 
31 #include "published/maxwell/gm107/dev_boot.h"
32 #include "published/maxwell/gm107/dev_nv_xve.h"
33 #include "published/maxwell/gm107/dev_nv_xve1.h"
34 
35 #include "published/maxwell/gm107/dev_nv_pcfg_xve_addendum.h"
36 #include "published/maxwell/gm107/dev_nv_pcfg_xve1_addendum.h"
37 
38 // Defines for C73 chipset registers
39 #ifndef NV_XVR_VEND_XP1
40 #define NV_XVR_VEND_XP1                                  0x00000F04 /* RW-4R */
41 
42 #define NV_XVR_VEND_XP1_IGNORE_L0S                            23:23 /* RWIVF */
43 #define NV_XVR_VEND_XP1_IGNORE_L0S_INIT                  0x00000000 /* RWI-V */
44 #define NV_XVR_VEND_XP1_IGNORE_L0S__PROD                 0x00000000 /* RW--V */
45 #define NV_XVR_VEND_XP1_IGNORE_L0S_EN                    0x00000001 /* RW--V */
46 #endif
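
//
// Illustrative note (non-authoritative): the IGNORE_L0S field defined above is
// consumed further down in kbifExecC73War_GM107(), which reads the root port
// register, sets the field via the DRF macros and writes it back, e.g.:
//
//     val = FLD_SET_DRF(_XVR, _VEND_XP1, _IGNORE_L0S, _EN, val);
//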
47 
48 // XVE register map for PCIe config space
49 static const NvU32 xveRegMapValid[] = NV_PCFG_XVE_REGISTER_VALID_MAP;
50 static const NvU32 xveRegMapWrite[] = NV_PCFG_XVE_REGISTER_WR_MAP;
51 static const NvU32 xve1RegMapValid[] = NV_PCFG_XVE1_REGISTER_VALID_MAP;
52 static const NvU32 xve1RegMapWrite[] = NV_PCFG_XVE1_REGISTER_WR_MAP;
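
//
// Each map entry is a 32-bit bitmask: bit 'i' of entry 'index' marks the
// config space dword at byte offset ((index * 32) + i) * sizeof(NvU32).
// The save/restore helpers below walk these maps to decide which dwords to
// read back and which ones to write.
//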
53 
54 static NV_STATUS _kbifSavePcieConfigRegisters_GM107(OBJGPU *pGpu, KernelBif *pKernelBif, const PKBIF_XVE_REGMAP_REF pRegmapRef);
55 static NV_STATUS _kbifRestorePcieConfigRegisters_GM107(OBJGPU *pGpu, KernelBif *pKernelBif, const PKBIF_XVE_REGMAP_REF pRegmapRef);
56 
57 /* ------------------------ Public Functions -------------------------------- */
58 
59 /*!
60  * @brief Get PCIe config test registers
61  *
62  * @param[in]  pGpu        GPU object pointer
 * @param[in]  pKernelBif  BIF object pointer
 * @param[out] pciStart    Returned offset of the PCI config test register (NV_XVE_ID)
 * @param[out] pcieStart   Returned offset of the PCIe config test register (NV_XVE_VCCAP_HDR)
 */
65 void
66 kbifGetPcieConfigAccessTestRegisters_GM107
67 (
68     OBJGPU    *pGpu,
69     KernelBif *pKernelBif,
70     NvU32     *pciStart,
71     NvU32     *pcieStart
72 )
73 {
    *pciStart  = NV_XVE_ID;
    *pcieStart = NV_XVE_VCCAP_HDR;
76 }
77 
78 /*!
79  * @brief Verify PCIe config test registers
80  *
81  * @param[in]  pGpu        GPU object pointer
 * @param[in]  pKernelBif  BIF object pointer
 * @param[in]  nvXveId     Expected value of NV_XVE_ID
 * @param[in]  nvXveVccapHdr  Expected value of NV_XVE_VCCAP_HDR
 *
 * @return  NV_OK if the expected values are read back, NV_ERR_NOT_SUPPORTED otherwise
 */
86 NV_STATUS
87 kbifVerifyPcieConfigAccessTestRegisters_GM107
88 (
89     OBJGPU    *pGpu,
90     KernelBif *pKernelBif,
91     NvU32      nvXveId,
92     NvU32      nvXveVccapHdr
93 )
94 {
95     NvU32 data;
96 
97     GPU_BUS_CFG_RD32(pGpu, NV_XVE_ID, &data);
98 
99     if (FLD_TEST_DRF(_XVE, _ID, _VENDOR, _NVIDIA, data))
100     {
101         if (data != nvXveId)
102             return NV_ERR_NOT_SUPPORTED;
103 
104         GPU_BUS_CFG_RD32(pGpu, NV_XVE_VCCAP_HDR, &data);
105 
106         if (FLD_TEST_DRF(_XVE, _VCCAP_HDR, _ID, _VC, data) &&
107             FLD_TEST_DRF(_XVE, _VCCAP_HDR, _VER, _1, data))
108         {
109             if (data != nvXveVccapHdr)
110                 return NV_ERR_NOT_SUPPORTED;
111             return NV_OK;
112         }
113     }
114     return NV_ERR_NOT_SUPPORTED;
115 }
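
//
// Illustrative sketch (hypothetical caller, not the actual call site): the two
// routines above are meant to be used as a pair. The first reports which
// config space offsets to use as test registers; a caller would read and cache
// the values at those offsets, then later pass the cached values back to
// confirm that config space accesses still reach this GPU, e.g.:
//
//     NvU32 pciOffset, pcieOffset, idVal, vccapVal;
//     kbifGetPcieConfigAccessTestRegisters_GM107(pGpu, pKernelBif, &pciOffset, &pcieOffset);
//     GPU_BUS_CFG_RD32(pGpu, pciOffset,  &idVal);
//     GPU_BUS_CFG_RD32(pGpu, pcieOffset, &vccapVal);
//     ...
//     status = kbifVerifyPcieConfigAccessTestRegisters_GM107(pGpu, pKernelBif,
//                                                            idVal, vccapVal);
//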
116 
117 /*!
118  * @brief Re-arm MSI
119  *
120  * @param[in]  pGpu        GPU object pointer
121  * @param[in]  pKernelBif  Kernel BIF object pointer
122  */
123 void
124 kbifRearmMSI_GM107
125 (
126     OBJGPU    *pGpu,
127     KernelBif *pKernelBif
128 )
129 {
130     NV_STATUS status = gpuSanityCheckRegisterAccess(pGpu, 0, NULL);
131 
132     if (status != NV_OK)
133     {
134         return;
135     }
136 
    // The 32-bit value written doesn't matter; HW only looks at the offset.
138     osGpuWriteReg032(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_CYA_2, 0);
139 }
140 
141 /*!
142  * @brief Check if MSI is enabled in HW
143  *
144  * @param[in]  pGpu        GPU object pointer
145  * @param[in]  pKernelBif  BIF object pointer
146  *
 * @return  NV_TRUE if MSI is enabled, NV_FALSE otherwise
148  */
149 NvBool
150 kbifIsMSIEnabledInHW_GM107
151 (
152     OBJGPU    *pGpu,
153     KernelBif *pKernelBif
154 )
155 {
    NvU32 data32 = 0;

    if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_MSI_CTRL, &data32))
    {
        NV_PRINTF(LEVEL_ERROR, "Unable to read NV_XVE_MSI_CTRL\n");
160     }
161 
162     return FLD_TEST_DRF(_XVE, _MSI_CTRL, _MSI, _ENABLE, data32);
163 }
164 
165 /*!
166  * @brief Check if access to PCI config space is enabled
167  *
168  * @param[in]  pGpu        GPU object pointer
169  * @param[in]  pKernelBif  Kernel BIF object pointer
170  *
171  * @return  True if access to PCI config space is enabled
172  */
173 NvBool
174 kbifIsPciIoAccessEnabled_GM107
175 (
176     OBJGPU    *pGpu,
177     KernelBif *pKernelBif
178 )
179 {
180     NvU32   data = 0;
181 
182     if (NV_OK == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEV_CTRL, &data))
183     {
184         if (FLD_TEST_DRF(_XVE, _DEV_CTRL, _CMD_IO_SPACE, _ENABLED, data))
185         {
186             return NV_TRUE;
187         }
188     }
189 
190     return NV_FALSE;
191 }
192 
193 /*!
194  * @brief Check if device is a 3D controller
195  *
196  * @param[in]  pGpu        GPU object pointer
197  * @param[in]  pKernelBif  Kernel BIF object pointer
198  *
199  * @return  True if device is a 3D controller
200  */
201 NvBool
202 kbifIs3dController_GM107
203 (
204     OBJGPU    *pGpu,
205     KernelBif *pKernelBif
206 )
207 {
208     NvU32   data = 0;
209 
210     if (NV_OK == GPU_BUS_CFG_RD32(pGpu, NV_XVE_REV_ID, &data))
211     {
212         if (FLD_TEST_DRF(_XVE, _REV_ID, _CLASS_CODE, _3D, data))
213         {
214             return NV_TRUE;
215         }
216     }
217 
218     return NV_FALSE;
219 }
220 
221 /*!
222  * @brief Enable/disable no snoop for GPU
223  *
224  * @param[in]  pGpu        GPU object pointer
225  * @param[in]  pKernelBif  Kernel BIF object pointer
226  * @param[in]  bEnable     True if No snoop needs to be enabled
227  *
 * @return NV_OK if No Snoop was modified as requested
229  */
230 NV_STATUS
231 kbifEnableNoSnoop_GM107
232 (
233     OBJGPU    *pGpu,
234     KernelBif *pKernelBif,
235     NvBool     bEnable
236 )
237 {
238     NvU8  fieldVal;
239     NvU32 regVal;
240 
241     regVal = GPU_REG_RD32(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_DEVICE_CONTROL_STATUS);
242 
243     fieldVal = bEnable ? 1 : 0;
244     regVal   = FLD_SET_DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS,
245                                _ENABLE_NO_SNOOP, fieldVal, regVal);
246 
247     GPU_REG_WR32(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_DEVICE_CONTROL_STATUS, regVal);
248 
249     return NV_OK;
250 }
251 
252 /*!
253  * @brief Enables Relaxed Ordering PCI-E Capability in the PCI Config Space
254  *
255  * @param[in]  pGpu        GPU object pointer
256  * @param[in]  pKernelBif  Kernel BIF object pointer
257  */
258 void
259 kbifPcieConfigEnableRelaxedOrdering_GM107
260 (
261     OBJGPU    *pGpu,
262     KernelBif *pKernelBif
263 )
264 {
265     NvU32 xveDevCtrlStatus;
266 
    if (NV_ERR_GENERIC == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus))
268     {
269         NV_PRINTF(LEVEL_ERROR,
270                   "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n");
271         DBG_BREAKPOINT();
272     }
273     else
274     {
275         GPU_BUS_CFG_FLD_WR_DRF_DEF(pGpu, xveDevCtrlStatus, _XVE, _DEVICE_CONTROL_STATUS,
276                                    _ENABLE_RELAXED_ORDERING, _INIT);
277     }
278 }
279 
280 /*!
281  * @brief Disables Relaxed Ordering PCI-E Capability in the PCI Config Space
282  *
283  * @param[in]  pGpu        GPU object pointer
284  * @param[in]  pKernelBif  Kernel BIF object pointer
285  */
286 void
287 kbifPcieConfigDisableRelaxedOrdering_GM107
288 (
289     OBJGPU    *pGpu,
290     KernelBif *pKernelBif
291 )
292 {
293     NvU32 xveDevCtrlStatus;
294 
    if (NV_ERR_GENERIC == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus))
296     {
297         NV_PRINTF(LEVEL_ERROR,
298                   "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n");
299         DBG_BREAKPOINT();
300     }
301     else
302     {
303         xveDevCtrlStatus = FLD_SET_DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS,
304                                            _ENABLE_RELAXED_ORDERING, 0, xveDevCtrlStatus);
305         GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, xveDevCtrlStatus);
306     }
307 }
308 
309 /*!
 * Helper function for kbifSavePcieConfigRegisters_GM107()
311  *
312  * @param[in]  pGpu           GPU object pointer
313  * @param[in]  pKernelBif     Kernel Bif object pointer
314  * @param[in]  pRegmapRef     XVE Register map structure pointer
315  *
316  * @return  'NV_OK' if successful, an RM error code otherwise.
317  */
318 static NV_STATUS
319 _kbifSavePcieConfigRegisters_GM107
320 (
321     OBJGPU    *pGpu,
322     KernelBif *pKernelBif,
323     const PKBIF_XVE_REGMAP_REF pRegmapRef
324 )
325 {
326     NV_STATUS status;
327     NvU16     index;
328 
    // Read and save config space offsets based on the bit map
330     for (index = 0; index < pRegmapRef->numXveRegMapValid; index++)
331     {
332         NvU16 i, regOffset, bufOffset;
333         NvU32 mask = 1;
334 
335         for (i = 0; i < sizeof(pRegmapRef->xveRegMapValid[0]) * 8; i++)
336         {
337             mask = 1 << i;
338             NV_ASSERT((pRegmapRef->xveRegMapWrite[index] & mask) == 0 ||
339                       (pRegmapRef->xveRegMapValid[index] & mask) != 0);
340 
341             if ((pRegmapRef->xveRegMapValid[index] & mask) == 0)
342             {
343                 continue;
344             }
345 
346             bufOffset = (index * sizeof(pRegmapRef->xveRegMapValid[0]) * 8) + i;
347             regOffset = bufOffset * sizeof(pRegmapRef->bufBootConfigSpace[0]);
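            //
            // Worked example (assuming 32-bit map entries and dwords):
            // index = 1, i = 3 gives bufOffset = (1 * 32) + 3 = 35 and
            // regOffset = 35 * 4 = 140 (0x8C), i.e. the 36th config dword.
            //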
348 
349             status = PCI_FUNCTION_BUS_CFG_RD32(pGpu, pRegmapRef->nFunc,
350                                                regOffset, &pRegmapRef->bufBootConfigSpace[bufOffset]);
351             if (status != NV_OK)
352             {
353                 return status;
354             }
355         }
356     }
357 
358     pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_SECONDARY_BUS_RESET_SUPPORTED, NV_TRUE);
359 
360     return NV_OK;
361 }
362 
363 /*!
364  * Save boot time PCIe Config space
365  *
366  * @param[in]  pGpu           GPU object pointer
367  * @param[in]  pKernelBif     Kernel Bif object pointer
368  *
369  * @return  'NV_OK' if successful, an RM error code otherwise.
370  */
371 NV_STATUS
372 kbifSavePcieConfigRegisters_GM107
373 (
374     OBJGPU    *pGpu,
375     KernelBif *pKernelBif
376 )
377 {
378     NV_STATUS status;
379 
380     //
381     // Save config space if GPU is about to enter Function Level Reset
382     // OR if GPU is about to enter GC6 state
    // OR if on a non-Windows platform, FORCE_PCIE_CONFIG_SAVE is set and SBR is enabled
    // OR if on a Windows platform, SBR is enabled
385     //
386     if (!pKernelBif->bPreparingFunctionLevelReset &&
387         !IS_GPU_GC6_STATE_ENTERING(pGpu) &&
388         !((RMCFG_FEATURE_PLATFORM_WINDOWS ||
389            pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_FORCE_PCIE_CONFIG_SAVE)) &&
390           pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_SECONDARY_BUS_RESET_ENABLED)))
391     {
392         return NV_OK;
393     }
394 
    // Save PCIe config space for function 0
396     status = _kbifSavePcieConfigRegisters_GM107(pGpu, pKernelBif,
397                                                 &pKernelBif->xveRegmapRef[0]);
398     if (status != NV_OK)
399     {
400         NV_PRINTF(LEVEL_ERROR, "Saving PCIe config space failed for gpu.\n");
401         NV_ASSERT(0);
402         return status;
403     }
404 
    // No need to save/restore the Azalia config space if the GPU is in a GC6 cycle or in FLR
406     if (IS_GPU_GC6_STATE_ENTERING(pGpu) ||
407         pKernelBif->bPreparingFunctionLevelReset)
408     {
409         return NV_OK;
410     }
411 
412     // Return early if device is not multifunction (azalia is disabled or not present)
413     if (!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_DEVICE_IS_MULTIFUNCTION))
414     {
415         return NV_OK;
416     }
417 
    // Save PCIe config space for function 1
419     status = _kbifSavePcieConfigRegisters_GM107(pGpu, pKernelBif,
420                                                 &pKernelBif->xveRegmapRef[1]);
421     if (status != NV_OK)
422     {
423         NV_PRINTF(LEVEL_ERROR, "Saving PCIe config space failed for azalia.\n");
424         NV_ASSERT(0);
425     }
426 
427     return status;
428 }
429 
430 /*!
 * Helper function for kbifRestorePcieConfigRegisters_GM107()
432  *
433  * @param[in]  pGpu           GPU object pointer
434  * @param[in]  pKernelBif     Kernel Bif object pointer
435  * @param[in]  pRegmapRef     XVE Register map structure pointer
436  *
437  * @return  'NV_OK' if successful, an RM error code otherwise.
438  */
439 static NV_STATUS
440 _kbifRestorePcieConfigRegisters_GM107
441 (
442     OBJGPU    *pGpu,
443     KernelBif *pKernelBif,
444     const PKBIF_XVE_REGMAP_REF pRegmapRef
445 )
446 {
447     NvU32      domain = gpuGetDomain(pGpu);
448     NvU8       bus    = gpuGetBus(pGpu);
449     NvU8       device = gpuGetDevice(pGpu);
450     NvU16      vendorId;
451     NvU16      deviceId;
452     NvU32      val;
453     NV_STATUS  status;
454     void      *handle;
455     NvU16      index;
456     RMTIMEOUT  timeout;
457     NvBool     bGcxPmuCfgRestore;
458 
459     bGcxPmuCfgRestore = pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_GCX_PMU_CFG_SPACE_RESTORE);
460 
461     handle = osPciInitHandle(domain, bus, device, pRegmapRef->nFunc,
462                              &vendorId, &deviceId);
463     NV_ASSERT_OR_RETURN(handle, NV_ERR_INVALID_POINTER);
464 
465     if (IS_GPU_GC6_STATE_EXITING(pGpu) &&
466         bGcxPmuCfgRestore)
467     {
        //
        // The PMU will restore the config space. As its last step, the PMU
        // sets CMD_MEMORY_SPACE to ENABLED after the config space has been
        // restored. Poll this register to see whether the PMU has finished,
        // and time out otherwise.
        //
473         gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
474         do
475         {
476             val = osPciReadDword(handle, NV_XVE_DEV_CTRL);
477             status = gpuCheckTimeout(pGpu, &timeout);
478             if (status == NV_ERR_TIMEOUT)
479             {
480                 NV_PRINTF(LEVEL_ERROR,
481                           "Timeout waiting for PCIE Config Space Restore from PMU, RM takes over\n");
482                 DBG_BREAKPOINT();
483 
484                 NvU32 *pReg = NULL;
485                 pReg  = &pRegmapRef->bufBootConfigSpace[NV_XVE_DEV_CTRL /
486                                                 sizeof(pRegmapRef->bufBootConfigSpace[0])];
487                 *pReg = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_MEMORY_SPACE, _ENABLED, *pReg);
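
                //
                // Fallback restore: only NV_XVE_DEV_CTRL (dword 1 of the
                // saved config space) and BAR0 (dword 4) are written here so
                // that register accesses work again; this branch then returns
                // without walking the full restore loop below.
                //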
488                 osPciWriteDword(handle, NV_XVE_DEV_CTRL, pRegmapRef->bufBootConfigSpace[1]);
489                 osPciWriteDword(handle, NV_XVE_BAR0, pRegmapRef->bufBootConfigSpace[4]);
490 
491                 break;
492             }
493         } while (FLD_TEST_DRF(_XVE, _DEV_CTRL, _CMD_MEMORY_SPACE, _DISABLED, val));
494 
495         return NV_OK;
496     }
497 
498     // Enable BAR0 accesses so we can restore config space more quickly.
499     kbifRestoreBar0_HAL(pGpu, pKernelBif, handle, pRegmapRef->bufBootConfigSpace);
500 
501     // Enable required fields of NV_XVE_DEV_CTRL
502     val = osPciReadDword(handle, NV_XVE_DEV_CTRL);
503     val = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_MEMORY_SPACE, _ENABLED, val) |
504           FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_BUS_MASTER, _ENABLED, val);
505     osPciWriteDword(handle, NV_XVE_DEV_CTRL, val);
506 
    // Restore only the writable config space offsets based on the bit map
508     for (index = 0; index < pRegmapRef->numXveRegMapValid; index++)
509     {
510         NvU16 i, regOffset, bufOffset;
511 
512         for (i = 0; i < sizeof(pRegmapRef->xveRegMapValid[0]) * 8; i++)
513         {
514             if ((pRegmapRef->xveRegMapWrite[index] & (1 << i)) == 0)
515             {
516                 continue;
517             }
518 
519             bufOffset = (index * sizeof(pRegmapRef->xveRegMapValid[0]) * 8) + i;
520             regOffset = bufOffset * sizeof(pRegmapRef->bufBootConfigSpace[0]);
521             if (regOffset == NV_XVE_DEV_CTRL)
522             {
523                 continue;
524             }
525 
526             //
527             // This is a special case where we don't use the standard macro to write a register.
528             // The macro will not allow access when PDB_PROP_GPU_IS_LOST is true.
529             // This check is required to keep other accesses from touching the GPU for now.
530             //
531             osGpuWriteReg032(pGpu,
532                              ((pRegmapRef->nFunc == 0) ? DEVICE_BASE(NV_PCFG) : DEVICE_BASE(NV_PCFG1)) + regOffset,
533                              pRegmapRef->bufBootConfigSpace[bufOffset]);
534 
535             if (pRegmapRef->nFunc != 0)
536             {
537                 status = PCI_FUNCTION_BUS_CFG_WR32(pGpu, pRegmapRef->nFunc, regOffset,
538                                                    pRegmapRef->bufBootConfigSpace[bufOffset]);
539                 if (status != NV_OK)
540                 {
541                     return status;
542                 }
543             }
544         }
545     }
546 
    //
    // Restore the saved value of NV_XVE_DEV_CTRL, the second register saved
    // in the buffer. Reaching this point means we are on the RM-CPU
    // restoration path. If the PMU_CFG_SPACE_RESTORE property is enabled,
    // this is a debug parallel restoration, so set CMD_MEMORY_SPACE back to
    // _ENABLED before restoring.
    //
554     if (bGcxPmuCfgRestore)
555     {
556         NvU32 *pReg = NULL;
557         pReg  = &pRegmapRef->bufBootConfigSpace[NV_XVE_DEV_CTRL /
558                                                 sizeof(pRegmapRef->bufBootConfigSpace[0])];
559         *pReg = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_MEMORY_SPACE, _ENABLED, *pReg);
560 
561     }
562 
563     osPciWriteDword(handle, NV_XVE_DEV_CTRL, pRegmapRef->bufBootConfigSpace[1]);
564 
565     return NV_OK;
566 }
567 
568 /*!
569  * Restore boot time PCIe Config space
570  *
571  * @param[in]  pGpu        GPU object pointer
572  * @param[in]  pKernelBif  Kernel Bif object pointer
573  *
574  * @return  'NV_OK' if successful, an RM error code otherwise.
575  */
576 NV_STATUS
577 kbifRestorePcieConfigRegisters_GM107
578 (
579     OBJGPU    *pGpu,
580     KernelBif *pKernelBif
581 )
582 {
583     NV_STATUS status;
584     RMTIMEOUT timeout;
585     NvU64     timeStampStart;
586     NvU64     timeStampEnd;
587 
588     // Restore pcie config space for function 0
589     status = _kbifRestorePcieConfigRegisters_GM107(pGpu, pKernelBif,
590                                                    &pKernelBif->xveRegmapRef[0]);
591     if (status != NV_OK)
592     {
593         NV_PRINTF(LEVEL_ERROR, "Restoring PCIe config space failed for gpu.\n");
594         NV_ASSERT(0);
595         return status;
596     }
597 
    // No need to save/restore the Azalia config space if the GPU is in a GC6 cycle or in FLR
599     if (IS_GPU_GC6_STATE_EXITING(pGpu) ||
600         pKernelBif->bInFunctionLevelReset)
601     {
        //
        // Check that the GPU is really accessible.
        // Skip on pre-silicon because there can be timing issues in the test
        // between device ready and this code.
        // TODO: Find a safe timeout for pre-silicon runs
        //
607         if (IS_SILICON(pGpu))
608         {
            // Check if GPU is actually accessible before continuing
610             osGetPerformanceCounter(&timeStampStart);
611             gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
612             NvU32 pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0);
613 
614             while (pmcBoot0 != pGpu->chipId0)
615             {
616                 NV_PRINTF(LEVEL_INFO,
617                           "GPU not back on the bus after %s, 0x%x != 0x%x!\n",
618                           pKernelBif->bInFunctionLevelReset?"FLR":"GC6 exit", pmcBoot0, pGpu->chipId0);
619                 pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0);
620                 NV_ASSERT(0);
621                 status = gpuCheckTimeout(pGpu, &timeout);
622                 if (status == NV_ERR_TIMEOUT)
623                 {
624                     NV_PRINTF(LEVEL_ERROR,
                              "Timeout: GPU not back on the bus after %s\n", pKernelBif->bInFunctionLevelReset?"FLR":"GC6 exit");
626                     DBG_BREAKPOINT();
627                     return status;
628                 }
629             }
630 
631             osGetPerformanceCounter(&timeStampEnd);
632             NV_PRINTF(LEVEL_ERROR,
                      "Time spent waiting for GPU to come back on the bus: 0x%x ns\n",
634                       (NvU32)NV_MIN(NV_U32_MAX, timeStampEnd - timeStampStart));
635         }
636 
637         return NV_OK;
638     }
639 
640     // Return early if device is not multifunction (azalia is disabled or not present)
641     if (!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_DEVICE_IS_MULTIFUNCTION))
642     {
643         return NV_OK;
644     }
645 
646     // Restore pcie config space for function 1
647     status = _kbifRestorePcieConfigRegisters_GM107(pGpu, pKernelBif,
648                                                    &pKernelBif->xveRegmapRef[1]);
649     if (status != NV_OK)
650     {
651         NV_PRINTF(LEVEL_ERROR, "Restoring PCIe config space failed for azalia.\n");
652         NV_ASSERT(0);
653     }
654 
655     return status;
656 }
657 
658 /*!
659  * @brief Get XVE status bits
660  *
661  * @param[in]   pGpu        GPU object pointer
662  * @param[in]   pKernelBif  BIF object pointer
663  * @param[out]  pBits       PCIe error status values
664  * @param[out]  pStatus     Full XVE status
665  *
666  * @return  NV_OK
667  */
668 NV_STATUS
669 kbifGetXveStatusBits_GM107
670 (
671     OBJGPU    *pGpu,
672     KernelBif *pKernelBif,
673     NvU32     *pBits,
674     NvU32     *pStatus
675 )
676 {
677     // control/status reg
678     NvU32 xveDevCtrlStatus;
679 
    if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus))
    {
        NV_PRINTF(LEVEL_ERROR,
                  "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n");
    }

    if (pBits == NULL)
        return NV_ERR_GENERIC;
687 
688     *pBits = 0;
689 
690     // The register read above returns garbage on fmodel, so just return.
691     if (IS_FMODEL(pGpu))
692     {
693         if (pStatus)
694         {
695             *pStatus = 0;
696         }
697         return NV_OK;
698     }
699 
700     if (pStatus)
701         *pStatus = xveDevCtrlStatus;
702 
703     if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _CORR_ERROR_DETECTED, 1))
704         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR;
705     if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _NON_FATAL_ERROR_DETECTED, 1))
706         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR;
707     if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _FATAL_ERROR_DETECTED, 1))
708         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR;
709     if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _UNSUPP_REQUEST_DETECTED, 1))
710         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST;
711 
712     if (pKernelBif->EnteredRecoverySinceErrorsLastChecked)
713     {
714         pKernelBif->EnteredRecoverySinceErrorsLastChecked = NV_FALSE;
715         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_ENTERED_RECOVERY;
716     }
717 
718     return NV_OK;
719 }
720 
721 /*!
722  * @brief Clear the XVE status bits
723  *
724  * @param[in]   pGpu        GPU object pointer
725  * @param[in]   pKernelBif  BIF object pointer
726  * @param[out]  pStatus     Full XVE status
727  *
728  * @return  NV_OK
729  */
730 NV_STATUS
731 kbifClearXveStatus_GM107
732 (
733     OBJGPU    *pGpu,
734     KernelBif *pKernelBif,
735     NvU32     *pStatus
736 )
737 {
738     NvU32 xveDevCtrlStatus;
739 
740     if (pStatus)
741     {
742         xveDevCtrlStatus = *pStatus;
743         if (xveDevCtrlStatus == 0)
744         {
745             return NV_OK;
746         }
747     }
748     else
749     {
        if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus))
751         {
752             NV_PRINTF(LEVEL_ERROR,
753                       "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n");
754         }
755     }
756 
757     GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, xveDevCtrlStatus);
758 
759     return NV_OK;
760 }
761 
762 /*!
763  * @brief Get XVE AER bits
764  *
765  * @param[in]   pGpu        GPU object pointer
766  * @param[in]   pKernelBif  BIF object pointer
767  * @param[out]  pBits       PCIe AER error status values
768  *
769  * @return  NV_OK
770  */
771 NV_STATUS
772 kbifGetXveAerBits_GM107
773 (
774     OBJGPU    *pGpu,
775     KernelBif *pKernelBif,
776     NvU32     *pBits
777 )
778 {
779     NvU32 xveAerUncorr;
780     NvU32 xveAerCorr;
781 
782     if (pBits == NULL)
783     {
784         return NV_ERR_GENERIC;
785     }
786 
787     *pBits = 0;
788 
789     if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_AER_UNCORR_ERR, &xveAerUncorr))
790     {
791         NV_PRINTF(LEVEL_ERROR, "Unable to read NV_XVE_AER_UNCORR_ERR\n");
792         return NV_ERR_GENERIC;
793     }
794     if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_AER_CORR_ERR, &xveAerCorr))
795     {
796         NV_PRINTF(LEVEL_ERROR, "Unable to read NV_XVE_AER_CORR_ERR\n");
797         return NV_ERR_GENERIC;
798     }
799 
800     // The register read above returns garbage on fmodel, so just return.
801     if (IS_FMODEL(pGpu))
802     {
803         return NV_OK;
804     }
805 
806     if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _DLINK_PROTO_ERR, _ACTIVE, xveAerUncorr))
807         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR;
808     if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _POISONED_TLP, _ACTIVE, xveAerUncorr))
809         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP;
810     if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _CPL_TIMEOUT, _ACTIVE, xveAerUncorr))
811         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT;
812     if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _UNEXP_CPL, _ACTIVE, xveAerUncorr))
813         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL;
814     if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _MALFORMED_TLP, _ACTIVE, xveAerUncorr))
815         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP;
816     if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _UNSUPPORTED_REQ, _ACTIVE, xveAerUncorr))
817         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ;
818 
819     if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RCV_ERR, _ACTIVE, xveAerCorr))
820         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR;
821     if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _BAD_TLP, _ACTIVE, xveAerCorr))
822         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP;
823     if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _BAD_DLLP , _ACTIVE, xveAerCorr))
824         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP;
825     if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RPLY_ROLLOVER, _ACTIVE, xveAerCorr))
826         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER;
827     if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RPLY_TIMEOUT, _ACTIVE, xveAerCorr))
828         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT;
829     if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _ADVISORY_NONFATAL, _ACTIVE, xveAerCorr))
830         *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL;
831 
832     return NV_OK;
833 }
834 
835 /*!
836  * @brief Clear the XVE AER bits
837  *
838  * @param[in]  pGpu        GPU object pointer
839  * @param[in]  pKernelBif  BIF object pointer
840  * @param[in]  bits        PCIe AER error status values
841  *
842  * @return  NV_OK
843  */
844 NV_STATUS
845 kbifClearXveAer_GM107
846 (
847     OBJGPU    *pGpu,
848     KernelBif *pKernelBif,
849     NvU32      bits
850 )
851 {
852     NvU32 xveAerUncorr = 0;
853     NvU32 xveAerCorr   = 0;
854 
855     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR)
856         xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _DLINK_PROTO_ERR, _CLEAR, xveAerUncorr);
857     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP)
858         xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _POISONED_TLP, _CLEAR, xveAerUncorr);
859     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT)
860         xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _CPL_TIMEOUT, _CLEAR, xveAerUncorr);
861     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL)
862         xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _UNEXP_CPL, _CLEAR, xveAerUncorr);
863     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP)
864         xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _MALFORMED_TLP, _CLEAR, xveAerUncorr);
865     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ)
866         xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _UNSUPPORTED_REQ, _CLEAR, xveAerUncorr);
867 
868     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR)
869         xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RCV_ERR, _CLEAR, xveAerCorr);
870     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP)
871         xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _BAD_TLP, _CLEAR, xveAerCorr);
872     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP)
873         xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _BAD_DLLP, _CLEAR, xveAerCorr);
874     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER)
875         xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RPLY_ROLLOVER, _CLEAR, xveAerCorr);
876     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT)
877         xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RPLY_TIMEOUT, _CLEAR, xveAerCorr);
878     if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL)
879         xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _ADVISORY_NONFATAL, _CLEAR, xveAerCorr);
880 
881     if (xveAerUncorr != 0)
882     {
883         GPU_BUS_CFG_WR32(pGpu, NV_XVE_AER_UNCORR_ERR, xveAerUncorr);
884     }
885     if (xveAerCorr != 0)
886     {
887         GPU_BUS_CFG_WR32(pGpu, NV_XVE_AER_CORR_ERR, xveAerCorr);
888     }
889 
890     return NV_OK;
891 }
892 
893 /*!
894  * @brief Returns the BAR0 offset and size of the PCI config space mirror
895  *
 * @param[in]   pGpu        GPU object pointer
 * @param[in]   pKernelBif  Kernel BIF object pointer
 * @param[out]  pBase       BAR0 offset of the PCI config space mirror
 * @param[out]  pSize       Size in bytes of the PCI config space mirror
900  *
901  * @returns NV_OK
902  */
903 NV_STATUS
904 kbifGetPciConfigSpacePriMirror_GM107
905 (
906     OBJGPU    *pGpu,
907     KernelBif *pKernelBif,
908     NvU32     *pBase,
909     NvU32     *pSize
910 )
911 {
912     *pBase = DEVICE_BASE(NV_PCFG);
913     *pSize = DEVICE_EXTENT(NV_PCFG) - DEVICE_BASE(NV_PCFG) + 1;
914     return NV_OK;
915 }
916 
917 /*!
918  * @brief C73 chipset WAR
919  *
920  * @param[in]  pGpu        GPU object pointer
921  * @param[in]  pKernelBif  Kernel BIF object pointer
922  */
923 void
924 kbifExecC73War_GM107
925 (
926     OBJGPU    *pGpu,
927     KernelBif *pKernelBif
928 )
929 {
930     OBJSYS  *pSys = SYS_GET_INSTANCE();
931     OBJOS   *pOS  = SYS_GET_OS(pSys);
932     OBJCL   *pCl  = SYS_GET_CL(pSys);
933     NvU32    val;
934 
935     if (CS_NVIDIA_C73 == pCl->Chipset)
936     {
        //
        // Turn off L0s on the chipset; this is required for the suspend/resume
        // cycles on Vista. See bug 400044 for more details.
        //
941 
        // vAddr is a CPU virtual address mapped into the root port's config space.
943         if (!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS) &&
944             (pGpu->gpuClData.rootPort.vAddr != 0))
945         {
946             val = MEM_RD32((NvU8*)pGpu->gpuClData.rootPort.vAddr+NV_XVR_VEND_XP1);
947             val = FLD_SET_DRF(_XVR, _VEND_XP1, _IGNORE_L0S, _EN, val);
948             MEM_WR32((NvU8*)pGpu->gpuClData.rootPort.vAddr+NV_XVR_VEND_XP1, val);
949         }
950         else if (pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS) &&
951                  pGpu->gpuClData.rootPort.addr.valid)
952         {
953             val = osPciReadDword(pGpu->gpuClData.rootPort.addr.handle, NV_XVR_VEND_XP1);
954             val = FLD_SET_DRF(_XVR, _VEND_XP1, _IGNORE_L0S, _EN, val);
955             osPciWriteDword(pGpu->gpuClData.rootPort.addr.handle, NV_XVR_VEND_XP1, val);
956         }
957         else
958         {
959             NV_PRINTF(LEVEL_ERROR,
960                       "Cannot turn off L0s on C73 chipset, suspend/resume may fail (Bug 400044).\n");
961             DBG_BREAKPOINT();
962         }
963     }
964 }
965 
966 NV_STATUS
967 kbifGetBusOptionsAddr_GM107
968 (
969     OBJGPU     *pGpu,
970     KernelBif  *pKernelBif,
971     BUS_OPTIONS options,
972     NvU32      *addrReg
973 )
974 {
975     NV_STATUS status = NV_OK;
976 
977     switch (options)
978     {
979         case BUS_OPTIONS_DEV_CONTROL_STATUS:
980             *addrReg = NV_XVE_DEVICE_CONTROL_STATUS;
981             break;
982         case BUS_OPTIONS_DEV_CONTROL_STATUS_2:
983             *addrReg = NV_XVE_DEVICE_CONTROL_STATUS_2;
984             break;
985         case BUS_OPTIONS_LINK_CONTROL_STATUS:
986             *addrReg = NV_XVE_LINK_CONTROL_STATUS;
987             break;
988         case BUS_OPTIONS_LINK_CAPABILITIES:
989             *addrReg = NV_XVE_LINK_CAPABILITIES;
990             break;
991         case BUS_OPTIONS_L1_PM_SUBSTATES_CTRL_1:
992             *addrReg = NV_XVE_L1_PM_SUBSTATES_CTRL1;
993             break;
994         default:
995             NV_PRINTF(LEVEL_ERROR, "Invalid register type passed 0x%x\n",
996                       options);
997             status = NV_ERR_GENERIC;
998             break;
999     }
1000     return status;
1001 }
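
//
// Illustrative sketch (hypothetical caller): the routine above translates a
// BUS_OPTIONS enum into the per-chip config register offset, which can then be
// accessed with the usual config space helpers, e.g.:
//
//     NvU32 addr, val;
//     if (kbifGetBusOptionsAddr_GM107(pGpu, pKernelBif,
//                                     BUS_OPTIONS_LINK_CONTROL_STATUS,
//                                     &addr) == NV_OK)
//     {
//         GPU_BUS_CFG_RD32(pGpu, addr, &val);
//     }
//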
1002 
1003 NV_STATUS
1004 kbifDisableSysmemAccess_GM107
1005 (
1006     OBJGPU     *pGpu,
1007     KernelBif  *pKernelBif,
1008     NvBool      bDisable
1009 )
1010 {
1011     NV_STATUS status = NV_OK;
1012     RM_API   *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
1013     NV2080_CTRL_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS_PARAMS params = {0};
1014 
    // Only supported on Windows
1016     NV_ASSERT_OR_RETURN(RMCFG_FEATURE_PLATFORM_WINDOWS, NV_ERR_NOT_SUPPORTED);
1017 
1018     params.bDisable = bDisable;
1019     status = pRmApi->Control(pRmApi,
1020                              pGpu->hInternalClient,
1021                              pGpu->hInternalSubdevice,
1022                              NV2080_CTRL_CMD_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS,
1023                              &params,
1024                              sizeof(NV2080_CTRL_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS_PARAMS));
1025 
1026     // Only set the PDB in kernel if it was set in physical successfully
1027     if (status == NV_OK)
1028     {
1029         pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_SYSTEM_ACCESS_DISABLED, bDisable);
1030     }
1031 
1032     return status;
1033 }
1034 
1035 /*!
 * This function sets up the XVE register map pointers
1037  *
1038  * @param[in]  pGpu           GPU object pointer
1039  * @param[in]  pKernelBif     Pointer to KernelBif object
1040  * @param[in]  func           PCIe function number
1041  *
1042  * @return  'NV_OK' if successful, an RM error code otherwise.
1043  */
1044 NV_STATUS
1045 kbifInitXveRegMap_GM107
1046 (
1047     OBJGPU    *pGpu,
1048     KernelBif *pKernelBif,
1049     NvU8       func
1050 )
1051 {
1052     if (func == 0)
1053     {
1054         pKernelBif->xveRegmapRef[0].nFunc              = 0;
1055         pKernelBif->xveRegmapRef[0].xveRegMapValid     = xveRegMapValid;
1056         pKernelBif->xveRegmapRef[0].xveRegMapWrite     = xveRegMapWrite;
1057         pKernelBif->xveRegmapRef[0].numXveRegMapValid  = sizeof(xveRegMapValid)/sizeof(xveRegMapValid[0]);
1058         pKernelBif->xveRegmapRef[0].numXveRegMapWrite  = sizeof(xveRegMapWrite)/sizeof(xveRegMapWrite[0]);
1059         pKernelBif->xveRegmapRef[0].bufBootConfigSpace = pKernelBif->cacheData.gpuBootConfigSpace;
1060         // No MSIX for this GPU
1061         pKernelBif->xveRegmapRef[0].bufMsixTable       = NULL;
1062     }
1063     else if (func == 1)
1064     {
1065         pKernelBif->xveRegmapRef[1].nFunc              = 1;
1066         pKernelBif->xveRegmapRef[1].xveRegMapValid     = xve1RegMapValid;
1067         pKernelBif->xveRegmapRef[1].xveRegMapWrite     = xve1RegMapWrite;
1068         pKernelBif->xveRegmapRef[1].numXveRegMapValid  = sizeof(xve1RegMapValid)/sizeof(xve1RegMapValid[0]);
1069         pKernelBif->xveRegmapRef[1].numXveRegMapWrite  = sizeof(xve1RegMapWrite)/sizeof(xve1RegMapWrite[0]);
1070         pKernelBif->xveRegmapRef[1].bufBootConfigSpace = pKernelBif->cacheData.azaliaBootConfigSpace;
1071         // No MSIX for this func
1072         pKernelBif->xveRegmapRef[1].bufMsixTable       = NULL;
1073     }
1074     else
1075     {
1076         NV_PRINTF(LEVEL_ERROR, "Invalid argument, func: %d.\n", func);
1077         NV_ASSERT(0);
1078         return NV_ERR_INVALID_ARGUMENT;
1079     }
1080 
1081     return NV_OK;
1082 }
1083 
1084 /*!
 * @brief Clear or set the Bus Master Enable bit in the command register,
 *  stopping (bStop == NV_TRUE) or re-enabling Function 0 from issuing new
 *  requests to sysmem.
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBif  KernelBif object pointer
 * @param[in] bStop       NV_TRUE to stop sysmem requests, NV_FALSE to re-enable them
 *
1091  * @return NV_OK
1092  */
1093 NV_STATUS
1094 kbifStopSysMemRequests_GM107
1095 (
1096     OBJGPU    *pGpu,
1097     KernelBif *pKernelBif,
1098     NvBool     bStop
1099 )
1100 {
1101     NvU32 regVal;
1102 
1103     NV_ASSERT_OK_OR_RETURN(GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEV_CTRL, &regVal));
1104 
1105     if (bStop)
1106     {
1107         regVal = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_BUS_MASTER, _DISABLED, regVal);
1108     }
1109     else
1110     {
1111         regVal = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_BUS_MASTER, _ENABLED, regVal);
1112     }
1113 
1114     NV_ASSERT_OK_OR_RETURN(GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEV_CTRL, regVal));
1115 
1116     return NV_OK;
1117 }
1118 
1119 
1120 /*
1121  * @brief Restore the BAR0 register from the given config space buffer
1122  * BAR0 register restore has to use the config cycle write.
1123  *
1124  * @param[in] pGpu              GPU object pointer
1125  * @param[in] pKernelBif        Pointer to KernelBif object
1126  * @param[in] handle            PCI handle for GPU
1127  * @param[in] bufConfigSpace    Stored config space
1128  */
1129 void
1130 kbifRestoreBar0_GM107
1131 (
1132     OBJGPU    *pGpu,
1133     KernelBif *pKernelBif,
1134     void      *handle,
1135     NvU32     *bufConfigSpace
1136 )
1137 {
    //
    // Not much ROI in storing BAR offsets for legacy chips, since their
    // BAR offsets are never going to change.
    //
1142     osPciWriteDword(handle, NV_XVE_BAR0,
1143                     bufConfigSpace[NV_XVE_BAR0/sizeof(NvU32)]);
1144 }
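
//
// Context note (assumption): BAR0 itself has to be restored through PCI config
// cycles because the BAR0-mapped NV_PCFG mirror used elsewhere in this file is
// only reachable once BAR0 has been programmed again.
//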
1145 
1146 
1147 /*!
1148  * @brief Check if any of the BAR register reads returns a valid value.
1149  *
1150  * @param[in] pGpu          GPU object pointer
1151  * @param[in] pKernelBif    KernelBif object pointer
1152  *
1153  * @returns   NV_TRUE if any BAR register read returns a valid value
 *            NV_FALSE if all the BAR registers return invalid values
1155  */
1156 NvBool
1157 kbifAnyBarsAreValid_GM107
1158 (
1159     OBJGPU    *pGpu,
1160     KernelBif *pKernelBif
1161 )
1162 {
1163     NvU32 domain = gpuGetDomain(pGpu);
1164     NvU8 bus = gpuGetBus(pGpu);
1165     NvU8 device = gpuGetDevice(pGpu);
1166     NvU16 vendorId, deviceId;
1167     void *handle;
1168 
1169     handle = osPciInitHandle(domain, bus, device, 0, &vendorId, &deviceId);
1170 
1171     if (osPciReadDword(handle, NV_XVE_BAR0) == pKernelBif->cacheData.gpuBootConfigSpace[4])
1172     {
1173         // BAR0 is valid
1174         return NV_TRUE;
1175     }
1176 
1177     if ((osPciReadDword(handle, NV_XVE_BAR1_LO) == pKernelBif->cacheData.gpuBootConfigSpace[5]) &&
1178         (osPciReadDword(handle, NV_XVE_BAR1_HI) == pKernelBif->cacheData.gpuBootConfigSpace[6]))
1179     {
1180         // BAR1 is valid
1181         return NV_TRUE;
1182     }
1183 
1184     return NV_FALSE;
1185 }
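
//
// Context note: indices 4, 5 and 6 of gpuBootConfigSpace used above correspond
// to the config header dwords at byte offsets 0x10 (BAR0), 0x14 (BAR1_LO) and
// 0x18 (BAR1_HI), matching the offset / sizeof(NvU32) indexing used by the
// save/restore helpers in this file.
//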
1186 
1187 /*!
1188  * @brief Try restoring BAR registers and command register using config cycles
1189  *
1190  * @param[in] pGpu          GPU object pointer
1191  * @param[in] pKernelBif    KernelBif object pointer
1192  *
1193  * @returns    NV_OK on success
1194  *             NV_ERR_INVALID_READ if the register read returns unexpected value
1195  */
1196 NV_STATUS
1197 kbifRestoreBarsAndCommand_GM107
1198 (
1199     OBJGPU    *pGpu,
1200     KernelBif *pKernelBif
1201 )
1202 {
1203     NvU32 domain = gpuGetDomain(pGpu);
1204     NvU8 bus = gpuGetBus(pGpu);
1205     NvU8 device = gpuGetDevice(pGpu);
1206     NvU16 vendorId, deviceId;
1207     void *handle;
1208 
1209     handle = osPciInitHandle(domain, bus, device, 0, &vendorId, &deviceId);
1210 
1211     osPciWriteDword(handle, NV_XVE_BAR0, pKernelBif->cacheData.gpuBootConfigSpace[4]);
1212     osPciWriteDword(handle, NV_XVE_BAR1_LO, pKernelBif->cacheData.gpuBootConfigSpace[5]);
1213     osPciWriteDword(handle, NV_XVE_BAR1_HI, pKernelBif->cacheData.gpuBootConfigSpace[6]);
1214     osPciWriteDword(handle, NV_XVE_BAR2_LO, pKernelBif->cacheData.gpuBootConfigSpace[7]);
1215     osPciWriteDword(handle, NV_XVE_BAR2_HI, pKernelBif->cacheData.gpuBootConfigSpace[8]);
1216     osPciWriteDword(handle, NV_XVE_BAR3, pKernelBif->cacheData.gpuBootConfigSpace[9]);
1217     osPciWriteDword(handle, NV_XVE_DEV_CTRL, pKernelBif->cacheData.gpuBootConfigSpace[1]);
1218 
1219     if (GPU_REG_RD32(pGpu, NV_PMC_BOOT_0) != pGpu->chipId0)
1220     {
1221         return NV_ERR_INVALID_READ;
1222     }
1223 
1224     return NV_OK;
1225 }
1226 
1227 /*!
1228  * @brief HAL specific BIF software state initialization
1229  *
1230  * @param[in] pGpu       GPU object pointer
1231  * @param[in] pKernelBif KernelBif object pointer
1232  *
1233  * @return    NV_OK on success
1234  */
1235 NV_STATUS
1236 kbifInit_GM107
1237 (
1238     OBJGPU    *pGpu,
1239     KernelBif *pKernelBif
1240 )
1241 {
1242     // Cache the offsets of BAR registers into an array for subsequent use
1243     kbifStoreBarRegOffsets_HAL(pGpu, pKernelBif, NV_XVE_BAR0);
1244 
1245     return NV_OK;
1246 }
1247 
1248 /*!
1249  * @brief Destructor
1250  *
1251  * @param[in] pKernelBif
1252  *
1253  * @returns void
1254  */
1255 void
1256 kbifDestruct_GM107
1257 (
1258     KernelBif *pKernelBif
1259 )
1260 {
1261     portMemFree(pKernelBif->xveRegmapRef[0].bufMsixTable);
1262     pKernelBif->xveRegmapRef[0].bufMsixTable = NULL;
1263 }