1 /*
2 * SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 * SPDX-License-Identifier: MIT
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24
25 /* ------------------------- System Includes -------------------------------- */
26 #include "gpu/gpu.h"
27 #include "gpu/bif/kernel_bif.h"
28 #include "gpu/bus/kern_bus.h"
29 #include "platform/chipset/chipset.h"
30 #include "nvdevid.h"
31 #include <rmapi/nv_gpu_ops.h>
32
33 #include "published/maxwell/gm107/dev_boot.h"
34 #include "published/maxwell/gm107/dev_nv_xve.h"
35 #include "published/maxwell/gm107/dev_nv_xve1.h"
36
37 #include "published/maxwell/gm107/dev_nv_pcfg_xve_addendum.h"
38 #include "published/maxwell/gm107/dev_nv_pcfg_xve1_addendum.h"
39
40 // Defines for C73 chipset registers
41 #ifndef NV_XVR_VEND_XP1
42 #define NV_XVR_VEND_XP1 0x00000F04 /* RW-4R */
43
44 #define NV_XVR_VEND_XP1_IGNORE_L0S 23:23 /* RWIVF */
45 #define NV_XVR_VEND_XP1_IGNORE_L0S_INIT 0x00000000 /* RWI-V */
46 #define NV_XVR_VEND_XP1_IGNORE_L0S__PROD 0x00000000 /* RW--V */
47 #define NV_XVR_VEND_XP1_IGNORE_L0S_EN 0x00000001 /* RW--V */
48 #endif
49
50 // Factor by which vGPU migration API bandwidth should be derated
51 #define VGPU_MIGRATION_API_DERATE_FACTOR 5
52
53 // XVE register map for PCIe config space
54 static const NvU32 xveRegMapValid[] = NV_PCFG_XVE_REGISTER_VALID_MAP;
55 static const NvU32 xveRegMapWrite[] = NV_PCFG_XVE_REGISTER_WR_MAP;
56 static const NvU32 xve1RegMapValid[] = NV_PCFG_XVE1_REGISTER_VALID_MAP;
57 static const NvU32 xve1RegMapWrite[] = NV_PCFG_XVE1_REGISTER_WR_MAP;
58
59 static NV_STATUS _kbifSavePcieConfigRegisters_GM107(OBJGPU *pGpu, KernelBif *pKernelBif, const PKBIF_XVE_REGMAP_REF pRegmapRef);
60 static NV_STATUS _kbifRestorePcieConfigRegisters_GM107(OBJGPU *pGpu, KernelBif *pKernelBif, const PKBIF_XVE_REGMAP_REF pRegmapRef);
61
62 /* ------------------------ Public Functions -------------------------------- */
63
64 /*!
65 * @brief Get PCIe config test registers
66 *
67 * @param[in] pGpu GPU object pointer
68 * @param[in] pKernelBif BIF object pointer
69 */
70 void
kbifGetPcieConfigAccessTestRegisters_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,NvU32 * pciStart,NvU32 * pcieStart)71 kbifGetPcieConfigAccessTestRegisters_GM107
72 (
73 OBJGPU *pGpu,
74 KernelBif *pKernelBif,
75 NvU32 *pciStart,
76 NvU32 *pcieStart
77 )
78 {
79 *pciStart = NV_XVE_ID;
80 *pcieStart = NV_XVE_VCCAP_HDR;
81 }
82
83 /*!
84 * @brief Verify PCIe config test registers
85 *
86 * @param[in] pGpu GPU object pointer
87 * @param[in] pKernelBif BIF object pointer
88 *
89 * @return NV_OK
90 */
91 NV_STATUS
kbifVerifyPcieConfigAccessTestRegisters_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,NvU32 nvXveId,NvU32 nvXveVccapHdr)92 kbifVerifyPcieConfigAccessTestRegisters_GM107
93 (
94 OBJGPU *pGpu,
95 KernelBif *pKernelBif,
96 NvU32 nvXveId,
97 NvU32 nvXveVccapHdr
98 )
99 {
100 NvU32 data;
101
102 GPU_BUS_CFG_RD32(pGpu, NV_XVE_ID, &data);
103
104 if (FLD_TEST_DRF(_XVE, _ID, _VENDOR, _NVIDIA, data))
105 {
106 if (data != nvXveId)
107 return NV_ERR_NOT_SUPPORTED;
108
109 GPU_BUS_CFG_RD32(pGpu, NV_XVE_VCCAP_HDR, &data);
110
111 if (FLD_TEST_DRF(_XVE, _VCCAP_HDR, _ID, _VC, data) &&
112 FLD_TEST_DRF(_XVE, _VCCAP_HDR, _VER, _1, data))
113 {
114 if (data != nvXveVccapHdr)
115 return NV_ERR_NOT_SUPPORTED;
116 return NV_OK;
117 }
118 }
119 return NV_ERR_NOT_SUPPORTED;
120 }
121
122 /*!
123 * @brief Re-arm MSI
124 *
125 * @param[in] pGpu GPU object pointer
126 * @param[in] pKernelBif Kernel BIF object pointer
127 */
128 void
kbifRearmMSI_GM107(OBJGPU * pGpu,KernelBif * pKernelBif)129 kbifRearmMSI_GM107
130 (
131 OBJGPU *pGpu,
132 KernelBif *pKernelBif
133 )
134 {
135 NV_STATUS status = gpuSanityCheckRegisterAccess(pGpu, 0, NULL);
136
137 if (status != NV_OK)
138 {
139 return;
140 }
141
142 // The 32 byte value doesn't matter, HW only looks at the offset.
143 osGpuWriteReg032(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_CYA_2, 0);
144 }
145
146 /*!
147 * @brief Check if MSI is enabled in HW
148 *
149 * @param[in] pGpu GPU object pointer
150 * @param[in] pKernelBif BIF object pointer
151 *
152 * @return True if MSI enabled else False
153 */
154 NvBool
kbifIsMSIEnabledInHW_GM107(OBJGPU * pGpu,KernelBif * pKernelBif)155 kbifIsMSIEnabledInHW_GM107
156 (
157 OBJGPU *pGpu,
158 KernelBif *pKernelBif
159 )
160 {
161 NvU32 data32;
162 if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_MSI_CTRL, &data32))
163 {
164 NV_PRINTF(LEVEL_ERROR, "unable to read NV_XVE_MSI_CTRL\n");
165 }
166
167 return FLD_TEST_DRF(_XVE, _MSI_CTRL, _MSI, _ENABLE, data32);
168 }
169
170 /*!
171 * @brief Check if access to PCI config space is enabled
172 *
173 * @param[in] pGpu GPU object pointer
174 * @param[in] pKernelBif Kernel BIF object pointer
175 *
176 * @return True if access to PCI config space is enabled
177 */
178 NvBool
kbifIsPciIoAccessEnabled_GM107(OBJGPU * pGpu,KernelBif * pKernelBif)179 kbifIsPciIoAccessEnabled_GM107
180 (
181 OBJGPU *pGpu,
182 KernelBif *pKernelBif
183 )
184 {
185 NvU32 data = 0;
186
187 if (NV_OK == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEV_CTRL, &data))
188 {
189 if (FLD_TEST_DRF(_XVE, _DEV_CTRL, _CMD_IO_SPACE, _ENABLED, data))
190 {
191 return NV_TRUE;
192 }
193 }
194
195 return NV_FALSE;
196 }
197
198 /*!
199 * @brief Check if device is a 3D controller
200 *
201 * @param[in] pGpu GPU object pointer
202 * @param[in] pKernelBif Kernel BIF object pointer
203 *
204 * @return True if device is a 3D controller
205 */
206 NvBool
kbifIs3dController_GM107(OBJGPU * pGpu,KernelBif * pKernelBif)207 kbifIs3dController_GM107
208 (
209 OBJGPU *pGpu,
210 KernelBif *pKernelBif
211 )
212 {
213 NvU32 data = 0;
214
215 if (NV_OK == GPU_BUS_CFG_RD32(pGpu, NV_XVE_REV_ID, &data))
216 {
217 if (FLD_TEST_DRF(_XVE, _REV_ID, _CLASS_CODE, _3D, data))
218 {
219 return NV_TRUE;
220 }
221 }
222
223 return NV_FALSE;
224 }
225
226 /*!
227 * @brief Enable/disable no snoop for GPU
228 *
229 * @param[in] pGpu GPU object pointer
230 * @param[in] pKernelBif Kernel BIF object pointer
231 * @param[in] bEnable True if No snoop needs to be enabled
232 *
233 * @return NV_OK If no snoop modified as requested
234 */
235 NV_STATUS
kbifEnableNoSnoop_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,NvBool bEnable)236 kbifEnableNoSnoop_GM107
237 (
238 OBJGPU *pGpu,
239 KernelBif *pKernelBif,
240 NvBool bEnable
241 )
242 {
243 NvU8 fieldVal;
244 NvU32 regVal;
245
246 regVal = GPU_REG_RD32(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_DEVICE_CONTROL_STATUS);
247
248 fieldVal = bEnable ? 1 : 0;
249 regVal = FLD_SET_DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS,
250 _ENABLE_NO_SNOOP, fieldVal, regVal);
251
252 GPU_REG_WR32(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_DEVICE_CONTROL_STATUS, regVal);
253
254 return NV_OK;
255 }
256
257 /*!
258 * @brief Enables Relaxed Ordering PCI-E Capability in the PCI Config Space
259 *
260 * @param[in] pGpu GPU object pointer
261 * @param[in] pKernelBif Kernel BIF object pointer
262 */
263 void
kbifPcieConfigEnableRelaxedOrdering_GM107(OBJGPU * pGpu,KernelBif * pKernelBif)264 kbifPcieConfigEnableRelaxedOrdering_GM107
265 (
266 OBJGPU *pGpu,
267 KernelBif *pKernelBif
268 )
269 {
270 NvU32 xveDevCtrlStatus;
271
272 if(NV_ERR_GENERIC == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus))
273 {
274 NV_PRINTF(LEVEL_ERROR,
275 "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n");
276 DBG_BREAKPOINT();
277 }
278 else
279 {
280 GPU_BUS_CFG_FLD_WR_DRF_DEF(pGpu, xveDevCtrlStatus, _XVE, _DEVICE_CONTROL_STATUS,
281 _ENABLE_RELAXED_ORDERING, _INIT);
282 }
283 }
284
285 /*!
286 * @brief Disables Relaxed Ordering PCI-E Capability in the PCI Config Space
287 *
288 * @param[in] pGpu GPU object pointer
289 * @param[in] pKernelBif Kernel BIF object pointer
290 */
291 void
kbifPcieConfigDisableRelaxedOrdering_GM107(OBJGPU * pGpu,KernelBif * pKernelBif)292 kbifPcieConfigDisableRelaxedOrdering_GM107
293 (
294 OBJGPU *pGpu,
295 KernelBif *pKernelBif
296 )
297 {
298 NvU32 xveDevCtrlStatus;
299
300 if(NV_ERR_GENERIC == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus))
301 {
302 NV_PRINTF(LEVEL_ERROR,
303 "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n");
304 DBG_BREAKPOINT();
305 }
306 else
307 {
308 xveDevCtrlStatus = FLD_SET_DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS,
309 _ENABLE_RELAXED_ORDERING, 0, xveDevCtrlStatus);
310 GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, xveDevCtrlStatus);
311 }
312 }
313
/*!
 * Helper function for bifSavePcieConfigRegisters_GM107()
 *
 * Walks the validity bitmap and saves every valid 32-bit config space
 * offset of the given PCI function into pRegmapRef->bufBootConfigSpace.
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBif  Kernel Bif object pointer
 * @param[in] pRegmapRef  XVE Register map structure pointer
 *
 * @return  'NV_OK' if successful, an RM error code otherwise.
 */
static NV_STATUS
_kbifSavePcieConfigRegisters_GM107
(
    OBJGPU                    *pGpu,
    KernelBif                 *pKernelBif,
    const PKBIF_XVE_REGMAP_REF pRegmapRef
)
{
    NV_STATUS status;
    NvU16     index;

    // Read and save config space offset based on the bit map
    for (index = 0; index < pRegmapRef->numXveRegMapValid; index++)
    {
        NvU16 i, regOffset, bufOffset;
        NvU32 mask = 1;

        // One iteration per bit in the current bitmap word
        for (i = 0; i < sizeof(pRegmapRef->xveRegMapValid[0]) * 8; i++)
        {
            mask = 1 << i;
            // A register marked writeable must also be marked valid
            NV_ASSERT((pRegmapRef->xveRegMapWrite[index] & mask) == 0 ||
                      (pRegmapRef->xveRegMapValid[index] & mask) != 0);

            if ((pRegmapRef->xveRegMapValid[index] & mask) == 0)
            {
                continue;
            }

            // bufOffset is a dword index; regOffset is the byte offset in config space
            bufOffset = (index * sizeof(pRegmapRef->xveRegMapValid[0]) * 8) + i;
            regOffset = bufOffset * sizeof(pRegmapRef->bufBootConfigSpace[0]);

            status = PCI_FUNCTION_BUS_CFG_RD32(pGpu, pRegmapRef->nFunc,
                                               regOffset, &pRegmapRef->bufBootConfigSpace[bufOffset]);
            if (status != NV_OK)
            {
                return status;
            }
        }
    }

    // Config space was saved successfully; mark secondary bus reset as supported
    pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_SECONDARY_BUS_RESET_SUPPORTED, NV_TRUE);

    return NV_OK;
}
367
/*!
 * Save boot time PCIe Config space
 *
 * Saves function 0 (GPU) config space, and additionally function 1 (azalia)
 * config space when the device is multifunction and the save is not part of
 * an FLR or GC6 cycle.
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBif  Kernel Bif object pointer
 *
 * @return  'NV_OK' if successful, an RM error code otherwise.
 */
NV_STATUS
kbifSavePcieConfigRegisters_GM107
(
    OBJGPU    *pGpu,
    KernelBif *pKernelBif
)
{
    NV_STATUS status;

    //
    // Save config space if GPU is about to enter Function Level Reset
    // OR if GPU is about to enter GC6 state
    // OR if on non-windows platform, FORCE_PCIE_CONFIG_SAVE is set and SBR is enabled
    // OR if on windows platform, SBR is enabled
    //
    if (!pKernelBif->bPreparingFunctionLevelReset &&
        !IS_GPU_GC6_STATE_ENTERING(pGpu) &&
        !((RMCFG_FEATURE_PLATFORM_WINDOWS ||
           pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_FORCE_PCIE_CONFIG_SAVE)) &&
          pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_SECONDARY_BUS_RESET_ENABLED)))
    {
        // None of the save triggers apply; nothing to do
        return NV_OK;
    }

    // save pcie config space for function 0
    status = _kbifSavePcieConfigRegisters_GM107(pGpu, pKernelBif,
                                                &pKernelBif->xveRegmapRef[0]);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "Saving PCIe config space failed for gpu.\n");
        NV_ASSERT(0);
        return status;
    }

    // No need to save/restore azalia config space if gpu is in GC6 cycle or if it is in FLR
    if (IS_GPU_GC6_STATE_ENTERING(pGpu) ||
        pKernelBif->bPreparingFunctionLevelReset)
    {
        return NV_OK;
    }

    // Return early if device is not multifunction (azalia is disabled or not present)
    if (!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_DEVICE_IS_MULTIFUNCTION))
    {
        return NV_OK;
    }

    // Save pcie config space for function 1
    status = _kbifSavePcieConfigRegisters_GM107(pGpu, pKernelBif,
                                                &pKernelBif->xveRegmapRef[1]);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "Saving PCIe config space failed for azalia.\n");
        NV_ASSERT(0);
    }

    return status;
}
434
/*!
 * Helper function for bifRestorePcieConfigRegisters_GM107()
 *
 * Restores a PCI function's config space from the boot-time snapshot in
 * pRegmapRef->bufBootConfigSpace. On GC6 exit with the PMU restore
 * property set, it instead polls until the PMU has restored config space
 * (falling back to a minimal RM restore on timeout).
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBif  Kernel Bif object pointer
 * @param[in] pRegmapRef  XVE Register map structure pointer
 *
 * @return  'NV_OK' if successful, an RM error code otherwise.
 */
static NV_STATUS
_kbifRestorePcieConfigRegisters_GM107
(
    OBJGPU                    *pGpu,
    KernelBif                 *pKernelBif,
    const PKBIF_XVE_REGMAP_REF pRegmapRef
)
{
    NvU32 domain = gpuGetDomain(pGpu);
    NvU8 bus = gpuGetBus(pGpu);
    NvU8 device = gpuGetDevice(pGpu);
    NvU16 vendorId;
    NvU16 deviceId;
    NvU32 val;
    NV_STATUS status;
    void *handle;
    NvU16 index;
    RMTIMEOUT timeout;
    NvBool bGcxPmuCfgRestore;

    bGcxPmuCfgRestore = pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_GCX_PMU_CFG_SPACE_RESTORE);

    // OS PCI handle is needed since BAR0 may not be accessible yet
    handle = osPciInitHandle(domain, bus, device, pRegmapRef->nFunc,
                             &vendorId, &deviceId);
    NV_ASSERT_OR_RETURN(handle, NV_ERR_INVALID_POINTER);

    if (IS_GPU_GC6_STATE_EXITING(pGpu) &&
        bGcxPmuCfgRestore)
    {
        //
        // PMU Will Restore the config Space
        // As a last step PMU should set CMD_MEMORY_SPACE ENABLED after it restores the config space
        // Poll This register to see if PMU is finished or not otherwise timeout.
        //
        gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
        do
        {
            val = osPciReadDword(handle, NV_XVE_DEV_CTRL);
            status = gpuCheckTimeout(pGpu, &timeout);
            if (status == NV_ERR_TIMEOUT)
            {
                NV_PRINTF(LEVEL_ERROR,
                          "Timeout waiting for PCIE Config Space Restore from PMU, RM takes over\n");
                DBG_BREAKPOINT();

                //
                // Fallback: force CMD_MEMORY_SPACE enabled in the saved copy,
                // then restore DEV_CTRL and BAR0 directly so the GPU becomes
                // accessible again.
                //
                NvU32 *pReg = NULL;
                pReg = &pRegmapRef->bufBootConfigSpace[NV_XVE_DEV_CTRL /
                                                       sizeof(pRegmapRef->bufBootConfigSpace[0])];
                *pReg = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_MEMORY_SPACE, _ENABLED, *pReg);
                osPciWriteDword(handle, NV_XVE_DEV_CTRL, pRegmapRef->bufBootConfigSpace[1]);
                osPciWriteDword(handle, NV_XVE_BAR0, pRegmapRef->bufBootConfigSpace[4]);

                break;
            }
        } while (FLD_TEST_DRF(_XVE, _DEV_CTRL, _CMD_MEMORY_SPACE, _DISABLED, val));

        return NV_OK;
    }

    // Enable BAR0 accesses so we can restore config space more quickly.
    kbifRestoreBar0_HAL(pGpu, pKernelBif, handle, pRegmapRef->bufBootConfigSpace);

    // Enable required fields of NV_XVE_DEV_CTRL
    val = osPciReadDword(handle, NV_XVE_DEV_CTRL);
    val = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_MEMORY_SPACE, _ENABLED, val) |
          FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_BUS_MASTER, _ENABLED, val);
    osPciWriteDword(handle, NV_XVE_DEV_CTRL, val);

    // Restore only the valid config space offsets based on bit map
    for (index = 0; index < pRegmapRef->numXveRegMapValid; index++)
    {
        NvU16 i, regOffset, bufOffset;

        for (i = 0; i < sizeof(pRegmapRef->xveRegMapValid[0]) * 8; i++)
        {
            // Only offsets marked writeable are restored
            if ((pRegmapRef->xveRegMapWrite[index] & (1 << i)) == 0)
            {
                continue;
            }

            // bufOffset is a dword index; regOffset is the byte offset in config space
            bufOffset = (index * sizeof(pRegmapRef->xveRegMapValid[0]) * 8) + i;
            regOffset = bufOffset * sizeof(pRegmapRef->bufBootConfigSpace[0]);
            // DEV_CTRL is restored last, outside this loop
            if (regOffset == NV_XVE_DEV_CTRL)
            {
                continue;
            }

            //
            // This is a special case where we don't use the standard macro to write a register.
            // The macro will not allow access when PDB_PROP_GPU_IS_LOST is true.
            // This check is required to keep other accesses from touching the GPU for now.
            //
            osGpuWriteReg032(pGpu,
                             ((pRegmapRef->nFunc == 0) ? DEVICE_BASE(NV_PCFG) : DEVICE_BASE(NV_PCFG1)) + regOffset,
                             pRegmapRef->bufBootConfigSpace[bufOffset]);

            if (pRegmapRef->nFunc != 0)
            {
                // Non-zero functions are also written through OS config space
                status = PCI_FUNCTION_BUS_CFG_WR32(pGpu, pRegmapRef->nFunc, regOffset,
                                                   pRegmapRef->bufBootConfigSpace[bufOffset]);
                if (status != NV_OK)
                {
                    return status;
                }
            }
        }
    }

    //
    // Restore saved value of NV_XVE_DEV_CTRL, the second register saved in the buffer.
    // If we reach this point, it's RM-CPU restoration path.
    // Check if PMU_CFG_SPACE_RESTORE property was enabled
    // to confirm it's a debugging parallel restoration and
    // set back to _ENABLE before restoration
    //
    if (bGcxPmuCfgRestore)
    {
        NvU32 *pReg = NULL;
        pReg = &pRegmapRef->bufBootConfigSpace[NV_XVE_DEV_CTRL /
                                               sizeof(pRegmapRef->bufBootConfigSpace[0])];
        *pReg = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_MEMORY_SPACE, _ENABLED, *pReg);

    }

    osPciWriteDword(handle, NV_XVE_DEV_CTRL, pRegmapRef->bufBootConfigSpace[1]);

    return NV_OK;
}
572
/*!
 * Restore boot time PCIe Config space
 *
 * Restores function 0 (GPU) config space from the saved snapshot, then —
 * except on GC6 exit / FLR — also restores function 1 (azalia) when the
 * device is multifunction. On GC6 exit / FLR it instead verifies that the
 * GPU is back on the bus by polling NV_PMC_BOOT_0.
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBif  Kernel Bif object pointer
 *
 * @return  'NV_OK' if successful, an RM error code otherwise.
 */
NV_STATUS
kbifRestorePcieConfigRegisters_GM107
(
    OBJGPU    *pGpu,
    KernelBif *pKernelBif
)
{
    NV_STATUS status;
    RMTIMEOUT timeout;
    NvU64     timeStampStart;
    NvU64     timeStampEnd;

    // Nothing to restore if no snapshot was ever taken
    if (pKernelBif->xveRegmapRef[0].bufBootConfigSpace == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "Config space buffer is NULL!\n");
        NV_ASSERT(0);
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    // Restore pcie config space for function 0
    status = _kbifRestorePcieConfigRegisters_GM107(pGpu, pKernelBif,
                                                   &pKernelBif->xveRegmapRef[0]);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "Restoring PCIe config space failed for gpu.\n");
        NV_ASSERT(0);
        return status;
    }

    // No need to save/restore azalia config space if gpu is in GC6 cycle or if it is in FLR
    if (IS_GPU_GC6_STATE_EXITING(pGpu) ||
        pKernelBif->bInFunctionLevelReset)
    {
        //
        // Check that GPU is really accessible.
        // Skip on pre-silicon because there can be timing issues in the test between device ready and this code.
        // Todo: find a safe timeout for pre-silicon runs
        //
        if (IS_SILICON(pGpu))
        {
            // Check if GPU is actually accessible before continue
            osGetPerformanceCounter(&timeStampStart);
            gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
            NvU32 pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0);

            // Poll until NV_PMC_BOOT_0 returns the expected chip id
            while (pmcBoot0 != pGpu->chipId0)
            {
                NV_PRINTF(LEVEL_INFO,
                          "GPU not back on the bus after %s, 0x%x != 0x%x!\n",
                          pKernelBif->bInFunctionLevelReset?"FLR":"GC6 exit", pmcBoot0, pGpu->chipId0);
                pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0);
                NV_ASSERT(0);
                status = gpuCheckTimeout(pGpu, &timeout);
                if (status == NV_ERR_TIMEOUT)
                {
                    NV_PRINTF(LEVEL_ERROR,
                              "Timeout GPU not back on the bus after %s,\n", pKernelBif->bInFunctionLevelReset?"FLR":"GC6 exit");
                    DBG_BREAKPOINT();
                    return status;
                }
            }

            osGetPerformanceCounter(&timeStampEnd);
            NV_PRINTF(LEVEL_ERROR,
                      "Time spend on GPU back on bus is 0x%x ns,\n",
                      (NvU32)NV_MIN(NV_U32_MAX, timeStampEnd - timeStampStart));
        }

        return NV_OK;
    }

    // Return early if device is not multifunction (azalia is disabled or not present)
    if (!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_DEVICE_IS_MULTIFUNCTION))
    {
        return NV_OK;
    }

    // Restore pcie config space for function 1
    status = _kbifRestorePcieConfigRegisters_GM107(pGpu, pKernelBif,
                                                   &pKernelBif->xveRegmapRef[1]);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "Restoring PCIe config space failed for azalia.\n");
        NV_ASSERT(0);
    }

    return status;
}
669
670 /*!
671 * @brief Get XVE status bits
672 *
673 * @param[in] pGpu GPU object pointer
674 * @param[in] pKernelBif BIF object pointer
675 * @param[out] pBits PCIe error status values
676 * @param[out] pStatus Full XVE status
677 *
678 * @return NV_OK
679 */
680 NV_STATUS
kbifGetXveStatusBits_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,NvU32 * pBits,NvU32 * pStatus)681 kbifGetXveStatusBits_GM107
682 (
683 OBJGPU *pGpu,
684 KernelBif *pKernelBif,
685 NvU32 *pBits,
686 NvU32 *pStatus
687 )
688 {
689 // control/status reg
690 NvU32 xveDevCtrlStatus;
691
692 if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus))
693 {
694 NV_PRINTF(LEVEL_ERROR,
695 "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n");
696 }
697 if ( pBits == NULL )
698 return NV_ERR_GENERIC;
699
700 *pBits = 0;
701
702 // The register read above returns garbage on fmodel, so just return.
703 if (IS_FMODEL(pGpu))
704 {
705 if (pStatus)
706 {
707 *pStatus = 0;
708 }
709 return NV_OK;
710 }
711
712 if (pStatus)
713 *pStatus = xveDevCtrlStatus;
714
715 if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _CORR_ERROR_DETECTED, 1))
716 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR;
717 if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _NON_FATAL_ERROR_DETECTED, 1))
718 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR;
719 if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _FATAL_ERROR_DETECTED, 1))
720 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR;
721 if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _UNSUPP_REQUEST_DETECTED, 1))
722 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST;
723
724 if (pKernelBif->EnteredRecoverySinceErrorsLastChecked)
725 {
726 pKernelBif->EnteredRecoverySinceErrorsLastChecked = NV_FALSE;
727 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_ENTERED_RECOVERY;
728 }
729
730 return NV_OK;
731 }
732
733 /*!
734 * @brief Clear the XVE status bits
735 *
736 * @param[in] pGpu GPU object pointer
737 * @param[in] pKernelBif BIF object pointer
738 * @param[out] pStatus Full XVE status
739 *
740 * @return NV_OK
741 */
742 NV_STATUS
kbifClearXveStatus_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,NvU32 * pStatus)743 kbifClearXveStatus_GM107
744 (
745 OBJGPU *pGpu,
746 KernelBif *pKernelBif,
747 NvU32 *pStatus
748 )
749 {
750 NvU32 xveDevCtrlStatus;
751
752 if (pStatus)
753 {
754 xveDevCtrlStatus = *pStatus;
755 if (xveDevCtrlStatus == 0)
756 {
757 return NV_OK;
758 }
759 }
760 else
761 {
762 if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus))
763 {
764 NV_PRINTF(LEVEL_ERROR,
765 "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n");
766 }
767 }
768
769 GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, xveDevCtrlStatus);
770
771 return NV_OK;
772 }
773
/*!
 * @brief Get XVE AER bits
 *
 * Translates NV_XVE_AER_UNCORR_ERR and NV_XVE_AER_CORR_ERR into
 * NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_* flags.
 *
 * @param[in]  pGpu        GPU object pointer
 * @param[in]  pKernelBif  BIF object pointer
 * @param[out] pBits       PCIe AER error status values (must not be NULL)
 *
 * @return NV_OK on success, NV_ERR_GENERIC on NULL pBits or read failure
 */
NV_STATUS
kbifGetXveAerBits_GM107
(
    OBJGPU    *pGpu,
    KernelBif *pKernelBif,
    NvU32     *pBits
)
{
    NvU32 xveAerUncorr;
    NvU32 xveAerCorr;

    if (pBits == NULL)
    {
        return NV_ERR_GENERIC;
    }

    *pBits = 0;

    if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_AER_UNCORR_ERR, &xveAerUncorr))
    {
        NV_PRINTF(LEVEL_ERROR, "Unable to read NV_XVE_AER_UNCORR_ERR\n");
        return NV_ERR_GENERIC;
    }
    if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_AER_CORR_ERR, &xveAerCorr))
    {
        NV_PRINTF(LEVEL_ERROR, "Unable to read NV_XVE_AER_CORR_ERR\n");
        return NV_ERR_GENERIC;
    }

    // The register read above returns garbage on fmodel, so just return.
    if (IS_FMODEL(pGpu))
    {
        return NV_OK;
    }

    // Uncorrectable AER errors
    if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _DLINK_PROTO_ERR, _ACTIVE, xveAerUncorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR;
    if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _POISONED_TLP, _ACTIVE, xveAerUncorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP;
    if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _CPL_TIMEOUT, _ACTIVE, xveAerUncorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT;
    if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _UNEXP_CPL, _ACTIVE, xveAerUncorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL;
    if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _MALFORMED_TLP, _ACTIVE, xveAerUncorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP;
    if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _UNSUPPORTED_REQ, _ACTIVE, xveAerUncorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ;

    // Correctable AER errors
    if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RCV_ERR, _ACTIVE, xveAerCorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR;
    if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _BAD_TLP, _ACTIVE, xveAerCorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP;
    if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _BAD_DLLP , _ACTIVE, xveAerCorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP;
    if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RPLY_ROLLOVER, _ACTIVE, xveAerCorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER;
    if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RPLY_TIMEOUT, _ACTIVE, xveAerCorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT;
    if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _ADVISORY_NONFATAL, _ACTIVE, xveAerCorr))
        *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL;

    return NV_OK;
}
846
/*!
 * @brief Clear the XVE AER bits
 *
 * Builds write-1-to-clear masks from the requested
 * NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_* flags and writes them to the AER
 * uncorrectable/correctable error registers.
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBif  BIF object pointer
 * @param[in] bits        PCIe AER error status values to clear
 *
 * @return NV_OK
 */
NV_STATUS
kbifClearXveAer_GM107
(
    OBJGPU    *pGpu,
    KernelBif *pKernelBif,
    NvU32      bits
)
{
    NvU32 xveAerUncorr = 0;
    NvU32 xveAerCorr   = 0;

    // Uncorrectable error clear mask
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR)
        xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _DLINK_PROTO_ERR, _CLEAR, xveAerUncorr);
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP)
        xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _POISONED_TLP, _CLEAR, xveAerUncorr);
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT)
        xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _CPL_TIMEOUT, _CLEAR, xveAerUncorr);
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL)
        xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _UNEXP_CPL, _CLEAR, xveAerUncorr);
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP)
        xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _MALFORMED_TLP, _CLEAR, xveAerUncorr);
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ)
        xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _UNSUPPORTED_REQ, _CLEAR, xveAerUncorr);

    // Correctable error clear mask
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR)
        xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RCV_ERR, _CLEAR, xveAerCorr);
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP)
        xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _BAD_TLP, _CLEAR, xveAerCorr);
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP)
        xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _BAD_DLLP, _CLEAR, xveAerCorr);
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER)
        xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RPLY_ROLLOVER, _CLEAR, xveAerCorr);
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT)
        xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RPLY_TIMEOUT, _CLEAR, xveAerCorr);
    if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL)
        xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _ADVISORY_NONFATAL, _CLEAR, xveAerCorr);

    // Only touch hardware if there is something to clear
    if (xveAerUncorr != 0)
    {
        GPU_BUS_CFG_WR32(pGpu, NV_XVE_AER_UNCORR_ERR, xveAerUncorr);
    }
    if (xveAerCorr != 0)
    {
        GPU_BUS_CFG_WR32(pGpu, NV_XVE_AER_CORR_ERR, xveAerCorr);
    }

    return NV_OK;
}
904
905 /*!
906 * @brief Returns the BAR0 offset and size of the PCI config space mirror
907 *
908 * @param[in] pGpu GPU object pointer
909 * @param[in] pKernelBif Kernel BIF object pointer
910 * @param[out] pBase BAR0 offset of the PCI config space mirror
911 * @param[out] pSize Size in bytes of the PCI config space mirror
912 *
913 * @returns NV_OK
914 */
915 NV_STATUS
kbifGetPciConfigSpacePriMirror_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,NvU32 * pBase,NvU32 * pSize)916 kbifGetPciConfigSpacePriMirror_GM107
917 (
918 OBJGPU *pGpu,
919 KernelBif *pKernelBif,
920 NvU32 *pBase,
921 NvU32 *pSize
922 )
923 {
924 *pBase = DEVICE_BASE(NV_PCFG);
925 *pSize = DEVICE_EXTENT(NV_PCFG) - DEVICE_BASE(NV_PCFG) + 1;
926 return NV_OK;
927 }
928
/*!
 * @brief C73 chipset WAR
 *
 * On C73 chipsets, sets IGNORE_L0S in the root port's NV_XVR_VEND_XP1
 * register, using either a direct CPU mapping of the root port config
 * space or OS PCI config accesses, whichever the platform allows.
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBif  Kernel BIF object pointer
 */
void
kbifExecC73War_GM107
(
    OBJGPU    *pGpu,
    KernelBif *pKernelBif
)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    OBJOS  *pOS  = SYS_GET_OS(pSys);
    OBJCL  *pCl  = SYS_GET_CL(pSys);
    NvU32   val;

    if (CS_NVIDIA_C73 == pCl->Chipset)
    {
        //
        // Turn off L0s on the chipset which are required by the suspend/resume
        // cycles in Vista. See bug 400044 for more details.
        //

        // vAddr is a mapped cpu virtual addr into the root ports config space.
        if (!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS) &&
            (pGpu->gpuClData.rootPort.vAddr != 0))
        {
            // Direct mapping path: read-modify-write through the CPU mapping
            val = MEM_RD32((NvU8*)pGpu->gpuClData.rootPort.vAddr+NV_XVR_VEND_XP1);
            val = FLD_SET_DRF(_XVR, _VEND_XP1, _IGNORE_L0S, _EN, val);
            MEM_WR32((NvU8*)pGpu->gpuClData.rootPort.vAddr+NV_XVR_VEND_XP1, val);
        }
        else if (pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS) &&
                 pGpu->gpuClData.rootPort.addr.valid)
        {
            // OS config-access path: same read-modify-write via PCI handle
            val = osPciReadDword(pGpu->gpuClData.rootPort.addr.handle, NV_XVR_VEND_XP1);
            val = FLD_SET_DRF(_XVR, _VEND_XP1, _IGNORE_L0S, _EN, val);
            osPciWriteDword(pGpu->gpuClData.rootPort.addr.handle, NV_XVR_VEND_XP1, val);
        }
        else
        {
            // No way to reach the root port register; warn loudly
            NV_PRINTF(LEVEL_ERROR,
                      "Cannot turn off L0s on C73 chipset, suspend/resume may fail (Bug 400044).\n");
            DBG_BREAKPOINT();
        }
    }
}
977
978 NV_STATUS
kbifGetBusOptionsAddr_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,BUS_OPTIONS options,NvU32 * addrReg)979 kbifGetBusOptionsAddr_GM107
980 (
981 OBJGPU *pGpu,
982 KernelBif *pKernelBif,
983 BUS_OPTIONS options,
984 NvU32 *addrReg
985 )
986 {
987 NV_STATUS status = NV_OK;
988
989 switch (options)
990 {
991 case BUS_OPTIONS_DEV_CONTROL_STATUS:
992 *addrReg = NV_XVE_DEVICE_CONTROL_STATUS;
993 break;
994 case BUS_OPTIONS_DEV_CONTROL_STATUS_2:
995 *addrReg = NV_XVE_DEVICE_CONTROL_STATUS_2;
996 break;
997 case BUS_OPTIONS_LINK_CONTROL_STATUS:
998 *addrReg = NV_XVE_LINK_CONTROL_STATUS;
999 break;
1000 case BUS_OPTIONS_LINK_CAPABILITIES:
1001 *addrReg = NV_XVE_LINK_CAPABILITIES;
1002 break;
1003 case BUS_OPTIONS_L1_PM_SUBSTATES_CTRL_1:
1004 *addrReg = NV_XVE_L1_PM_SUBSTATES_CTRL1;
1005 break;
1006 default:
1007 NV_PRINTF(LEVEL_ERROR, "Invalid register type passed 0x%x\n",
1008 options);
1009 status = NV_ERR_GENERIC;
1010 break;
1011 }
1012 return status;
1013 }
1014
1015 NV_STATUS
kbifDisableSysmemAccess_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,NvBool bDisable)1016 kbifDisableSysmemAccess_GM107
1017 (
1018 OBJGPU *pGpu,
1019 KernelBif *pKernelBif,
1020 NvBool bDisable
1021 )
1022 {
1023 NV_STATUS status = NV_OK;
1024 RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
1025 NV2080_CTRL_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS_PARAMS params = {0};
1026
1027 // Only support on Windows
1028 NV_ASSERT_OR_RETURN(RMCFG_FEATURE_PLATFORM_WINDOWS, NV_ERR_NOT_SUPPORTED);
1029
1030 params.bDisable = bDisable;
1031 status = pRmApi->Control(pRmApi,
1032 pGpu->hInternalClient,
1033 pGpu->hInternalSubdevice,
1034 NV2080_CTRL_CMD_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS,
1035 ¶ms,
1036 sizeof(NV2080_CTRL_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS_PARAMS));
1037
1038 // Only set the PDB in kernel if it was set in physical successfully
1039 if (status == NV_OK)
1040 {
1041 pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_SYSTEM_ACCESS_DISABLED, bDisable);
1042 }
1043
1044 return status;
1045 }
1046
1047 /*!
1048 * This function setups the xve register map pointers
1049 *
1050 * @param[in] pGpu GPU object pointer
1051 * @param[in] pKernelBif Pointer to KernelBif object
1052 * @param[in] func PCIe function number
1053 *
1054 * @return 'NV_OK' if successful, an RM error code otherwise.
1055 */
1056 NV_STATUS
kbifInitXveRegMap_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,NvU8 func)1057 kbifInitXveRegMap_GM107
1058 (
1059 OBJGPU *pGpu,
1060 KernelBif *pKernelBif,
1061 NvU8 func
1062 )
1063 {
1064 if (func == 0)
1065 {
1066 pKernelBif->xveRegmapRef[0].nFunc = 0;
1067 pKernelBif->xveRegmapRef[0].xveRegMapValid = xveRegMapValid;
1068 pKernelBif->xveRegmapRef[0].xveRegMapWrite = xveRegMapWrite;
1069 pKernelBif->xveRegmapRef[0].numXveRegMapValid = sizeof(xveRegMapValid)/sizeof(xveRegMapValid[0]);
1070 pKernelBif->xveRegmapRef[0].numXveRegMapWrite = sizeof(xveRegMapWrite)/sizeof(xveRegMapWrite[0]);
1071 pKernelBif->xveRegmapRef[0].bufBootConfigSpace = pKernelBif->cacheData.gpuBootConfigSpace;
1072 // No MSIX for this GPU
1073 pKernelBif->xveRegmapRef[0].bufMsixTable = NULL;
1074 }
1075 else if (func == 1)
1076 {
1077 pKernelBif->xveRegmapRef[1].nFunc = 1;
1078 pKernelBif->xveRegmapRef[1].xveRegMapValid = xve1RegMapValid;
1079 pKernelBif->xveRegmapRef[1].xveRegMapWrite = xve1RegMapWrite;
1080 pKernelBif->xveRegmapRef[1].numXveRegMapValid = sizeof(xve1RegMapValid)/sizeof(xve1RegMapValid[0]);
1081 pKernelBif->xveRegmapRef[1].numXveRegMapWrite = sizeof(xve1RegMapWrite)/sizeof(xve1RegMapWrite[0]);
1082 pKernelBif->xveRegmapRef[1].bufBootConfigSpace = pKernelBif->cacheData.azaliaBootConfigSpace;
1083 // No MSIX for this func
1084 pKernelBif->xveRegmapRef[1].bufMsixTable = NULL;
1085 }
1086 else
1087 {
1088 NV_PRINTF(LEVEL_ERROR, "Invalid argument, func: %d.\n", func);
1089 NV_ASSERT(0);
1090 return NV_ERR_INVALID_ARGUMENT;
1091 }
1092
1093 return NV_OK;
1094 }
1095
1096 /*!
1097 * @brief Clears Bus Master Enable bit in command register, disabling
1098 * Function 0 - from issuing any new requests to sysmem.
1099 *
1100 * @param[in] pGpu GPU object pointer
1101 * @param[in] pKernelBif KernelBif object pointer
1102 *
1103 * @return NV_OK
1104 */
1105 NV_STATUS
kbifStopSysMemRequests_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,NvBool bStop)1106 kbifStopSysMemRequests_GM107
1107 (
1108 OBJGPU *pGpu,
1109 KernelBif *pKernelBif,
1110 NvBool bStop
1111 )
1112 {
1113 NvU32 regVal;
1114
1115 NV_ASSERT_OK_OR_RETURN(GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEV_CTRL, ®Val));
1116
1117 if (bStop)
1118 {
1119 regVal = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_BUS_MASTER, _DISABLED, regVal);
1120 }
1121 else
1122 {
1123 regVal = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_BUS_MASTER, _ENABLED, regVal);
1124 }
1125
1126 NV_ASSERT_OK_OR_RETURN(GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEV_CTRL, regVal));
1127
1128 return NV_OK;
1129 }
1130
1131
1132 /*
1133 * @brief Restore the BAR0 register from the given config space buffer
1134 * BAR0 register restore has to use the config cycle write.
1135 *
1136 * @param[in] pGpu GPU object pointer
1137 * @param[in] pKernelBif Pointer to KernelBif object
1138 * @param[in] handle PCI handle for GPU
1139 * @param[in] bufConfigSpace Stored config space
1140 */
1141 void
kbifRestoreBar0_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,void * handle,NvU32 * bufConfigSpace)1142 kbifRestoreBar0_GM107
1143 (
1144 OBJGPU *pGpu,
1145 KernelBif *pKernelBif,
1146 void *handle,
1147 NvU32 *bufConfigSpace
1148 )
1149 {
1150 //
1151 // Not much ROI in storing BAR offsets for legacy chips since
1152 // BAR offsets are not going to change ever for legacy chips
1153 //
1154 osPciWriteDword(handle, NV_XVE_BAR0,
1155 bufConfigSpace[NV_XVE_BAR0/sizeof(NvU32)]);
1156 }
1157
1158
1159 /*!
1160 * @brief Check if any of the BAR register reads returns a valid value.
1161 *
1162 * @param[in] pGpu GPU object pointer
1163 * @param[in] pKernelBif KernelBif object pointer
1164 *
1165 * @returns NV_TRUE if any BAR register read returns a valid value
1166 * NV_FALSE if all the BAR registers return an invalid values
1167 */
1168 NvBool
kbifAnyBarsAreValid_GM107(OBJGPU * pGpu,KernelBif * pKernelBif)1169 kbifAnyBarsAreValid_GM107
1170 (
1171 OBJGPU *pGpu,
1172 KernelBif *pKernelBif
1173 )
1174 {
1175 NvU32 domain = gpuGetDomain(pGpu);
1176 NvU8 bus = gpuGetBus(pGpu);
1177 NvU8 device = gpuGetDevice(pGpu);
1178 NvU16 vendorId, deviceId;
1179 void *handle;
1180
1181 handle = osPciInitHandle(domain, bus, device, 0, &vendorId, &deviceId);
1182
1183 if (osPciReadDword(handle, NV_XVE_BAR0) == pKernelBif->cacheData.gpuBootConfigSpace[4])
1184 {
1185 // BAR0 is valid
1186 return NV_TRUE;
1187 }
1188
1189 if ((osPciReadDword(handle, NV_XVE_BAR1_LO) == pKernelBif->cacheData.gpuBootConfigSpace[5]) &&
1190 (osPciReadDword(handle, NV_XVE_BAR1_HI) == pKernelBif->cacheData.gpuBootConfigSpace[6]))
1191 {
1192 // BAR1 is valid
1193 return NV_TRUE;
1194 }
1195
1196 return NV_FALSE;
1197 }
1198
1199 /*!
1200 * @brief Try restoring BAR registers and command register using config cycles
1201 *
1202 * @param[in] pGpu GPU object pointer
1203 * @param[in] pKernelBif KernelBif object pointer
1204 *
1205 * @returns NV_OK on success
1206 * NV_ERR_INVALID_READ if the register read returns unexpected value
1207 */
1208 NV_STATUS
kbifRestoreBarsAndCommand_GM107(OBJGPU * pGpu,KernelBif * pKernelBif)1209 kbifRestoreBarsAndCommand_GM107
1210 (
1211 OBJGPU *pGpu,
1212 KernelBif *pKernelBif
1213 )
1214 {
1215 NvU32 domain = gpuGetDomain(pGpu);
1216 NvU8 bus = gpuGetBus(pGpu);
1217 NvU8 device = gpuGetDevice(pGpu);
1218 NvU16 vendorId, deviceId;
1219 void *handle;
1220
1221 handle = osPciInitHandle(domain, bus, device, 0, &vendorId, &deviceId);
1222
1223 osPciWriteDword(handle, NV_XVE_BAR0, pKernelBif->cacheData.gpuBootConfigSpace[4]);
1224 osPciWriteDword(handle, NV_XVE_BAR1_LO, pKernelBif->cacheData.gpuBootConfigSpace[5]);
1225 osPciWriteDword(handle, NV_XVE_BAR1_HI, pKernelBif->cacheData.gpuBootConfigSpace[6]);
1226 osPciWriteDword(handle, NV_XVE_BAR2_LO, pKernelBif->cacheData.gpuBootConfigSpace[7]);
1227 osPciWriteDword(handle, NV_XVE_BAR2_HI, pKernelBif->cacheData.gpuBootConfigSpace[8]);
1228 osPciWriteDword(handle, NV_XVE_BAR3, pKernelBif->cacheData.gpuBootConfigSpace[9]);
1229 osPciWriteDword(handle, NV_XVE_DEV_CTRL, pKernelBif->cacheData.gpuBootConfigSpace[1]);
1230
1231 if (GPU_REG_RD32(pGpu, NV_PMC_BOOT_0) != pGpu->chipId0)
1232 {
1233 return NV_ERR_INVALID_READ;
1234 }
1235
1236 return NV_OK;
1237 }
1238
1239 /*!
1240 * @brief HAL specific BIF software state initialization
1241 *
1242 * @param[in] pGpu GPU object pointer
1243 * @param[in] pKernelBif KernelBif object pointer
1244 *
1245 * @return NV_OK on success
1246 */
1247 NV_STATUS
kbifInit_GM107(OBJGPU * pGpu,KernelBif * pKernelBif)1248 kbifInit_GM107
1249 (
1250 OBJGPU *pGpu,
1251 KernelBif *pKernelBif
1252 )
1253 {
1254 // Cache the offsets of BAR registers into an array for subsequent use
1255 kbifStoreBarRegOffsets_HAL(pGpu, pKernelBif, NV_XVE_BAR0);
1256
1257 return NV_OK;
1258 }
1259
1260 /*!
1261 * @brief Get the migration bandwidth
1262 *
1263 * @param[out] pBandwidth Migration bandwidth
1264 *
1265 * @returns NV_STATUS
1266 */
1267 NV_STATUS
kbifGetMigrationBandwidth_GM107(OBJGPU * pGpu,KernelBif * pKernelBif,NvU32 * pBandwidth)1268 kbifGetMigrationBandwidth_GM107
1269 (
1270 OBJGPU *pGpu,
1271 KernelBif *pKernelBif,
1272 NvU32 *pBandwidth
1273 )
1274 {
1275 NV_STATUS rmStatus = NV_OK;
1276 NV2080_CTRL_BUS_INFO busInfo = {0};
1277
1278 NvU32 pcieLinkRate = 0;
1279 NvU32 lanes = 0;
1280 NvU32 pciLinkMaxSpeed = 0;
1281 NvU32 pciLinkGenInfo = 0;
1282
1283 busInfo.index = NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN_INFO;
1284 busInfo.data = 0;
1285
1286 if (IS_GSP_CLIENT(pGpu))
1287 {
1288 NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kbusSendBusInfo(pGpu, GPU_GET_KERNEL_BUS(pGpu), &busInfo));
1289 }
1290 else
1291 {
1292 if (kbifIsPciBusFamily(pKernelBif))
1293 {
1294 NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kbifControlGetPCIEInfo(pGpu, pKernelBif, &busInfo));
1295 }
1296 else
1297 {
1298 return NV_ERR_NOT_SUPPORTED;
1299 }
1300 }
1301
1302 pciLinkGenInfo = DRF_VAL(2080, _CTRL_BUS, _INFO_PCIE_LINK_CAP_GEN, busInfo.data);
1303 NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kbifGetPciLinkMaxSpeedByPciGenInfo(pGpu, pKernelBif, pciLinkGenInfo, &pciLinkMaxSpeed));
1304
1305 busInfo.index = NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CTRL_STATUS;
1306 busInfo.data = 0;
1307
1308 if (kbifIsPciBusFamily(pKernelBif))
1309 {
1310 NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kbifControlGetPCIEInfo(pGpu, pKernelBif, &busInfo));
1311 }
1312 else
1313 {
1314 return NV_ERR_NOT_SUPPORTED;
1315 }
1316
1317 lanes = DRF_VAL(2080, _CTRL_BUS, _INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH, busInfo.data);
1318 NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, calculatePCIELinkRateMBps(lanes, pciLinkMaxSpeed, &pcieLinkRate));
1319 *pBandwidth = (pcieLinkRate / VGPU_MIGRATION_API_DERATE_FACTOR);
1320
1321 return rmStatus;
1322 }
1323
1324 /*!
1325 * @brief Destructor
1326 *
1327 * @param[in] pKernelBif
1328 *
1329 * @returns void
1330 */
1331 void
kbifDestruct_GM107(KernelBif * pKernelBif)1332 kbifDestruct_GM107
1333 (
1334 KernelBif *pKernelBif
1335 )
1336 {
1337 portMemFree(pKernelBif->xveRegmapRef[0].bufMsixTable);
1338 pKernelBif->xveRegmapRef[0].bufMsixTable = NULL;
1339 }
1340
1341
1342
1343
1344