1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 */


/* ------------------------- System Includes -------------------------------- */
#include "gpu/gpu.h"
#include "gpu/bif/kernel_bif.h"
#include "platform/chipset/chipset.h"
#include "nvdevid.h"

#include "published/maxwell/gm107/dev_boot.h"
#include "published/maxwell/gm107/dev_nv_xve.h"

#include "published/maxwell/gm107/dev_nv_pcfg_xve_addendum.h"
#include "published/maxwell/gm107/dev_nv_pcfg_xve1_addendum.h"

// Defines for C73 chipset registers
#ifndef NV_XVR_VEND_XP1
#define NV_XVR_VEND_XP1                          0x00000F04 /* RW-4R */
#define NV_XVR_VEND_XP1_IGNORE_L0S                    23:23 /* RWIVF */
#define NV_XVR_VEND_XP1_IGNORE_L0S_INIT          0x00000000 /* RWI-V */
#define NV_XVR_VEND_XP1_IGNORE_L0S__PROD         0x00000000 /* RW--V */
#define NV_XVR_VEND_XP1_IGNORE_L0S_EN            0x00000001 /* RW--V */
#endif

// XVE register map for PCIe config space.
// Valid/writable dword bitmaps for PCI function 0 (GPU) and function 1
// (consumed by kbifInitXveRegMap_GM107 below).
static const NvU32 xveRegMapValid[]  = NV_PCFG_XVE_REGISTER_VALID_MAP;
static const NvU32 xveRegMapWrite[]  = NV_PCFG_XVE_REGISTER_WR_MAP;
static const NvU32 xve1RegMapValid[] = NV_PCFG_XVE1_REGISTER_VALID_MAP;
static const NvU32 xve1RegMapWrite[] = NV_PCFG_XVE1_REGISTER_WR_MAP;

/* ------------------------ Public Functions -------------------------------- */

/*!
 * @brief Get PCIe config test registers
 *
 * @param[in]  pGpu        GPU object pointer
 * @param[in]  pKernelBif  BIF object pointer
 * @param[out] pciStart    Receives the PCI config space test register offset (NV_XVE_ID)
 * @param[out] pcieStart   Receives the PCIe config space test register offset (NV_XVE_VCCAP_HDR)
 */
void
kbifGetPcieConfigAccessTestRegisters_GM107
(
    OBJGPU    *pGpu,
    KernelBif *pKernelBif,
    NvU32     *pciStart,
    NvU32     *pcieStart
)
{
    *pciStart  = NV_XVE_ID;
    *pcieStart = NV_XVE_VCCAP_HDR;
}

/*!
75 * @brief Verify PCIe config test registers 76 * 77 * @param[in] pGpu GPU object pointer 78 * @param[in] pKernelBif BIF object pointer 79 * 80 * @return NV_OK 81 */ 82 NV_STATUS 83 kbifVerifyPcieConfigAccessTestRegisters_GM107 84 ( 85 OBJGPU *pGpu, 86 KernelBif *pKernelBif, 87 NvU32 nvXveId, 88 NvU32 nvXveVccapHdr 89 ) 90 { 91 NvU32 data; 92 93 GPU_BUS_CFG_RD32(pGpu, NV_XVE_ID, &data); 94 95 if (FLD_TEST_DRF(_XVE, _ID, _VENDOR, _NVIDIA, data)) 96 { 97 if (data != nvXveId) 98 return NV_ERR_NOT_SUPPORTED; 99 100 GPU_BUS_CFG_RD32(pGpu, NV_XVE_VCCAP_HDR, &data); 101 102 if (FLD_TEST_DRF(_XVE, _VCCAP_HDR, _ID, _VC, data) && 103 FLD_TEST_DRF(_XVE, _VCCAP_HDR, _VER, _1, data)) 104 { 105 if (data != nvXveVccapHdr) 106 return NV_ERR_NOT_SUPPORTED; 107 return NV_OK; 108 } 109 } 110 return NV_ERR_NOT_SUPPORTED; 111 } 112 113 /*! 114 * @brief Re-arm MSI 115 * 116 * @param[in] pGpu GPU object pointer 117 * @param[in] pKernelBif Kernel BIF object pointer 118 */ 119 void 120 kbifRearmMSI_GM107 121 ( 122 OBJGPU *pGpu, 123 KernelBif *pKernelBif 124 ) 125 { 126 NV_STATUS status = gpuSanityCheckRegisterAccess(pGpu, 0, NULL); 127 128 if (status != NV_OK) 129 { 130 return; 131 } 132 133 // The 32 byte value doesn't matter, HW only looks at the offset. 134 osGpuWriteReg032(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_CYA_2, 0); 135 } 136 137 /*! 138 * @brief Check if MSI is enabled in HW 139 * 140 * @param[in] pGpu GPU object pointer 141 * @param[in] pKernelBif BIF object pointer 142 * 143 * @return True if MSI enabled else False 144 */ 145 NvBool 146 kbifIsMSIEnabledInHW_GM107 147 ( 148 OBJGPU *pGpu, 149 KernelBif *pKernelBif 150 ) 151 { 152 NvU32 data32; 153 if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_MSI_CTRL, &data32)) 154 { 155 NV_PRINTF(LEVEL_ERROR, "unable to read NV_XVE_MSI_CTRL\n"); 156 } 157 158 return FLD_TEST_DRF(_XVE, _MSI_CTRL, _MSI, _ENABLE, data32); 159 } 160 161 /*! 
162 * @brief Check if access to PCI config space is enabled 163 * 164 * @param[in] pGpu GPU object pointer 165 * @param[in] pKernelBif Kernel BIF object pointer 166 * 167 * @return True if access to PCI config space is enabled 168 */ 169 NvBool 170 kbifIsPciIoAccessEnabled_GM107 171 ( 172 OBJGPU *pGpu, 173 KernelBif *pKernelBif 174 ) 175 { 176 NvU32 data = 0; 177 178 if (NV_OK == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEV_CTRL, &data)) 179 { 180 if (FLD_TEST_DRF(_XVE, _DEV_CTRL, _CMD_IO_SPACE, _ENABLED, data)) 181 { 182 return NV_TRUE; 183 } 184 } 185 186 return NV_FALSE; 187 } 188 189 /*! 190 * @brief Check if device is a 3D controller 191 * 192 * @param[in] pGpu GPU object pointer 193 * @param[in] pKernelBif Kernel BIF object pointer 194 * 195 * @return True if device is a 3D controller 196 */ 197 NvBool 198 kbifIs3dController_GM107 199 ( 200 OBJGPU *pGpu, 201 KernelBif *pKernelBif 202 ) 203 { 204 NvU32 data = 0; 205 206 if (NV_OK == GPU_BUS_CFG_RD32(pGpu, NV_XVE_REV_ID, &data)) 207 { 208 if (FLD_TEST_DRF(_XVE, _REV_ID, _CLASS_CODE, _3D, data)) 209 { 210 return NV_TRUE; 211 } 212 } 213 214 return NV_FALSE; 215 } 216 217 /*! 218 * @brief Enable/disable no snoop for GPU 219 * 220 * @param[in] pGpu GPU object pointer 221 * @param[in] pKernelBif Kernel BIF object pointer 222 * @param[in] bEnable True if No snoop needs to be enabled 223 * 224 * @return NV_OK If no snoop modified as requested 225 */ 226 NV_STATUS 227 kbifEnableNoSnoop_GM107 228 ( 229 OBJGPU *pGpu, 230 KernelBif *pKernelBif, 231 NvBool bEnable 232 ) 233 { 234 NvU8 fieldVal; 235 NvU32 regVal; 236 237 regVal = GPU_REG_RD32(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_DEVICE_CONTROL_STATUS); 238 239 fieldVal = bEnable ? 1 : 0; 240 regVal = FLD_SET_DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, 241 _ENABLE_NO_SNOOP, fieldVal, regVal); 242 243 GPU_REG_WR32(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_DEVICE_CONTROL_STATUS, regVal); 244 245 return NV_OK; 246 } 247 248 /*! 
249 * @brief Enables Relaxed Ordering PCI-E Capability in the PCI Config Space 250 * 251 * @param[in] pGpu GPU object pointer 252 * @param[in] pKernelBif Kernel BIF object pointer 253 */ 254 void 255 kbifPcieConfigEnableRelaxedOrdering_GM107 256 ( 257 OBJGPU *pGpu, 258 KernelBif *pKernelBif 259 ) 260 { 261 NvU32 xveDevCtrlStatus; 262 263 if(NV_ERR_GENERIC == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus)) 264 { 265 NV_PRINTF(LEVEL_ERROR, 266 "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n"); 267 DBG_BREAKPOINT(); 268 } 269 else 270 { 271 GPU_BUS_CFG_FLD_WR_DRF_DEF(pGpu, xveDevCtrlStatus, _XVE, _DEVICE_CONTROL_STATUS, 272 _ENABLE_RELAXED_ORDERING, _INIT); 273 } 274 } 275 276 /*! 277 * @brief Disables Relaxed Ordering PCI-E Capability in the PCI Config Space 278 * 279 * @param[in] pGpu GPU object pointer 280 * @param[in] pKernelBif Kernel BIF object pointer 281 */ 282 void 283 kbifPcieConfigDisableRelaxedOrdering_GM107 284 ( 285 OBJGPU *pGpu, 286 KernelBif *pKernelBif 287 ) 288 { 289 NvU32 xveDevCtrlStatus; 290 291 if(NV_ERR_GENERIC == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus)) 292 { 293 NV_PRINTF(LEVEL_ERROR, 294 "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n"); 295 DBG_BREAKPOINT(); 296 } 297 else 298 { 299 xveDevCtrlStatus = FLD_SET_DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, 300 _ENABLE_RELAXED_ORDERING, 0, xveDevCtrlStatus); 301 GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, xveDevCtrlStatus); 302 } 303 } 304 305 /*! 
306 * @brief Get XVE status bits 307 * 308 * @param[in] pGpu GPU object pointer 309 * @param[in] pKernelBif BIF object pointer 310 * @param[out] pBits PCIe error status values 311 * @param[out] pStatus Full XVE status 312 * 313 * @return NV_OK 314 */ 315 NV_STATUS 316 kbifGetXveStatusBits_GM107 317 ( 318 OBJGPU *pGpu, 319 KernelBif *pKernelBif, 320 NvU32 *pBits, 321 NvU32 *pStatus 322 ) 323 { 324 // control/status reg 325 NvU32 xveDevCtrlStatus; 326 327 if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus)) 328 { 329 NV_PRINTF(LEVEL_ERROR, 330 "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n"); 331 } 332 if ( pBits == NULL ) 333 return NV_ERR_GENERIC; 334 335 *pBits = 0; 336 337 // The register read above returns garbage on fmodel, so just return. 338 if (IS_FMODEL(pGpu)) 339 { 340 if (pStatus) 341 { 342 *pStatus = 0; 343 } 344 return NV_OK; 345 } 346 347 if (pStatus) 348 *pStatus = xveDevCtrlStatus; 349 350 if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _CORR_ERROR_DETECTED, 1)) 351 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR; 352 if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _NON_FATAL_ERROR_DETECTED, 1)) 353 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR; 354 if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _FATAL_ERROR_DETECTED, 1)) 355 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR; 356 if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _UNSUPP_REQUEST_DETECTED, 1)) 357 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST; 358 359 if (pKernelBif->EnteredRecoverySinceErrorsLastChecked) 360 { 361 pKernelBif->EnteredRecoverySinceErrorsLastChecked = NV_FALSE; 362 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_ENTERED_RECOVERY; 363 } 364 365 return NV_OK; 366 } 367 368 /*! 
369 * @brief Clear the XVE status bits 370 * 371 * @param[in] pGpu GPU object pointer 372 * @param[in] pKernelBif BIF object pointer 373 * @param[out] pStatus Full XVE status 374 * 375 * @return NV_OK 376 */ 377 NV_STATUS 378 kbifClearXveStatus_GM107 379 ( 380 OBJGPU *pGpu, 381 KernelBif *pKernelBif, 382 NvU32 *pStatus 383 ) 384 { 385 NvU32 xveDevCtrlStatus; 386 387 if (pStatus) 388 { 389 xveDevCtrlStatus = *pStatus; 390 if (xveDevCtrlStatus == 0) 391 { 392 return NV_OK; 393 } 394 } 395 else 396 { 397 if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus)) 398 { 399 NV_PRINTF(LEVEL_ERROR, 400 "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n"); 401 } 402 } 403 404 GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, xveDevCtrlStatus); 405 406 return NV_OK; 407 } 408 409 /*! 410 * @brief Get XVE AER bits 411 * 412 * @param[in] pGpu GPU object pointer 413 * @param[in] pKernelBif BIF object pointer 414 * @param[out] pBits PCIe AER error status values 415 * 416 * @return NV_OK 417 */ 418 NV_STATUS 419 kbifGetXveAerBits_GM107 420 ( 421 OBJGPU *pGpu, 422 KernelBif *pKernelBif, 423 NvU32 *pBits 424 ) 425 { 426 NvU32 xveAerUncorr; 427 NvU32 xveAerCorr; 428 429 if (pBits == NULL) 430 { 431 return NV_ERR_GENERIC; 432 } 433 434 *pBits = 0; 435 436 if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_AER_UNCORR_ERR, &xveAerUncorr)) 437 { 438 NV_PRINTF(LEVEL_ERROR, "Unable to read NV_XVE_AER_UNCORR_ERR\n"); 439 return NV_ERR_GENERIC; 440 } 441 if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_AER_CORR_ERR, &xveAerCorr)) 442 { 443 NV_PRINTF(LEVEL_ERROR, "Unable to read NV_XVE_AER_CORR_ERR\n"); 444 return NV_ERR_GENERIC; 445 } 446 447 // The register read above returns garbage on fmodel, so just return. 
448 if (IS_FMODEL(pGpu)) 449 { 450 return NV_OK; 451 } 452 453 if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _DLINK_PROTO_ERR, _ACTIVE, xveAerUncorr)) 454 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR; 455 if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _POISONED_TLP, _ACTIVE, xveAerUncorr)) 456 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP; 457 if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _CPL_TIMEOUT, _ACTIVE, xveAerUncorr)) 458 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT; 459 if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _UNEXP_CPL, _ACTIVE, xveAerUncorr)) 460 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL; 461 if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _MALFORMED_TLP, _ACTIVE, xveAerUncorr)) 462 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP; 463 if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _UNSUPPORTED_REQ, _ACTIVE, xveAerUncorr)) 464 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ; 465 466 if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RCV_ERR, _ACTIVE, xveAerCorr)) 467 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR; 468 if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _BAD_TLP, _ACTIVE, xveAerCorr)) 469 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP; 470 if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _BAD_DLLP , _ACTIVE, xveAerCorr)) 471 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP; 472 if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RPLY_ROLLOVER, _ACTIVE, xveAerCorr)) 473 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER; 474 if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RPLY_TIMEOUT, _ACTIVE, xveAerCorr)) 475 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT; 476 if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _ADVISORY_NONFATAL, _ACTIVE, xveAerCorr)) 477 *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL; 478 479 return NV_OK; 480 } 481 482 /*! 
483 * @brief Clear the XVE AER bits 484 * 485 * @param[in] pGpu GPU object pointer 486 * @param[in] pKernelBif BIF object pointer 487 * @param[in] bits PCIe AER error status values 488 * 489 * @return NV_OK 490 */ 491 NV_STATUS 492 kbifClearXveAer_GM107 493 ( 494 OBJGPU *pGpu, 495 KernelBif *pKernelBif, 496 NvU32 bits 497 ) 498 { 499 NvU32 xveAerUncorr = 0; 500 NvU32 xveAerCorr = 0; 501 502 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR) 503 xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _DLINK_PROTO_ERR, _CLEAR, xveAerUncorr); 504 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP) 505 xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _POISONED_TLP, _CLEAR, xveAerUncorr); 506 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT) 507 xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _CPL_TIMEOUT, _CLEAR, xveAerUncorr); 508 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL) 509 xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _UNEXP_CPL, _CLEAR, xveAerUncorr); 510 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP) 511 xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _MALFORMED_TLP, _CLEAR, xveAerUncorr); 512 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ) 513 xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _UNSUPPORTED_REQ, _CLEAR, xveAerUncorr); 514 515 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR) 516 xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RCV_ERR, _CLEAR, xveAerCorr); 517 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP) 518 xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _BAD_TLP, _CLEAR, xveAerCorr); 519 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP) 520 xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _BAD_DLLP, _CLEAR, xveAerCorr); 521 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER) 522 xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RPLY_ROLLOVER, _CLEAR, xveAerCorr); 523 if (bits & 
NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT) 524 xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RPLY_TIMEOUT, _CLEAR, xveAerCorr); 525 if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL) 526 xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _ADVISORY_NONFATAL, _CLEAR, xveAerCorr); 527 528 if (xveAerUncorr != 0) 529 { 530 GPU_BUS_CFG_WR32(pGpu, NV_XVE_AER_UNCORR_ERR, xveAerUncorr); 531 } 532 if (xveAerCorr != 0) 533 { 534 GPU_BUS_CFG_WR32(pGpu, NV_XVE_AER_CORR_ERR, xveAerCorr); 535 } 536 537 return NV_OK; 538 } 539 540 /*! 541 * @brief Returns the BAR0 offset and size of the PCI config space mirror 542 * 543 * @param[in] pGpu GPU object pointer 544 * @param[in] pKernelBif Kernel BIF object pointer 545 * @param[out] pBase BAR0 offset of the PCI config space mirror 546 * @param[out] pSize Size in bytes of the PCI config space mirror 547 * 548 * @returns NV_OK 549 */ 550 NV_STATUS 551 kbifGetPciConfigSpacePriMirror_GM107 552 ( 553 OBJGPU *pGpu, 554 KernelBif *pKernelBif, 555 NvU32 *pBase, 556 NvU32 *pSize 557 ) 558 { 559 *pBase = DEVICE_BASE(NV_PCFG); 560 *pSize = DEVICE_EXTENT(NV_PCFG) - DEVICE_BASE(NV_PCFG) + 1; 561 return NV_OK; 562 } 563 564 /*! 565 * @brief C73 chipset WAR 566 * 567 * @param[in] pGpu GPU object pointer 568 * @param[in] pKernelBif Kernel BIF object pointer 569 */ 570 void 571 kbifExecC73War_GM107 572 ( 573 OBJGPU *pGpu, 574 KernelBif *pKernelBif 575 ) 576 { 577 OBJSYS *pSys = SYS_GET_INSTANCE(); 578 OBJOS *pOS = SYS_GET_OS(pSys); 579 OBJCL *pCl = SYS_GET_CL(pSys); 580 NvU32 val; 581 582 if (CS_NVIDIA_C73 == pCl->Chipset) 583 { 584 // 585 // Turn off L0s on the chipset which are required by the suspend/resume 586 // cycles in Vista. See bug 400044 for more details. 587 // 588 589 // vAddr is a mapped cpu virtual addr into the root ports config space. 
590 if (!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS) && 591 (pGpu->gpuClData.rootPort.vAddr != 0)) 592 { 593 val = MEM_RD32((NvU8*)pGpu->gpuClData.rootPort.vAddr+NV_XVR_VEND_XP1); 594 val = FLD_SET_DRF(_XVR, _VEND_XP1, _IGNORE_L0S, _EN, val); 595 MEM_WR32((NvU8*)pGpu->gpuClData.rootPort.vAddr+NV_XVR_VEND_XP1, val); 596 } 597 else if (pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS) && 598 pGpu->gpuClData.rootPort.addr.valid) 599 { 600 val = osPciReadDword(pGpu->gpuClData.rootPort.addr.handle, NV_XVR_VEND_XP1); 601 val = FLD_SET_DRF(_XVR, _VEND_XP1, _IGNORE_L0S, _EN, val); 602 osPciWriteDword(pGpu->gpuClData.rootPort.addr.handle, NV_XVR_VEND_XP1, val); 603 } 604 else 605 { 606 NV_PRINTF(LEVEL_ERROR, 607 "Cannot turn off L0s on C73 chipset, suspend/resume may fail (Bug 400044).\n"); 608 DBG_BREAKPOINT(); 609 } 610 } 611 } 612 613 NV_STATUS 614 kbifGetBusOptionsAddr_GM107 615 ( 616 OBJGPU *pGpu, 617 KernelBif *pKernelBif, 618 BUS_OPTIONS options, 619 NvU32 *addrReg 620 ) 621 { 622 NV_STATUS status = NV_OK; 623 624 switch (options) 625 { 626 case BUS_OPTIONS_DEV_CONTROL_STATUS: 627 *addrReg = NV_XVE_DEVICE_CONTROL_STATUS; 628 break; 629 case BUS_OPTIONS_LINK_CONTROL_STATUS: 630 *addrReg = NV_XVE_LINK_CONTROL_STATUS; 631 break; 632 case BUS_OPTIONS_LINK_CAPABILITIES: 633 *addrReg = NV_XVE_LINK_CAPABILITIES; 634 break; 635 default: 636 NV_PRINTF(LEVEL_ERROR, "Invalid register type passed 0x%x\n", 637 options); 638 status = NV_ERR_GENERIC; 639 break; 640 } 641 return status; 642 } 643 644 NV_STATUS 645 kbifDisableSysmemAccess_GM107 646 ( 647 OBJGPU *pGpu, 648 KernelBif *pKernelBif, 649 NvBool bDisable 650 ) 651 { 652 NV_STATUS status = NV_OK; 653 RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); 654 NV2080_CTRL_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS_PARAMS params = {0}; 655 656 // Only support on Windows 657 NV_ASSERT_OR_RETURN(RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM, NV_ERR_NOT_SUPPORTED); 658 659 params.bDisable = bDisable; 
660 status = pRmApi->Control(pRmApi, 661 pGpu->hInternalClient, 662 pGpu->hInternalSubdevice, 663 NV2080_CTRL_CMD_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS, 664 ¶ms, 665 sizeof(NV2080_CTRL_INTERNAL_BIF_DISABLE_SYSTEM_MEMORY_ACCESS_PARAMS)); 666 667 // Only set the PDB in kernel if it was set in physical successfully 668 if (status == NV_OK) 669 { 670 pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_SYSTEM_ACCESS_DISABLED, bDisable); 671 } 672 673 return status; 674 } 675 676 /*! 677 * This function setups the xve register map pointers 678 * 679 * @param[in] pGpu GPU object pointer 680 * @param[in] pKernelBif Pointer to KernelBif object 681 * @param[in] func PCIe function number 682 * 683 * @return 'NV_OK' if successful, an RM error code otherwise. 684 */ 685 NV_STATUS 686 kbifInitXveRegMap_GM107 687 ( 688 OBJGPU *pGpu, 689 KernelBif *pKernelBif, 690 NvU8 func 691 ) 692 { 693 if (func == 0) 694 { 695 pKernelBif->xveRegmapRef[0].nFunc = 0; 696 pKernelBif->xveRegmapRef[0].xveRegMapValid = xveRegMapValid; 697 pKernelBif->xveRegmapRef[0].xveRegMapWrite = xveRegMapWrite; 698 pKernelBif->xveRegmapRef[0].numXveRegMapValid = sizeof(xveRegMapValid)/sizeof(xveRegMapValid[0]); 699 pKernelBif->xveRegmapRef[0].numXveRegMapWrite = sizeof(xveRegMapWrite)/sizeof(xveRegMapWrite[0]); 700 pKernelBif->xveRegmapRef[0].bufBootConfigSpace = pKernelBif->cacheData.gpuBootConfigSpace; 701 // No MSIX for this GPU 702 pKernelBif->xveRegmapRef[0].bufMsixTable = NULL; 703 } 704 else if (func == 1) 705 { 706 pKernelBif->xveRegmapRef[1].nFunc = 1; 707 pKernelBif->xveRegmapRef[1].xveRegMapValid = xve1RegMapValid; 708 pKernelBif->xveRegmapRef[1].xveRegMapWrite = xve1RegMapWrite; 709 pKernelBif->xveRegmapRef[1].numXveRegMapValid = sizeof(xve1RegMapValid)/sizeof(xve1RegMapValid[0]); 710 pKernelBif->xveRegmapRef[1].numXveRegMapWrite = sizeof(xve1RegMapWrite)/sizeof(xve1RegMapWrite[0]); 711 pKernelBif->xveRegmapRef[1].bufBootConfigSpace = pKernelBif->cacheData.azaliaBootConfigSpace; 712 // No MSIX for 
this func 713 pKernelBif->xveRegmapRef[1].bufMsixTable = NULL; 714 } 715 else 716 { 717 NV_PRINTF(LEVEL_ERROR, "Invalid argument, func: %d.\n", func); 718 NV_ASSERT(0); 719 return NV_ERR_INVALID_ARGUMENT; 720 } 721 722 return NV_OK; 723 } 724 725 /*! 726 * @brief Clears Bus Master Enable bit in command register, disabling 727 * Function 0 - from issuing any new requests to sysmem. 728 * 729 * @param[in] pGpu GPU object pointer 730 * @param[in] pKernelBif KernelBif object pointer 731 * 732 * @return NV_OK 733 */ 734 NV_STATUS 735 kbifStopSysMemRequests_GM107 736 ( 737 OBJGPU *pGpu, 738 KernelBif *pKernelBif, 739 NvBool bStop 740 ) 741 { 742 NvU32 regVal; 743 744 NV_ASSERT_OK_OR_RETURN(GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEV_CTRL, ®Val)); 745 746 if (bStop) 747 { 748 regVal = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_BUS_MASTER, _DISABLED, regVal); 749 } 750 else 751 { 752 regVal = FLD_SET_DRF(_XVE, _DEV_CTRL, _CMD_BUS_MASTER, _ENABLED, regVal); 753 } 754 755 NV_ASSERT_OK_OR_RETURN(GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEV_CTRL, regVal)); 756 757 return NV_OK; 758 } 759 760 761 /* 762 * @brief Restore the BAR0 register from the given config space buffer 763 * BAR0 register restore has to use the config cycle write. 764 * 765 * @param[in] pGpu GPU object pointer 766 * @param[in] pKernelBif Pointer to KernelBif object 767 * @param[in] handle PCI handle for GPU 768 * @param[in] bufConfigSpace Stored config space 769 */ 770 void 771 kbifRestoreBar0_GM107 772 ( 773 OBJGPU *pGpu, 774 KernelBif *pKernelBif, 775 void *handle, 776 NvU32 *bufConfigSpace 777 ) 778 { 779 // 780 // Not much ROI in storing BAR offsets for legacy chips since 781 // BAR offsets are not going to change ever for legacy chips 782 // 783 osPciWriteDword(handle, NV_XVE_BAR0, 784 bufConfigSpace[NV_XVE_BAR0/sizeof(NvU32)]); 785 } 786 787 788 /*! 789 * @brief Check if any of the BAR register reads returns a valid value. 
790 * 791 * @param[in] pGpu GPU object pointer 792 * @param[in] pKernelBif KernelBif object pointer 793 * 794 * @returns NV_TRUE if any BAR register read returns a valid value 795 * NV_FALSE if all the BAR registers return an invalid values 796 */ 797 NvBool 798 kbifAnyBarsAreValid_GM107 799 ( 800 OBJGPU *pGpu, 801 KernelBif *pKernelBif 802 ) 803 { 804 NvU32 domain = gpuGetDomain(pGpu); 805 NvU8 bus = gpuGetBus(pGpu); 806 NvU8 device = gpuGetDevice(pGpu); 807 NvU16 vendorId, deviceId; 808 void *handle; 809 810 handle = osPciInitHandle(domain, bus, device, 0, &vendorId, &deviceId); 811 812 if (osPciReadDword(handle, NV_XVE_BAR0) == pKernelBif->cacheData.gpuBootConfigSpace[4]) 813 { 814 // BAR0 is valid 815 return NV_TRUE; 816 } 817 818 if ((osPciReadDword(handle, NV_XVE_BAR1_LO) == pKernelBif->cacheData.gpuBootConfigSpace[5]) && 819 (osPciReadDword(handle, NV_XVE_BAR1_HI) == pKernelBif->cacheData.gpuBootConfigSpace[6])) 820 { 821 // BAR1 is valid 822 return NV_TRUE; 823 } 824 825 return NV_FALSE; 826 } 827 828 /*! 
 * @brief Try restoring BAR registers and command register using config cycles
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBif  KernelBif object pointer
 *
 * @returns NV_OK on success
 *          NV_ERR_INVALID_READ if the register read returns unexpected value
 */
NV_STATUS
kbifRestoreBarsAndCommand_GM107
(
    OBJGPU    *pGpu,
    KernelBif *pKernelBif
)
{
    NvU32 domain = gpuGetDomain(pGpu);
    NvU8  bus    = gpuGetBus(pGpu);
    NvU8  device = gpuGetDevice(pGpu);
    NvU16 vendorId, deviceId;
    void *handle;

    // Restore goes through OS PCI config cycles (not BAR0) since BAR0
    // itself is among the registers being restored.
    handle = osPciInitHandle(domain, bus, device, 0, &vendorId, &deviceId);

    //
    // Indices into the cached boot config space are dword offsets:
    // [1] = command/DEV_CTRL, [4]..[9] = BAR0..BAR3 — presumably fixed for
    // these legacy chips (see kbifRestoreBar0_GM107); TODO confirm layout.
    //
    osPciWriteDword(handle, NV_XVE_BAR0,     pKernelBif->cacheData.gpuBootConfigSpace[4]);
    osPciWriteDword(handle, NV_XVE_BAR1_LO,  pKernelBif->cacheData.gpuBootConfigSpace[5]);
    osPciWriteDword(handle, NV_XVE_BAR1_HI,  pKernelBif->cacheData.gpuBootConfigSpace[6]);
    osPciWriteDword(handle, NV_XVE_BAR2_LO,  pKernelBif->cacheData.gpuBootConfigSpace[7]);
    osPciWriteDword(handle, NV_XVE_BAR2_HI,  pKernelBif->cacheData.gpuBootConfigSpace[8]);
    osPciWriteDword(handle, NV_XVE_BAR3,     pKernelBif->cacheData.gpuBootConfigSpace[9]);
    osPciWriteDword(handle, NV_XVE_DEV_CTRL, pKernelBif->cacheData.gpuBootConfigSpace[1]);

    // Sanity check: a BAR0 read of NV_PMC_BOOT_0 must match the chip id
    // captured at boot, otherwise the restore did not take effect.
    if (GPU_REG_RD32(pGpu, NV_PMC_BOOT_0) != pGpu->chipId0)
    {
        return NV_ERR_INVALID_READ;
    }

    return NV_OK;
}

/*!
 * @brief HAL specific BIF software state initialization
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBif  KernelBif object pointer
 *
 * @return NV_OK on success
 */
NV_STATUS
kbifInit_GM107
(
    OBJGPU    *pGpu,
    KernelBif *pKernelBif
)
{
    // Cache the offsets of BAR registers into an array for subsequent use
    kbifStoreBarRegOffsets_HAL(pGpu, pKernelBif, NV_XVE_BAR0);

    return NV_OK;
}

/*!
 * @brief Destructor
 *
 * @param[in] pKernelBif
 *
 * @returns void
 */
void
kbifDestruct_GM107
(
    KernelBif *pKernelBif
)
{
    //
    // Release the cached MSI-X table buffer. On GM107 this is set to NULL by
    // kbifInitXveRegMap_GM107 (no MSI-X), so this is normally a no-op —
    // presumably portMemFree tolerates NULL; TODO confirm.
    //
    portMemFree(pKernelBif->xveRegmapRef[0].bufMsixTable);
    // Clear the pointer to guard against double-free on repeated teardown.
    pKernelBif->xveRegmapRef[0].bufMsixTable = NULL;
}