/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************************************************************
 *
 *       Kernel Display Module
 *       This file contains functions managing display on CPU RM
 *
 ******************************************************************************/

#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS     0

#include "resserv/resserv.h"
#include "rmapi/rmapi.h"
#include "rmapi/rs_utils.h"
#include "os/os.h"

#include "gpu/gpu.h"
#include "gpu/device/device.h"
#include "gpu/disp/kern_disp.h"
#include "gpu/disp/inst_mem/disp_inst_mem.h"
#include "gpu/disp/head/kernel_head.h"
#include "gpu/disp/disp_objs.h"
#include "gpu_mgr/gpu_mgr.h"
#include "objtmr.h"
#include "core/locks.h"
#include "ctrl/ctrl402c.h"
#include "platform/acpi_common.h"

#include "kernel/gpu/intr/engine_idx.h"

#include "ctrl/ctrl2080.h"

#include "class/cl5070.h"
#include "class/cl917a.h"
#include "class/cl917b.h"
#include "class/cl917e.h"
#include "class/cl927c.h"
#include "class/cl947d.h"
#include "class/cl957d.h"
#include "class/cl977d.h"
#include "class/cl987d.h"
#include "class/clc37a.h"
#include "class/clc37b.h"
#include "class/clc37d.h"
#include "class/clc37e.h"
#include "class/clc57a.h"
#include "class/clc57b.h"
#include "class/clc57d.h"
#include "class/clc57e.h"
#include "class/clc67a.h"
#include "class/clc67b.h"
#include "class/clc67d.h"
#include "class/clc67e.h"
#include "class/clc77f.h" // NVC77F_ANY_CHANNEL_DMA

#include "class/clc77d.h"

#include "gpu/disp/rg_line_callback/rg_line_callback.h"
NV_STATUS
kdispConstructEngine_IMPL(OBJGPU        *pGpu,
                          KernelDisplay *pKernelDisplay,
                          ENGDESCRIPTOR  engDesc)
{
    NV_STATUS status;

    //
    // NOTE: DO NOT call IpVersion _HAL functions in ConstructEngine.
    // IP-version-based _HAL functions can only be used starting with StatePreInit.
    // Long-term, RM offload initialization will be moved earlier so that
    // KernelDisplay can use IP version HAL functions even in the construct phase.
    //

    //
    // Sanity check: the only time the KERNEL_DISPLAY module should be enabled
    // while DISP is disabled is in a KERNEL_ONLY build.
    //
    NV_ASSERT(IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu) || RMCFG_MODULE_DISP);

    //
    // We also need to check if we are in certain configurations which can't
    // even attempt a control call to DISP.
    //
    if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IS_MISSING))
        return NV_ERR_NOT_SUPPORTED;

    // Create children
    pKernelDisplay->pInst = NULL;
    status = kdispConstructInstMem_HAL(pKernelDisplay);
    if (status != NV_OK)
    {
        return status;
    }

    status = kdispConstructKhead(pKernelDisplay);

    // We defer checking whether DISP has been disabled some other way until
    // StateInit, when we can do a control call.

    return status;
}

void
kdispDestruct_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    // Destroy children
    kdispDestructInstMem_HAL(pKernelDisplay);
    kdispDestructKhead(pKernelDisplay);
}

/*! Constructor for DisplayInstanceMemory */
NV_STATUS
kdispConstructInstMem_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    NV_STATUS status;
    DisplayInstanceMemory *pInst;

    status = objCreate(&pInst, pKernelDisplay, DisplayInstanceMemory);
    if (status != NV_OK)
    {
        return status;
    }

    pKernelDisplay->pInst = pInst;
    return NV_OK;
}

/*! Destructor for DisplayInstanceMemory */
void
kdispDestructInstMem_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    objDelete(pKernelDisplay->pInst);
    pKernelDisplay->pInst = NULL;
}

/*! Constructor for KernelHead */
NV_STATUS
kdispConstructKhead_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    NV_STATUS   status;
    KernelHead *pKernelHead;
    NvU8        headIdx;

    for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
    {
        status = objCreate(&pKernelHead, pKernelDisplay, KernelHead);
        if (status != NV_OK)
        {
            return status;
        }

        pKernelDisplay->pKernelHead[headIdx] = pKernelHead;
        pKernelDisplay->pKernelHead[headIdx]->PublicId = headIdx;
    }
    return NV_OK;
}

/*! Destructor for KernelHead */
void
kdispDestructKhead_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    NvU8 headIdx;

    for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
    {
        objDelete(pKernelDisplay->pKernelHead[headIdx]);
        pKernelDisplay->pKernelHead[headIdx] = NULL;
    }
}
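/*
 * Illustrative sketch (editorial addition, not RM code): after
 * kdispConstructKhead() succeeds, each per-head child is reachable by index
 * and carries its own index in PublicId, so callers can walk heads like this:
 *
 *     NvU8 headIdx;
 *     for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
 *     {
 *         KernelHead *pKernelHead = KDISP_GET_HEAD(pKernelDisplay, headIdx);
 *         // pKernelHead->PublicId == headIdx by construction above
 *     }
 */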
NV_STATUS
kdispStatePreInitLocked_IMPL(OBJGPU        *pGpu,
                             KernelDisplay *pKernelDisplay)
{
    NV_STATUS status;
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
    NvU32   hClient = pGpu->hInternalClient;
    NvU32   hSubdevice = pGpu->hInternalSubdevice;
    NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS ctrlParams;

    if (!gpuFuseSupportsDisplay_HAL(pGpu))
        return NV_ERR_NOT_SUPPORTED;

    status = pRmApi->Control(pRmApi, hClient, hSubdevice,
                             NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION,
                             &ctrlParams, sizeof(ctrlParams));
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_WARNING,
                  "Failed to read display IP version (FUSE disabled), status=0x%x\n",
                  status);
        return status;
    }

    // NOTE: KernelDisplay IpVersion _HAL functions can only be called after this point.
    status = gpuInitDispIpHal(pGpu, ctrlParams.ipVersion);

    return status;
}

NV_STATUS
kdispInitBrightcStateLoad_IMPL(OBJGPU        *pGpu,
                               KernelDisplay *pKernelDisplay)
{
    NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *pBrightcInfo = NULL;
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);

    pBrightcInfo = portMemAllocNonPaged(sizeof(NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS));
    if (pBrightcInfo == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "Could not allocate memory for pBrightcInfo\n");
        return NV_ERR_NO_MEMORY;
    }
    portMemSet(pBrightcInfo, 0, sizeof(*pBrightcInfo));

    pBrightcInfo->status = status;
    if ((pKernelDisplay != NULL) && (pKernelDisplay->pStaticInfo->internalDispActiveMask != 0))
    {
        // Fill in the backlight method data.
        pBrightcInfo->backLightDataSize = sizeof(pBrightcInfo->backLightData);
        status = osCallACPI_DSM(pGpu, ACPI_DSM_FUNCTION_CURRENT, NV_ACPI_GENERIC_FUNC_GETBACKLIGHT,
                                (NvU32 *)(pBrightcInfo->backLightData),
                                &pBrightcInfo->backLightDataSize);
        pBrightcInfo->status = status;
    }

    status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
                             NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD,
                             pBrightcInfo, sizeof(*pBrightcInfo));

    portMemFree(pBrightcInfo);

    return status;
}
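/*
 * Note (editorial assumption): the allocate/zero/Control/free pattern used
 * above, and again in kdispStateInitLocked() below, keeps the large
 * NV2080_CTRL_INTERNAL_* parameter structs off the kernel stack. A
 * stack-allocated struct would be functionally equivalent, e.g.:
 *
 *     NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS info;
 *     portMemSet(&info, 0, sizeof(info));
 *
 * but these payloads are assumed large enough that heap allocation is the
 * safer choice in kernel context.
 */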
NV_STATUS
kdispStateInitLocked_IMPL(OBJGPU        *pGpu,
                          KernelDisplay *pKernelDisplay)
{
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
    NV_STATUS status = NV_OK;
    KernelDisplayStaticInfo *pStaticInfo;

    pStaticInfo = portMemAllocNonPaged(sizeof(KernelDisplayStaticInfo));
    if (pStaticInfo == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "Could not allocate KernelDisplayStaticInfo\n");
        status = NV_ERR_NO_MEMORY;
        goto exit;
    }
    portMemSet(pStaticInfo, 0, sizeof(*pStaticInfo));

    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
        pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
                        NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
                        pStaticInfo, sizeof(*pStaticInfo)),
        exit);

    pKernelDisplay->pStaticInfo = pStaticInfo;
    pKernelDisplay->numHeads = pStaticInfo->numHeads;
    pStaticInfo = NULL;

    // Initiate brightc module state load
    status = kdispInitBrightcStateLoad_HAL(pGpu, pKernelDisplay);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "RMAPI control call for brightc state load failed\n");
        goto exit;
    }

    if (pKernelDisplay->pInst != NULL)
    {
        NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
            instmemStateInitLocked(pGpu, pKernelDisplay->pInst), exit);
    }

    // Initialize any external daughterboards that might be out there.

    pGpu->i2cPortForExtdev = NV402C_CTRL_NUM_I2C_PORTS;

    if (pKernelDisplay->pStaticInfo->i2cPort == NV402C_CTRL_NUM_I2C_PORTS)
    {
        NV_PRINTF(LEVEL_INFO, "Failed to get a valid I2C port for the external device, or no external device exists\n");
    }
    else
    {
        pGpu->i2cPortForExtdev = pKernelDisplay->pStaticInfo->i2cPort;

        if (NV_OK != gpuExtdevConstruct_HAL(pGpu))
        {
            NV_PRINTF(LEVEL_INFO, "gpuExtdevConstruct() failed or not supported\n");
        }
    }

    if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IMP_ENABLE))
    {
        // NOTE: Fills in the IMP parameters and populates them in the disp object on Tegra.
        NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
            kdispImportImpData_HAL(pKernelDisplay), exit);
    }

exit:
    portMemFree(pStaticInfo);

    return status;
}

void
kdispStateDestroy_IMPL(OBJGPU *pGpu,
                       KernelDisplay *pKernelDisplay)
{
    if (pKernelDisplay->pInst != NULL)
    {
        instmemStateDestroy(pGpu, pKernelDisplay->pInst);
    }

    portMemFree((void*) pKernelDisplay->pStaticInfo);
    pKernelDisplay->pStaticInfo = NULL;
}

NV_STATUS
kdispStateLoad_IMPL
(
    OBJGPU        *pGpu,
    KernelDisplay *pKernelDisplay,
    NvU32          flags
)
{
    NV_STATUS status = NV_OK;

    if (pKernelDisplay->pInst != NULL)
        status = instmemStateLoad(pGpu, pKernelDisplay->pInst, flags);

    return status;
}

NV_STATUS
kdispStateUnload_IMPL
(
    OBJGPU        *pGpu,
    KernelDisplay *pKernelDisplay,
    NvU32          flags
)
{
    NV_STATUS status = NV_OK;

    if (pKernelDisplay->pInst != NULL)
        status = instmemStateUnload(pGpu, pKernelDisplay->pInst, flags);

    return status;
}

/*! Get and populate IMP init data for Tegra */
NV_STATUS
kdispImportImpData_IMPL(KernelDisplay *pKernelDisplay)
{
    OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay);
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
    NvU32   hClient = pGpu->hInternalClient;
    NvU32   hSubdevice = pGpu->hInternalSubdevice;
    NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS params;
    NvU32   simulationMode;

    //
    // FPGA has different latency characteristics, and the current code latency
    // models that IMP uses for silicon will not work for FPGA, so keep IMP
    // disabled by default on Tegra FPGA.
    //
    simulationMode = osGetSimulationMode();
    if (simulationMode == NV_SIM_MODE_TEGRA_FPGA)
    {
        pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_IMP_ENABLE, NV_FALSE);
        return NV_OK;
    }

    NV_ASSERT_OK_OR_RETURN(osTegraSocGetImpImportData(&params.tegraImpImportData));

    NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, hClient, hSubdevice,
                           NV2080_CTRL_CMD_INTERNAL_DISPLAY_SET_IMP_INIT_INFO,
                           &params, sizeof(params)));

    return NV_OK;
}
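/*
 * Illustrative sketch (editorial addition): the simulation-mode gate above
 * follows the usual osGetSimulationMode() pattern; a hypothetical extension
 * covering all non-silicon targets might look like:
 *
 *     if (osGetSimulationMode() != NV_SIM_MODE_HARDWARE)
 *     {
 *         pKernelDisplay->setProperty(pKernelDisplay,
 *                                     PDB_PROP_KDISP_IMP_ENABLE, NV_FALSE);
 *         return NV_OK;
 *     }
 */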
/*! Get the internal enum equivalent of the HW class number */
NV_STATUS
kdispGetIntChnClsForHwCls_IMPL
(
    KernelDisplay *pKernelDisplay,
    NvU32          hwClass,
    DISPCHNCLASS  *pDispChnClass
)
{
    // sanity check
    if (pDispChnClass == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    switch (hwClass)
    {
        case NV917A_CURSOR_CHANNEL_PIO:
        case NVC37A_CURSOR_IMM_CHANNEL_PIO:
        case NVC57A_CURSOR_IMM_CHANNEL_PIO:
        case NVC67A_CURSOR_IMM_CHANNEL_PIO:
            *pDispChnClass = dispChnClass_Curs;
            break;

        case NV917B_OVERLAY_IMM_CHANNEL_PIO:
            *pDispChnClass = dispChnClass_Ovim;
            break;

        case NV927C_BASE_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Base;
            break;

        case NV947D_CORE_CHANNEL_DMA:
        case NV957D_CORE_CHANNEL_DMA:
        case NV977D_CORE_CHANNEL_DMA:
        case NV987D_CORE_CHANNEL_DMA:
        case NVC37D_CORE_CHANNEL_DMA:
        case NVC57D_CORE_CHANNEL_DMA:
        case NVC67D_CORE_CHANNEL_DMA:
        case NVC77D_CORE_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Core;
            break;

        case NV917E_OVERLAY_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Ovly;
            break;

        case NVC37B_WINDOW_IMM_CHANNEL_DMA:
        case NVC57B_WINDOW_IMM_CHANNEL_DMA:
        case NVC67B_WINDOW_IMM_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Winim;
            break;

        case NVC37E_WINDOW_CHANNEL_DMA:
        case NVC57E_WINDOW_CHANNEL_DMA:
        case NVC67E_WINDOW_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Win;
            break;

        case NVC77F_ANY_CHANNEL_DMA:
            // Assert in case of physical RM: the ANY channel is a kernel-only channel.
            NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_INVALID_CHANNEL);
            *pDispChnClass = dispChnClass_Any;
            break;

        default:
            NV_PRINTF(LEVEL_ERROR, "Unknown channel class %x\n", hwClass);
            return NV_ERR_INVALID_ARGUMENT;
    }

    return NV_OK;
}
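/*
 * Usage sketch (editorial addition): mapping a HW class to its internal
 * channel class, assuming a constructed pKernelDisplay:
 *
 *     DISPCHNCLASS chnClass;
 *     if (kdispGetIntChnClsForHwCls(pKernelDisplay,
 *                                   NVC67E_WINDOW_CHANNEL_DMA,
 *                                   &chnClass) == NV_OK)
 *     {
 *         // chnClass == dispChnClass_Win for this class
 *     }
 */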
void
kdispNotifyEvent_IMPL
(
    OBJGPU        *pGpu,
    KernelDisplay *pKernelDisplay,
    NvU32          notifyIndex,
    void          *pNotifyParams,
    NvU32          notifyParamsSize,
    NvV32          info32,
    NvV16          info16
)
{
    PEVENTNOTIFICATION pEventNotifications;
    NvU32             *pNotifyActions;
    NvU32              disableCmd, singleCmd;
    NvU32              subDeviceInst;
    RS_SHARE_ITERATOR  it = serverutilShareIter(classId(NotifShare));

    // search notifiers with events hooked up for this gpu
    while (serverutilShareIterNext(&it))
    {
        RsShared   *pShared = it.pShared;
        DisplayApi *pDisplayApi;
        INotifier  *pNotifier;
        Device     *pDevice;
        NotifShare *pNotifierShare = dynamicCast(pShared, NotifShare);

        if ((pNotifierShare == NULL) || (pNotifierShare->pNotifier == NULL))
            continue;

        pNotifier = pNotifierShare->pNotifier;
        pDisplayApi = dynamicCast(pNotifier, DisplayApi);

        // Skip notifiers that are not DisplayApi objects
        if (pDisplayApi == NULL)
            continue;

        pDevice = dynamicCast(RES_GET_REF(pDisplayApi)->pParentRef->pResource, Device);

        // Only notify matching GPUs
        if (GPU_RES_GET_GPU(pDevice) != pGpu)
            continue;

        gpuSetThreadBcState(GPU_RES_GET_GPU(pDevice), pDisplayApi->bBcResource);

        disableCmd = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
        singleCmd = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;

        // get notify actions list
        subDeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
        pNotifyActions = pDisplayApi->pNotifyActions[subDeviceInst];
        if (pNotifyActions == NULL)
        {
            continue;
        }

        // get event list
        pEventNotifications = inotifyGetNotificationList(pNotifier);
        if (pEventNotifications == NULL)
        {
            continue;
        }

        // skip if client is not "listening" to events of this type
        if (pNotifyActions[notifyIndex] == disableCmd)
        {
            continue;
        }

        if (pDisplayApi->hNotifierMemory != NV01_NULL_OBJECT &&
            pDisplayApi->pNotifierMemory != NULL)
        {
            notifyFillNotifierMemory(pGpu, pDisplayApi->pNotifierMemory, info32, info16,
                                     NV5070_NOTIFICATION_STATUS_DONE_SUCCESS, notifyIndex);
        }

        // ping events bound to the subdevice associated with pGpu
        osEventNotification(pGpu, pEventNotifications,
                            (notifyIndex | OS_EVENT_NOTIFICATION_INDEX_MATCH_SUBDEV),
                            pNotifyParams, notifyParamsSize);

        // reset if single-shot notify action
        if (pNotifyActions[notifyIndex] == singleCmd)
        {
            pNotifyActions[notifyIndex] = disableCmd;
        }
    }
}

void
kdispSetWarPurgeSatellitesOnCoreFree_IMPL
(
    KernelDisplay *pKernelDisplay,
    NvBool         value
)
{
    pKernelDisplay->bWarPurgeSatellitesOnCoreFree = value;
}
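/*
 * Illustrative sketch (editorial assumption): a client typically arms a
 * per-event action via NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION, e.g.:
 *
 *     NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS params = {0};
 *     params.event  = notifyIndex;
 *     params.action = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
 *
 * With ACTION_SINGLE, kdispNotifyEvent() delivers one notification and then
 * resets the action to ACTION_DISABLE (see the tail of the loop above).
 */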
NV_STATUS
kdispRegisterRgLineCallback_IMPL
(
    KernelDisplay  *pKernelDisplay,
    RgLineCallback *pRgLineCallback,
    NvU32           head,
    NvU32           rgIntrLine,
    NvBool          bEnable
)
{
    NV_ASSERT_OR_RETURN(head < OBJ_MAX_HEADS, NV_ERR_INVALID_ARGUMENT);
    NV_ASSERT_OR_RETURN(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD, NV_ERR_INVALID_ARGUMENT);

    RgLineCallback **slot = &pKernelDisplay->rgLineCallbackPerHead[head][rgIntrLine];

    if (bEnable && *slot == NULL)
    {
        *slot = pRgLineCallback;
    }
    else if (!bEnable && *slot == pRgLineCallback)
    {
        *slot = NULL;
    }
    else
    {
        //
        // OBJDISP is the authority for *allocating* these "slots";
        // KernelDisplay trusts it as an allocator.
        // If we try to register a callback in an occupied slot, or free an
        // empty slot, it means OBJDISP has created conflicting allocations or
        // has allowed a double-free. (Or RgLineCallback has provided invalid
        // parameters.)
        //
        NV_ASSERT_FAILED("Invalid KernelDisplay state for RgLineCallback");
        return NV_ERR_INVALID_STATE;
    }

    return NV_OK;
}

void
kdispInvokeRgLineCallback_KERNEL
(
    KernelDisplay *pKernelDisplay,
    NvU32          head,
    NvU32          rgIntrLine,
    NvBool         bIsIrqlIsr
)
{
    NV_ASSERT_OR_RETURN_VOID(head < OBJ_MAX_HEADS);
    NV_ASSERT_OR_RETURN_VOID(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD);

    RgLineCallback *pCallbackObject = pKernelDisplay->rgLineCallbackPerHead[head][rgIntrLine];

    if (pCallbackObject != NULL)
    {
        rglcbInvoke(pCallbackObject, bIsIrqlIsr);
    }
    else if (IS_GSP_CLIENT(ENG_GET_GPU(pKernelDisplay)))
    {
        //
        // For the offloaded RM case, a callback invocation without a registered
        // callback can happen during or after deregistration: an event may
        // already have been in the queue by the time we asked physical RM to
        // deconfigure the interrupt.
        //
        // Because this could lead to an A-B-A situation where a new callback is
        // registered in the same slot and invoked in place of the old callback,
        // we must assert against this case. To avoid it, RgLineCallback must
        // drain the client RM event queue after deconfiguring the interrupt and
        // before calling kdispRegisterRgLineCallback to deregister the callback.
        //
        NV_ASSERT_FAILED("got RgLineCallback invocation for null callback");
    }
    else
    {
        //
        // For the monolithic RM case, a callback invocation without a registered
        // callback can happen during registration: after hardware is configured
        // for the interrupt, but before the callback is registered with
        // KernelDisplay, the interrupt could be handled.
        //
        // This is not a bug in and of itself, as it is harmless and expected. On
        // the other hand, we would not expect to see this warning in the log more
        // than a few times per registration (e.g. not for every single interrupt),
        // as the callback ought to be fully registered before excessively many
        // interrupts are handled.
        //
        NV_PRINTF(LEVEL_WARNING, "got RgLineCallback invocation for null callback\n");
    }
}
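/*
 * Deregistration order implied by the comments above (editorial sketch, not
 * code from this file):
 *
 *     1. Ask physical RM to deconfigure the RG line interrupt.
 *     2. Drain the client RM event queue so that no stale invocation can race
 *        with a later registration in the same slot.
 *     3. kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback,
 *                                    head, rgIntrLine, NV_FALSE);
 */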
#define HOTPLUG_PROFILE 0

#if HOTPLUG_PROFILE

#define ISR_TSTAMP_SIZE 18000 /* 5 minutes (5*60Hz*60) */

NvU32 timeStampIndexISR = ISR_TSTAMP_SIZE - 1;

tmr_tstamp_u timeStampStartISR[ISR_TSTAMP_SIZE];
tmr_tstamp_u timeStampDeltaISR[ISR_TSTAMP_SIZE];

#endif

void
kdispServiceVblank_KERNEL
(
    OBJGPU            *pGpu,
    KernelDisplay     *pKernelDisplay,
    NvU32              headmask,
    NvU32              state,
    THREAD_STATE_NODE *pThreadState
)
{
    NvU32       pending, check_pending, pending_checked;
    NvU32       Head;
    NvU32       maskNonEmptyQueues[OBJ_MAX_HEADS]; // array of masks of VBLANK_STATE_PROCESS_XXX_LATENCY bits, indicating which queues are non-empty
    NvU32       unionNonEmptyQueues = 0;           // mask of VBLANK_STATE_PROCESS_XXX_LATENCY bits, union of queue states of all heads w/ pending vblank ints
    NvU32       Count = 0;
    NvU32       i, skippedcallbacks;
    NvU32       maskCallbacksStillPending = 0;
    KernelHead *pKernelHead = NULL;

#if HOTPLUG_PROFILE
    OBJTMR *pTmr;
    pTmr = GPU_GET_TIMER(pGpu);
    if (++timeStampIndexISR >= ISR_TSTAMP_SIZE)
        timeStampIndexISR = 0;

    tmrGetCurrentTime(pTmr, &timeStampStartISR[timeStampIndexISR].time32.hi, &timeStampStartISR[timeStampIndexISR].time32.lo);

    // For the ISR we want to know how much time has passed since the last ISR.
    if (timeStampIndexISR)
    {
        NvU64 temp64;

        temp64 = timeStampStartISR[timeStampIndexISR].time64;
        temp64 -= timeStampStartISR[timeStampIndexISR - 1].time64;

        timeStampDeltaISR[timeStampIndexISR].time64 = temp64;
    }
#endif

    // If the caller failed to specify which queues, assume they wanted all of them.
    if (!(state & VBLANK_STATE_PROCESS_ALL_CALLBACKS))
    {
        state |= VBLANK_STATE_PROCESS_ALL_CALLBACKS;
    }

    // If the headmask is 0, we should process all heads.
    if (headmask == 0)
    {
        headmask = 0xFFFFFFFF;
    }

    //
    // If we are being asked to process the callbacks now, regardless of the true
    // irqspending, we force the pending mask to the head mask passed in.
    //
    if (state & VBLANK_STATE_PROCESS_IMMEDIATE)
    {
        pending = headmask;
    }
    else
    {
        // We're here because at least one of the PCRTC bits MAY be pending.
        pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
    }

    // No sense in doing anything if there is nothing pending.
    if (pending == 0)
    {
        return;
    }

    //
    // We want to check for pending service now, and then we check again each
    // time through the loop. Keep these separate.
    //
    check_pending = pending;

    // We have not checked anything yet.
    pending_checked = 0;

    // Start with head 0.
    Head = 0;

    //
    // We keep scanning all supported heads, and if we have something pending,
    // check the associated queues.
    //
    while (pending_checked != pending)
    {
        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);

        // Check this head only if its interrupt is pending and not yet checked.
        if ((headmask & check_pending & ~pending_checked) & NVBIT(Head))
        {
            // Track that we have now checked this head.
            pending_checked |= NVBIT(Head);

            // If our queues are empty, we can bail early.
            maskNonEmptyQueues[Head] = kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, state, NULL);
            unionNonEmptyQueues |= maskNonEmptyQueues[Head];

            // Determine the callback states whose execution the caller has skipped.
            skippedcallbacks = ((state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) ^ VBLANK_STATE_PROCESS_ALL_CALLBACKS);
            skippedcallbacks |= (state & (VBLANK_STATE_PROCESS_CALLED_FROM_ISR | VBLANK_STATE_PROCESS_IMMEDIATE));

            // Now let's see if there are callbacks pending in the skipped states.
            maskCallbacksStillPending |= NVBIT(Head) * !!kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, skippedcallbacks, NULL);
        }

        // Don't check for new interrupts if we are in immediate mode.
        if (!(state & VBLANK_STATE_PROCESS_IMMEDIATE))
        {
            pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
        }

        // If there was a change in the pending state, we should recheck everything.
        if (check_pending != pending)
        {
            // We need to recheck heads that were not pending before.
            check_pending = pending;
            Head = 0;
        }
        else
        {
            // Nothing changed, so move on to the next head.
            Head++;
        }

        // Make sure we don't waste time on heads that don't exist.
        if (Head >= OBJ_MAX_HEADS)
        {
            break;
        }
    }

    if (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR)
    {
        // Store which heads have pending vblank interrupts, for comparison at the next DPC time.
        pKernelDisplay->isrVblankHeads = pending;
    }

    // Increment the per-head vblank total counter for any head with a pending vblank interrupt.
    for (Head = 0; Head < OBJ_MAX_HEADS; Head++)
    {
        // Move on if this head's interrupt isn't pending...
        if ((pending & NVBIT(Head)) == 0)
        {
            continue;
        }

        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);

        //
        // Increment vblank counters, as appropriate.
        //

        // Track the fact that we passed through here. This keeps the RC manager happy.
        Count = kheadGetVblankTotalCounter_HAL(pKernelHead) + 1;
        kheadSetVblankTotalCounter_HAL(pKernelHead, Count);

        //
        // Update the vblank counter if we are single chip or multichip master.
        // We now have two queues, so we need to have two vblank counters.
        //

        // Did they ask for processing of low-latency work?
        if (state & VBLANK_STATE_PROCESS_LOW_LATENCY /* & maskNonEmptyQueues[Head] */)
        {
            //
            // Don't let the DPC thread increment the low-latency counter.
            // Otherwise, the counter will frequently increment at double the
            // expected rate, breaking things like swapInterval.
            //
            // XXX actually, there is one case where it would be OK for the DPC
            // thread to increment this counter: if the DPC thread could ascertain
            // that 'pending & NVBIT(Head)' represented a new interrupt event, and
            // not simply the one that the ISR left uncleared in PCRTC_INTR_0, for
            // the purpose of causing this DPC thread to get queued.
            // Not sure how to do that.
            //
            if (!(state & VBLANK_STATE_PROCESS_CALLED_FROM_DPC) || (pending & NVBIT(Head) & ~pKernelDisplay->isrVblankHeads))
            {
                // Either we were called from the ISR, or vblank is asserted in the DPC when it wasn't in the ISR.

                // The low-latency queue was requested, and this isn't a DPC-only re-report.
                Count = kheadGetVblankLowLatencyCounter_HAL(pKernelHead) + 1;
                kheadSetVblankLowLatencyCounter_HAL(pKernelHead, Count);
            }
        }

        // Did they ask for processing of normal-latency work?
        if (state & VBLANK_STATE_PROCESS_NORMAL_LATENCY /* & maskNonEmptyQueues[Head] */)
        {
            // Processing of the normal-latency queue was requested.
            Count = kheadGetVblankNormLatencyCounter_HAL(pKernelHead) + 1;
            kheadSetVblankNormLatencyCounter_HAL(pKernelHead, Count);
        }
    }

    //
    // If we have nothing to process (no work to do in any queue), we can bail
    // early. We got here for some reason, so make sure we clear the interrupts.
    //
    if (!unionNonEmptyQueues)
    {
        // All queues (belonging to heads with pending vblank interrupts) are empty.
        kheadResetPendingVblankForKernel_HAL(pGpu, pKernelHead, pThreadState);
        return;
    }

    //
    // Although we have separate handlers for each head, attempt to process all
    // interrupting heads now. What about DPCs scheduled already?
    //
    for (Head = 0; Head < OBJ_MAX_HEADS; Head++)
    {
        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
        // Move on if this head's interrupt isn't pending...
        if ((pending & NVBIT(Head)) == 0)
        {
            continue;
        }

        // Process the callback list for this head...
        kheadProcessVblankCallbacks(pGpu, pKernelHead, state);
    }

    //
    // If there are still callbacks pending, and we are in an ISR, then don't
    // clear PCRTC_INTR; XXXar why would we *ever* want to clear PCRTC_INTR if
    // there are still things pending?
    //
    if ((maskCallbacksStillPending) &&
        (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR))
    {
        //
        // There are still callbacks pending; don't clear PCRTC_INTR yet. The
        // expectation is that the OS layer will see that interrupts are still
        // pending and queue a DPC/BottomHalf/whatever to service the rest of
        // the vblank callback queues.
        //
        for (i = 0; i < OBJ_MAX_HEADS; i++)
        {
            pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
            kheadResetPendingVblankForKernel_HAL(pGpu, pKernelHead, pThreadState);
        }
    }
    else
    {
        // Reset the vblank interrupts we've handled; don't reset the ones we haven't.
        for (i = 0; i < OBJ_MAX_HEADS; i++)
        {
            pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
            if (pending & NVBIT(i) & ~maskCallbacksStillPending)
            {
                kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState);
            }
        }
    }

    return;
}
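/*
 * Worked example (editorial addition) for the low-latency counter guard in
 * kdispServiceVblank_KERNEL(): suppose the ISR ran with head 0 pending, so
 * isrVblankHeads == NVBIT(0). If the subsequent DPC observes
 * pending == NVBIT(0) | NVBIT(1):
 *
 *     head 0: pending & NVBIT(0) & ~isrVblankHeads == 0
 *             -> skipped (already counted in the ISR)
 *     head 1: pending & NVBIT(1) & ~isrVblankHeads == NVBIT(1)
 *             -> counted (a genuinely new vblank seen only by the DPC)
 *
 * This is what prevents the counter from running at double rate and breaking
 * consumers such as swapInterval.
 */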
NvU32 kdispReadPendingVblank_KERNEL(OBJGPU *pGpu, KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *pThreadState)
{
    KernelHead *pKernelHead;
    NvU32       headIntrMask;
    NvU32       pending = 0;
    NvU8        headIdx;

    for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
    {
        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, headIdx);
        headIntrMask = headIntr_none;
        pending |= kheadReadPendingVblank_HAL(pGpu, pKernelHead, headIntrMask);
    }
    return pending;
}

/**
 * @brief Provides an opportunity to register some IntrService during intrStateInit.
 */
void
kdispRegisterIntrService_IMPL
(
    OBJGPU            *pGpu,
    KernelDisplay     *pKernelDisplay,
    IntrServiceRecord  pRecords[MC_ENGINE_IDX_MAX]
)
{
    NvU32 engineIdx = MC_ENGINE_IDX_DISP;
    NV_ASSERT(pRecords[engineIdx].pInterruptService == NULL);
    pRecords[engineIdx].pInterruptService = staticCast(pKernelDisplay, IntrService);
}
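/*
 * Note (editorial assumption): once registered here, interrupts tagged
 * MC_ENGINE_IDX_DISP are dispatched by the interrupt infrastructure to this
 * IntrService instance, which is how the vblank paths above (e.g.
 * kdispServiceVblank_KERNEL) end up being driven.
 */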
/*!
 * @brief Route modeset start/end notification to kernel RM
 *
 * Physical RM is expected to send a "start" notification at the beginning of
 * every display modeset (supervisor interrupt sequence), and an "end"
 * notification at the end. However, if physical RM detects back-to-back
 * modesets, the intervening "end" notification MAY be skipped; in this case,
 * the "start" notification for the next modeset serves as the "end"
 * notification for the previous modeset.
 *
 * Kernel RM uses the notification to update the BW allocation for display.
 * The ICC call that is required to update the BW allocation cannot be made
 * from physical RM.
 *
 * @param[in] pKernelDisplay                 KernelDisplay pointer
 * @param[in] bModesetStart                  NV_TRUE -> start of modeset;
 *                                           NV_FALSE -> end of modeset
 * @param[in] minRequiredIsoBandwidthKBPS    Min ISO BW required by IMP (KB/sec)
 * @param[in] minRequiredFloorBandwidthKBPS  Min dramclk freq * pipe width (KB/sec)
 */
void
kdispInvokeDisplayModesetCallback_KERNEL
(
    KernelDisplay *pKernelDisplay,
    NvBool         bModesetStart,
    NvU32          minRequiredIsoBandwidthKBPS,
    NvU32          minRequiredFloorBandwidthKBPS
)
{
    NV_STATUS status;

    NV_PRINTF(LEVEL_INFO,
              "Kernel RM received \"%s of modeset\" notification "
              "(minRequiredIsoBandwidthKBPS = %u, minRequiredFloorBandwidthKBPS = %u)\n",
              bModesetStart ? "start" : "end",
              minRequiredIsoBandwidthKBPS,
              minRequiredFloorBandwidthKBPS);

    OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay);
    status =
        kdispArbAndAllocDisplayBandwidth_HAL(pGpu,
                                             pKernelDisplay,
                                             DISPLAY_ICC_BW_CLIENT_RM,
                                             minRequiredIsoBandwidthKBPS,
                                             minRequiredFloorBandwidthKBPS);
    //
    // The modeset cannot be aborted, so, if there is an error, no recovery
    // is possible.
    //
    NV_ASSERT_OK(status);
}
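/*
 * Worked example (editorial addition): for two back-to-back modesets A and B,
 * physical RM may legally emit
 *
 *     start(A) ... start(B) ... end(B)
 *
 * where start(B) doubles as the elided end(A). Since every notification
 * triggers a fresh kdispArbAndAllocDisplayBandwidth_HAL() arbitration, the
 * collapsed sequence still leaves the ICC bandwidth allocation correct.
 */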