1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 */

#include "nvkms.h"
#include "nvkms-private.h"
#include "nvkms-api.h"

#include "nvkms-types.h"
#include "nvkms-utils.h"
#include "nvkms-console-restore.h"
#include "nvkms-dpy.h"
#include "nvkms-dma.h"
#include "nvkms-evo.h"
#include "nvkms-rm.h"
#include "nvkms-rmapi.h"
#include "nvkms-modepool.h"
#include "nvkms-modeset.h"
#include "nvkms-attributes.h"
#include "nvkms-dpy-override.h"
#include "nvkms-framelock.h"
#include "nvkms-surface.h"
#include "nvkms-3dvision.h"
#include "nvkms-ioctl.h"
#include "nvkms-headsurface.h"
#include "nvkms-headsurface-ioctl.h"
#include "nvkms-headsurface-swapgroup.h"
#include "nvkms-flip.h" /* nvFlipEvo */
#include "nvkms-vrr.h"

#include "dp/nvdp-connector.h"

#include "nvUnixVersion.h" /* NV_VERSION_STRING */
#include <class/cl0000.h> /* NV01_NULL_OBJECT/NV01_ROOT */

#include "nv_list.h"


/*! \file
 *
 * This source file implements the API of NVKMS, built around open,
 * close, and ioctl file operations.
 *
 * An NvKmsPerOpen is stored "per-open"; all API handles are specific
 * to a per-open instance.  The NvKmsPerOpen is allocated during each
 * nvKmsOpen() call, and freed during the corresponding nvKmsClose()
 * call.
 *
 * An NvKmsPerOpenDev stores the API handles for the device and all
 * the disps and connectors on the device.  It is allocated during
 * nvKmsIoctl(ALLOC_DEVICE), and freed during nvKmsIoctl(FREE_DEVICE).
 */


/*
 * When the NVKMS device file is opened, the per-open structure could
 * be used for one of several actions, denoted by its "type".  The
 * per-open type starts as Undefined.  The per-open's first use
 * defines its type.  Once the type transitions from Undefined to
 * anything, it can never transition to any other type.
 */
enum NvKmsPerOpenType {
    /*
     * The per-open is used for making ioctl calls to make requests of
     * NVKMS.
     */
    NvKmsPerOpenTypeIoctl,

    /*
     * The per-open is used for granting access to a NVKMS registered
     * surface.
     */
    NvKmsPerOpenTypeGrantSurface,

    /*
     * The per-open is used for granting permissions.
     */
    NvKmsPerOpenTypeGrantPermissions,

    /*
     * The per-open is used for granting access to a swap group
     */
    NvKmsPerOpenTypeGrantSwapGroup,

    /*
     * The per-open is used to unicast a specific event.
     */
    NvKmsPerOpenTypeUnicastEvent,

    /*
     * The per-open is currently undefined (this is the initial
     * state).
     */
    NvKmsPerOpenTypeUndefined,
};

/*
 * For NvKmsPerOpenTypeUnicastEvent: which kind of object the unicast
 * event fd has been bound to (see NvKmsPerOpen::unicastEvent below).
 */
enum NvKmsUnicastEventType {
    /* Used by:
     *  NVKMS_IOCTL_JOIN_SWAP_GROUP */
    NvKmsUnicastEventTypeDeferredRequest,

    /* Used by:
     *  NVKMS_IOCTL_NOTIFY_VBLANK */
    NvKmsUnicastEventTypeVblankNotification,

    /* Undefined, this indicates the unicast fd is available for use. */
    NvKmsUnicastEventTypeUndefined,
};

/* Per-open view of one connector: the EVO connector plus the API
 * handle this client uses to name it. */
struct NvKmsPerOpenConnector {
    NVConnectorEvoPtr pConnectorEvo;
    NvKmsConnectorHandle nvKmsApiHandle;
};

/*
 * Per-open view of a framelock object.  Multiple disps may share the
 * same framelock object, so this is reference counted (see
 * AllocPerOpenFrameLock()/FreePerOpenFrameLock()).
 */
struct NvKmsPerOpenFrameLock {
    NVFrameLockEvoPtr pFrameLockEvo;
    int refCnt;
    NvKmsFrameLockHandle nvKmsApiHandle;
};

/* Per-open view of one disp, and of all its connectors and per-head
 * vblank objects. */
struct NvKmsPerOpenDisp {
    NVDispEvoPtr pDispEvo;
    NvKmsDispHandle nvKmsApiHandle;
    /* Handle into NvKmsPerOpen::ioctl.frameLockHandles; 0 if the disp
     * has no framelock object. */
    NvKmsFrameLockHandle frameLockHandle;
    NVEvoApiHandlesRec connectorHandles;
    struct NvKmsPerOpenConnector connector[NVKMS_MAX_CONNECTORS_PER_DISP];
    /* One handle namespace per head. */
    NVEvoApiHandlesRec vblankSyncObjectHandles[NVKMS_MAX_HEADS_PER_DISP];
    NVEvoApiHandlesRec vblankCallbackHandles[NVKMS_MAX_HEADS_PER_DISP];
};

/* Per-open view of one device, including this client's permissions on
 * the device. */
struct NvKmsPerOpenDev {
    NVDevEvoPtr pDevEvo;
    NvKmsDeviceHandle nvKmsApiHandle;
    NVEvoApiHandlesRec dispHandles;
    NVEvoApiHandlesRec surfaceHandles;
    struct NvKmsFlipPermissions flipPermissions;
    struct NvKmsModesetPermissions modesetPermissions;
    struct NvKmsPerOpenDisp disp[NVKMS_MAX_SUBDEVICES];
    /* Privileged opens get full permissions and are exempt from
     * permission revocation (see RevokePermissionsInternal()). */
    NvBool isPrivileged;
    NVEvoApiHandlesRec deferredRequestFifoHandles;
    NVEvoApiHandlesRec swapGroupHandles;
};

/* One queued event, linked into NvKmsPerOpen::ioctl.eventList. */
struct NvKmsPerOpenEventListEntry {
    NVListRec eventListEntry;
    struct NvKmsEvent event;
};

struct NvKmsPerOpen {
    /* Opaque handle for the kernel-interface layer's per-open data. */
    nvkms_per_open_handle_t *pOpenKernel;
    NvU32 pid;
    enum NvKmsClientType clientType;
    NVListRec perOpenListEntry;
    NVListRec perOpenIoctlListEntry;
    enum NvKmsPerOpenType type;

    /* Which union member is valid is determined by 'type' above. */
    union {
        struct {
            NVListRec eventList;
            NvU32 eventInterestMask;
            NVEvoApiHandlesRec devHandles;
            NVEvoApiHandlesRec frameLockHandles;
        } ioctl;

        struct {
            NVSurfaceEvoPtr pSurfaceEvo;
        } grantSurface;

        struct {
            NVDevEvoPtr pDevEvo;
            NVSwapGroupPtr pSwapGroup;
        } grantSwapGroup;

        struct {
            NVDevEvoPtr pDevEvo;
            struct NvKmsPermissions permissions;
        } grantPermissions;

        struct {
            /*
             * A unicast event NvKmsPerOpen is assigned to an object, so that
             * that object can generate events on the unicast event.  Store a
             * pointer to that object, so that we can clear the pointer when the
             * unicast event NvKmsPerOpen is closed.
             */
            enum NvKmsUnicastEventType type;
            union {
                struct {
                    NVDeferredRequestFifoPtr pDeferredRequestFifo;
                } deferred;

                struct {
                    NvKmsGenericHandle       hCallback;
                    struct NvKmsPerOpenDisp *pOpenDisp;
                    NvU32                    apiHead;
                } vblankNotification;
            } e;
        } unicastEvent;
    };
};

static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo);
static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo);

static void EnableAndSetupVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo);
static void DisableAndCleanVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo);

/* All NvKmsPerOpens, and the subset with type==Ioctl, respectively. */
static NVListRec perOpenList = NV_LIST_INIT(&perOpenList);
static NVListRec perOpenIoctlList = NV_LIST_INIT(&perOpenIoctlList);

/*!
231 * Check if there is an NvKmsPerOpenDev on this NvKmsPerOpen that has 232 * the specified deviceId. 233 */ 234 static NvBool DeviceIdAlreadyPresent(struct NvKmsPerOpen *pOpen, NvU32 deviceId) 235 { 236 struct NvKmsPerOpenDev *pOpenDev; 237 NvKmsGenericHandle dev; 238 239 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 240 241 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 242 pOpenDev, dev) { 243 if (pOpenDev->pDevEvo->usesTegraDevice && 244 (deviceId == NVKMS_DEVICE_ID_TEGRA)) { 245 return TRUE; 246 } else if (pOpenDev->pDevEvo->deviceId == deviceId) { 247 return TRUE; 248 } 249 } 250 251 return FALSE; 252 } 253 254 255 /*! 256 * Get the NvKmsPerOpenDev described by NvKmsPerOpen + deviceHandle. 257 */ 258 static struct NvKmsPerOpenDev *GetPerOpenDev( 259 const struct NvKmsPerOpen *pOpen, 260 const NvKmsDeviceHandle deviceHandle) 261 { 262 if (pOpen == NULL) { 263 return NULL; 264 } 265 266 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 267 268 return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.devHandles, deviceHandle); 269 } 270 271 272 /*! 273 * Get the NvKmsPerOpenDev and NvKmsPerOpenDisp described by 274 * NvKmsPerOpen + deviceHandle + dispHandle. 275 */ 276 static NvBool GetPerOpenDevAndDisp( 277 const struct NvKmsPerOpen *pOpen, 278 const NvKmsDeviceHandle deviceHandle, 279 const NvKmsDispHandle dispHandle, 280 struct NvKmsPerOpenDev **ppOpenDev, 281 struct NvKmsPerOpenDisp **ppOpenDisp) 282 { 283 struct NvKmsPerOpenDev *pOpenDev; 284 struct NvKmsPerOpenDisp *pOpenDisp; 285 286 pOpenDev = GetPerOpenDev(pOpen, deviceHandle); 287 288 if (pOpenDev == NULL) { 289 return FALSE; 290 } 291 292 pOpenDisp = nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, 293 dispHandle); 294 295 if (pOpenDisp == NULL) { 296 return FALSE; 297 } 298 299 *ppOpenDev = pOpenDev; 300 *ppOpenDisp = pOpenDisp; 301 302 return TRUE; 303 } 304 305 306 /*! 307 * Get the NvKmsPerOpenDisp described by NvKmsPerOpen + deviceHandle + 308 * dispHandle. 
309 */ 310 static struct NvKmsPerOpenDisp *GetPerOpenDisp( 311 const struct NvKmsPerOpen *pOpen, 312 const NvKmsDeviceHandle deviceHandle, 313 const NvKmsDispHandle dispHandle) 314 { 315 struct NvKmsPerOpenDev *pOpenDev; 316 317 pOpenDev = GetPerOpenDev(pOpen, deviceHandle); 318 319 if (pOpenDev == NULL) { 320 return NULL; 321 } 322 323 return nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, dispHandle); 324 } 325 326 327 /*! 328 * Get the NvKmsPerOpenConnector described by NvKmsPerOpen + 329 * deviceHandle + dispHandle + connectorHandle. 330 */ 331 static struct NvKmsPerOpenConnector *GetPerOpenConnector( 332 const struct NvKmsPerOpen *pOpen, 333 const NvKmsDeviceHandle deviceHandle, 334 const NvKmsDispHandle dispHandle, 335 const NvKmsConnectorHandle connectorHandle) 336 { 337 struct NvKmsPerOpenDisp *pOpenDisp; 338 339 pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); 340 341 if (pOpenDisp == NULL) { 342 return NULL; 343 } 344 345 return nvEvoGetPointerFromApiHandle(&pOpenDisp->connectorHandles, 346 connectorHandle); 347 } 348 349 350 /*! 351 * Get the NVDpyEvoRec described by NvKmsPerOpen + deviceHandle + 352 * dispHandle + dpyId. 353 */ 354 static NVDpyEvoRec *GetPerOpenDpy( 355 const struct NvKmsPerOpen *pOpen, 356 const NvKmsDeviceHandle deviceHandle, 357 const NvKmsDispHandle dispHandle, 358 const NVDpyId dpyId) 359 { 360 struct NvKmsPerOpenDisp *pOpenDisp; 361 362 pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); 363 364 if (pOpenDisp == NULL) { 365 return NULL; 366 } 367 368 return nvGetDpyEvoFromDispEvo(pOpenDisp->pDispEvo, dpyId); 369 } 370 371 372 /*! 373 * Get the NvKmsPerOpenFrameLock described by pOpen + frameLockHandle. 
 */
static struct NvKmsPerOpenFrameLock *GetPerOpenFrameLock(
    const struct NvKmsPerOpen *pOpen,
    NvKmsFrameLockHandle frameLockHandle)
{
    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles,
                                        frameLockHandle);
}


/*!
 * Free the NvKmsPerOpenFrameLock associated with this NvKmsPerOpenDisp.
 *
 * Multiple disps can be assigned to the same framelock object, so
 * NvKmsPerOpenFrameLock is reference counted: the object is freed
 * once all NvKmsPerOpenDisps remove their reference to it.
 *
 * A no-op if the disp holds no framelock reference
 * (pOpenDisp->frameLockHandle does not resolve).
 *
 * \param[in,out]  pOpen      The per-open data, to which the
 *                            NvKmsPerOpenFrameLock is assigned.
 * \param[in,out]  pOpenDisp  The NvKmsPerOpenDisp whose corresponding
 *                            NvKmsPerOpenFrameLock should be freed.
 */
static void FreePerOpenFrameLock(struct NvKmsPerOpen *pOpen,
                                 struct NvKmsPerOpenDisp *pOpenDisp)
{
    struct NvKmsPerOpenFrameLock *pOpenFrameLock;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    pOpenFrameLock =
        nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles,
                                     pOpenDisp->frameLockHandle);
    if (pOpenFrameLock == NULL) {
        return;
    }

    /* This disp no longer references the framelock object. */
    pOpenDisp->frameLockHandle = 0;

    pOpenFrameLock->refCnt--;

    /* Other disps still reference it; keep the object alive. */
    if (pOpenFrameLock->refCnt != 0) {
        return;
    }

    /* Last reference dropped: destroy the API handle, then the object. */
    nvEvoDestroyApiHandle(&pOpen->ioctl.frameLockHandles,
                          pOpenFrameLock->nvKmsApiHandle);
    nvFree(pOpenFrameLock);
}


/*!
 * Allocate and initialize an NvKmsPerOpenFrameLock.
 *
 * If the disp described by the specified NvKmsPerOpenDisp has a
 * framelock object, allocate an NvKmsPerOpenFrameLock for it.
 *
 * Multiple disps can be assigned to the same framelock object, so
 * NvKmsPerOpenFrameLock is reference counted: we first look to see if
 * an NvKmsPerOpenFrameLock for this disp's framelock object already
 * exists.  If so, we increment its reference count.  Otherwise, we
 * allocate a new NvKmsPerOpenFrameLock.
 *
 * Returns TRUE (without allocating anything) when the disp has no
 * framelock object at all.
 *
 * \param[in,out]  pOpen      The per-open data, to which the
 *                            new NvKmsPerOpenFrameLock should be assigned.
 * \param[in,out]  pOpenDisp  The NvKmsPerOpenDisp whose corresponding
 *                            NvKmsPerOpenFrameLock should be allocated.
 */
static NvBool AllocPerOpenFrameLock(
    struct NvKmsPerOpen *pOpen,
    struct NvKmsPerOpenDisp *pOpenDisp)
{
    struct NvKmsPerOpenFrameLock *pOpenFrameLock;
    NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo;
    NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
    NvKmsGenericHandle handle;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    if (pFrameLockEvo == NULL) {
        return TRUE;
    }

    /* Reuse an existing per-open framelock for the same EVO object, if
     * any; 'done' then just takes an additional reference. */
    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles,
                                        pOpenFrameLock, handle) {
        if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) {
            goto done;
        }
    }

    pOpenFrameLock = nvCalloc(1, sizeof(*pOpenFrameLock));

    if (pOpenFrameLock == NULL) {
        return FALSE;
    }

    pOpenFrameLock->pFrameLockEvo = pFrameLockEvo;
    pOpenFrameLock->nvKmsApiHandle =
        nvEvoCreateApiHandle(&pOpen->ioctl.frameLockHandles, pOpenFrameLock);

    if (pOpenFrameLock->nvKmsApiHandle == 0) {
        nvFree(pOpenFrameLock);
        return FALSE;
    }

done:
    /* pOpenFrameLock is either the found or the newly created object. */
    pOpenDisp->frameLockHandle = pOpenFrameLock->nvKmsApiHandle;
    pOpenFrameLock->refCnt++;
    return TRUE;
}


/*!
 * Get the NvKmsConnectorHandle that corresponds to the given
 * NVConnectorEvoRec on the NvKmsPerOpen + deviceHandle + dispHandle.
490 */ 491 static NvKmsConnectorHandle ConnectorEvoToConnectorHandle( 492 const struct NvKmsPerOpen *pOpen, 493 const NvKmsDeviceHandle deviceHandle, 494 const NvKmsDispHandle dispHandle, 495 const NVConnectorEvoRec *pConnectorEvo) 496 { 497 struct NvKmsPerOpenDisp *pOpenDisp; 498 struct NvKmsPerOpenConnector *pOpenConnector; 499 NvKmsGenericHandle connector; 500 501 pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); 502 503 if (pOpenDisp == NULL) { 504 return 0; 505 } 506 507 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles, 508 pOpenConnector, connector) { 509 if (pOpenConnector->pConnectorEvo == pConnectorEvo) { 510 return pOpenConnector->nvKmsApiHandle; 511 } 512 } 513 514 return 0; 515 } 516 517 518 /*! 519 * Get the NvKmsDeviceHandle and NvKmsDispHandle that corresponds to 520 * the given NVDispEvoRec on the NvKmsPerOpen. 521 */ 522 static NvBool DispEvoToDevAndDispHandles( 523 const struct NvKmsPerOpen *pOpen, 524 const NVDispEvoRec *pDispEvo, 525 NvKmsDeviceHandle *pDeviceHandle, 526 NvKmsDispHandle *pDispHandle) 527 { 528 NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; 529 struct NvKmsPerOpenDev *pOpenDev; 530 NvKmsGenericHandle dev; 531 532 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 533 534 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 535 pOpenDev, dev) { 536 537 struct NvKmsPerOpenDisp *pOpenDisp; 538 NvKmsGenericHandle disp; 539 540 if (pOpenDev->pDevEvo != pDevEvo) { 541 continue; 542 } 543 544 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, 545 pOpenDisp, disp) { 546 if (pOpenDisp->pDispEvo != pDispEvo) { 547 continue; 548 } 549 550 *pDeviceHandle = pOpenDev->nvKmsApiHandle; 551 *pDispHandle = pOpenDisp->nvKmsApiHandle; 552 553 return TRUE; 554 } 555 } 556 557 return FALSE; 558 } 559 560 561 /*! 562 * Get the NvKmsPerOpenDev that corresponds to the given NVDevEvoRec 563 * on the NvKmsPerOpen. 
564 */ 565 static struct NvKmsPerOpenDev *DevEvoToOpenDev( 566 const struct NvKmsPerOpen *pOpen, 567 const NVDevEvoRec *pDevEvo) 568 { 569 struct NvKmsPerOpenDev *pOpenDev; 570 NvKmsGenericHandle dev; 571 572 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 573 574 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 575 pOpenDev, dev) { 576 if (pOpenDev->pDevEvo == pDevEvo) { 577 return pOpenDev; 578 } 579 } 580 581 return NULL; 582 } 583 584 585 /*! 586 * Get the NvKmsFrameLockHandle that corresponds to the given 587 * NVFrameLockEvoRec on the NvKmsPerOpen. 588 */ 589 static NvBool FrameLockEvoToFrameLockHandle( 590 const struct NvKmsPerOpen *pOpen, 591 const NVFrameLockEvoRec *pFrameLockEvo, 592 NvKmsFrameLockHandle *pFrameLockHandle) 593 { 594 struct NvKmsPerOpenFrameLock *pOpenFrameLock; 595 NvKmsGenericHandle handle; 596 597 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 598 599 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, 600 pOpenFrameLock, handle) { 601 602 if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { 603 *pFrameLockHandle = pOpenFrameLock->nvKmsApiHandle; 604 return TRUE; 605 } 606 } 607 608 return FALSE; 609 } 610 611 612 /*! 613 * Clear the specified NvKmsPerOpenConnector. 614 * 615 * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to which the 616 * NvKmsPerOpenConnector is assigned. 617 * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to be cleared. 618 */ 619 static void ClearPerOpenConnector( 620 struct NvKmsPerOpenDisp *pOpenDisp, 621 struct NvKmsPerOpenConnector *pOpenConnector) 622 { 623 nvEvoDestroyApiHandle(&pOpenDisp->connectorHandles, 624 pOpenConnector->nvKmsApiHandle); 625 nvkms_memset(pOpenConnector, 0, sizeof(*pOpenConnector)); 626 } 627 628 629 /*! 630 * Initialize an NvKmsPerOpenConnector. 631 * 632 * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to which the 633 * NvKmsPerOpenConnector is assigned. 634 * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to initialize. 
 * \param[in]     pConnectorEvo   The connector that the NvKmsPerOpenConnector
 *                                corresponds to.
 *
 * \return  If the NvKmsPerOpenConnector is successfully initialized,
 *          return TRUE.  Otherwise, return FALSE.
 */
static NvBool InitPerOpenConnector(
    struct NvKmsPerOpenDisp *pOpenDisp,
    struct NvKmsPerOpenConnector *pOpenConnector,
    NVConnectorEvoPtr pConnectorEvo)
{
    pOpenConnector->nvKmsApiHandle =
        nvEvoCreateApiHandle(&pOpenDisp->connectorHandles, pOpenConnector);

    if (pOpenConnector->nvKmsApiHandle == 0) {
        goto fail;
    }

    pOpenConnector->pConnectorEvo = pConnectorEvo;

    return TRUE;

fail:
    ClearPerOpenConnector(pOpenDisp, pOpenConnector);
    return FALSE;
}

/*!
 * Clear the specified NvKmsPerOpenDisp.
 *
 * Safe to call on a partially initialized NvKmsPerOpenDisp (used as
 * the failure path of InitPerOpenDisp()).
 *
 * \param[in,out]  pOpen      The per-open data that owns the framelock
 *                            handles referenced by this disp.
 * \param[in,out]  pOpenDev   The NvKmsPerOpenDev to which the NvKmsPerOpenDisp
 *                            is assigned.
 * \param[in,out]  pOpenDisp  The NvKmsPerOpenDisp to be cleared.
 */
static void ClearPerOpenDisp(
    struct NvKmsPerOpen *pOpen,
    struct NvKmsPerOpenDev *pOpenDev,
    struct NvKmsPerOpenDisp *pOpenDisp)
{
    struct NvKmsPerOpenConnector *pOpenConnector;
    NvKmsGenericHandle connector;

    NVVBlankCallbackPtr pCallbackData;
    NvKmsGenericHandle callback;

    /* Drop this disp's reference on its framelock object, if any. */
    FreePerOpenFrameLock(pOpen, pOpenDisp);

    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles,
                                        pOpenConnector, connector) {
        ClearPerOpenConnector(pOpenDisp, pOpenConnector);
    }

    /* Destroy the API handle structures. */
    nvEvoDestroyApiHandles(&pOpenDisp->connectorHandles);

    for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
        nvEvoDestroyApiHandles(&pOpenDisp->vblankSyncObjectHandles[i]);

        /* Unregister any still-registered vblank callbacks before
         * destroying their handle namespace. */
        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankCallbackHandles[i],
                                            pCallbackData, callback) {
            nvRemoveUnicastEvent(pCallbackData->pUserData);
        }
        nvEvoDestroyApiHandles(&pOpenDisp->vblankCallbackHandles[i]);
    }

    nvEvoDestroyApiHandle(&pOpenDev->dispHandles, pOpenDisp->nvKmsApiHandle);

    nvkms_memset(pOpenDisp, 0, sizeof(*pOpenDisp));
}


/*!
 * Initialize an NvKmsPerOpenDisp.
 *
 * \param[in,out]  pOpen      The per-open data that owns the framelock
 *                            handles (see AllocPerOpenFrameLock()).
 * \param[in,out]  pOpenDev   The NvKmsPerOpenDev to which the NvKmsPerOpenDisp
 *                            is assigned.
 * \param[in,out]  pOpenDisp  The NvKmsPerOpenDisp to initialize.
 * \param[in]      pDispEvo   The disp that the NvKmsPerOpenDisp corresponds to.
 *
 * \return  If the NvKmsPerOpenDisp is successfully initialized, return TRUE.
 *          Otherwise, return FALSE (all partial state is torn down).
 */
static NvBool InitPerOpenDisp(
    struct NvKmsPerOpen *pOpen,
    struct NvKmsPerOpenDev *pOpenDev,
    struct NvKmsPerOpenDisp *pOpenDisp,
    NVDispEvoPtr pDispEvo)
{
    NVConnectorEvoPtr pConnectorEvo;
    NvU32 connector;

    pOpenDisp->nvKmsApiHandle =
        nvEvoCreateApiHandle(&pOpenDev->dispHandles, pOpenDisp);

    if (pOpenDisp->nvKmsApiHandle == 0) {
        goto fail;
    }

    pOpenDisp->pDispEvo = pDispEvo;

    /* The fixed-size connector[] array must be able to hold every
     * connector on this disp. */
    if (nvListCount(&pDispEvo->connectorList) >=
        ARRAY_LEN(pOpenDisp->connector)) {
        nvAssert(!"More connectors on this disp than NVKMS can handle.");
        goto fail;
    }

    if (!nvEvoInitApiHandles(&pOpenDisp->connectorHandles,
                             ARRAY_LEN(pOpenDisp->connector))) {
        goto fail;
    }

    connector = 0;
    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
        if (!InitPerOpenConnector(pOpenDisp, &pOpenDisp->connector[connector],
                                  pConnectorEvo)) {
            goto fail;
        }
        connector++;
    }

    /* Initialize the vblankSyncObjectHandles for each head. */
    for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
        if (!nvEvoInitApiHandles(&pOpenDisp->vblankSyncObjectHandles[i],
                                 NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) {
            goto fail;
        }
    }

    /* Initialize the vblankCallbackHandles for each head.
     *
     * The limit of VBLANK_SYNC_OBJECTS_PER_HEAD doesn't really apply here, but
     * we need something. */
    for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
        if (!nvEvoInitApiHandles(&pOpenDisp->vblankCallbackHandles[i],
                                 NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) {
            goto fail;
        }
    }

    if (!AllocPerOpenFrameLock(pOpen, pOpenDisp)) {
        goto fail;
    }

    return TRUE;

fail:
    ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp);
    return FALSE;
}

/*!
 * Free any SwapGroups tracked by this pOpenDev.
 *
 * Owners (sub-owner permission or better) destroy the swap group
 * outright; other clients only drop their reference.
 */
static void FreeSwapGroups(struct NvKmsPerOpenDev *pOpenDev)
{
    NVSwapGroupRec *pSwapGroup;
    NvKmsSwapGroupHandle handle;
    NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;

    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->swapGroupHandles,
                                        pSwapGroup,
                                        handle) {
        nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle);

        if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
            nvHsFreeSwapGroup(pDevEvo, pSwapGroup);
        } else {
            nvHsDecrementSwapGroupRefCnt(pSwapGroup);
        }
    }
}

/*!
 * Check that the NvKmsPermissions make sense.
809 */ 810 static NvBool ValidateNvKmsPermissions( 811 const NVDevEvoRec *pDevEvo, 812 const struct NvKmsPermissions *pPermissions, 813 enum NvKmsClientType clientType) 814 { 815 if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { 816 NvU32 d, h; 817 818 for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { 819 for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { 820 821 NvU8 layerMask = pPermissions->flip.disp[d].head[h].layerMask; 822 823 if (layerMask == 0) { 824 continue; 825 } 826 827 if (nvHasBitAboveMax(layerMask, pDevEvo->apiHead[h].numLayers)) { 828 return FALSE; 829 } 830 831 /* 832 * If the above blocks didn't 'continue', then there 833 * are permissions specified for this disp+head. Is 834 * the specified disp+head in range for the current 835 * configuration? 836 */ 837 if (d >= pDevEvo->nDispEvo) { 838 return FALSE; 839 } 840 841 if (h >= pDevEvo->numApiHeads) { 842 return FALSE; 843 } 844 } 845 } 846 } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { 847 NvU32 d, h; 848 849 for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { 850 for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { 851 852 NVDpyIdList dpyIdList = 853 pPermissions->modeset.disp[d].head[h].dpyIdList; 854 855 if (nvDpyIdListIsEmpty(dpyIdList)) { 856 continue; 857 } 858 859 /* 860 * If the above blocks didn't 'continue', then there 861 * are permissions specified for this disp+head. Is 862 * the specified disp+head in range for the current 863 * configuration? 864 */ 865 if (d >= pDevEvo->nDispEvo) { 866 return FALSE; 867 } 868 869 if (h >= pDevEvo->numApiHeads) { 870 return FALSE; 871 } 872 } 873 } 874 } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_SUB_OWNER) { 875 876 /* Only kapi uses this permission type, so disallow it from userspace */ 877 if (clientType != NVKMS_CLIENT_KERNEL_SPACE) { 878 return FALSE; 879 } 880 881 } else { 882 return FALSE; 883 } 884 885 return TRUE; 886 } 887 888 /*! 
889 * Assign pPermissions with the maximum permissions possible for 890 * the pDevEvo. 891 */ 892 static void AssignFullNvKmsFlipPermissions( 893 const NVDevEvoRec *pDevEvo, 894 struct NvKmsFlipPermissions *pPermissions) 895 { 896 NvU32 dispIndex, apiHead; 897 898 nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); 899 900 for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { 901 for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { 902 pPermissions->disp[dispIndex].head[apiHead].layerMask = 903 NVBIT(pDevEvo->apiHead[apiHead].numLayers) - 1; 904 } 905 } 906 } 907 908 static void AssignFullNvKmsModesetPermissions( 909 const NVDevEvoRec *pDevEvo, 910 struct NvKmsModesetPermissions *pPermissions) 911 { 912 NvU32 dispIndex, apiHead; 913 914 nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); 915 916 for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { 917 for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { 918 pPermissions->disp[dispIndex].head[apiHead].dpyIdList = 919 nvAllDpyIdList(); 920 } 921 } 922 } 923 924 static void AssignFullNvKmsPermissions( 925 struct NvKmsPerOpenDev *pOpenDev 926 ) 927 { 928 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; 929 930 AssignFullNvKmsFlipPermissions(pDevEvo, &pOpenDev->flipPermissions); 931 AssignFullNvKmsModesetPermissions(pDevEvo, &pOpenDev->modesetPermissions); 932 } 933 934 /*! 935 * Set the modeset owner to pOpenDev 936 * 937 * \param pOpenDev The per-open device structure for the new modeset owner. 938 * \return FALSE if there was already a modeset owner. TRUE otherwise. 939 */ 940 static NvBool GrabModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) 941 { 942 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; 943 944 if (pDevEvo->modesetOwner == pOpenDev) { 945 return TRUE; 946 } 947 948 if (pDevEvo->modesetOwner != NULL) { 949 return FALSE; 950 } 951 952 /* 953 * If claiming modeset ownership, undo any SST forcing imposed by 954 * console restore. 
955 */ 956 if (pOpenDev != pDevEvo->pNvKmsOpenDev) { 957 nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); 958 } 959 960 pDevEvo->modesetOwner = pOpenDev; 961 pDevEvo->modesetOwnerChanged = TRUE; 962 963 AssignFullNvKmsPermissions(pOpenDev); 964 return TRUE; 965 } 966 967 /* 968 * If not NULL, remove pRemoveFlip from pFlip. Returns true if there are still 969 * some remaining permissions. 970 */ 971 static NvBool RemoveFlipPermissions(struct NvKmsFlipPermissions *pFlip, 972 const struct NvKmsFlipPermissions *pRemoveFlip) 973 { 974 NvU32 d, h, dLen, hLen; 975 NvBool remainingPermissions = FALSE; 976 977 dLen = ARRAY_LEN(pFlip->disp); 978 for (d = 0; d < dLen; d++) { 979 hLen = ARRAY_LEN(pFlip->disp[d].head); 980 for (h = 0; h < hLen; h++) { 981 982 if (pRemoveFlip) { 983 pFlip->disp[d].head[h].layerMask &= 984 ~pRemoveFlip->disp[d].head[h].layerMask; 985 } 986 987 remainingPermissions |= (pFlip->disp[d].head[h].layerMask != 0); 988 } 989 } 990 991 return remainingPermissions; 992 } 993 994 /* 995 * If not NULL, remove pRemoveModeset from pModeset. Returns true if there are 996 * still some remaining permissions. 997 */ 998 static NvBool RemoveModesetPermissions(struct NvKmsModesetPermissions *pModeset, 999 const struct NvKmsModesetPermissions *pRemoveModeset) 1000 { 1001 NvU32 d, h, dLen, hLen; 1002 NvBool remainingPermissions = FALSE; 1003 1004 dLen = ARRAY_LEN(pModeset->disp); 1005 for (d = 0; d < dLen; d++) { 1006 hLen = ARRAY_LEN(pModeset->disp[d].head); 1007 for (h = 0; h < hLen; h++) { 1008 1009 if (pRemoveModeset) { 1010 pModeset->disp[d].head[h].dpyIdList = nvDpyIdListMinusDpyIdList( 1011 pModeset->disp[d].head[h].dpyIdList, 1012 pRemoveModeset->disp[d].head[h].dpyIdList); 1013 } 1014 1015 remainingPermissions |= 1016 !nvDpyIdListIsEmpty(pModeset->disp[d].head[h].dpyIdList); 1017 } 1018 } 1019 1020 return remainingPermissions; 1021 } 1022 1023 /*! 1024 * Clear permissions on the specified device for all NvKmsPerOpens. 
 *
 * For NvKmsPerOpen::type==Ioctl, clear the permissions, except for the
 * specified pOpenDevExclude.
 *
 * For NvKmsPerOpen::type==GrantPermissions, clear
 * NvKmsPerOpen::grantPermissions and reset NvKmsPerOpen::type to
 * Undefined.
 *
 * \param[in]      typeBitmask     Bitmask of NVBIT(NV_KMS_PERMISSIONS_TYPE_*)
 *                                 values selecting which permission types to
 *                                 revoke.
 * \param[in,out]  pDevEvo         The device whose permissions are revoked.
 * \param[in]      pOpenDevExclude An NvKmsPerOpenDev to leave untouched
 *                                 (may be NULL).
 */
static void RevokePermissionsInternal(
    const NvU32 typeBitmask,
    NVDevEvoRec *pDevEvo,
    const struct NvKmsPerOpenDev *pOpenDevExclude)
{
    struct NvKmsPerOpen *pOpen;

    nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) {

        /* Invalidate pending permission grants of a matching type. */
        if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) &&
            (pOpen->grantPermissions.pDevEvo == pDevEvo) &&
            (typeBitmask & NVBIT(pOpen->grantPermissions.permissions.type))) {
            nvkms_memset(&pOpen->grantPermissions, 0,
                         sizeof(pOpen->grantPermissions));
            pOpen->type = NvKmsPerOpenTypeUndefined;
        }

        if (pOpen->type == NvKmsPerOpenTypeIoctl) {

            struct NvKmsPerOpenDev *pOpenDev =
                DevEvoToOpenDev(pOpen, pDevEvo);

            /* This client has no per-open data for the device. */
            if (pOpenDev == NULL) {
                continue;
            }

            /* Privileged opens and the excluded open keep everything. */
            if (pOpenDev == pOpenDevExclude || pOpenDev->isPrivileged) {
                continue;
            }

            /* Revoking sub-ownership also tears down the sub-owner's
             * swap groups. */
            if (pOpenDev == pDevEvo->modesetSubOwner &&
                (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER))) {
                FreeSwapGroups(pOpenDev);
                pDevEvo->modesetSubOwner = NULL;
            }

            /*
             * Clients with sub-owner permission (or better) don't get flipping
             * or modeset permission revoked.
             */
            if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
                continue;
            }

            if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING)) {
                nvkms_memset(&pOpenDev->flipPermissions, 0,
                             sizeof(pOpenDev->flipPermissions));
            }

            if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET)) {
                nvkms_memset(&pOpenDev->modesetPermissions, 0,
                             sizeof(pOpenDev->modesetPermissions));
            }
        }
    }
}

/*!
 * Restore the console, preferring a direct modeset; fall back to
 * recreating the core channel so RM's console restore code runs.
 */
static void RestoreConsole(NVDevEvoPtr pDevEvo)
{
    // Try to issue a modeset and flip to the framebuffer console surface.
    if (!nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */)) {
        // If that didn't work, free the core channel to trigger RM's console
        // restore code.
        FreeSurfaceCtxDmasForAllOpens(pDevEvo);
        DisableAndCleanVblankSyncObjectForAllOpens(pDevEvo);
        nvFreeCoreChannelEvo(pDevEvo);

        // Reallocate the core channel right after freeing it.  This makes sure
        // that it's allocated and ready right away if another NVKMS client is
        // started.
        if (nvAllocCoreChannelEvo(pDevEvo)) {
            nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
            EnableAndSetupVblankSyncObjectForAllOpens(pDevEvo);
            AllocSurfaceCtxDmasForAllOpens(pDevEvo);
        }
    }
}

/*!
 * Release modeset ownership previously set by GrabModesetOwnership
 *
 * Restores the console and revokes flip, modeset, and sub-owner
 * permissions from all non-privileged clients of the device.
 *
 * \param pOpenDev The per-open device structure relinquishing modeset
 *                 ownership.
 * \return FALSE if pOpenDev is not the modeset owner, TRUE otherwise.
 */
static NvBool ReleaseModesetOwnership(struct NvKmsPerOpenDev *pOpenDev)
{
    NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;

    if (pDevEvo->modesetOwner != pOpenDev) {
        // Only the current owner can release ownership.
        return FALSE;
    }

    FreeSwapGroups(pOpenDev);

    pDevEvo->modesetOwner = NULL;
    pDevEvo->modesetOwnerChanged = TRUE;
    pDevEvo->handleConsoleHotplugs = TRUE;

    RestoreConsole(pDevEvo);
    RevokePermissionsInternal(NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) |
                              NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET) |
                              NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER),
                              pDevEvo, NULL /* pOpenDevExclude */);
    return TRUE;
}

/*!
 * Free the specified NvKmsPerOpenDev.
 *
 * Safe to call with pOpenDev == NULL, or with a partially initialized
 * NvKmsPerOpenDev (used as the failure path of nvAllocPerOpenDev()).
 *
 * \param[in,out]  pOpen     The per-open data, to which the
 *                           NvKmsPerOpenDev is assigned.
 * \param[in,out]  pOpenDev  The NvKmsPerOpenDev to free.
 */
void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen,
                      struct NvKmsPerOpenDev *pOpenDev)
{
    struct NvKmsPerOpenDisp *pOpenDisp;
    NvKmsGenericHandle disp;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    if (pOpenDev == NULL) {
        return;
    }

    nvEvoDestroyApiHandles(&pOpenDev->surfaceHandles);

    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles,
                                        pOpenDisp, disp) {
        ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp);
    }

    nvEvoDestroyApiHandles(&pOpenDev->dispHandles);

    nvEvoDestroyApiHandle(&pOpen->ioctl.devHandles, pOpenDev->nvKmsApiHandle);

    nvEvoDestroyApiHandles(&pOpenDev->deferredRequestFifoHandles);

    nvEvoDestroyApiHandles(&pOpenDev->swapGroupHandles);

    nvFree(pOpenDev);
}


/*!
 * Allocate and initialize an NvKmsPerOpenDev.
 *
 * \param[in,out]  pOpen        The per-open data, to which the
 *                              new NvKmsPerOpenDev should be assigned.
 * \param[in]      pDevEvo      The device to which the new NvKmsPerOpenDev
 *                              corresponds.
 * \param[in]      isPrivileged The NvKmsPerOpenDev is privileged which can
 *                              do modeset anytime.
 *
 * \return  On success, return a pointer to the new NvKmsPerOpenDev.
 *          On failure, return NULL.
1191 */ 1192 struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen, 1193 NVDevEvoPtr pDevEvo, NvBool isPrivileged) 1194 { 1195 struct NvKmsPerOpenDev *pOpenDev = nvCalloc(1, sizeof(*pOpenDev)); 1196 NVDispEvoPtr pDispEvo; 1197 NvU32 disp; 1198 1199 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 1200 1201 if (pOpenDev == NULL) { 1202 goto fail; 1203 } 1204 1205 pOpenDev->nvKmsApiHandle = 1206 nvEvoCreateApiHandle(&pOpen->ioctl.devHandles, pOpenDev); 1207 1208 if (pOpenDev->nvKmsApiHandle == 0) { 1209 goto fail; 1210 } 1211 1212 pOpenDev->pDevEvo = pDevEvo; 1213 1214 if (!nvEvoInitApiHandles(&pOpenDev->dispHandles, 1215 ARRAY_LEN(pOpenDev->disp))) { 1216 goto fail; 1217 } 1218 1219 if (pDevEvo->nDispEvo > ARRAY_LEN(pOpenDev->disp)) { 1220 nvAssert(!"More disps on this device than NVKMS can handle."); 1221 goto fail; 1222 } 1223 1224 FOR_ALL_EVO_DISPLAYS(pDispEvo, disp, pDevEvo) { 1225 if (!InitPerOpenDisp(pOpen, pOpenDev, &pOpenDev->disp[disp], pDispEvo)) { 1226 goto fail; 1227 } 1228 } 1229 1230 if (!nvEvoInitApiHandles(&pOpenDev->surfaceHandles, 32)) { 1231 goto fail; 1232 } 1233 1234 pOpenDev->isPrivileged = isPrivileged; 1235 if (pOpenDev->isPrivileged) { 1236 AssignFullNvKmsPermissions(pOpenDev); 1237 } 1238 1239 if (!nvEvoInitApiHandles(&pOpenDev->deferredRequestFifoHandles, 4)) { 1240 goto fail; 1241 } 1242 1243 if (!nvEvoInitApiHandles(&pOpenDev->swapGroupHandles, 4)) { 1244 goto fail; 1245 } 1246 1247 return pOpenDev; 1248 1249 fail: 1250 nvFreePerOpenDev(pOpen, pOpenDev); 1251 return NULL; 1252 } 1253 1254 1255 /*! 1256 * Assign NvKmsPerOpen::type. 1257 * 1258 * This succeeds only if NvKmsPerOpen::type is Undefined, or already 1259 * has the requested type and allowRedundantAssignment is TRUE. 
1260 */ 1261 static NvBool AssignNvKmsPerOpenType(struct NvKmsPerOpen *pOpen, 1262 enum NvKmsPerOpenType type, 1263 NvBool allowRedundantAssignment) 1264 { 1265 if ((pOpen->type == type) && allowRedundantAssignment) { 1266 return TRUE; 1267 } 1268 1269 if (pOpen->type != NvKmsPerOpenTypeUndefined) { 1270 return FALSE; 1271 } 1272 1273 switch (type) { 1274 case NvKmsPerOpenTypeIoctl: 1275 nvListInit(&pOpen->ioctl.eventList); 1276 1277 if (!nvEvoInitApiHandles(&pOpen->ioctl.devHandles, NV_MAX_DEVICES)) { 1278 return FALSE; 1279 } 1280 1281 if (!nvEvoInitApiHandles(&pOpen->ioctl.frameLockHandles, 4)) { 1282 nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); 1283 return FALSE; 1284 } 1285 1286 nvListAppend(&pOpen->perOpenIoctlListEntry, &perOpenIoctlList); 1287 break; 1288 1289 case NvKmsPerOpenTypeGrantSurface: 1290 /* Nothing to do, here. */ 1291 break; 1292 1293 case NvKmsPerOpenTypeGrantSwapGroup: 1294 /* Nothing to do, here. */ 1295 break; 1296 1297 case NvKmsPerOpenTypeGrantPermissions: 1298 /* Nothing to do, here. */ 1299 break; 1300 1301 case NvKmsPerOpenTypeUnicastEvent: 1302 /* Nothing to do, here. */ 1303 break; 1304 1305 case NvKmsPerOpenTypeUndefined: 1306 nvAssert(!"unexpected NvKmsPerOpenType"); 1307 break; 1308 } 1309 1310 pOpen->type = type; 1311 return TRUE; 1312 } 1313 1314 /*! 1315 * Return whether the PerOpen can be used as a unicast event. 1316 */ 1317 static inline NvBool PerOpenIsValidForUnicastEvent( 1318 const struct NvKmsPerOpen *pOpen) 1319 { 1320 /* If the type is Undefined, it can be made a unicast event. */ 1321 1322 if (pOpen->type == NvKmsPerOpenTypeUndefined) { 1323 return TRUE; 1324 } 1325 1326 /* 1327 * If the type is already UnicastEvent but there is no active user, it can 1328 * be made a unicast event. 1329 */ 1330 if ((pOpen->type == NvKmsPerOpenTypeUnicastEvent) && 1331 (pOpen->unicastEvent.type == NvKmsUnicastEventTypeUndefined)) { 1332 return TRUE; 1333 } 1334 1335 return FALSE; 1336 } 1337 1338 /*! 
 * Allocate the specified device.
 */
static NvBool AllocDevice(struct NvKmsPerOpen *pOpen,
                          void *pParamsVoid)
{
    struct NvKmsAllocDeviceParams *pParams = pParamsVoid;
    NVDevEvoPtr pDevEvo;
    struct NvKmsPerOpenDev *pOpenDev;
    NvU32 disp, apiHead;
    NvU8 layer;

    nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));

    /* Client and NVKMS must be built from the same driver version. */
    if (nvkms_strcmp(pParams->request.versionString, NV_VERSION_STRING) != 0) {
        pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH;
        return FALSE;
    }

    /*
     * It is an error to call NVKMS_IOCTL_ALLOC_DEVICE multiple times
     * on the same device with the same fd.
     */
    if (DeviceIdAlreadyPresent(pOpen, pParams->request.deviceId)) {
        pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST;
        return FALSE;
    }

    pDevEvo = nvFindDevEvoByDeviceId(pParams->request.deviceId);

    if (pDevEvo == NULL) {
        /* First client for this deviceId: create the device. */
        pDevEvo = nvAllocDevEvo(&pParams->request, &pParams->reply.status);
        if (pDevEvo == NULL) {
            return FALSE;
        }
    } else {
        /*
         * Device already exists (another fd allocated it): the request must
         * be compatible with the existing device's SLI mosaic configuration.
         */
        if (!pParams->request.tryInferSliMosaicFromExistingDevice &&
            (pDevEvo->sli.mosaic != pParams->request.sliMosaic)) {
            pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST;
            return FALSE;
        }

        if (pDevEvo->usesTegraDevice &&
            (pParams->request.deviceId != NVKMS_DEVICE_ID_TEGRA)) {
            pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST;
            return FALSE;
        }
        /* Take an additional reference on the shared device. */
        pDevEvo->allocRefCnt++;
    }

    pOpenDev = nvAllocPerOpenDev(pOpen, pDevEvo, FALSE /* isPrivileged */);

    if (pOpenDev == NULL) {
        /* Drops the reference taken above (or frees a newly created device). */
        nvFreeDevEvo(pDevEvo);
        pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
        return FALSE;
    }

    /* Beyond this point, the function cannot fail. */

    if (pParams->request.enableConsoleHotplugHandling) {
        pDevEvo->handleConsoleHotplugs = TRUE;
    }

    /* Populate the reply with device handles and static capabilities. */
    pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle;
    pParams->reply.subDeviceMask =
        NV_TWO_N_MINUS_ONE(pDevEvo->numSubDevices);
    pParams->reply.numHeads = pDevEvo->numApiHeads;
    pParams->reply.numDisps = pDevEvo->nDispEvo;

    ct_assert(ARRAY_LEN(pParams->reply.dispHandles) ==
              ARRAY_LEN(pOpenDev->disp));

    for (disp = 0; disp < ARRAY_LEN(pParams->reply.dispHandles); disp++) {
        pParams->reply.dispHandles[disp] = pOpenDev->disp[disp].nvKmsApiHandle;
    }

    pParams->reply.inputLutAppliesToBase = pDevEvo->caps.inputLutAppliesToBase;

    ct_assert(ARRAY_LEN(pParams->reply.layerCaps) ==
              ARRAY_LEN(pDevEvo->caps.layerCaps));

    for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
        pParams->reply.numLayers[apiHead] = pDevEvo->apiHead[apiHead].numLayers;
    }

    for (layer = 0;
         layer < ARRAY_LEN(pParams->reply.layerCaps);
         layer++) {
        pParams->reply.layerCaps[layer] = pDevEvo->caps.layerCaps[layer];
    }

    pParams->reply.surfaceAlignment = NV_EVO_SURFACE_ALIGNMENT;
    pParams->reply.requiresVrrSemaphores = !pDevEvo->hal->caps.supportsDisplayRate;

    pParams->reply.nIsoSurfacesInVidmemOnly =
        !!NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits,
            NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY);

    pParams->reply.requiresAllAllocationsInSysmem =
        pDevEvo->requiresAllAllocationsInSysmem;
    pParams->reply.supportsHeadSurface = pDevEvo->isHeadSurfaceSupported;

    pParams->reply.validNIsoFormatMask = pDevEvo->caps.validNIsoFormatMask;

    pParams->reply.maxWidthInBytes = pDevEvo->caps.maxWidthInBytes;
    pParams->reply.maxWidthInPixels = pDevEvo->caps.maxWidthInPixels;
    pParams->reply.maxHeightInPixels = pDevEvo->caps.maxHeight;
    pParams->reply.cursorCompositionCaps =
        pDevEvo->caps.cursorCompositionCaps;
    pParams->reply.genericPageKind = pDevEvo->caps.genericPageKind;

    pParams->reply.maxCursorSize = pDevEvo->cursorHal->caps.maxSize;

    /* NVKMS swap groups and warp&blend depends on headSurface functionality. */
    pParams->reply.supportsSwapGroups = pDevEvo->isHeadSurfaceSupported;
    pParams->reply.supportsWarpAndBlend = pDevEvo->isHeadSurfaceSupported;

    pParams->reply.validLayerRRTransforms = pDevEvo->caps.validLayerRRTransforms;

    pParams->reply.isoIOCoherencyModes = pDevEvo->isoIOCoherencyModes;
    pParams->reply.nisoIOCoherencyModes = pDevEvo->nisoIOCoherencyModes;

    /*
     * TODO: Replace the isSOCDisplay check with an RM query. See Bug 3689635.
     */
    pParams->reply.displayIsGpuL2Coherent = !pDevEvo->isSOCDisplay;

    pParams->reply.supportsSyncpts = pDevEvo->supportsSyncpts;

    pParams->reply.supportsIndependentAcqRelSemaphore =
        pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore;

    pParams->reply.supportsVblankSyncObjects =
        pDevEvo->hal->caps.supportsVblankSyncObjects;

    pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS;

    return TRUE;
}

/*!
 * Destroy every deferred request FIFO still registered on this open device.
 */
static void UnregisterDeferredRequestFifos(struct NvKmsPerOpenDev *pOpenDev)
{
    NVDeferredRequestFifoRec *pDeferredRequestFifo;
    NvKmsGenericHandle handle;

    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->deferredRequestFifoHandles,
                                        pDeferredRequestFifo,
                                        handle) {

        /* Drop the handle first, then unregister the FIFO itself. */
        nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle);

        nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo,
                                           pDeferredRequestFifo);
    }
}

/*
 * Forward declaration since this function is used by
 * DisableRemainingVblankSyncObjects().
 */
static void DisableAndCleanVblankSyncObject(NVDispEvoRec *pDispEvo,
                                            const NvU32 apiHead,
                                            NVVblankSyncObjectRec *pVblankSyncObject,
                                            NVEvoUpdateState *pUpdateState);

/*!
 * Disable and destroy every vblank sync object still owned by this
 * open device, kicking off one hardware update per disp.
 */
static void DisableRemainingVblankSyncObjects(struct NvKmsPerOpen *pOpen,
                                              struct NvKmsPerOpenDev *pOpenDev)
{
    struct NvKmsPerOpenDisp *pOpenDisp;
    NvKmsGenericHandle disp;
    NVVblankSyncObjectRec *pVblankSyncObject;
    NvKmsVblankSyncObjectHandle handle;
    NvU32 apiHead = 0;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    if (pOpenDev == NULL) {
        return;
    }

    /* For each pOpenDisp: */
    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles,
                                        pOpenDisp, disp) {
        /*
         * A single update state can handle changes across multiple heads on a
         * given Disp.
         */
        NVEvoUpdateState updateState = { };

        /* For each head: */
        for (apiHead = 0; apiHead < ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) {
            NVEvoApiHandlesRec *pHandles =
                &pOpenDisp->vblankSyncObjectHandles[apiHead];

            /* For each still-active vblank sync object: */
            FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles,
                                                pVblankSyncObject, handle) {
                DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead,
                                                pVblankSyncObject,
                                                &updateState);
                /* Remove the handle from the map. */
                nvEvoDestroyApiHandle(pHandles, handle);
            }
        }

        if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) {
            /*
             * Instruct hardware to execute the staged commands from the
             * ConfigureVblankSyncObject() calls (inherent in
             * DisableAndCleanVblankSyncObject()) above. This will set up
             * and wait for a notification that the hardware execution
             * has completed.
             */
            nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState,
                                  TRUE);
        }
    }
}

/*!
 * Drop this open's reference on its device, tearing down all
 * client-owned resources in dependency order, then free the
 * NvKmsPerOpenDev itself.
 */
static void FreeDeviceReference(struct NvKmsPerOpen *pOpen,
                                struct NvKmsPerOpenDev *pOpenDev)
{
    /* Disable all client-owned vblank sync objects that still exist. */
    DisableRemainingVblankSyncObjects(pOpen, pOpenDev);

    FreeSwapGroups(pOpenDev);

    UnregisterDeferredRequestFifos(pOpenDev);

    nvEvoFreeClientSurfaces(pOpenDev->pDevEvo, pOpenDev,
                            &pOpenDev->surfaceHandles);

    if (!nvFreeDevEvo(pOpenDev->pDevEvo)) {
        // If this pOpenDev is the modeset owner, implicitly release it. Does
        // nothing if this pOpenDev is not the modeset owner.
        //
        // If nvFreeDevEvo() freed the device, then it also implicitly released
        // ownership.
        ReleaseModesetOwnership(pOpenDev);

        nvAssert(pOpenDev->pDevEvo->modesetOwner != pOpenDev);

        // If this pOpenDev is the modeset sub-owner, implicitly release it.
        if (pOpenDev->pDevEvo->modesetSubOwner == pOpenDev) {
            pOpenDev->pDevEvo->modesetSubOwner = NULL;
        }
    }

    nvFreePerOpenDev(pOpen, pOpenDev);
}

/*!
 * Free the specified device.
 */
static NvBool FreeDevice(struct NvKmsPerOpen *pOpen,
                         void *pParamsVoid)
{
    struct NvKmsFreeDeviceParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    FreeDeviceReference(pOpen, pOpenDev);

    return TRUE;
}


/*!
 * Get the disp data. This information should remain static for the
 * lifetime of the disp.
1613 */ 1614 static NvBool QueryDisp(struct NvKmsPerOpen *pOpen, 1615 void *pParamsVoid) 1616 { 1617 struct NvKmsQueryDispParams *pParams = pParamsVoid; 1618 struct NvKmsPerOpenDisp *pOpenDisp; 1619 const NVEvoSubDeviceRec *pSubDevice; 1620 NVDispEvoPtr pDispEvo; 1621 NvU32 connector; 1622 1623 pOpenDisp = GetPerOpenDisp(pOpen, 1624 pParams->request.deviceHandle, 1625 pParams->request.dispHandle); 1626 if (pOpenDisp == NULL) { 1627 return FALSE; 1628 } 1629 1630 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 1631 1632 pDispEvo = pOpenDisp->pDispEvo; 1633 1634 // Don't include dynamic displays in validDpys. The data returned here is 1635 // supposed to be static for the lifetime of the pDispEvo. 1636 pParams->reply.validDpys = 1637 nvDpyIdListMinusDpyIdList(pDispEvo->validDisplays, 1638 pDispEvo->dynamicDpyIds); 1639 pParams->reply.bootDpys = pDispEvo->bootDisplays; 1640 pParams->reply.muxDpys = pDispEvo->muxDisplays; 1641 pParams->reply.frameLockHandle = pOpenDisp->frameLockHandle; 1642 pParams->reply.numConnectors = nvListCount(&pDispEvo->connectorList); 1643 1644 ct_assert(ARRAY_LEN(pParams->reply.connectorHandles) == 1645 ARRAY_LEN(pOpenDisp->connector)); 1646 1647 for (connector = 0; connector < ARRAY_LEN(pParams->reply.connectorHandles); 1648 connector++) { 1649 pParams->reply.connectorHandles[connector] = 1650 pOpenDisp->connector[connector].nvKmsApiHandle; 1651 } 1652 1653 pSubDevice = pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner]; 1654 if (pSubDevice != NULL) { 1655 ct_assert(sizeof(pParams->reply.gpuString) >= 1656 sizeof(pSubDevice->gpuString)); 1657 nvkms_memcpy(pParams->reply.gpuString, pSubDevice->gpuString, 1658 sizeof(pSubDevice->gpuString)); 1659 } 1660 1661 return TRUE; 1662 } 1663 1664 1665 /*! 1666 * Get the connector static data. This information should remain static for the 1667 * lifetime of the connector. 
1668 */ 1669 static NvBool QueryConnectorStaticData(struct NvKmsPerOpen *pOpen, 1670 void *pParamsVoid) 1671 { 1672 struct NvKmsQueryConnectorStaticDataParams *pParams = pParamsVoid; 1673 struct NvKmsPerOpenConnector *pOpenConnector; 1674 NVConnectorEvoPtr pConnectorEvo; 1675 1676 pOpenConnector = GetPerOpenConnector(pOpen, 1677 pParams->request.deviceHandle, 1678 pParams->request.dispHandle, 1679 pParams->request.connectorHandle); 1680 if (pOpenConnector == NULL) { 1681 return FALSE; 1682 } 1683 1684 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 1685 1686 pConnectorEvo = pOpenConnector->pConnectorEvo; 1687 1688 pParams->reply.dpyId = pConnectorEvo->displayId; 1689 pParams->reply.isDP = nvConnectorUsesDPLib(pConnectorEvo) || 1690 nvConnectorIsDPSerializer(pConnectorEvo); 1691 pParams->reply.legacyTypeIndex = pConnectorEvo->legacyTypeIndex; 1692 pParams->reply.type = pConnectorEvo->type; 1693 pParams->reply.typeIndex = pConnectorEvo->typeIndex; 1694 pParams->reply.signalFormat = pConnectorEvo->signalFormat; 1695 pParams->reply.physicalIndex = pConnectorEvo->physicalIndex; 1696 pParams->reply.physicalLocation = pConnectorEvo->physicalLocation; 1697 1698 pParams->reply.isLvds = 1699 (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) && 1700 (pConnectorEvo->or.protocol == 1701 NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM); 1702 1703 pParams->reply.locationOnChip = (pConnectorEvo->or.location == 1704 NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP); 1705 return TRUE; 1706 } 1707 1708 1709 /*! 1710 * Get the connector dynamic data. This information should reflects changes to 1711 * the connector over time (e.g. for DisplayPort MST devices). 
1712 */ 1713 static NvBool QueryConnectorDynamicData(struct NvKmsPerOpen *pOpen, 1714 void *pParamsVoid) 1715 { 1716 struct NvKmsQueryConnectorDynamicDataParams *pParams = pParamsVoid; 1717 struct NvKmsPerOpenConnector *pOpenConnector; 1718 NVConnectorEvoPtr pConnectorEvo; 1719 NVDispEvoPtr pDispEvo; 1720 NVDpyEvoPtr pDpyEvo; 1721 1722 pOpenConnector = GetPerOpenConnector(pOpen, 1723 pParams->request.deviceHandle, 1724 pParams->request.dispHandle, 1725 pParams->request.connectorHandle); 1726 if (pOpenConnector == NULL) { 1727 return FALSE; 1728 } 1729 1730 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 1731 1732 pConnectorEvo = pOpenConnector->pConnectorEvo; 1733 pDispEvo = pConnectorEvo->pDispEvo; 1734 1735 if (nvConnectorUsesDPLib(pConnectorEvo)) { 1736 pParams->reply.detectComplete = pConnectorEvo->detectComplete; 1737 } else { 1738 pParams->reply.detectComplete = TRUE; 1739 } 1740 1741 // Find the dynamic dpys on this connector. 1742 pParams->reply.dynamicDpyIdList = nvEmptyDpyIdList(); 1743 FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->dynamicDpyIds, pDispEvo) { 1744 if (pDpyEvo->pConnectorEvo == pConnectorEvo) { 1745 pParams->reply.dynamicDpyIdList = 1746 nvAddDpyIdToDpyIdList(pDpyEvo->id, 1747 pParams->reply.dynamicDpyIdList); 1748 } 1749 } 1750 1751 return TRUE; 1752 } 1753 1754 1755 /*! 1756 * Get the static data for the specified dpy. This information should 1757 * remain static for the lifetime of the dpy. 
1758 */ 1759 static NvBool QueryDpyStaticData(struct NvKmsPerOpen *pOpen, 1760 void *pParamsVoid) 1761 { 1762 struct NvKmsQueryDpyStaticDataParams *pParams = pParamsVoid; 1763 NVDpyEvoPtr pDpyEvo; 1764 1765 pDpyEvo = GetPerOpenDpy(pOpen, 1766 pParams->request.deviceHandle, 1767 pParams->request.dispHandle, 1768 pParams->request.dpyId); 1769 if (pDpyEvo == NULL) { 1770 return FALSE; 1771 } 1772 1773 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 1774 1775 pParams->reply.connectorHandle = 1776 ConnectorEvoToConnectorHandle(pOpen, 1777 pParams->request.deviceHandle, 1778 pParams->request.dispHandle, 1779 pDpyEvo->pConnectorEvo); 1780 /* 1781 * All pConnectorEvos should have corresponding pOpenConnectors, 1782 * so we should always be able to find the NvKmsConnectorHandle. 1783 */ 1784 nvAssert(pParams->reply.connectorHandle != 0); 1785 1786 pParams->reply.type = pDpyEvo->pConnectorEvo->legacyType; 1787 1788 if (pDpyEvo->dp.addressString != NULL) { 1789 const size_t len = nvkms_strlen(pDpyEvo->dp.addressString) + 1; 1790 nvkms_memcpy(pParams->reply.dpAddress, pDpyEvo->dp.addressString, 1791 NV_MIN(sizeof(pParams->reply.dpAddress), len)); 1792 pParams->reply.dpAddress[sizeof(pParams->reply.dpAddress) - 1] = '\0'; 1793 } 1794 1795 pParams->reply.mobileInternal = pDpyEvo->internal; 1796 pParams->reply.isDpMST = nvDpyEvoIsDPMST(pDpyEvo); 1797 pParams->reply.headMask = nvDpyGetPossibleApiHeadsMask(pDpyEvo); 1798 1799 return TRUE; 1800 } 1801 1802 1803 /*! 1804 * Get the dynamic data for the specified dpy. This information can 1805 * change when a hotplug occurs. 
1806 */ 1807 static NvBool QueryDpyDynamicData(struct NvKmsPerOpen *pOpen, 1808 void *pParamsVoid) 1809 { 1810 struct NvKmsQueryDpyDynamicDataParams *pParams = pParamsVoid; 1811 NVDpyEvoPtr pDpyEvo; 1812 1813 pDpyEvo = GetPerOpenDpy(pOpen, 1814 pParams->request.deviceHandle, 1815 pParams->request.dispHandle, 1816 pParams->request.dpyId); 1817 if (pDpyEvo == NULL) { 1818 return FALSE; 1819 } 1820 1821 return nvDpyGetDynamicData(pDpyEvo, pParams); 1822 } 1823 1824 /* Store a copy of the user's infoString pointer, so we can copy out to it when 1825 * we're done. */ 1826 struct InfoStringExtraUserStateCommon 1827 { 1828 NvU64 userInfoString; 1829 }; 1830 1831 /* 1832 * Allocate a kernel buffer to populate the infoString which will be copied out 1833 * to userspace upon completion. 1834 */ 1835 static NvBool InfoStringPrepUserCommon( 1836 NvU32 infoStringSize, 1837 NvU64 *ppInfoString, 1838 struct InfoStringExtraUserStateCommon *pExtra) 1839 { 1840 char *kernelInfoString = NULL; 1841 1842 if (infoStringSize == 0) { 1843 *ppInfoString = 0; 1844 return TRUE; 1845 } 1846 1847 if (!nvKmsNvU64AddressIsSafe(*ppInfoString)) { 1848 return FALSE; 1849 } 1850 1851 if (infoStringSize > NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH) { 1852 return FALSE; 1853 } 1854 1855 kernelInfoString = nvCalloc(1, infoStringSize); 1856 if (kernelInfoString == NULL) { 1857 return FALSE; 1858 } 1859 1860 pExtra->userInfoString = *ppInfoString; 1861 *ppInfoString = nvKmsPointerToNvU64(kernelInfoString); 1862 1863 return TRUE; 1864 } 1865 1866 /* 1867 * Copy the infoString out to userspace and free the kernel-internal buffer. 
1868 */ 1869 static NvBool InfoStringDoneUserCommon( 1870 NvU32 infoStringSize, 1871 NvU64 pInfoString, 1872 NvU32 *infoStringLenWritten, 1873 struct InfoStringExtraUserStateCommon *pExtra) 1874 { 1875 char *kernelInfoString = nvKmsNvU64ToPointer(pInfoString); 1876 int status; 1877 NvBool ret; 1878 1879 if ((infoStringSize == 0) || (*infoStringLenWritten == 0)) { 1880 ret = TRUE; 1881 goto done; 1882 } 1883 1884 nvAssert(*infoStringLenWritten <= infoStringSize); 1885 1886 status = nvkms_copyout(pExtra->userInfoString, 1887 kernelInfoString, 1888 *infoStringLenWritten); 1889 if (status == 0) { 1890 ret = TRUE; 1891 } else { 1892 ret = FALSE; 1893 *infoStringLenWritten = 0; 1894 } 1895 1896 done: 1897 nvFree(kernelInfoString); 1898 1899 return ret; 1900 } 1901 1902 struct NvKmsValidateModeIndexExtraUserState 1903 { 1904 struct InfoStringExtraUserStateCommon common; 1905 }; 1906 1907 static NvBool ValidateModeIndexPrepUser( 1908 void *pParamsVoid, 1909 void *pExtraUserStateVoid) 1910 { 1911 struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; 1912 struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; 1913 1914 return InfoStringPrepUserCommon( 1915 pParams->request.infoStringSize, 1916 &pParams->request.pInfoString, 1917 &pExtra->common); 1918 } 1919 1920 static NvBool ValidateModeIndexDoneUser( 1921 void *pParamsVoid, 1922 void *pExtraUserStateVoid) 1923 { 1924 struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; 1925 struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; 1926 1927 return InfoStringDoneUserCommon( 1928 pParams->request.infoStringSize, 1929 pParams->request.pInfoString, 1930 &pParams->reply.infoStringLenWritten, 1931 &pExtra->common); 1932 } 1933 1934 /*! 1935 * Validate the requested mode. 
1936 */ 1937 static NvBool ValidateModeIndex(struct NvKmsPerOpen *pOpen, 1938 void *pParamsVoid) 1939 { 1940 struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; 1941 NVDpyEvoPtr pDpyEvo; 1942 1943 pDpyEvo = GetPerOpenDpy(pOpen, 1944 pParams->request.deviceHandle, 1945 pParams->request.dispHandle, 1946 pParams->request.dpyId); 1947 if (pDpyEvo == NULL) { 1948 return FALSE; 1949 } 1950 1951 nvValidateModeIndex(pDpyEvo, &pParams->request, &pParams->reply); 1952 1953 return TRUE; 1954 } 1955 1956 struct NvKmsValidateModeExtraUserState 1957 { 1958 struct InfoStringExtraUserStateCommon common; 1959 }; 1960 1961 static NvBool ValidateModePrepUser( 1962 void *pParamsVoid, 1963 void *pExtraUserStateVoid) 1964 { 1965 struct NvKmsValidateModeParams *pParams = pParamsVoid; 1966 struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; 1967 1968 return InfoStringPrepUserCommon( 1969 pParams->request.infoStringSize, 1970 &pParams->request.pInfoString, 1971 &pExtra->common); 1972 } 1973 1974 static NvBool ValidateModeDoneUser( 1975 void *pParamsVoid, 1976 void *pExtraUserStateVoid) 1977 { 1978 struct NvKmsValidateModeParams *pParams = pParamsVoid; 1979 struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; 1980 1981 return InfoStringDoneUserCommon( 1982 pParams->request.infoStringSize, 1983 pParams->request.pInfoString, 1984 &pParams->reply.infoStringLenWritten, 1985 &pExtra->common); 1986 } 1987 1988 /*! 1989 * Validate the requested mode. 
 */
static NvBool ValidateMode(struct NvKmsPerOpen *pOpen,
                           void *pParamsVoid)
{
    struct NvKmsValidateModeParams *pParams = pParamsVoid;
    NVDpyEvoPtr pDpyEvo;

    pDpyEvo = GetPerOpenDpy(pOpen,
                            pParams->request.deviceHandle,
                            pParams->request.dispHandle,
                            pParams->request.dpyId);
    if (pDpyEvo == NULL) {
        return FALSE;
    }

    /* Validation itself always "succeeds"; the verdict is in the reply. */
    nvValidateModeEvo(pDpyEvo, &pParams->request, &pParams->reply);

    return TRUE;
}

/*
 * Copy one LUT ramp structure from userspace.  A NULL (0) user pointer is
 * valid and leaves *ppRampsKernel untouched; on success with a non-NULL
 * pointer, *ppRampsKernel receives a freshly allocated kernel copy the
 * caller must nvFree().
 */
static NvBool
CopyInOneLut(NvU64 pRampsUser, struct NvKmsLutRamps **ppRampsKernel)
{
    struct NvKmsLutRamps *pRampsKernel = NULL;
    int status;

    if (pRampsUser == 0) {
        return TRUE;
    }

    if (!nvKmsNvU64AddressIsSafe(pRampsUser)) {
        return FALSE;
    }

    pRampsKernel = nvAlloc(sizeof(*pRampsKernel));
    if (!pRampsKernel) {
        return FALSE;
    }

    status = nvkms_copyin((char *)pRampsKernel, pRampsUser,
                          sizeof(*pRampsKernel));
    if (status != 0) {
        nvFree(pRampsKernel);
        return FALSE;
    }

    *ppRampsKernel = pRampsKernel;

    return TRUE;
}

/*
 * Replace the user-space input/output ramp pointers in pCommonLutParams with
 * kernel copies.  On failure nothing is leaked and the params are unmodified.
 */
static NvBool
CopyInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams)
{
    struct NvKmsLutRamps *pInputRamps = NULL;
    struct NvKmsLutRamps *pOutputRamps = NULL;

    if (!CopyInOneLut(pCommonLutParams->input.pRamps, &pInputRamps)) {
        goto fail;
    }
    if (!CopyInOneLut(pCommonLutParams->output.pRamps, &pOutputRamps)) {
        goto fail;
    }

    /* From here on, the embedded pointers are kernel pointers (possibly 0). */
    pCommonLutParams->input.pRamps = nvKmsPointerToNvU64(pInputRamps);
    pCommonLutParams->output.pRamps = nvKmsPointerToNvU64(pOutputRamps);

    return TRUE;

fail:
    /* nvFree(NULL) is a no-op, so partially-completed copies are handled. */
    nvFree(pInputRamps);
    nvFree(pOutputRamps);
    return FALSE;
}

/* Free the kernel LUT copies installed by CopyInLutParams(). */
static void
FreeCopiedInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams)
{
    struct NvKmsLutRamps *pInputRamps =
        nvKmsNvU64ToPointer(pCommonLutParams->input.pRamps);
    struct NvKmsLutRamps *pOutputRamps =
        nvKmsNvU64ToPointer(pCommonLutParams->output.pRamps);

    nvFree(pInputRamps);
    nvFree(pOutputRamps);
}

/* No extra user state needed for SetMode; although we lose the user pointers
 * for the LUT ramps after copying them in, that's okay because we don't need
 * to copy them back out again. */
struct NvKmsSetModeExtraUserState
{
};

/*!
 * Copy in any data referenced by pointer for the SetMode request. Currently
 * this is only the LUT ramps.
 */
static NvBool SetModePrepUser(
    void *pParamsVoid,
    void *pExtraUserStateVoid)
{
    struct NvKmsSetModeParams *pParams = pParamsVoid;
    struct NvKmsSetModeRequest *pReq = &pParams->request;
    NvU32 disp, apiHead, dispFailed, apiHeadFailed;

    /* Iterate over all of the common LUT ramp pointers embedded in the SetMode
     * request, and copy in each one. */
    for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) {
        for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) {
            struct NvKmsSetLutCommonParams *pCommonLutParams =
                &pReq->disp[disp].head[apiHead].flip.lut;

            if (!CopyInLutParams(pCommonLutParams)) {
                /* Remember how far we got through these loops before we
                 * failed, so that we can undo everything up to this point.
                 */
                dispFailed = disp;
                apiHeadFailed = apiHead;
                goto fail;
            }
        }
    }

    return TRUE;

fail:
    /*
     * Free exactly the (disp, apiHead) pairs that preceded the failure point
     * in iteration order; the failing pair itself copied nothing in.
     */
    for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) {
        for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) {
            struct NvKmsSetLutCommonParams *pCommonLutParams =
                &pReq->disp[disp].head[apiHead].flip.lut;

            if (disp > dispFailed ||
                (disp == dispFailed && apiHead >= apiHeadFailed)) {
                break;
            }

            FreeCopiedInLutParams(pCommonLutParams);
        }
    }

    return FALSE;
}

/*!
 * Free buffers allocated in SetModePrepUser.
 */
static NvBool SetModeDoneUser(
    void *pParamsVoid,
    void *pExtraUserStateVoid)
{
    struct NvKmsSetModeParams *pParams = pParamsVoid;
    struct NvKmsSetModeRequest *pReq = &pParams->request;
    NvU32 disp, apiHead;

    for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) {
        for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) {
            struct NvKmsSetLutCommonParams *pCommonLutParams =
                &pReq->disp[disp].head[apiHead].flip.lut;

            FreeCopiedInLutParams(pCommonLutParams);
        }
    }

    return TRUE;
}

/*!
 * Perform a modeset on the device.
 */
static NvBool SetMode(struct NvKmsPerOpen *pOpen,
                      void *pParamsVoid)
{
    struct NvKmsSetModeParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    return nvSetDispModeEvo(pOpenDev->pDevEvo, pOpenDev,
                            &pParams->request, &pParams->reply,
                            FALSE /* bypassComposition */,
                            TRUE /* doRasterLock */);
}

/*!
 * Set the cursor image.
2179 */ 2180 static NvBool SetCursorImage(struct NvKmsPerOpen *pOpen, 2181 void *pParamsVoid) 2182 { 2183 struct NvKmsSetCursorImageParams *pParams = pParamsVoid; 2184 struct NvKmsPerOpenDev *pOpenDev; 2185 struct NvKmsPerOpenDisp *pOpenDisp; 2186 NVDispEvoPtr pDispEvo; 2187 2188 if (!GetPerOpenDevAndDisp(pOpen, 2189 pParams->request.deviceHandle, 2190 pParams->request.dispHandle, 2191 &pOpenDev, 2192 &pOpenDisp)) { 2193 return FALSE; 2194 } 2195 2196 pDispEvo = pOpenDisp->pDispEvo; 2197 2198 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { 2199 return FALSE; 2200 } 2201 2202 return nvHsIoctlSetCursorImage(pDispEvo, 2203 pOpenDev, 2204 &pOpenDev->surfaceHandles, 2205 pParams->request.head, 2206 &pParams->request.common); 2207 } 2208 2209 /*! 2210 * Change the cursor position. 2211 */ 2212 static NvBool MoveCursor(struct NvKmsPerOpen *pOpen, 2213 void *pParamsVoid) 2214 { 2215 struct NvKmsMoveCursorParams *pParams = pParamsVoid; 2216 struct NvKmsPerOpenDisp *pOpenDisp; 2217 NVDispEvoPtr pDispEvo; 2218 2219 pOpenDisp = GetPerOpenDisp(pOpen, 2220 pParams->request.deviceHandle, 2221 pParams->request.dispHandle); 2222 if (pOpenDisp == NULL) { 2223 return FALSE; 2224 } 2225 2226 pDispEvo = pOpenDisp->pDispEvo; 2227 2228 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { 2229 return FALSE; 2230 } 2231 2232 return nvHsIoctlMoveCursor(pDispEvo, 2233 pParams->request.head, 2234 &pParams->request.common); 2235 } 2236 2237 /* No extra user state needed for SetLut; although we lose the user pointers 2238 * for the LUT ramps after copying them in, that's okay because we don't need 2239 * to copy them back out again. */ 2240 struct NvKmsSetLutExtraUserState 2241 { 2242 }; 2243 2244 /*! 2245 * Copy in any data referenced by pointer for the SetLut request. Currently 2246 * this is only the LUT ramps. 
2247 */ 2248 static NvBool SetLutPrepUser( 2249 void *pParamsVoid, 2250 void *pExtraUserStateVoid) 2251 { 2252 struct NvKmsSetLutParams *pParams = pParamsVoid; 2253 struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; 2254 2255 return CopyInLutParams(pCommonLutParams); 2256 } 2257 2258 /*! 2259 * Free buffers allocated in SetLutPrepUser. 2260 */ 2261 static NvBool SetLutDoneUser( 2262 void *pParamsVoid, 2263 void *pExtraUserStateVoid) 2264 { 2265 struct NvKmsSetLutParams *pParams = pParamsVoid; 2266 struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; 2267 2268 FreeCopiedInLutParams(pCommonLutParams); 2269 2270 return TRUE; 2271 } 2272 2273 /*! 2274 * Set the LUT on the specified head. 2275 */ 2276 static NvBool SetLut(struct NvKmsPerOpen *pOpen, 2277 void *pParamsVoid) 2278 { 2279 struct NvKmsSetLutParams *pParams = pParamsVoid; 2280 struct NvKmsPerOpenDisp *pOpenDisp; 2281 NVDispEvoPtr pDispEvo; 2282 2283 pOpenDisp = GetPerOpenDisp(pOpen, 2284 pParams->request.deviceHandle, 2285 pParams->request.dispHandle); 2286 if (pOpenDisp == NULL) { 2287 return FALSE; 2288 } 2289 2290 pDispEvo = pOpenDisp->pDispEvo; 2291 2292 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { 2293 return FALSE; 2294 } 2295 2296 if (!nvValidateSetLutCommonParams(pDispEvo->pDevEvo, 2297 &pParams->request.common)) { 2298 return FALSE; 2299 } 2300 2301 nvEvoSetLut(pDispEvo, 2302 pParams->request.head, TRUE /* kickoff */, 2303 &pParams->request.common); 2304 2305 return TRUE; 2306 } 2307 2308 2309 /*! 2310 * Return whether the specified head is idle. 2311 */ 2312 static NvBool IdleMainLayerChannelCheckIdleOneApiHead( 2313 NVDispEvoPtr pDispEvo, 2314 NvU32 apiHead) 2315 { 2316 if (pDispEvo->pHsChannel[apiHead] != NULL) { 2317 return nvHsIdleFlipQueue(pDispEvo->pHsChannel[apiHead], 2318 FALSE /* force */); 2319 } 2320 return nvIdleMainLayerChannelCheckIdleOneApiHead(pDispEvo, apiHead); 2321 } 2322 2323 /*! 
2324 * Return whether all heads described in pRequest are idle. 2325 * 2326 * Note that we loop over all requested heads, rather than return FALSE once we 2327 * find the first non-idle head, because checking for idle has side effects: in 2328 * headSurface, checking for idle gives the headSurface flip queue the 2329 * opportunity to proceed another frame. 2330 */ 2331 static NvBool IdleBaseChannelCheckIdle( 2332 NVDevEvoPtr pDevEvo, 2333 const struct NvKmsIdleBaseChannelRequest *pRequest, 2334 struct NvKmsIdleBaseChannelReply *pReply) 2335 { 2336 NvU32 apiHead, sd; 2337 NVDispEvoPtr pDispEvo; 2338 NvBool allIdle = TRUE; 2339 2340 FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { 2341 2342 for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { 2343 2344 NvBool idle; 2345 2346 if (!nvApiHeadIsActive(pDispEvo, apiHead)) { 2347 continue; 2348 } 2349 2350 if ((pRequest->subDevicesPerHead[apiHead] & NVBIT(sd)) == 0) { 2351 continue; 2352 } 2353 2354 idle = IdleMainLayerChannelCheckIdleOneApiHead(pDispEvo, apiHead); 2355 2356 if (!idle) { 2357 pReply->stopSubDevicesPerHead[apiHead] |= NVBIT(sd); 2358 } 2359 allIdle = allIdle && idle; 2360 } 2361 } 2362 2363 return allIdle; 2364 } 2365 2366 /*! 2367 * Idle all requested heads. 2368 * 2369 * First, wait for the heads to idle naturally. If a timeout is exceeded, then 2370 * force the non-idle heads to idle, and record these in pReply. 2371 */ 2372 static NvBool IdleBaseChannelAll( 2373 NVDevEvoPtr pDevEvo, 2374 const struct NvKmsIdleBaseChannelRequest *pRequest, 2375 struct NvKmsIdleBaseChannelReply *pReply) 2376 { 2377 NvU64 startTime = 0; 2378 2379 /* 2380 * Each element in subDevicesPerHead[] must be large enough to hold one bit 2381 * per subdevice. 2382 */ 2383 ct_assert(NVKMS_MAX_SUBDEVICES <= 2384 (sizeof(pRequest->subDevicesPerHead[0]) * 8)); 2385 2386 /* Loop until all head,sd pairs are idle, or we time out. 
*/ 2387 do { 2388 const NvU32 timeout = 2000000; /* 2 seconds */ 2389 2390 2391 /* 2392 * Clear the pReply data, 2393 * IdleBaseChannelCheckIdle() will fill it afresh. 2394 */ 2395 nvkms_memset(pReply, 0, sizeof(*pReply)); 2396 2397 /* If all heads are idle, we are done. */ 2398 if (IdleBaseChannelCheckIdle(pDevEvo, pRequest, pReply)) { 2399 return TRUE; 2400 } 2401 2402 /* Break out of the loop if we exceed the timeout. */ 2403 if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) { 2404 break; 2405 } 2406 2407 /* At least one head is not idle; yield, and try again. */ 2408 nvkms_yield(); 2409 2410 } while (TRUE); 2411 2412 return TRUE; 2413 } 2414 2415 2416 /*! 2417 * Wait for the requested base channels to be idle, returning whether 2418 * stopping the base channels was necessary. 2419 */ 2420 static NvBool IdleBaseChannel(struct NvKmsPerOpen *pOpen, 2421 void *pParamsVoid) 2422 { 2423 struct NvKmsIdleBaseChannelParams *pParams = pParamsVoid; 2424 struct NvKmsPerOpenDev *pOpenDev; 2425 2426 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2427 2428 if (pOpenDev == NULL) { 2429 return FALSE; 2430 } 2431 2432 /* Only a modeset owner can idle base. */ 2433 2434 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { 2435 return FALSE; 2436 } 2437 2438 return IdleBaseChannelAll(pOpenDev->pDevEvo, 2439 &pParams->request, &pParams->reply); 2440 } 2441 2442 2443 /* No extra user state needed for Flip; although we lose the user pointers 2444 * for the LUT ramps after copying them in, that's okay because we don't need 2445 * to copy them back out again. */ 2446 struct NvKmsFlipExtraUserState 2447 { 2448 // Nothing needed. 2449 }; 2450 2451 /*! 2452 * Copy in any data referenced by pointer for the Flip request. Currently 2453 * this is the flip head request array and the LUT ramps. 
2454 */ 2455 static NvBool FlipPrepUser( 2456 void *pParamsVoid, 2457 void *pExtraUserStateVoid) 2458 { 2459 struct NvKmsFlipParams *pParams = pParamsVoid; 2460 struct NvKmsFlipRequest *pRequest = &pParams->request; 2461 struct NvKmsFlipRequestOneHead *pFlipHeadKernel = NULL; 2462 NvU64 pFlipHeadUser = pRequest->pFlipHead; 2463 size_t size; 2464 NvU32 apiHead, apiHeadFailed; 2465 int status; 2466 2467 if (!nvKmsNvU64AddressIsSafe(pFlipHeadUser)) { 2468 return FALSE; 2469 } 2470 2471 if (pRequest->numFlipHeads <= 0 || 2472 pRequest->numFlipHeads > NV_MAX_FLIP_REQUEST_HEADS) { 2473 return FALSE; 2474 } 2475 2476 size = sizeof(*pFlipHeadKernel) * pRequest->numFlipHeads; 2477 pFlipHeadKernel = nvAlloc(size); 2478 if (!pFlipHeadKernel) { 2479 return FALSE; 2480 } 2481 2482 status = nvkms_copyin((char *)pFlipHeadKernel, pFlipHeadUser, size); 2483 if (status != 0) { 2484 nvFree(pFlipHeadKernel); 2485 return FALSE; 2486 } 2487 2488 /* Iterate over all of the common LUT ramp pointers embedded in the Flip 2489 * request, and copy in each one. */ 2490 for (apiHead = 0; apiHead < pRequest->numFlipHeads; apiHead++) { 2491 struct NvKmsSetLutCommonParams *pCommonLutParams = 2492 &pFlipHeadKernel[apiHead].flip.lut; 2493 2494 if (!CopyInLutParams(pCommonLutParams)) { 2495 /* Remember how far we got through this loop before we 2496 * failed, so that we can undo everything up to this point. */ 2497 apiHeadFailed = apiHead; 2498 goto fail_lut; 2499 } 2500 } 2501 2502 pRequest->pFlipHead = nvKmsPointerToNvU64(pFlipHeadKernel); 2503 2504 return TRUE; 2505 2506 fail_lut: 2507 for (apiHead = 0; apiHead < apiHeadFailed; apiHead++) { 2508 struct NvKmsSetLutCommonParams *pCommonLutParams = 2509 &pFlipHeadKernel[apiHead].flip.lut; 2510 2511 FreeCopiedInLutParams(pCommonLutParams); 2512 } 2513 nvFree(pFlipHeadKernel); 2514 return FALSE; 2515 } 2516 2517 /*! 2518 * Free buffers allocated in FlipPrepUser. 
2519 */ 2520 static NvBool FlipDoneUser( 2521 void *pParamsVoid, 2522 void *pExtraUserStateVoid) 2523 { 2524 struct NvKmsFlipParams *pParams = pParamsVoid; 2525 struct NvKmsFlipRequest *pRequest = &pParams->request; 2526 struct NvKmsFlipRequestOneHead *pFlipHead = nvKmsNvU64ToPointer(pRequest->pFlipHead); 2527 NvU32 apiHead; 2528 2529 for (apiHead = 0; apiHead < pRequest->numFlipHeads; apiHead++) { 2530 struct NvKmsSetLutCommonParams *pCommonLutParams = 2531 &pFlipHead[apiHead].flip.lut; 2532 2533 FreeCopiedInLutParams(pCommonLutParams); 2534 } 2535 nvFree(pFlipHead); 2536 /* The request is not copied back out to userspace (only the reply is), so 2537 * we don't need to worry about restoring the user pointer */ 2538 pRequest->pFlipHead = 0; 2539 2540 return TRUE; 2541 } 2542 2543 /*! 2544 * For each entry in the array pointed to by 'pFlipHead', of length 2545 * 'numFlipHeads', verify that the sd and head values specified are within 2546 * bounds and that there are no duplicates. 2547 */ 2548 static NvBool ValidateFlipHeads( 2549 NVDevEvoPtr pDevEvo, 2550 const struct NvKmsFlipRequestOneHead *pFlipHead, 2551 NvU32 numFlipHeads) 2552 { 2553 NvU32 i; 2554 ct_assert(NVKMS_MAX_HEADS_PER_DISP <= 8); 2555 NvU8 apiHeadsUsed[NVKMS_MAX_SUBDEVICES] = { }; 2556 2557 for (i = 0; i < numFlipHeads; i++) { 2558 const NvU32 sd = pFlipHead[i].sd; 2559 const NvU32 apiHead = pFlipHead[i].head; 2560 2561 if (sd >= pDevEvo->numSubDevices) { 2562 return FALSE; 2563 } 2564 if (apiHead >= pDevEvo->numApiHeads) { 2565 return FALSE; 2566 } 2567 if ((apiHeadsUsed[sd] & (1 << apiHead)) != 0) { 2568 return FALSE; 2569 } 2570 apiHeadsUsed[sd] |= (1 << apiHead); 2571 } 2572 2573 return TRUE; 2574 } 2575 2576 /*! 2577 * Flip the specified head. 
2578 */ 2579 static NvBool Flip(struct NvKmsPerOpen *pOpen, 2580 void *pParamsVoid) 2581 { 2582 struct NvKmsFlipParams *pParams = pParamsVoid; 2583 struct NvKmsPerOpenDev *pOpenDev; 2584 NVDevEvoPtr pDevEvo = NULL; 2585 const struct NvKmsFlipRequest *pRequest = &pParams->request; 2586 const struct NvKmsFlipRequestOneHead *pFlipHead = 2587 nvKmsNvU64ToPointer(pRequest->pFlipHead); 2588 const NvU32 numFlipHeads = pRequest->numFlipHeads; 2589 2590 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2591 2592 if (pOpenDev == NULL) { 2593 return FALSE; 2594 } 2595 2596 pDevEvo = pOpenDev->pDevEvo; 2597 2598 if (!ValidateFlipHeads(pDevEvo, pFlipHead, numFlipHeads)) { 2599 return FALSE; 2600 } 2601 2602 return nvHsIoctlFlip(pDevEvo, pOpenDev, 2603 pFlipHead, numFlipHeads, 2604 pRequest->commit, pRequest->allowVrr, 2605 &pParams->reply); 2606 } 2607 2608 2609 /*! 2610 * Record whether this client is interested in the specified dynamic 2611 * dpy. 2612 */ 2613 static NvBool DeclareDynamicDpyInterest(struct NvKmsPerOpen *pOpen, 2614 void *pParamsVoid) 2615 { 2616 /* XXX NVKMS TODO: implement me. */ 2617 2618 return TRUE; 2619 } 2620 2621 2622 /*! 2623 * Register a surface with the specified per-open + device. 2624 */ 2625 static NvBool RegisterSurface(struct NvKmsPerOpen *pOpen, 2626 void *pParamsVoid) 2627 { 2628 struct NvKmsRegisterSurfaceParams *pParams = pParamsVoid; 2629 struct NvKmsPerOpenDev *pOpenDev; 2630 2631 /* 2632 * Only allow userspace clients to specify memory objects by FD. 2633 * This prevents clients from specifying (hClient, hObject) tuples that 2634 * really belong to other clients. 
2635 */ 2636 if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE && 2637 !pParams->request.useFd) { 2638 return FALSE; 2639 } 2640 2641 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2642 2643 if (pOpenDev == NULL) { 2644 return FALSE; 2645 } 2646 2647 nvEvoRegisterSurface(pOpenDev->pDevEvo, pOpenDev, pParams, 2648 NvHsMapPermissionsReadOnly); 2649 return TRUE; 2650 } 2651 2652 2653 /*! 2654 * Unregister a surface from the specified per-open + device. 2655 */ 2656 static NvBool UnregisterSurface(struct NvKmsPerOpen *pOpen, 2657 void *pParamsVoid) 2658 { 2659 struct NvKmsUnregisterSurfaceParams *pParams = pParamsVoid; 2660 struct NvKmsPerOpenDev *pOpenDev; 2661 2662 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2663 2664 if (pOpenDev == NULL) { 2665 return FALSE; 2666 } 2667 2668 nvEvoUnregisterSurface(pOpenDev->pDevEvo, pOpenDev, 2669 pParams->request.surfaceHandle, 2670 FALSE /* skipUpdate */); 2671 return TRUE; 2672 } 2673 2674 2675 /*! 2676 * Associate a surface with the NvKmsPerOpen specified by 2677 * NvKmsGrantSurfaceParams::request::fd. 2678 */ 2679 static NvBool GrantSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 2680 { 2681 struct NvKmsGrantSurfaceParams *pParams = pParamsVoid; 2682 struct NvKmsPerOpenDev *pOpenDev; 2683 NVSurfaceEvoPtr pSurfaceEvo; 2684 struct NvKmsPerOpen *pOpenFd; 2685 2686 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2687 2688 if (pOpenDev == NULL) { 2689 return FALSE; 2690 } 2691 2692 pSurfaceEvo = 2693 nvEvoGetSurfaceFromHandleNoDispHWAccessOk(pOpenDev->pDevEvo, 2694 &pOpenDev->surfaceHandles, 2695 pParams->request.surfaceHandle); 2696 if (pSurfaceEvo == NULL) { 2697 return FALSE; 2698 } 2699 2700 if (nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)) { 2701 return FALSE; 2702 } 2703 2704 /* Only the owner of the surface can grant it to other clients. 
*/ 2705 2706 if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, 2707 pParams->request.surfaceHandle)) { 2708 return FALSE; 2709 } 2710 2711 pOpenFd = nvkms_get_per_open_data(pParams->request.fd); 2712 2713 if (pOpenFd == NULL) { 2714 return FALSE; 2715 } 2716 2717 if (!AssignNvKmsPerOpenType( 2718 pOpenFd, NvKmsPerOpenTypeGrantSurface, FALSE)) { 2719 return FALSE; 2720 } 2721 2722 nvEvoIncrementSurfaceStructRefCnt(pSurfaceEvo); 2723 pOpenFd->grantSurface.pSurfaceEvo = pSurfaceEvo; 2724 2725 return TRUE; 2726 } 2727 2728 2729 /*! 2730 * Retrieve the surface and device associated with 2731 * NvKmsAcquireSurfaceParams::request::fd, and give the client an 2732 * NvKmsSurfaceHandle to the surface. 2733 */ 2734 static NvBool AcquireSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 2735 { 2736 struct NvKmsAcquireSurfaceParams *pParams = pParamsVoid; 2737 struct NvKmsPerOpen *pOpenFd; 2738 struct NvKmsPerOpenDev *pOpenDev; 2739 NvKmsSurfaceHandle surfaceHandle = 0; 2740 2741 pOpenFd = nvkms_get_per_open_data(pParams->request.fd); 2742 2743 if (pOpenFd == NULL) { 2744 return FALSE; 2745 } 2746 2747 if (pOpenFd->type != NvKmsPerOpenTypeGrantSurface) { 2748 return FALSE; 2749 } 2750 2751 nvAssert(pOpenFd->grantSurface.pSurfaceEvo != NULL); 2752 2753 if (pOpenFd->grantSurface.pSurfaceEvo->rmRefCnt == 0) { /* orphan */ 2754 return FALSE; 2755 } 2756 2757 if (nvEvoSurfaceRefCntsTooLarge(pOpenFd->grantSurface.pSurfaceEvo)) { 2758 return FALSE; 2759 } 2760 2761 /* Since the surface isn't orphaned, it should have an owner, with a 2762 * pOpenDev and a pDevEvo. Get the pOpenDev for the acquiring client that 2763 * matches the owner's pDevEvo. 
*/ 2764 nvAssert(pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo != NULL); 2765 pOpenDev = DevEvoToOpenDev(pOpen, 2766 pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo); 2767 2768 if (pOpenDev == NULL) { 2769 return FALSE; 2770 } 2771 2772 surfaceHandle = 2773 nvEvoCreateApiHandle(&pOpenDev->surfaceHandles, 2774 pOpenFd->grantSurface.pSurfaceEvo); 2775 2776 if (surfaceHandle == 0) { 2777 return FALSE; 2778 } 2779 2780 nvEvoIncrementSurfaceStructRefCnt(pOpenFd->grantSurface.pSurfaceEvo); 2781 2782 pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; 2783 pParams->reply.surfaceHandle = surfaceHandle; 2784 2785 return TRUE; 2786 } 2787 2788 static NvBool ReleaseSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 2789 { 2790 struct NvKmsReleaseSurfaceParams *pParams = pParamsVoid; 2791 struct NvKmsPerOpenDev *pOpenDev; 2792 2793 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2794 2795 if (pOpenDev == NULL) { 2796 return FALSE; 2797 } 2798 2799 nvEvoReleaseSurface(pOpenDev->pDevEvo, pOpenDev, 2800 pParams->request.surfaceHandle); 2801 return TRUE; 2802 } 2803 2804 2805 /*! 2806 * Associate a swap group with the NvKmsPerOpen specified by 2807 * NvKmsGrantSwapGroupParams::request::fd. 
2808 */ 2809 static NvBool GrantSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 2810 { 2811 struct NvKmsGrantSwapGroupParams *pParams = pParamsVoid; 2812 struct NvKmsPerOpenDev *pOpenDev; 2813 NVSwapGroupRec *pSwapGroup; 2814 struct NvKmsPerOpen *pOpenFd; 2815 2816 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2817 2818 if (pOpenDev == NULL) { 2819 return FALSE; 2820 } 2821 2822 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { 2823 return FALSE; 2824 } 2825 2826 pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles, 2827 pParams->request.swapGroupHandle); 2828 2829 if (pSwapGroup == NULL) { 2830 return FALSE; 2831 } 2832 2833 pOpenFd = nvkms_get_per_open_data(pParams->request.fd); 2834 2835 if (pOpenFd == NULL) { 2836 return FALSE; 2837 } 2838 2839 /* 2840 * Increment the swap group refcnt while granting it so the SwapGroup 2841 * won't be freed out from under the grant fd. To complement this, 2842 * nvKmsClose() on NvKmsPerOpenTypeGrantSwapGroup calls 2843 * DecrementSwapGroupRefCnt(). 2844 */ 2845 if (!nvHsIncrementSwapGroupRefCnt(pSwapGroup)) { 2846 return FALSE; 2847 } 2848 2849 if (!AssignNvKmsPerOpenType( 2850 pOpenFd, NvKmsPerOpenTypeGrantSwapGroup, FALSE)) { 2851 nvHsDecrementSwapGroupRefCnt(pSwapGroup); 2852 return FALSE; 2853 } 2854 2855 /* we must not fail beyond this point */ 2856 2857 pOpenFd->grantSwapGroup.pSwapGroup = pSwapGroup; 2858 2859 pOpenFd->grantSwapGroup.pDevEvo = pOpenDev->pDevEvo; 2860 2861 return TRUE; 2862 } 2863 2864 2865 /*! 2866 * Retrieve the swap group and device associated with 2867 * NvKmsAcquireSwapGroupParams::request::fd, give the client an 2868 * NvKmsSwapGroupHandle to the swap group, and increment the 2869 * swap group's reference count. 
2870 */ 2871 static NvBool AcquireSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 2872 { 2873 struct NvKmsAcquireSwapGroupParams *pParams = pParamsVoid; 2874 struct NvKmsPerOpen *pOpenFd; 2875 struct NvKmsPerOpenDev *pOpenDev; 2876 NvKmsSwapGroupHandle swapGroupHandle = 0; 2877 2878 pOpenFd = nvkms_get_per_open_data(pParams->request.fd); 2879 2880 if (pOpenFd == NULL) { 2881 return FALSE; 2882 } 2883 2884 if (pOpenFd->type != NvKmsPerOpenTypeGrantSwapGroup) { 2885 return FALSE; 2886 } 2887 2888 /* 2889 * pSwapGroup is only freed when its last reference goes away; if pOpenFd 2890 * hasn't yet been closed, then its reference incremented in 2891 * GrantSwapGroup() couldn't have been decremented in nvKmsClose() 2892 */ 2893 nvAssert(pOpenFd->grantSwapGroup.pSwapGroup != NULL); 2894 nvAssert(pOpenFd->grantSwapGroup.pDevEvo != NULL); 2895 2896 if (pOpenFd->grantSwapGroup.pSwapGroup->zombie) { 2897 return FALSE; 2898 } 2899 2900 pOpenDev = DevEvoToOpenDev(pOpen, pOpenFd->grantSwapGroup.pDevEvo); 2901 2902 if (pOpenDev == NULL) { 2903 return FALSE; 2904 } 2905 2906 if (nvEvoApiHandlePointerIsPresent(&pOpenDev->swapGroupHandles, 2907 pOpenFd->grantSwapGroup.pSwapGroup)) { 2908 return FALSE; 2909 } 2910 2911 if (!nvHsIncrementSwapGroupRefCnt(pOpenFd->grantSwapGroup.pSwapGroup)) { 2912 return FALSE; 2913 } 2914 2915 swapGroupHandle = 2916 nvEvoCreateApiHandle(&pOpenDev->swapGroupHandles, 2917 pOpenFd->grantSwapGroup.pSwapGroup); 2918 2919 if (swapGroupHandle == 0) { 2920 nvHsDecrementSwapGroupRefCnt(pOpenFd->grantSwapGroup.pSwapGroup); 2921 return FALSE; 2922 } 2923 2924 /* we must not fail beyond this point */ 2925 2926 pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; 2927 pParams->reply.swapGroupHandle = swapGroupHandle; 2928 2929 return TRUE; 2930 } 2931 2932 2933 /*! 2934 * Free this client's reference to the swap group. 2935 * 2936 * This is meant to be called by clients that have acquired the swap group 2937 * handle through AcquireSwapGroup(). 
2938 */ 2939 static NvBool ReleaseSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 2940 { 2941 struct NvKmsReleaseSwapGroupParams *pParams = pParamsVoid; 2942 struct NvKmsPerOpenDev *pOpenDev; 2943 NVSwapGroupRec *pSwapGroup; 2944 NvKmsSwapGroupHandle handle = pParams->request.swapGroupHandle; 2945 2946 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2947 2948 if (pOpenDev == NULL) { 2949 return FALSE; 2950 } 2951 2952 /* 2953 * This may operate on a swap group that has already been freed 2954 * (pSwapGroup->zombie is TRUE). 2955 */ 2956 pSwapGroup = nvHsGetSwapGroupStruct(&pOpenDev->swapGroupHandles, 2957 handle); 2958 if (pSwapGroup == NULL) { 2959 return FALSE; 2960 } 2961 2962 nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle); 2963 2964 nvHsDecrementSwapGroupRefCnt(pSwapGroup); 2965 2966 return TRUE; 2967 } 2968 2969 /*! 2970 * Change the value of the specified attribute. 2971 */ 2972 static NvBool SetDpyAttribute(struct NvKmsPerOpen *pOpen, 2973 void *pParamsVoid) 2974 { 2975 struct NvKmsSetDpyAttributeParams *pParams = pParamsVoid; 2976 NVDpyEvoPtr pDpyEvo; 2977 2978 pDpyEvo = GetPerOpenDpy(pOpen, 2979 pParams->request.deviceHandle, 2980 pParams->request.dispHandle, 2981 pParams->request.dpyId); 2982 if (pDpyEvo == NULL) { 2983 return FALSE; 2984 } 2985 2986 return nvSetDpyAttributeEvo(pDpyEvo, pParams); 2987 } 2988 2989 2990 /*! 2991 * Get the value of the specified attribute. 2992 */ 2993 static NvBool GetDpyAttribute(struct NvKmsPerOpen *pOpen, 2994 void *pParamsVoid) 2995 { 2996 struct NvKmsGetDpyAttributeParams *pParams = pParamsVoid; 2997 NVDpyEvoPtr pDpyEvo; 2998 2999 pDpyEvo = GetPerOpenDpy(pOpen, 3000 pParams->request.deviceHandle, 3001 pParams->request.dispHandle, 3002 pParams->request.dpyId); 3003 if (pDpyEvo == NULL) { 3004 return FALSE; 3005 } 3006 3007 return nvGetDpyAttributeEvo(pDpyEvo, pParams); 3008 } 3009 3010 3011 /*! 3012 * Get the valid values of the specified attribute. 
3013 */ 3014 static NvBool GetDpyAttributeValidValues(struct NvKmsPerOpen *pOpen, 3015 void *pParamsVoid) 3016 { 3017 struct NvKmsGetDpyAttributeValidValuesParams *pParams = pParamsVoid; 3018 NVDpyEvoPtr pDpyEvo; 3019 3020 pDpyEvo = GetPerOpenDpy(pOpen, 3021 pParams->request.deviceHandle, 3022 pParams->request.dispHandle, 3023 pParams->request.dpyId); 3024 if (pDpyEvo == NULL) { 3025 return FALSE; 3026 } 3027 3028 return nvGetDpyAttributeValidValuesEvo(pDpyEvo, pParams); 3029 } 3030 3031 3032 /*! 3033 * Set the value of the specified attribute. 3034 */ 3035 static NvBool SetDispAttribute(struct NvKmsPerOpen *pOpen, 3036 void *pParamsVoid) 3037 { 3038 struct NvKmsSetDispAttributeParams *pParams = pParamsVoid; 3039 struct NvKmsPerOpenDisp *pOpenDisp; 3040 3041 pOpenDisp = GetPerOpenDisp(pOpen, 3042 pParams->request.deviceHandle, 3043 pParams->request.dispHandle); 3044 if (pOpenDisp == NULL) { 3045 return FALSE; 3046 } 3047 3048 return nvSetDispAttributeEvo(pOpenDisp->pDispEvo, pParams); 3049 } 3050 3051 3052 /*! 3053 * Get the value of the specified attribute. 3054 */ 3055 static NvBool GetDispAttribute(struct NvKmsPerOpen *pOpen, 3056 void *pParamsVoid) 3057 { 3058 struct NvKmsGetDispAttributeParams *pParams = pParamsVoid; 3059 struct NvKmsPerOpenDisp *pOpenDisp; 3060 3061 pOpenDisp = GetPerOpenDisp(pOpen, 3062 pParams->request.deviceHandle, 3063 pParams->request.dispHandle); 3064 if (pOpenDisp == NULL) { 3065 return FALSE; 3066 } 3067 3068 return nvGetDispAttributeEvo(pOpenDisp->pDispEvo, pParams); 3069 } 3070 3071 3072 /*! 3073 * Get the valid values of the specified attribute. 
3074 */ 3075 static NvBool GetDispAttributeValidValues(struct NvKmsPerOpen *pOpen, 3076 void *pParamsVoid) 3077 { 3078 struct NvKmsGetDispAttributeValidValuesParams *pParams = pParamsVoid; 3079 3080 struct NvKmsPerOpenDisp *pOpenDisp; 3081 3082 pOpenDisp = GetPerOpenDisp(pOpen, 3083 pParams->request.deviceHandle, 3084 pParams->request.dispHandle); 3085 if (pOpenDisp == NULL) { 3086 return FALSE; 3087 } 3088 3089 return nvGetDispAttributeValidValuesEvo(pOpenDisp->pDispEvo, pParams); 3090 } 3091 3092 3093 /*! 3094 * Get information about the specified framelock device. 3095 */ 3096 static NvBool QueryFrameLock(struct NvKmsPerOpen *pOpen, 3097 void *pParamsVoid) 3098 { 3099 struct NvKmsQueryFrameLockParams *pParams = pParamsVoid; 3100 struct NvKmsPerOpenFrameLock *pOpenFrameLock; 3101 const NVFrameLockEvoRec *pFrameLockEvo; 3102 NvU32 gpu; 3103 3104 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 3105 3106 pOpenFrameLock = 3107 GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); 3108 3109 if (pOpenFrameLock == NULL) { 3110 return FALSE; 3111 } 3112 3113 pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; 3114 3115 ct_assert(ARRAY_LEN(pFrameLockEvo->gpuIds) <= 3116 ARRAY_LEN(pParams->reply.gpuIds)); 3117 3118 for (gpu = 0; gpu < pFrameLockEvo->nGpuIds; gpu++) { 3119 pParams->reply.gpuIds[gpu] = pFrameLockEvo->gpuIds[gpu]; 3120 } 3121 3122 return TRUE; 3123 } 3124 3125 3126 static NvBool SetFrameLockAttribute(struct NvKmsPerOpen *pOpen, 3127 void *pParamsVoid) 3128 { 3129 struct NvKmsSetFrameLockAttributeParams *pParams = pParamsVoid; 3130 struct NvKmsPerOpenFrameLock *pOpenFrameLock; 3131 NVFrameLockEvoRec *pFrameLockEvo; 3132 3133 pOpenFrameLock = 3134 GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); 3135 3136 if (pOpenFrameLock == NULL) { 3137 return FALSE; 3138 } 3139 3140 pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; 3141 3142 return nvSetFrameLockAttributeEvo(pFrameLockEvo, pParams); 3143 } 3144 3145 3146 static NvBool 
GetFrameLockAttribute(struct NvKmsPerOpen *pOpen, 3147 void *pParamsVoid) 3148 { 3149 struct NvKmsGetFrameLockAttributeParams *pParams = pParamsVoid; 3150 struct NvKmsPerOpenFrameLock *pOpenFrameLock; 3151 const NVFrameLockEvoRec *pFrameLockEvo; 3152 3153 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 3154 3155 pOpenFrameLock = 3156 GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); 3157 3158 if (pOpenFrameLock == NULL) { 3159 return FALSE; 3160 } 3161 3162 pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; 3163 3164 return nvGetFrameLockAttributeEvo(pFrameLockEvo, pParams); 3165 } 3166 3167 3168 static NvBool GetFrameLockAttributeValidValues(struct NvKmsPerOpen *pOpen, 3169 void *pParamsVoid) 3170 { 3171 struct NvKmsGetFrameLockAttributeValidValuesParams *pParams = pParamsVoid; 3172 struct NvKmsPerOpenFrameLock *pOpenFrameLock; 3173 const NVFrameLockEvoRec *pFrameLockEvo; 3174 3175 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 3176 3177 pOpenFrameLock = 3178 GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); 3179 3180 if (pOpenFrameLock == NULL) { 3181 return FALSE; 3182 } 3183 3184 pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; 3185 3186 return nvGetFrameLockAttributeValidValuesEvo(pFrameLockEvo, pParams); 3187 } 3188 3189 3190 /*! 3191 * Pop the next event off of the client's event queue. 
3192 */ 3193 static NvBool GetNextEvent(struct NvKmsPerOpen *pOpen, 3194 void *pParamsVoid) 3195 { 3196 struct NvKmsGetNextEventParams *pParams = pParamsVoid; 3197 struct NvKmsPerOpenEventListEntry *pEntry; 3198 3199 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 3200 3201 if (nvListIsEmpty(&pOpen->ioctl.eventList)) { 3202 pParams->reply.valid = FALSE; 3203 return TRUE; 3204 } 3205 3206 pEntry = nvListFirstEntry(&pOpen->ioctl.eventList, 3207 struct NvKmsPerOpenEventListEntry, 3208 eventListEntry); 3209 3210 pParams->reply.valid = TRUE; 3211 pParams->reply.event = pEntry->event; 3212 3213 nvListDel(&pEntry->eventListEntry); 3214 3215 nvFree(pEntry); 3216 3217 if (nvListIsEmpty(&pOpen->ioctl.eventList)) { 3218 nvkms_event_queue_changed(pOpen->pOpenKernel, FALSE); 3219 } 3220 3221 return TRUE; 3222 } 3223 3224 3225 /*! 3226 * Record the client's event interest for the specified device. 3227 */ 3228 static NvBool DeclareEventInterest(struct NvKmsPerOpen *pOpen, 3229 void *pParamsVoid) 3230 { 3231 struct NvKmsDeclareEventInterestParams *pParams = pParamsVoid; 3232 3233 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 3234 3235 pOpen->ioctl.eventInterestMask = pParams->request.interestMask; 3236 3237 return TRUE; 3238 } 3239 3240 static NvBool ClearUnicastEvent(struct NvKmsPerOpen *pOpen, 3241 void *pParamsVoid) 3242 { 3243 struct NvKmsClearUnicastEventParams *pParams = pParamsVoid; 3244 struct NvKmsPerOpen *pOpenFd = NULL; 3245 3246 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 3247 3248 pOpenFd = nvkms_get_per_open_data(pParams->request.unicastEventFd); 3249 3250 if (pOpenFd == NULL) { 3251 return FALSE; 3252 } 3253 3254 if (pOpenFd->type != NvKmsPerOpenTypeUnicastEvent) { 3255 return FALSE; 3256 } 3257 3258 nvkms_event_queue_changed(pOpenFd->pOpenKernel, FALSE); 3259 3260 return TRUE; 3261 } 3262 3263 static NvBool SetLayerPosition(struct NvKmsPerOpen *pOpen, 3264 void *pParamsVoid) 3265 { 3266 struct NvKmsSetLayerPositionParams *pParams = pParamsVoid; 3267 struct 
NvKmsPerOpenDev *pOpenDev; 3268 3269 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3270 3271 if (pOpenDev == NULL) { 3272 return FALSE; 3273 } 3274 3275 /* XXX NVKMS HEADSURFACE TODO: intercept */ 3276 3277 return nvLayerSetPositionEvo(pOpenDev->pDevEvo, &pParams->request); 3278 } 3279 3280 static NvBool GrabOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 3281 { 3282 struct NvKmsGrabOwnershipParams *pParams = pParamsVoid; 3283 struct NvKmsPerOpenDev *pOpenDev; 3284 3285 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3286 3287 if (pOpenDev == NULL) { 3288 return FALSE; 3289 } 3290 3291 // The only kind of ownership right now is modeset ownership. 3292 return GrabModesetOwnership(pOpenDev); 3293 } 3294 3295 static NvBool ReleaseOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 3296 { 3297 struct NvKmsReleaseOwnershipParams *pParams = pParamsVoid; 3298 struct NvKmsPerOpenDev *pOpenDev; 3299 3300 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3301 3302 if (pOpenDev == NULL) { 3303 return FALSE; 3304 } 3305 3306 // The only kind of ownership right now is modeset ownership. 3307 return ReleaseModesetOwnership(pOpenDev); 3308 } 3309 3310 static NvBool GrantPermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 3311 { 3312 struct NvKmsGrantPermissionsParams *pParams = pParamsVoid; 3313 struct NvKmsPerOpenDev *pOpenDev; 3314 struct NvKmsPerOpen *pOpenFd; 3315 3316 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3317 3318 if (pOpenDev == NULL) { 3319 return FALSE; 3320 } 3321 3322 /* Only a modeset owner can grant permissions. 
*/ 3323 3324 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { 3325 return FALSE; 3326 } 3327 3328 if (!ValidateNvKmsPermissions(pOpenDev->pDevEvo, 3329 &pParams->request.permissions, 3330 pOpen->clientType)) { 3331 return FALSE; 3332 } 3333 3334 pOpenFd = nvkms_get_per_open_data(pParams->request.fd); 3335 3336 if (pOpenFd == NULL) { 3337 return FALSE; 3338 } 3339 3340 if (!AssignNvKmsPerOpenType( 3341 pOpenFd, NvKmsPerOpenTypeGrantPermissions, FALSE)) { 3342 return FALSE; 3343 } 3344 3345 pOpenFd->grantPermissions.permissions = pParams->request.permissions; 3346 3347 pOpenFd->grantPermissions.pDevEvo = pOpenDev->pDevEvo; 3348 3349 return TRUE; 3350 } 3351 3352 static NvBool AcquirePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 3353 { 3354 struct NvKmsAcquirePermissionsParams *pParams = pParamsVoid; 3355 struct NvKmsPerOpenDev *pOpenDev; 3356 struct NvKmsPerOpen *pOpenFd; 3357 const struct NvKmsPermissions *pPermissionsNew; 3358 enum NvKmsPermissionsType type; 3359 3360 pOpenFd = nvkms_get_per_open_data(pParams->request.fd); 3361 3362 if (pOpenFd == NULL) { 3363 return FALSE; 3364 } 3365 3366 if (pOpenFd->type != NvKmsPerOpenTypeGrantPermissions) { 3367 return FALSE; 3368 } 3369 3370 pOpenDev = DevEvoToOpenDev(pOpen, pOpenFd->grantPermissions.pDevEvo); 3371 3372 if (pOpenDev == NULL) { 3373 return FALSE; 3374 } 3375 3376 type = pOpenFd->grantPermissions.permissions.type; 3377 3378 pPermissionsNew = &pOpenFd->grantPermissions.permissions; 3379 3380 if (type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { 3381 NvU32 d, h; 3382 3383 for (d = 0; d < ARRAY_LEN(pOpenDev->flipPermissions.disp); d++) { 3384 for (h = 0; h < ARRAY_LEN(pOpenDev->flipPermissions. 
3385 disp[d].head); h++) { 3386 pOpenDev->flipPermissions.disp[d].head[h].layerMask |= 3387 pPermissionsNew->flip.disp[d].head[h].layerMask; 3388 } 3389 } 3390 3391 pParams->reply.permissions.flip = pOpenDev->flipPermissions; 3392 3393 } else if (type == NV_KMS_PERMISSIONS_TYPE_MODESET) { 3394 NvU32 d, h; 3395 3396 for (d = 0; d < ARRAY_LEN(pOpenDev->modesetPermissions.disp); d++) { 3397 for (h = 0; h < ARRAY_LEN(pOpenDev->modesetPermissions. 3398 disp[d].head); h++) { 3399 pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList = 3400 nvAddDpyIdListToDpyIdList( 3401 pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList, 3402 pPermissionsNew->modeset.disp[d].head[h].dpyIdList); 3403 } 3404 } 3405 3406 pParams->reply.permissions.modeset = pOpenDev->modesetPermissions; 3407 3408 } else if (type == NV_KMS_PERMISSIONS_TYPE_SUB_OWNER) { 3409 3410 if (pOpenDev->pDevEvo->modesetSubOwner != NULL) { 3411 /* There can be only one sub-owner */ 3412 return FALSE; 3413 } 3414 3415 pOpenDev->pDevEvo->modesetSubOwner = pOpenDev; 3416 AssignFullNvKmsPermissions(pOpenDev); 3417 3418 } else { 3419 /* 3420 * GrantPermissions() should ensure that 3421 * pOpenFd->grantPermissions.permissions.type is always valid. 3422 */ 3423 nvAssert(!"AcquirePermissions validation failure"); 3424 return FALSE; 3425 } 3426 3427 pParams->reply.permissions.type = type; 3428 pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; 3429 3430 return TRUE; 3431 } 3432 3433 /*! 3434 * Clear the set of permissions from pRevokingOpenDev. 3435 * 3436 * For NvKmsPerOpen::type==Ioctl, clear from permissions. It doesn't clear 3437 * itself or privileged. 3438 * 3439 * For NvKmsPerOpen::type==GrantPermissions, clear from 3440 * NvKmsPerOpen::grantPermissions, and reset NvKmsPerOpen::type to Undefined 3441 * if it is empty. 
3442 */ 3443 static NvBool RevokePermissionsSet( 3444 struct NvKmsPerOpenDev *pRevokingOpenDev, 3445 const struct NvKmsPermissions *pRevokingPermissions) 3446 { 3447 const NVDevEvoRec *pDevEvo; 3448 struct NvKmsPerOpen *pOpen; 3449 const struct NvKmsFlipPermissions *pRemoveFlip; 3450 const struct NvKmsModesetPermissions *pRemoveModeset; 3451 3452 // Only process valid permissions. 3453 if (pRevokingPermissions->type != NV_KMS_PERMISSIONS_TYPE_FLIPPING && 3454 pRevokingPermissions->type != NV_KMS_PERMISSIONS_TYPE_MODESET) { 3455 return FALSE; 3456 } 3457 3458 pDevEvo = pRevokingOpenDev->pDevEvo; 3459 pRemoveFlip = 3460 (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) 3461 ? &pRevokingPermissions->flip 3462 : NULL; 3463 pRemoveModeset = 3464 (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET) 3465 ? &pRevokingPermissions->modeset 3466 : NULL; 3467 3468 nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { 3469 if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) && 3470 (pOpen->grantPermissions.pDevEvo == pDevEvo)) { 3471 NvBool remainingPermissions = FALSE; 3472 struct NvKmsPermissions *pFdPermissions = 3473 &pOpen->grantPermissions.permissions; 3474 3475 if (pFdPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { 3476 remainingPermissions = 3477 RemoveFlipPermissions(&pFdPermissions->flip, pRemoveFlip); 3478 } else { 3479 remainingPermissions = RemoveModesetPermissions( 3480 &pFdPermissions->modeset, pRemoveModeset); 3481 } 3482 3483 // Reset if it is empty. 
3484 if (!remainingPermissions) { 3485 nvkms_memset(&pOpen->grantPermissions, 0, 3486 sizeof(pOpen->grantPermissions)); 3487 pOpen->type = NvKmsPerOpenTypeUndefined; 3488 } 3489 3490 } else if (pOpen->type == NvKmsPerOpenTypeIoctl) { 3491 3492 struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); 3493 if (pOpenDev == NULL) { 3494 continue; 3495 } 3496 3497 if (pOpenDev == pRevokingOpenDev || pOpenDev->isPrivileged) { 3498 continue; 3499 } 3500 3501 if (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { 3502 RemoveFlipPermissions(&pOpenDev->flipPermissions, pRemoveFlip); 3503 } else { 3504 RemoveModesetPermissions(&pOpenDev->modesetPermissions, 3505 pRemoveModeset); 3506 } 3507 } 3508 } 3509 3510 return TRUE; 3511 } 3512 3513 static NvBool IsHeadRevoked(const NVDispEvoRec *pDispEvo, 3514 const NvU32 apiHead, 3515 void *pData) 3516 { 3517 const struct NvKmsPermissions *pPermissions = pData; 3518 3519 return !nvDpyIdListIsEmpty( 3520 pPermissions->modeset.disp[pDispEvo->displayOwner].head[apiHead].dpyIdList); 3521 } 3522 3523 static NvBool RevokePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 3524 { 3525 struct NvKmsRevokePermissionsParams *pParams = pParamsVoid; 3526 struct NvKmsPerOpenDev *pOpenDev = 3527 GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3528 const NvU32 validBitmask = 3529 NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) | 3530 NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET) | 3531 NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER); 3532 3533 if (pOpenDev == NULL) { 3534 return FALSE; 3535 } 3536 3537 /* Reject invalid bitmasks. */ 3538 3539 if ((pParams->request.permissionsTypeBitmask & ~validBitmask) != 0) { 3540 return FALSE; 3541 } 3542 3543 if ((pParams->request.permissionsTypeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER)) != 0) { 3544 if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) { 3545 /* Only the modeset owner can revoke sub-owner permissions. 
*/ 3546 return FALSE; 3547 } 3548 3549 /* 3550 * When revoking ownership permissions, shut down all heads. 3551 * 3552 * This is necessary to keep the state of nvidia-drm in sync with NVKMS. 3553 * Otherwise, an NVKMS client can leave heads enabled when handing off 3554 * control of the device back to nvidia-drm, and nvidia-drm's flip queue 3555 * handling will get out of sync because it thinks all heads are 3556 * disabled and does not expect flip events on those heads. 3557 */ 3558 nvShutDownApiHeads(pOpenDev->pDevEvo, pOpenDev, NULL /* pTestFunc */, 3559 NULL /* pData */, 3560 TRUE /* doRasterLock */); 3561 } 3562 3563 /* 3564 * Only a client with sub-owner permissions (or better) can revoke other 3565 * kinds of permissions. 3566 */ 3567 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { 3568 return FALSE; 3569 } 3570 3571 if (pParams->request.permissionsTypeBitmask > 0) { 3572 // Old behavior, revoke all permissions of a type. 3573 3574 /* Revoke permissions for everyone except the caller. */ 3575 RevokePermissionsInternal(pParams->request.permissionsTypeBitmask, 3576 pOpenDev->pDevEvo, 3577 pOpenDev /* pOpenDevExclude */); 3578 } else { 3579 /* If not using bitmask, revoke using the set. */ 3580 if (!RevokePermissionsSet(pOpenDev, &pParams->request.permissions)) { 3581 return FALSE; 3582 } 3583 3584 /* 3585 * When revoking ownership permissions, shut down those heads. 3586 * 3587 * This is necessary to keep the state of nvidia-drm in sync with NVKMS. 3588 * Otherwise, an NVKMS client can leave heads enabled when handing off 3589 * control of the device back to nvidia-drm, which prevents them from 3590 * being able to be leased again. 
3591 */ 3592 if (pParams->request.permissions.type == NV_KMS_PERMISSIONS_TYPE_MODESET) { 3593 nvShutDownApiHeads(pOpenDev->pDevEvo, pOpenDev, IsHeadRevoked, 3594 &pParams->request.permissions, 3595 TRUE /* doRasterLock */); 3596 } 3597 } 3598 3599 return TRUE; 3600 } 3601 3602 static NvBool RegisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen, 3603 void *pParamsVoid) 3604 { 3605 struct NvKmsRegisterDeferredRequestFifoParams *pParams = pParamsVoid; 3606 struct NvKmsPerOpenDev *pOpenDev; 3607 NVSurfaceEvoPtr pSurfaceEvo; 3608 NVDeferredRequestFifoRec *pDeferredRequestFifo; 3609 NvKmsDeferredRequestFifoHandle handle; 3610 3611 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3612 3613 if (pOpenDev == NULL) { 3614 return FALSE; 3615 } 3616 3617 pSurfaceEvo = nvEvoGetSurfaceFromHandleNoDispHWAccessOk( 3618 pOpenDev->pDevEvo, 3619 &pOpenDev->surfaceHandles, 3620 pParams->request.surfaceHandle); 3621 3622 if (pSurfaceEvo == NULL) { 3623 return FALSE; 3624 } 3625 3626 /* 3627 * WAR Bug 2050970: If a surface is unregistered and it wasn't registered 3628 * with NvKmsRegisterSurfaceRequest::noDisplayHardwareAccess, then the call 3629 * to nvRMSyncEvoChannel() in nvEvoDecrementSurfaceRefCnts() may hang 3630 * if any flips in flight acquire on semaphore releases that haven't 3631 * occurred yet. 3632 * 3633 * Since a ctxdma is not necessary for the deferred request fifo surface, 3634 * we work around this by forcing all surfaces that will be registered as 3635 * a deferred request fifo to be registered with 3636 * noDisplayHardwareAccess==TRUE, then skip the idle in 3637 * nvEvoDecrementSurfaceRefCnts() for these surfaces. 
3638 */ 3639 if (pSurfaceEvo->requireDisplayHardwareAccess) { 3640 return FALSE; 3641 } 3642 3643 pDeferredRequestFifo = 3644 nvEvoRegisterDeferredRequestFifo(pOpenDev->pDevEvo, pSurfaceEvo); 3645 3646 if (pDeferredRequestFifo == NULL) { 3647 return FALSE; 3648 } 3649 3650 handle = nvEvoCreateApiHandle(&pOpenDev->deferredRequestFifoHandles, 3651 pDeferredRequestFifo); 3652 3653 if (handle == 0) { 3654 nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, 3655 pDeferredRequestFifo); 3656 return FALSE; 3657 } 3658 3659 pParams->reply.deferredRequestFifoHandle = handle; 3660 3661 return TRUE; 3662 } 3663 3664 static NvBool UnregisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen, 3665 void *pParamsVoid) 3666 { 3667 struct NvKmsUnregisterDeferredRequestFifoParams *pParams = pParamsVoid; 3668 NvKmsDeferredRequestFifoHandle handle = 3669 pParams->request.deferredRequestFifoHandle; 3670 NVDeferredRequestFifoRec *pDeferredRequestFifo; 3671 struct NvKmsPerOpenDev *pOpenDev = 3672 GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3673 3674 if (pOpenDev == NULL) { 3675 return FALSE; 3676 } 3677 3678 pDeferredRequestFifo = 3679 nvEvoGetPointerFromApiHandle( 3680 &pOpenDev->deferredRequestFifoHandles, handle); 3681 3682 if (pDeferredRequestFifo == NULL) { 3683 return FALSE; 3684 } 3685 3686 nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle); 3687 3688 nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, pDeferredRequestFifo); 3689 3690 return TRUE; 3691 } 3692 3693 /*! 3694 * Get the CRC32 data for the specified dpy. 
3695 */ 3696 static NvBool QueryDpyCRC32(struct NvKmsPerOpen *pOpen, 3697 void *pParamsVoid) 3698 { 3699 struct NvKmsQueryDpyCRC32Params *pParams = pParamsVoid; 3700 struct NvKmsPerOpenDev *pOpenDev; 3701 struct NvKmsPerOpenDisp *pOpenDisp; 3702 NVDispEvoPtr pDispEvo; 3703 CRC32NotifierCrcOut crcOut; 3704 3705 if (!GetPerOpenDevAndDisp(pOpen, 3706 pParams->request.deviceHandle, 3707 pParams->request.dispHandle, 3708 &pOpenDev, 3709 &pOpenDisp)) { 3710 return FALSE; 3711 } 3712 3713 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { 3714 // Only a current owner can query CRC32 values. 3715 return FALSE; 3716 } 3717 3718 pDispEvo = pOpenDisp->pDispEvo; 3719 3720 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { 3721 return FALSE; 3722 } 3723 3724 nvkms_memset(&(pParams->reply), 0, sizeof(pParams->reply)); 3725 3726 // Since will only read 1 frame of CRCs, point to single reply struct vals 3727 crcOut.rasterGeneratorCrc32 = &(pParams->reply.rasterGeneratorCrc32); 3728 crcOut.compositorCrc32 = &(pParams->reply.compositorCrc32); 3729 crcOut.outputCrc32 = &(pParams->reply.outputCrc32); 3730 3731 { 3732 /* 3733 * XXX[2Heads1OR] Is it sufficient to query CRC only for the primary 3734 * hardware head? 
3735 */ 3736 NvU32 head = nvGetPrimaryHwHead(pDispEvo, pParams->request.head); 3737 3738 nvAssert(head != NV_INVALID_HEAD); 3739 3740 if (!nvReadCRC32Evo(pDispEvo, head, &crcOut)) { 3741 return FALSE; 3742 } 3743 } 3744 3745 return TRUE; 3746 } 3747 3748 static NvBool AllocSwapGroup( 3749 struct NvKmsPerOpen *pOpen, 3750 void *pParamsVoid) 3751 { 3752 struct NvKmsAllocSwapGroupParams *pParams = pParamsVoid; 3753 struct NvKmsPerOpenDev *pOpenDev; 3754 NVSwapGroupRec *pSwapGroup; 3755 NvKmsSwapGroupHandle handle; 3756 3757 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3758 3759 if (pOpenDev == NULL) { 3760 return FALSE; 3761 } 3762 3763 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { 3764 return FALSE; 3765 } 3766 3767 pSwapGroup = nvHsAllocSwapGroup(pOpenDev->pDevEvo, &pParams->request); 3768 3769 if (pSwapGroup == NULL) { 3770 return FALSE; 3771 } 3772 3773 handle = nvEvoCreateApiHandle(&pOpenDev->swapGroupHandles, pSwapGroup); 3774 3775 if (handle == 0) { 3776 nvHsFreeSwapGroup(pOpenDev->pDevEvo, pSwapGroup); 3777 return FALSE; 3778 } 3779 3780 pParams->reply.swapGroupHandle = handle; 3781 3782 return TRUE; 3783 } 3784 3785 static NvBool FreeSwapGroup( 3786 struct NvKmsPerOpen *pOpen, 3787 void *pParamsVoid) 3788 { 3789 struct NvKmsFreeSwapGroupParams *pParams = pParamsVoid; 3790 struct NvKmsPerOpenDev *pOpenDev; 3791 NVSwapGroupRec *pSwapGroup; 3792 NvKmsSwapGroupHandle handle = pParams->request.swapGroupHandle; 3793 3794 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3795 3796 if (pOpenDev == NULL) { 3797 return FALSE; 3798 } 3799 3800 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { 3801 return FALSE; 3802 } 3803 3804 pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles, 3805 handle); 3806 if (pSwapGroup == NULL) { 3807 return FALSE; 3808 } 3809 3810 nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle); 3811 3812 nvHsFreeSwapGroup(pOpenDev->pDevEvo, pSwapGroup); 3813 3814 return TRUE; 3815 } 
3816 3817 static NvBool JoinSwapGroup( 3818 struct NvKmsPerOpen *pOpen, 3819 void *pParamsVoid) 3820 { 3821 struct NvKmsJoinSwapGroupParams *pParams = pParamsVoid; 3822 const struct NvKmsJoinSwapGroupRequestOneMember *pMember = 3823 pParams->request.member; 3824 NvU32 i; 3825 NvBool anySwapGroupsPending = FALSE; 3826 NVHsJoinSwapGroupWorkArea *pJoinSwapGroupWorkArea; 3827 3828 if ((pParams->request.numMembers == 0) || 3829 (pParams->request.numMembers > 3830 ARRAY_LEN(pParams->request.member))) { 3831 return FALSE; 3832 } 3833 3834 pJoinSwapGroupWorkArea = nvCalloc(pParams->request.numMembers, 3835 sizeof(NVHsJoinSwapGroupWorkArea)); 3836 3837 if (!pJoinSwapGroupWorkArea) { 3838 return FALSE; 3839 } 3840 3841 /* 3842 * When a client is joining multiple swap groups simultaneously, all of its 3843 * deferred request fifos must enter the pendingJoined state if any of the 3844 * swap groups it's joining have pending flips. Otherwise, this sequence 3845 * can lead to a deadlock: 3846 * 3847 * - Client 0 joins DRF 0 to SG 0, DRF 1 to SG 1, with SG 0 and SG 1 3848 * fliplocked 3849 * - Client 0 submits DRF 0 ready, SG 0 flips, but the flip won't complete 3850 * and [Client 0.DRF 0] won't be released until SG 1 flips due to 3851 * fliplock 3852 * - Client 1 joins DRF 0 to SG 0, DRF 1 to SG 1 3853 * - Client 0 submits DRF 1 ready, but SG 1 doesn't flip because 3854 * [Client 1.DRF 0] has joined. 3855 * 3856 * With the pendingJoined behavior, this sequence works as follows: 3857 * 3858 * - Client 0 joins DRF 0 to SG 0, DRF 1 to SG 1, with SG 0 and SG 1 3859 * fliplocked 3860 * - Client 0 submits DRF 0 ready, SG 0 flips, but the flip won't complete 3861 * and [Client 0.DRF 0] won't be released until SG 1 flips due to 3862 * fliplock 3863 * - Client 1 joins DRF 0 to SG 0, DRF 1 to SG 1, but both enter the 3864 * pendingJoined state because [Client 0.DRF 0] has a pending flip. 
3865 * - Client 0 submits DRF 1 ready, both swap groups flip, Client 0's 3866 * DRFs are both released, and Client 1's DRFs both leave the 3867 * pendingJoined state. 3868 */ 3869 for (i = 0; i < pParams->request.numMembers; i++) { 3870 struct NvKmsPerOpenDev *pOpenDev; 3871 NVSwapGroupRec *pSwapGroup; 3872 NVDeferredRequestFifoRec *pDeferredRequestFifo; 3873 struct NvKmsPerOpen *pEventOpenFd = NULL; 3874 NvKmsDeviceHandle deviceHandle = pMember[i].deviceHandle; 3875 NvKmsSwapGroupHandle swapGroupHandle = pMember[i].swapGroupHandle; 3876 NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle = 3877 pMember[i].deferredRequestFifoHandle; 3878 3879 pOpenDev = GetPerOpenDev(pOpen, deviceHandle); 3880 3881 if (pOpenDev == NULL) { 3882 goto fail; 3883 } 3884 3885 pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles, 3886 swapGroupHandle); 3887 3888 if (pSwapGroup == NULL) { 3889 goto fail; 3890 } 3891 3892 if (pSwapGroup->pendingFlip) { 3893 anySwapGroupsPending = TRUE; 3894 } 3895 3896 /* 3897 * In addition to the check for pending swap groups above, validate 3898 * the remainder of the request now. 3899 */ 3900 3901 /* 3902 * Prevent pSwapGroup->nMembers from overflowing NV_U32_MAX. 3903 * 3904 * Ideally we would want to count how many members are being added to 3905 * each swap group in the request, but as an optimization, just verify 3906 * that the number of {fifo, swapgroup} tuples joining would not 3907 * overflow any swapgroup even if every one was joining the same 3908 * swapgroup. 3909 */ 3910 if (NV_U32_MAX - pSwapGroup->nMembers < pParams->request.numMembers) { 3911 goto fail; 3912 } 3913 3914 pDeferredRequestFifo = 3915 nvEvoGetPointerFromApiHandle( 3916 &pOpenDev->deferredRequestFifoHandles, 3917 deferredRequestFifoHandle); 3918 3919 if (pDeferredRequestFifo == NULL) { 3920 goto fail; 3921 } 3922 3923 /* 3924 * If the pDeferredRequestFifo is already a member of a SwapGroup, then 3925 * fail. 
3926 */ 3927 if (pDeferredRequestFifo->swapGroup.pSwapGroup != NULL) { 3928 goto fail; 3929 } 3930 3931 if (pMember[i].unicastEvent.specified) { 3932 pEventOpenFd = nvkms_get_per_open_data(pMember[i].unicastEvent.fd); 3933 3934 if (pEventOpenFd == NULL) { 3935 goto fail; 3936 } 3937 3938 if (!PerOpenIsValidForUnicastEvent(pEventOpenFd)) { 3939 goto fail; 3940 } 3941 } 3942 3943 pJoinSwapGroupWorkArea[i].pDevEvo = pOpenDev->pDevEvo; 3944 pJoinSwapGroupWorkArea[i].pSwapGroup = pSwapGroup; 3945 pJoinSwapGroupWorkArea[i].pDeferredRequestFifo = pDeferredRequestFifo; 3946 pJoinSwapGroupWorkArea[i].pEventOpenFd = pEventOpenFd; 3947 pJoinSwapGroupWorkArea[i].enabledHeadSurface = FALSE; 3948 } 3949 3950 if (!nvHsJoinSwapGroup(pJoinSwapGroupWorkArea, 3951 pParams->request.numMembers, 3952 anySwapGroupsPending)) { 3953 goto fail; 3954 } 3955 3956 /* Beyond this point, the function cannot fail. */ 3957 3958 for (i = 0; i < pParams->request.numMembers; i++) { 3959 struct NvKmsPerOpen *pEventOpenFd = 3960 pJoinSwapGroupWorkArea[i].pEventOpenFd; 3961 NVDeferredRequestFifoRec *pDeferredRequestFifo = 3962 pJoinSwapGroupWorkArea[i].pDeferredRequestFifo; 3963 3964 if (pEventOpenFd) { 3965 pDeferredRequestFifo->swapGroup.pOpenUnicastEvent = pEventOpenFd; 3966 3967 pEventOpenFd->unicastEvent.type = 3968 NvKmsUnicastEventTypeDeferredRequest; 3969 pEventOpenFd->unicastEvent.e.deferred.pDeferredRequestFifo = 3970 pDeferredRequestFifo; 3971 3972 pEventOpenFd->type = NvKmsPerOpenTypeUnicastEvent; 3973 } 3974 } 3975 3976 nvFree(pJoinSwapGroupWorkArea); 3977 return TRUE; 3978 3979 fail: 3980 nvFree(pJoinSwapGroupWorkArea); 3981 return FALSE; 3982 } 3983 3984 static NvBool LeaveSwapGroup( 3985 struct NvKmsPerOpen *pOpen, 3986 void *pParamsVoid) 3987 { 3988 struct NvKmsLeaveSwapGroupParams *pParams = pParamsVoid; 3989 const struct NvKmsLeaveSwapGroupRequestOneMember *pMember = 3990 pParams->request.member; 3991 NvU32 i; 3992 3993 if ((pParams->request.numMembers == 0) || 3994 
(pParams->request.numMembers > 3995 ARRAY_LEN(pParams->request.member))) { 3996 return FALSE; 3997 } 3998 3999 /* 4000 * Validate all handles passed by the caller and fail if any are invalid. 4001 */ 4002 for (i = 0; i < pParams->request.numMembers; i++) { 4003 struct NvKmsPerOpenDev *pOpenDev; 4004 NVDeferredRequestFifoRec *pDeferredRequestFifo; 4005 NvKmsDeviceHandle deviceHandle = 4006 pMember[i].deviceHandle; 4007 NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle = 4008 pMember[i].deferredRequestFifoHandle; 4009 4010 pOpenDev = GetPerOpenDev(pOpen, deviceHandle); 4011 4012 if (pOpenDev == NULL) { 4013 return FALSE; 4014 } 4015 4016 pDeferredRequestFifo = 4017 nvEvoGetPointerFromApiHandle( 4018 &pOpenDev->deferredRequestFifoHandles, 4019 deferredRequestFifoHandle); 4020 4021 if (pDeferredRequestFifo == NULL) { 4022 return FALSE; 4023 } 4024 4025 if (pDeferredRequestFifo->swapGroup.pSwapGroup == NULL) { 4026 return FALSE; 4027 } 4028 } 4029 4030 /* Beyond this point, the function cannot fail. 
*/ 4031 4032 for (i = 0; i < pParams->request.numMembers; i++) { 4033 struct NvKmsPerOpenDev *pOpenDev; 4034 NVDeferredRequestFifoRec *pDeferredRequestFifo; 4035 NvKmsDeviceHandle deviceHandle = 4036 pMember[i].deviceHandle; 4037 NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle = 4038 pMember[i].deferredRequestFifoHandle; 4039 4040 pOpenDev = GetPerOpenDev(pOpen, deviceHandle); 4041 4042 pDeferredRequestFifo = 4043 nvEvoGetPointerFromApiHandle( 4044 &pOpenDev->deferredRequestFifoHandles, 4045 deferredRequestFifoHandle); 4046 4047 nvHsLeaveSwapGroup(pOpenDev->pDevEvo, pDeferredRequestFifo, 4048 FALSE /* teardown */); 4049 } 4050 4051 return TRUE; 4052 } 4053 4054 static NvBool SetSwapGroupClipList( 4055 struct NvKmsPerOpen *pOpen, 4056 void *pParamsVoid) 4057 { 4058 struct NvKmsSetSwapGroupClipListParams *pParams = pParamsVoid; 4059 struct NvKmsPerOpenDev *pOpenDev; 4060 NVSwapGroupRec *pSwapGroup; 4061 struct NvKmsRect *pClipList; 4062 NvBool ret; 4063 4064 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 4065 4066 if (pOpenDev == NULL) { 4067 return FALSE; 4068 } 4069 4070 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { 4071 return FALSE; 4072 } 4073 4074 pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles, 4075 pParams->request.swapGroupHandle); 4076 4077 if (pSwapGroup == NULL) { 4078 return FALSE; 4079 } 4080 4081 /* 4082 * Create a copy of the passed-in pClipList, to be stored in pSwapGroup. 4083 * Copy from the client using nvkms_copyin() or nvkms_memcpy(), depending on 4084 * the clientType. 4085 * 4086 * We do not use the nvKmsIoctl() prepUser/doneUser infrastructure here 4087 * because that would require creating two copies of pClipList in the 4088 * user-space client case: one allocated in prepUser and freed in doneUser, 4089 * and a second in nvHsSetSwapGroupClipList(). 
4090 */ 4091 if (pParams->request.nClips == 0) { 4092 pClipList = NULL; 4093 } else { 4094 const size_t len = sizeof(struct NvKmsRect) * pParams->request.nClips; 4095 4096 if ((pParams->request.pClipList == 0) || 4097 !nvKmsNvU64AddressIsSafe(pParams->request.pClipList)) { 4098 return FALSE; 4099 } 4100 4101 pClipList = nvAlloc(len); 4102 4103 if (pClipList == NULL) { 4104 return FALSE; 4105 } 4106 4107 if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) { 4108 int status = 4109 nvkms_copyin(pClipList, pParams->request.pClipList, len); 4110 4111 if (status != 0) { 4112 nvFree(pClipList); 4113 return FALSE; 4114 } 4115 } else { 4116 const void *pKernelPointer = 4117 nvKmsNvU64ToPointer(pParams->request.pClipList); 4118 4119 nvkms_memcpy(pClipList, pKernelPointer, len); 4120 } 4121 } 4122 4123 ret = nvHsSetSwapGroupClipList( 4124 pOpenDev->pDevEvo, 4125 pSwapGroup, 4126 pParams->request.nClips, 4127 pClipList); 4128 4129 if (!ret) { 4130 nvFree(pClipList); 4131 } 4132 4133 return ret; 4134 } 4135 4136 static NvBool SwitchMux( 4137 struct NvKmsPerOpen *pOpen, 4138 void *pParamsVoid) 4139 { 4140 struct NvKmsSwitchMuxParams *pParams = pParamsVoid; 4141 const struct NvKmsSwitchMuxRequest *r = &pParams->request; 4142 NVDpyEvoPtr pDpyEvo; 4143 4144 pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId); 4145 if (pDpyEvo == NULL) { 4146 return FALSE; 4147 } 4148 4149 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(GetPerOpenDev(pOpen, r->deviceHandle))) { 4150 return FALSE; 4151 } 4152 4153 switch (pParams->request.operation) { 4154 case NVKMS_SWITCH_MUX_PRE: 4155 return nvRmMuxPre(pDpyEvo, r->state); 4156 case NVKMS_SWITCH_MUX: 4157 return nvRmMuxSwitch(pDpyEvo, r->state); 4158 case NVKMS_SWITCH_MUX_POST: 4159 return nvRmMuxPost(pDpyEvo, r->state); 4160 default: 4161 return FALSE; 4162 } 4163 } 4164 4165 static NvBool GetMuxState( 4166 struct NvKmsPerOpen *pOpen, 4167 void *pParamsVoid) 4168 { 4169 struct NvKmsGetMuxStateParams *pParams = pParamsVoid; 4170 
const struct NvKmsGetMuxStateRequest *r = &pParams->request; 4171 NVDpyEvoPtr pDpyEvo; 4172 4173 pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId); 4174 if (pDpyEvo == NULL) { 4175 return FALSE; 4176 } 4177 4178 pParams->reply.state = nvRmMuxState(pDpyEvo); 4179 4180 return pParams->reply.state != MUX_STATE_GET; 4181 } 4182 4183 static NvBool ExportVrrSemaphoreSurface( 4184 struct NvKmsPerOpen *pOpen, 4185 void *pParamsVoid) 4186 { 4187 struct NvKmsExportVrrSemaphoreSurfaceParams *pParams = pParamsVoid; 4188 const struct NvKmsExportVrrSemaphoreSurfaceRequest *req = &pParams->request; 4189 const struct NvKmsPerOpenDev *pOpenDev = 4190 GetPerOpenDev(pOpen, pParams->request.deviceHandle); 4191 4192 if (pOpenDev == NULL) { 4193 return FALSE; 4194 } 4195 4196 return nvExportVrrSemaphoreSurface(pOpenDev->pDevEvo, req->memFd); 4197 } 4198 4199 static void EnableAndSetupVblankSyncObject(NVDispEvoRec *pDispEvo, 4200 const NvU32 apiHead, 4201 NVVblankSyncObjectRec *pVblankSyncObject, 4202 NVEvoUpdateState *pUpdateState) 4203 { 4204 /* 4205 * The core channel re-allocation code path may end up allocating 4206 * the fewer number of sync objects than the number of sync objects which 4207 * are allocated and in use by the NVKMS clients, hCtxDma = 0 if the 4208 * nvAllocCoreChannelEvo()-> InitApiHeadState()-> nvRmAllocCoreRGSyncpts() 4209 * code path failes to re-allocate that sync object. 
4210 */ 4211 if (nvApiHeadIsActive(pDispEvo, apiHead) && 4212 (pVblankSyncObject->evoSyncpt.surfaceDesc.ctxDmaHandle != 0)) { 4213 NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); 4214 4215 nvAssert(head != NV_INVALID_HEAD); 4216 4217 pDispEvo->pDevEvo->hal->ConfigureVblankSyncObject( 4218 pDispEvo->pDevEvo, 4219 pDispEvo->headState[head].timings.rasterBlankStart.y, 4220 head, 4221 pVblankSyncObject->index, 4222 &pVblankSyncObject->evoSyncpt.surfaceDesc, 4223 pUpdateState); 4224 4225 pVblankSyncObject->enabled = TRUE; 4226 } 4227 4228 pVblankSyncObject->inUse = TRUE; 4229 } 4230 4231 static void EnableAndSetupVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo) 4232 { 4233 /* 4234 * An NVEvoUpdateState has disp-scope, and we will only have 4235 * one disp when programming syncpts. 4236 */ 4237 NVEvoUpdateState updateState = { }; 4238 struct NvKmsPerOpen *pOpen; 4239 4240 if (!pDevEvo->supportsSyncpts || 4241 !pDevEvo->hal->caps.supportsVblankSyncObjects) { 4242 return; 4243 } 4244 4245 /* If Syncpts are supported, we're on Orin, which only has one display. 
*/ 4246 nvAssert(pDevEvo->nDispEvo == 1); 4247 4248 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { 4249 struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); 4250 struct NvKmsPerOpenDisp *pOpenDisp; 4251 NvKmsGenericHandle disp; 4252 4253 if (pOpenDev == NULL) { 4254 continue; 4255 } 4256 4257 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, 4258 pOpenDisp, disp) { 4259 4260 nvAssert(pOpenDisp->pDispEvo == pDevEvo->pDispEvo[0]); 4261 4262 for (NvU32 apiHead = 0; apiHead < 4263 ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) { 4264 NVEvoApiHandlesRec *pHandles = 4265 &pOpenDisp->vblankSyncObjectHandles[apiHead]; 4266 NVVblankSyncObjectRec *pVblankSyncObject; 4267 NvKmsVblankSyncObjectHandle handle; 4268 4269 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles, 4270 pVblankSyncObject, handle) { 4271 EnableAndSetupVblankSyncObject(pOpenDisp->pDispEvo, apiHead, 4272 pVblankSyncObject, 4273 &updateState); 4274 } 4275 } 4276 } 4277 } 4278 4279 if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) { 4280 nvEvoUpdateAndKickOff(pDevEvo->pDispEvo[0], TRUE, &updateState, 4281 TRUE); 4282 } 4283 } 4284 4285 static NvBool EnableVblankSyncObject( 4286 struct NvKmsPerOpen *pOpen, 4287 void *pParamsVoid) 4288 { 4289 struct NvKmsEnableVblankSyncObjectParams *pParams = pParamsVoid; 4290 struct NvKmsPerOpenDisp* pOpenDisp = NULL; 4291 NVDispApiHeadStateEvoRec *pApiHeadState = NULL; 4292 NVDevEvoPtr pDevEvo = NULL; 4293 NvKmsVblankSyncObjectHandle vblankHandle = 0; 4294 int freeVblankSyncObjectIdx = 0; 4295 NvU32 apiHead = pParams->request.head; 4296 NVVblankSyncObjectRec *vblankSyncObjects = NULL; 4297 NVDispEvoPtr pDispEvo = NULL; 4298 NVEvoUpdateState updateState = { }; 4299 4300 /* Obtain the Head State. 
*/ 4301 pOpenDisp = GetPerOpenDisp(pOpen, pParams->request.deviceHandle, 4302 pParams->request.dispHandle); 4303 if (pOpenDisp == NULL) { 4304 nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp."); 4305 return FALSE; 4306 } 4307 4308 pDispEvo = pOpenDisp->pDispEvo; 4309 pDevEvo = pDispEvo->pDevEvo; 4310 4311 /* Ensure Vblank Sync Object API is supported on this chip. */ 4312 if (!pDevEvo->supportsSyncpts || 4313 !pDevEvo->hal->caps.supportsVblankSyncObjects) { 4314 nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not " 4315 "supported on this chip."); 4316 return FALSE; 4317 } 4318 4319 /* Validate requested head because it comes from user input. */ 4320 if (apiHead >= ARRAY_LEN(pDispEvo->apiHeadState)) { 4321 nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", apiHead); 4322 return FALSE; 4323 } 4324 pApiHeadState = &pDispEvo->apiHeadState[apiHead]; 4325 vblankSyncObjects = pApiHeadState->vblankSyncObjects; 4326 pDevEvo = pDispEvo->pDevEvo; 4327 4328 /* 4329 * Find the available sync object. Sync Objects with handle=0 are not in 4330 * use. 4331 */ 4332 for (freeVblankSyncObjectIdx = 0; 4333 freeVblankSyncObjectIdx < pApiHeadState->numVblankSyncObjectsCreated; 4334 freeVblankSyncObjectIdx++) { 4335 if (!vblankSyncObjects[freeVblankSyncObjectIdx].inUse) { 4336 break; 4337 } 4338 } 4339 if (freeVblankSyncObjectIdx == pApiHeadState->numVblankSyncObjectsCreated) { 4340 return FALSE; 4341 } 4342 4343 /* Save the created vblank handle if it is valid. 
*/ 4344 vblankHandle = 4345 nvEvoCreateApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead], 4346 &vblankSyncObjects[freeVblankSyncObjectIdx]); 4347 if (vblankHandle == 0) { 4348 nvEvoLogDebug(EVO_LOG_ERROR, "Unable to create vblank handle."); 4349 return FALSE; 4350 } 4351 4352 EnableAndSetupVblankSyncObject(pDispEvo, apiHead, 4353 &vblankSyncObjects[freeVblankSyncObjectIdx], 4354 &updateState); 4355 if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { 4356 nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE); 4357 } 4358 4359 /* Populate the reply field. */ 4360 pParams->reply.vblankHandle = vblankHandle; 4361 /* Note: the syncpt ID is NOT the same as the vblank handle. */ 4362 pParams->reply.syncptId = 4363 pApiHeadState->vblankSyncObjects[freeVblankSyncObjectIdx].evoSyncpt.id; 4364 4365 return TRUE; 4366 } 4367 4368 static void DisableAndCleanVblankSyncObject(NVDispEvoRec *pDispEvo, 4369 const NvU32 apiHead, 4370 NVVblankSyncObjectRec *pVblankSyncObject, 4371 NVEvoUpdateState *pUpdateState) 4372 { 4373 if (nvApiHeadIsActive(pDispEvo, apiHead)) { 4374 NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead); 4375 4376 nvAssert(head != NV_INVALID_HEAD); 4377 4378 /* 4379 * Instruct the hardware to disable the semaphore corresponding to this 4380 * syncpt. The Update State will be populated. 4381 * 4382 * Note: Using dummy zero value for rasterLine because the disable 4383 * codepath in ConfigureVblankSyncObject() does not use that argument. 4384 */ 4385 pDispEvo->pDevEvo->hal->ConfigureVblankSyncObject(pDispEvo->pDevEvo, 4386 0, /* rasterLine */ 4387 head, 4388 pVblankSyncObject->index, 4389 NULL, /* pSurfaceDesc */ 4390 pUpdateState); 4391 /* 4392 * Note: it is the caller's responsibility to call 4393 * nvEvoUpdateAndKickOff(). 
4394 */ 4395 } 4396 4397 pVblankSyncObject->inUse = FALSE; 4398 pVblankSyncObject->enabled = FALSE; 4399 } 4400 4401 static void DisableAndCleanVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo) 4402 { 4403 /* 4404 * An NVEvoUpdateState has disp-scope, and we will only have 4405 * one disp when programming syncpts. 4406 */ 4407 NVEvoUpdateState updateState = { }; 4408 struct NvKmsPerOpen *pOpen; 4409 4410 if (!pDevEvo->supportsSyncpts || 4411 !pDevEvo->hal->caps.supportsVblankSyncObjects) { 4412 return; 4413 } 4414 4415 /* If Syncpts are supported, we're on Orin, which only has one display. */ 4416 nvAssert(pDevEvo->nDispEvo == 1); 4417 4418 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { 4419 struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); 4420 struct NvKmsPerOpenDisp *pOpenDisp; 4421 NvKmsGenericHandle disp; 4422 4423 if (pOpenDev == NULL) { 4424 continue; 4425 } 4426 4427 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, 4428 pOpenDisp, disp) { 4429 4430 nvAssert(pOpenDisp->pDispEvo == pDevEvo->pDispEvo[0]); 4431 4432 for (NvU32 apiHead = 0; apiHead < 4433 ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) { 4434 NVEvoApiHandlesRec *pHandles = 4435 &pOpenDisp->vblankSyncObjectHandles[apiHead]; 4436 NVVblankSyncObjectRec *pVblankSyncObject; 4437 NvKmsVblankSyncObjectHandle handle; 4438 4439 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles, 4440 pVblankSyncObject, handle) { 4441 DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead, 4442 pVblankSyncObject, 4443 &updateState); 4444 } 4445 } 4446 } 4447 } 4448 4449 if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) { 4450 nvEvoUpdateAndKickOff(pDevEvo->pDispEvo[0], TRUE, &updateState, 4451 TRUE); 4452 } 4453 } 4454 4455 static NvBool DisableVblankSyncObject( 4456 struct NvKmsPerOpen *pOpen, 4457 void *pParamsVoid) 4458 { 4459 struct NvKmsDisableVblankSyncObjectParams *pParams = pParamsVoid; 4460 struct NvKmsPerOpenDisp* pOpenDisp = 4461 
GetPerOpenDisp(pOpen, pParams->request.deviceHandle, 4462 pParams->request.dispHandle); 4463 NVVblankSyncObjectRec *pVblankSyncObject = NULL; 4464 NvU32 apiHead = pParams->request.head; 4465 NVDevEvoPtr pDevEvo = NULL; 4466 NVEvoUpdateState updateState = { }; 4467 4468 if (pOpenDisp == NULL) { 4469 nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp."); 4470 return FALSE; 4471 } 4472 4473 pDevEvo = pOpenDisp->pDispEvo->pDevEvo; 4474 4475 /* Ensure Vblank Sync Object API is supported on this chip. */ 4476 if (!pDevEvo->supportsSyncpts || 4477 !pDevEvo->hal->caps.supportsVblankSyncObjects) { 4478 nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not " 4479 "supported on this chip."); 4480 return FALSE; 4481 } 4482 4483 /* Validate requested head because it comes from user input. */ 4484 if (apiHead >= ARRAY_LEN(pOpenDisp->pDispEvo->apiHeadState)) { 4485 nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", apiHead); 4486 return FALSE; 4487 } 4488 4489 /* Mark the indicated object as free. */ 4490 pVblankSyncObject = 4491 nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead], 4492 pParams->request.vblankHandle); 4493 if (pVblankSyncObject == NULL) { 4494 nvEvoLogDebug(EVO_LOG_ERROR, "unable to find object with provided " 4495 "handle."); 4496 return FALSE; 4497 } 4498 4499 DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead, 4500 pVblankSyncObject, &updateState); 4501 4502 if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { 4503 /* 4504 * Instruct hardware to execute the staged commands from the 4505 * ConfigureVblankSyncObject() call inside of the 4506 * DisableAndCleanVblankSyncObject() call above. This will set up and 4507 * wait for a notification that the hardware execution has completed. 4508 */ 4509 nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, TRUE); 4510 } 4511 4512 /* Remove the handle from the map. 
*/ 4513 nvEvoDestroyApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead], 4514 pParams->request.vblankHandle); 4515 4516 return TRUE; 4517 } 4518 4519 static void NotifyVblankCallback(NVDispEvoRec *pDispEvo, 4520 NVVBlankCallbackPtr pCallbackData) 4521 { 4522 struct NvKmsPerOpen *pEventOpenFd = pCallbackData->pUserData; 4523 4524 /* 4525 * NOTIFY_VBLANK events are single-shot so notify the unicast FD, then 4526 * immediately unregister the callback. The unregister step is done in 4527 * nvRemoveUnicastEvent which resets the unicast event data. 4528 */ 4529 nvSendUnicastEvent(pEventOpenFd); 4530 nvRemoveUnicastEvent(pEventOpenFd); 4531 } 4532 4533 static NvBool NotifyVblank( 4534 struct NvKmsPerOpen *pOpen, 4535 void *pParamsVoid) 4536 { 4537 struct NvKmsNotifyVblankParams *pParams = pParamsVoid; 4538 struct NvKmsPerOpen *pEventOpenFd = NULL; 4539 NVVBlankCallbackPtr pCallbackData = NULL; 4540 struct NvKmsPerOpenDisp* pOpenDisp = 4541 GetPerOpenDisp(pOpen, pParams->request.deviceHandle, 4542 pParams->request.dispHandle); 4543 const NvU32 apiHead = pParams->request.head; 4544 4545 pEventOpenFd = nvkms_get_per_open_data(pParams->request.unicastEvent.fd); 4546 4547 if (pEventOpenFd == NULL) { 4548 return NV_FALSE; 4549 } 4550 4551 if (!PerOpenIsValidForUnicastEvent(pEventOpenFd)) { 4552 return NV_FALSE; 4553 } 4554 4555 pEventOpenFd->type = NvKmsPerOpenTypeUnicastEvent; 4556 4557 pCallbackData = nvApiHeadRegisterVBlankCallback(pOpenDisp->pDispEvo, 4558 apiHead, 4559 NotifyVblankCallback, 4560 pEventOpenFd); 4561 if (pCallbackData == NULL) { 4562 return NV_FALSE; 4563 } 4564 4565 pEventOpenFd->unicastEvent.type = NvKmsUnicastEventTypeVblankNotification; 4566 pEventOpenFd->unicastEvent.e.vblankNotification.pOpenDisp = pOpenDisp; 4567 pEventOpenFd->unicastEvent.e.vblankNotification.apiHead = apiHead; 4568 pEventOpenFd->unicastEvent.e.vblankNotification.hCallback 4569 = nvEvoCreateApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead], 4570 pCallbackData); 4571 4572 if 
(pEventOpenFd->unicastEvent.e.vblankNotification.hCallback == 0) { 4573 nvApiHeadUnregisterVBlankCallback(pOpenDisp->pDispEvo, pCallbackData); 4574 return NV_FALSE; 4575 } 4576 4577 return NV_TRUE; 4578 } 4579 4580 static NvBool SetFlipLockGroup( 4581 struct NvKmsPerOpen *pOpen, 4582 void *pParamsVoid) 4583 { 4584 struct NvKmsSetFlipLockGroupParams *pParams = pParamsVoid; 4585 const struct NvKmsSetFlipLockGroupRequest *pRequest = &pParams->request; 4586 /* Fill in this array as we look up the pDevEvo from the given device 4587 * handles, so that later processing can use it without converting 4588 * deviceHandle -> pDevEvo again. */ 4589 NVDevEvoPtr pDevEvo[NV_MAX_SUBDEVICES] = { }; 4590 NvU32 dev; 4591 4592 /* Ensure we don't overrun the pDevEvo array. */ 4593 ct_assert(ARRAY_LEN(pRequest->dev) == NV_MAX_SUBDEVICES); 4594 4595 for (dev = 0; dev < ARRAY_LEN(pRequest->dev); dev++) { 4596 const struct NvKmsSetFlipLockGroupOneDev *pRequestDev = 4597 &pRequest->dev[dev]; 4598 struct NvKmsPerOpenDev *pOpenDev = NULL; 4599 NVDispEvoPtr pDispEvo; 4600 NvU32 dispIndex; 4601 NvU32 i; 4602 4603 if (pRequestDev->requestedDispsBitMask == 0) { 4604 break; 4605 } 4606 4607 pOpenDev = GetPerOpenDev(pOpen, pRequestDev->deviceHandle); 4608 4609 if (pOpenDev == NULL) { 4610 return FALSE; 4611 } 4612 4613 pDevEvo[dev] = pOpenDev->pDevEvo; 4614 4615 /* The caller must be the modeset owner for every specified device. */ 4616 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) { 4617 return FALSE; 4618 } 4619 4620 /* Do not allow the same device to be specified twice. */ 4621 for (i = 0; i < dev; i++) { 4622 if (pDevEvo[i] == pDevEvo[dev]) { 4623 return FALSE; 4624 } 4625 } 4626 4627 /* Check for invalid disps in requestedDispsBitMask. */ 4628 if (nvHasBitAboveMax(pRequestDev->requestedDispsBitMask, 4629 pDevEvo[dev]->nDispEvo)) { 4630 return FALSE; 4631 } 4632 4633 /* Check for invalid heads in requestedHeadsBitMask. 
*/ 4634 FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo[dev]) { 4635 const NvU32 requestedHeadsBitMask = 4636 pRequestDev->disp[dispIndex].requestedHeadsBitMask; 4637 NvU32 apiHead; 4638 4639 if (requestedHeadsBitMask == 0) { 4640 return FALSE; 4641 } 4642 if (nvHasBitAboveMax(requestedHeadsBitMask, 4643 pDevEvo[dev]->numHeads)) { 4644 return FALSE; 4645 } 4646 4647 /* 4648 * Verify that all API heads in requestedHeadsBitMask are active. 4649 * The requested fliplock group will be implicitly disabled if any of 4650 * these heads are specified in a modeset. 4651 */ 4652 for (apiHead = 0; apiHead < pDevEvo[dev]->numHeads; apiHead++) { 4653 if ((requestedHeadsBitMask & (1 << apiHead)) != 0) { 4654 if (!nvApiHeadIsActive(pDispEvo, apiHead)) { 4655 return FALSE; 4656 } 4657 } 4658 } 4659 } 4660 } 4661 4662 /* Verify that at least one device was specified */ 4663 if (pDevEvo[0] == NULL) { 4664 return FALSE; 4665 } 4666 4667 return nvSetFlipLockGroup(pDevEvo, pRequest); 4668 } 4669 4670 /*! 4671 * Perform the ioctl operation requested by the client. 4672 * 4673 * \param[in,out] pOpenVoid The per-open data, allocated by 4674 * nvKmsOpen(). 4675 * \param[in] cmdOpaque The NVKMS_IOCTL_ operation to perform. 4676 * \param[in,out] paramsAddress A pointer, in the client process's 4677 * address space, to the parameter 4678 * structure. This is cmd-specific. 4679 * \param[in] paramSize The client-specified size of the params. 4680 * 4681 * \return Return TRUE if the ioctl operation was successfully 4682 * performed. Otherwise, return FALSE. 
 */
NvBool nvKmsIoctl(
    void *pOpenVoid,
    const NvU32 cmdOpaque,
    const NvU64 paramsAddress,
    const size_t paramSize)
{
    /*
     * Per-command dispatch table, indexed by NVKMS_IOCTL_ command value.
     * Each entry records the handler, optional user-parameter prep/done
     * hooks, and the sizes/offsets used to marshal the request and reply
     * halves of the params structure across the user/kernel boundary.
     */
    static const struct {

        NvBool (*proc)(struct NvKmsPerOpen *pOpen, void *pParamsVoid);
        NvBool (*prepUser)(void *pParamsVoid, void *pExtraStateVoid);
        NvBool (*doneUser)(void *pParamsVoid, void *pExtraStateVoid);
        const size_t paramSize;
        /* Size of extra state tracked for user parameters */
        const size_t extraSize;

        const size_t requestSize;
        const size_t requestOffset;

        const size_t replySize;
        const size_t replyOffset;

    } dispatch[] = {

/*
 * Build one dispatch entry; sizes and offsets are derived from the
 * NvKms<Func>{Params,Request,Reply} naming convention.
 */
#define _ENTRY_WITH_USER(_cmd, _func, _prepUser, _doneUser, _extraSize)   \
    [_cmd] = {                                                            \
        .proc          = _func,                                           \
        .prepUser      = _prepUser,                                       \
        .doneUser      = _doneUser,                                       \
        .paramSize     = sizeof(struct NvKms##_func##Params),             \
        .requestSize   = sizeof(struct NvKms##_func##Request),            \
        .requestOffset = offsetof(struct NvKms##_func##Params, request),  \
        .replySize     = sizeof(struct NvKms##_func##Reply),              \
        .replyOffset   = offsetof(struct NvKms##_func##Params, reply),    \
        .extraSize     = _extraSize,                                      \
    }

#define ENTRY(_cmd, _func)                                                \
    _ENTRY_WITH_USER(_cmd, _func, NULL, NULL, 0)

/* Entry whose handler needs <Func>PrepUser/<Func>DoneUser marshaling hooks. */
#define ENTRY_CUSTOM_USER(_cmd, _func)                                    \
    _ENTRY_WITH_USER(_cmd, _func,                                         \
                     _func##PrepUser, _func##DoneUser,                    \
                     sizeof(struct NvKms##_func##ExtraUserState))

        ENTRY(NVKMS_IOCTL_ALLOC_DEVICE, AllocDevice),
        ENTRY(NVKMS_IOCTL_FREE_DEVICE, FreeDevice),
        ENTRY(NVKMS_IOCTL_QUERY_DISP, QueryDisp),
        ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, QueryConnectorStaticData),
        ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA, QueryConnectorDynamicData),
        ENTRY(NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, QueryDpyStaticData),
        ENTRY(NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, QueryDpyDynamicData),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE_INDEX, ValidateModeIndex),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE, ValidateMode),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_MODE, SetMode),
        ENTRY(NVKMS_IOCTL_SET_CURSOR_IMAGE, SetCursorImage),
        ENTRY(NVKMS_IOCTL_MOVE_CURSOR, MoveCursor),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_LUT, SetLut),
        ENTRY(NVKMS_IOCTL_IDLE_BASE_CHANNEL, IdleBaseChannel),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_FLIP, Flip),
        ENTRY(NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST,
              DeclareDynamicDpyInterest),
        ENTRY(NVKMS_IOCTL_REGISTER_SURFACE, RegisterSurface),
        ENTRY(NVKMS_IOCTL_UNREGISTER_SURFACE, UnregisterSurface),
        ENTRY(NVKMS_IOCTL_GRANT_SURFACE, GrantSurface),
        ENTRY(NVKMS_IOCTL_ACQUIRE_SURFACE, AcquireSurface),
        ENTRY(NVKMS_IOCTL_RELEASE_SURFACE, ReleaseSurface),
        ENTRY(NVKMS_IOCTL_SET_DPY_ATTRIBUTE, SetDpyAttribute),
        ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE, GetDpyAttribute),
        ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES,
              GetDpyAttributeValidValues),
        ENTRY(NVKMS_IOCTL_SET_DISP_ATTRIBUTE, SetDispAttribute),
        ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE, GetDispAttribute),
        ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES,
              GetDispAttributeValidValues),
        ENTRY(NVKMS_IOCTL_QUERY_FRAMELOCK, QueryFrameLock),
        ENTRY(NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE, SetFrameLockAttribute),
        ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE, GetFrameLockAttribute),
        ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES,
              GetFrameLockAttributeValidValues),
        ENTRY(NVKMS_IOCTL_GET_NEXT_EVENT, GetNextEvent),
        ENTRY(NVKMS_IOCTL_DECLARE_EVENT_INTEREST, DeclareEventInterest),
        ENTRY(NVKMS_IOCTL_CLEAR_UNICAST_EVENT, ClearUnicastEvent),
        ENTRY(NVKMS_IOCTL_SET_LAYER_POSITION, SetLayerPosition),
        ENTRY(NVKMS_IOCTL_GRAB_OWNERSHIP, GrabOwnership),
        ENTRY(NVKMS_IOCTL_RELEASE_OWNERSHIP, ReleaseOwnership),
        ENTRY(NVKMS_IOCTL_GRANT_PERMISSIONS, GrantPermissions),
        ENTRY(NVKMS_IOCTL_ACQUIRE_PERMISSIONS, AcquirePermissions),
        ENTRY(NVKMS_IOCTL_REVOKE_PERMISSIONS, RevokePermissions),
        ENTRY(NVKMS_IOCTL_QUERY_DPY_CRC32, QueryDpyCRC32),
        ENTRY(NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO,
              RegisterDeferredRequestFifo),
        ENTRY(NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO,
              UnregisterDeferredRequestFifo),
        ENTRY(NVKMS_IOCTL_ALLOC_SWAP_GROUP, AllocSwapGroup),
        ENTRY(NVKMS_IOCTL_FREE_SWAP_GROUP, FreeSwapGroup),
        ENTRY(NVKMS_IOCTL_JOIN_SWAP_GROUP, JoinSwapGroup),
        ENTRY(NVKMS_IOCTL_LEAVE_SWAP_GROUP, LeaveSwapGroup),
        ENTRY(NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST, SetSwapGroupClipList),
        ENTRY(NVKMS_IOCTL_GRANT_SWAP_GROUP, GrantSwapGroup),
        ENTRY(NVKMS_IOCTL_ACQUIRE_SWAP_GROUP, AcquireSwapGroup),
        ENTRY(NVKMS_IOCTL_RELEASE_SWAP_GROUP, ReleaseSwapGroup),
        ENTRY(NVKMS_IOCTL_SWITCH_MUX, SwitchMux),
        ENTRY(NVKMS_IOCTL_GET_MUX_STATE, GetMuxState),
        ENTRY(NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE, ExportVrrSemaphoreSurface),
        ENTRY(NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT, EnableVblankSyncObject),
        ENTRY(NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT, DisableVblankSyncObject),
        ENTRY(NVKMS_IOCTL_NOTIFY_VBLANK, NotifyVblank),
        ENTRY(NVKMS_IOCTL_SET_FLIPLOCK_GROUP, SetFlipLockGroup),
    };

    struct NvKmsPerOpen *pOpen = pOpenVoid;
    void *pParamsKernelPointer;
    NvBool ret;
    enum NvKmsIoctlCommand cmd = cmdOpaque;
    void *pExtraUserState = NULL;

    /* The first ioctl locks this per-open into the Ioctl type. */
    if (!AssignNvKmsPerOpenType(pOpen, NvKmsPerOpenTypeIoctl, TRUE)) {
        return FALSE;
    }

    /* Reject unknown commands and commands without a handler. */
    if (cmd >= ARRAY_LEN(dispatch)) {
        return FALSE;
    }

    if (dispatch[cmd].proc == NULL) {
        return FALSE;
    }

    /* The client-reported size must match the expected params size exactly. */
    if (paramSize != dispatch[cmd].paramSize) {
        return FALSE;
    }

    if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) {
        /*
         * User-space client: allocate a kernel copy of the params (plus any
         * extra per-command marshaling state) and copy in the request half.
         */
        pParamsKernelPointer = nvCalloc(1, paramSize + dispatch[cmd].extraSize);
        if (pParamsKernelPointer == NULL) {
            return FALSE;
        }

        if (dispatch[cmd].requestSize > 0) {
            int status =
                nvkms_copyin((char *) pParamsKernelPointer +
                             dispatch[cmd].requestOffset,
                             paramsAddress + dispatch[cmd].requestOffset,
                             dispatch[cmd].requestSize);
            if (status != 0) {
                nvFree(pParamsKernelPointer);
                return FALSE;
            }
        }

        if (dispatch[cmd].prepUser) {
            /* Extra marshaling state lives just past the params copy. */
            pExtraUserState = (char *)pParamsKernelPointer + paramSize;

            if (!dispatch[cmd].prepUser(pParamsKernelPointer,
                                        pExtraUserState)) {
                nvFree(pParamsKernelPointer);
                return FALSE;
            }
        }
    } else {
        /* Kernel-space client: params can be used in place. */
        pParamsKernelPointer = nvKmsNvU64ToPointer(paramsAddress);
    }

    ret = dispatch[cmd].proc(pOpen, pParamsKernelPointer);

    if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) {

        /* doneUser runs even when the handler failed, to release prepUser
         * state; its failure also fails the ioctl. */
        if (dispatch[cmd].doneUser) {
            pExtraUserState = (char *)pParamsKernelPointer + paramSize;

            if (!dispatch[cmd].doneUser(pParamsKernelPointer,
                                        pExtraUserState)) {
                ret = FALSE;
            }
        }

        /* Copy the reply half back out to the client, then free the copy. */
        if (dispatch[cmd].replySize > 0) {
            int status =
                nvkms_copyout(paramsAddress + dispatch[cmd].replyOffset,
                              (char *) pParamsKernelPointer +
                              dispatch[cmd].replyOffset,
                              dispatch[cmd].replySize);
            if (status != 0) {
                ret = FALSE;
            }
        }

        nvFree(pParamsKernelPointer);
    }

    return ret;
}


/*!
 * Close callback.
 *
 * \param[in,out]  pOpenVoid  The per-open data, allocated by nvKmsOpen().
 */
void nvKmsClose(void *pOpenVoid)
{
    struct NvKmsPerOpen *pOpen = pOpenVoid;

    if (pOpen == NULL) {
        return;
    }

    /*
     * First remove the pOpen from global tracking.  Otherwise, assertions can
     * fail in the free paths below -- the assertions check that the object
     * being freed is not tracked by any pOpen.
     */
    nvListDel(&pOpen->perOpenListEntry);

    if (pOpen->type == NvKmsPerOpenTypeIoctl) {

        struct NvKmsPerOpenEventListEntry *pEntry, *pEntryTmp;
        struct NvKmsPerOpenDev *pOpenDev;
        NvKmsGenericHandle dev;

        /* Release every device this client allocated or acquired. */
        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
                                            pOpenDev, dev) {
            FreeDeviceReference(pOpen, pOpenDev);
        }

        nvEvoDestroyApiHandles(&pOpen->ioctl.frameLockHandles);

        nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles);

        /* Drain any queued-but-undelivered events. */
        nvListForEachEntry_safe(pEntry, pEntryTmp,
                                &pOpen->ioctl.eventList, eventListEntry) {
            nvListDel(&pEntry->eventListEntry);
            nvFree(pEntry);
        }

        nvListDel(&pOpen->perOpenIoctlListEntry);
    }

    if (pOpen->type == NvKmsPerOpenTypeGrantSurface) {
        nvAssert(pOpen->grantSurface.pSurfaceEvo != NULL);
        nvEvoDecrementSurfaceStructRefCnt(pOpen->grantSurface.pSurfaceEvo);
    }

    if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) {
        nvAssert(pOpen->grantSwapGroup.pSwapGroup != NULL);
        nvHsDecrementSwapGroupRefCnt(pOpen->grantSwapGroup.pSwapGroup);
    }

    if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) {
        nvRemoveUnicastEvent(pOpen);
    }

    nvFree(pOpen);
}


/*!
 * Open callback.
 *
 * Allocate, initialize, and return an opaque pointer to an NvKmsPerOpen.
 *
 * \return  If successful, return an NvKmsPerOpen pointer.  Otherwise,
 *          return NULL.
4947 */ 4948 void *nvKmsOpen( 4949 NvU32 pid, 4950 enum NvKmsClientType clientType, 4951 nvkms_per_open_handle_t *pOpenKernel) 4952 { 4953 struct NvKmsPerOpen *pOpen = nvCalloc(1, sizeof(*pOpen)); 4954 4955 if (pOpen == NULL) { 4956 goto fail; 4957 } 4958 4959 pOpen->pid = pid; 4960 pOpen->clientType = clientType; 4961 pOpen->type = NvKmsPerOpenTypeUndefined; 4962 pOpen->pOpenKernel = pOpenKernel; 4963 4964 nvListAppend(&pOpen->perOpenListEntry, &perOpenList); 4965 4966 return pOpen; 4967 4968 fail: 4969 nvKmsClose(pOpen); 4970 return NULL; 4971 } 4972 4973 extern const char *const pNV_KMS_ID; 4974 4975 #if NVKMS_PROCFS_ENABLE 4976 4977 static const char *ProcFsPerOpenTypeString( 4978 enum NvKmsPerOpenType type) 4979 { 4980 switch (type) { 4981 case NvKmsPerOpenTypeIoctl: return "ioctl"; 4982 case NvKmsPerOpenTypeGrantSurface: return "grantSurface"; 4983 case NvKmsPerOpenTypeGrantSwapGroup: return "grantSwapGroup"; 4984 case NvKmsPerOpenTypeGrantPermissions: return "grantPermissions"; 4985 case NvKmsPerOpenTypeUnicastEvent: return "unicastEvent"; 4986 case NvKmsPerOpenTypeUndefined: return "undefined"; 4987 } 4988 4989 return "unknown"; 4990 } 4991 4992 static const char *ProcFsUnicastEventTypeString( 4993 enum NvKmsUnicastEventType type) 4994 { 4995 switch (type) { 4996 case NvKmsUnicastEventTypeDeferredRequest: return "DeferredRequest"; 4997 case NvKmsUnicastEventTypeVblankNotification: return "VblankNotification"; 4998 case NvKmsUnicastEventTypeUndefined: return "undefined"; 4999 } 5000 5001 return "unknown"; 5002 } 5003 5004 static const char *ProcFsPerOpenClientTypeString( 5005 enum NvKmsClientType clientType) 5006 { 5007 switch (clientType) { 5008 case NVKMS_CLIENT_USER_SPACE: return "user-space"; 5009 case NVKMS_CLIENT_KERNEL_SPACE: return "kernel-space"; 5010 } 5011 5012 return "unknown"; 5013 } 5014 5015 static const char *ProcFsPermissionsTypeString( 5016 enum NvKmsPermissionsType permissionsType) 5017 { 5018 switch (permissionsType) { 5019 case 
NV_KMS_PERMISSIONS_TYPE_FLIPPING: return "flipping"; 5020 case NV_KMS_PERMISSIONS_TYPE_MODESET: return "modeset"; 5021 case NV_KMS_PERMISSIONS_TYPE_SUB_OWNER:return "sub-owner"; 5022 } 5023 5024 return "unknown"; 5025 } 5026 5027 static void 5028 ProcFsPrintClients( 5029 void *data, 5030 char *buffer, 5031 size_t size, 5032 nvkms_procfs_out_string_func_t *outString) 5033 { 5034 struct NvKmsPerOpen *pOpen; 5035 NVEvoInfoStringRec infoString; 5036 5037 nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { 5038 5039 const char *extra = ""; 5040 5041 nvInitInfoString(&infoString, buffer, size); 5042 5043 if (pOpen == nvEvoGlobal.nvKmsPerOpen) { 5044 extra = " (NVKMS-internal client)"; 5045 } 5046 5047 nvEvoLogInfoString(&infoString, 5048 "Client (pOpen) : %p", pOpen); 5049 nvEvoLogInfoString(&infoString, 5050 " pid : %d%s", pOpen->pid, extra); 5051 nvEvoLogInfoString(&infoString, 5052 " clientType : %s", 5053 ProcFsPerOpenClientTypeString(pOpen->clientType)); 5054 nvEvoLogInfoString(&infoString, 5055 " type : %s", 5056 ProcFsPerOpenTypeString(pOpen->type)); 5057 5058 if (pOpen->type == NvKmsPerOpenTypeIoctl) { 5059 5060 NvKmsGenericHandle deviceHandle; 5061 struct NvKmsPerOpenDev *pOpenDev; 5062 5063 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 5064 pOpenDev, deviceHandle) { 5065 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; 5066 5067 nvEvoLogInfoString(&infoString, 5068 " pDevEvo (deviceId:%02d) : %p", 5069 pDevEvo->deviceId, pDevEvo); 5070 nvEvoLogInfoString(&infoString, 5071 " NvKmsDeviceHandle : %d", deviceHandle); 5072 } 5073 5074 } else if (pOpen->type == NvKmsPerOpenTypeGrantSurface) { 5075 5076 NVSurfaceEvoPtr pSurfaceEvo = pOpen->grantSurface.pSurfaceEvo; 5077 5078 nvEvoLogInfoString(&infoString, 5079 " pSurfaceEvo : %p", pSurfaceEvo); 5080 5081 } else if (pOpen->type == NvKmsPerOpenTypeGrantPermissions) { 5082 5083 NVDevEvoPtr pDevEvo = pOpen->grantPermissions.pDevEvo; 5084 const struct NvKmsPermissions *pPerms = 5085 
&pOpen->grantPermissions.permissions; 5086 5087 nvEvoLogInfoString(&infoString, 5088 " pDevEvo (deviceId:%02d) : %p", 5089 pDevEvo->deviceId, pDevEvo); 5090 5091 nvEvoLogInfoString(&infoString, 5092 " PermissionsType : %s", 5093 ProcFsPermissionsTypeString(pPerms->type)); 5094 5095 if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { 5096 NvU32 d, h; 5097 5098 for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) { 5099 for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) { 5100 5101 const NvU8 layerMask = 5102 pPerms->flip.disp[d].head[h].layerMask; 5103 5104 if (layerMask == 0) { 5105 continue; 5106 } 5107 5108 nvEvoLogInfoString(&infoString, 5109 " disp:%02d, head:%02d : 0x%08x", d, h, 5110 layerMask); 5111 } 5112 } 5113 } else if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { 5114 NvU32 d, h; 5115 5116 for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) { 5117 for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) { 5118 5119 NVDpyIdList dpyIdList = 5120 pPerms->modeset.disp[d].head[h].dpyIdList; 5121 NVDispEvoPtr pDispEvo; 5122 char *dpys; 5123 5124 if (nvDpyIdListIsEmpty(dpyIdList)) { 5125 continue; 5126 } 5127 5128 pDispEvo = pDevEvo->pDispEvo[d]; 5129 5130 dpys = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); 5131 5132 if (dpys == NULL) { 5133 continue; 5134 } 5135 5136 nvEvoLogInfoString(&infoString, 5137 " disp:%02d, head:%02d : %s", d, h, dpys); 5138 5139 nvFree(dpys); 5140 } 5141 } 5142 } 5143 } else if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) { 5144 5145 NVDevEvoPtr pDevEvo = pOpen->grantSwapGroup.pDevEvo; 5146 5147 nvEvoLogInfoString(&infoString, 5148 " pDevEvo (deviceId:%02d) : %p", 5149 pDevEvo->deviceId, pDevEvo); 5150 nvEvoLogInfoString(&infoString, 5151 " pSwapGroup : %p", 5152 pOpen->grantSwapGroup.pSwapGroup); 5153 5154 } else if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) { 5155 nvEvoLogInfoString(&infoString, 5156 " unicastEvent type : %s", 5157 ProcFsUnicastEventTypeString(pOpen->unicastEvent.type)); 5158 
switch(pOpen->unicastEvent.type) { 5159 case NvKmsUnicastEventTypeDeferredRequest: 5160 nvEvoLogInfoString(&infoString, 5161 " pDeferredRequestFifo : %p", 5162 pOpen->unicastEvent.e.deferred.pDeferredRequestFifo); 5163 break; 5164 case NvKmsUnicastEventTypeVblankNotification: 5165 nvEvoLogInfoString(&infoString, 5166 " head : %x", 5167 pOpen->unicastEvent.e.vblankNotification.apiHead); 5168 break; 5169 default: 5170 break; 5171 } 5172 } 5173 5174 nvEvoLogInfoString(&infoString, ""); 5175 outString(data, buffer); 5176 } 5177 } 5178 5179 static void PrintSurfacePlanes( 5180 NVEvoInfoStringRec *pInfoString, 5181 const NVSurfaceEvoRec *pSurfaceEvo) 5182 { 5183 NvU8 planeIndex; 5184 5185 FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { 5186 nvEvoLogInfoString(pInfoString, 5187 "plane[%u] disp ctxDma:0x%08x pitch:%u offset:%" NvU64_fmtu 5188 " rmObjectSizeInBytes:%" NvU64_fmtu, 5189 planeIndex, 5190 pSurfaceEvo->planes[planeIndex].surfaceDesc.ctxDmaHandle, 5191 pSurfaceEvo->planes[planeIndex].pitch, 5192 pSurfaceEvo->planes[planeIndex].offset, 5193 pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes); 5194 } 5195 } 5196 5197 static void PrintSurfaceClients( 5198 NVEvoInfoStringRec *pInfoString, 5199 const NVSurfaceEvoRec *pSurfaceEvo, 5200 const NVDevEvoRec *pDevEvo) 5201 { 5202 struct NvKmsPerOpen *pOpen; 5203 5204 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { 5205 NvKmsGenericHandle deviceHandle; 5206 struct NvKmsPerOpenDev *pOpenDev; 5207 5208 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 5209 pOpenDev, deviceHandle) { 5210 NvKmsGenericHandle surfaceHandle; 5211 NVSurfaceEvoPtr pTmpSurfaceEvo; 5212 5213 if (pOpenDev->pDevEvo != pDevEvo) { 5214 continue; 5215 } 5216 5217 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, 5218 pTmpSurfaceEvo, surfaceHandle) { 5219 if (pTmpSurfaceEvo != pSurfaceEvo) { 5220 continue; 5221 } 5222 5223 nvEvoLogInfoString(pInfoString, 5224 " pOpen : %p", pOpen); 5225 
nvEvoLogInfoString(pInfoString, 5226 " pOpenDev : %p", pOpenDev); 5227 nvEvoLogInfoString(pInfoString, 5228 " NvKmsSurfaceHandle : %d", surfaceHandle); 5229 } 5230 } 5231 } 5232 } 5233 5234 static void PrintSurface( 5235 NVEvoInfoStringRec *pInfoString, 5236 const NVSurfaceEvoRec *pSurfaceEvo, 5237 const NVDevEvoRec *pDevEvo) 5238 { 5239 NvU32 sd; 5240 5241 nvEvoLogInfoString(pInfoString, 5242 "pSurfaceEvo : %p", pSurfaceEvo); 5243 nvEvoLogInfoString(pInfoString, 5244 " pDevEvo (deviceId:%02d) : %p", pDevEvo->deviceId, pDevEvo); 5245 nvEvoLogInfoString(pInfoString, 5246 " owner : " 5247 "pOpenDev:%p, NvKmsSurfaceHandle:%d", 5248 pSurfaceEvo->owner.pOpenDev, 5249 pSurfaceEvo->owner.surfaceHandle); 5250 nvEvoLogInfoString(pInfoString, 5251 " {width,height}InPixels : %d x %d", 5252 pSurfaceEvo->widthInPixels, 5253 pSurfaceEvo->heightInPixels); 5254 nvEvoLogInfoString(pInfoString, 5255 " misc : " 5256 "log2GobsPerBlockY:%d", 5257 pSurfaceEvo->log2GobsPerBlockY); 5258 nvEvoLogInfoString(pInfoString, 5259 " gpuAddress : 0x%016" NvU64_fmtx, 5260 pSurfaceEvo->gpuAddress); 5261 nvEvoLogInfoString(pInfoString, 5262 " memory : layout:%s format:%s", 5263 NvKmsSurfaceMemoryLayoutToString(pSurfaceEvo->layout), 5264 nvKmsSurfaceMemoryFormatToString(pSurfaceEvo->format)); 5265 nvEvoLogInfoString(pInfoString, 5266 " refCnts : " 5267 "rmRefCnt:%" NvU64_fmtx" structRefCnt:%" NvU64_fmtx, 5268 pSurfaceEvo->rmRefCnt, 5269 pSurfaceEvo->structRefCnt); 5270 5271 PrintSurfacePlanes(pInfoString, pSurfaceEvo); 5272 5273 nvEvoLogInfoString(pInfoString, 5274 " clients :"); 5275 5276 PrintSurfaceClients(pInfoString, pSurfaceEvo, pDevEvo); 5277 5278 for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { 5279 if (pSurfaceEvo->cpuAddress[sd] != NULL) { 5280 nvEvoLogInfoString(pInfoString, 5281 " cpuAddress[%02d] : %p", 5282 sd, pSurfaceEvo->cpuAddress[sd]); 5283 } 5284 } 5285 5286 nvEvoLogInfoString(pInfoString, ""); 5287 } 5288 5289 static void 5290 ProcFsPrintSurfaces( 5291 void *data, 5292 char 
*buffer, 5293 size_t size, 5294 nvkms_procfs_out_string_func_t *outString) 5295 { 5296 struct NvKmsPerOpen *pOpen; 5297 NVEvoInfoStringRec infoString; 5298 NvU32 i; 5299 5300 for (i = 0; i < 2; i++) { 5301 5302 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { 5303 NvKmsGenericHandle deviceHandle; 5304 struct NvKmsPerOpenDev *pOpenDev; 5305 5306 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 5307 pOpenDev, deviceHandle) { 5308 5309 NvKmsGenericHandle surfaceHandle; 5310 NVSurfaceEvoPtr pSurfaceEvo; 5311 5312 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, 5313 pSurfaceEvo, 5314 surfaceHandle) { 5315 /* 5316 * Because clients can grant surfaces between each 5317 * other, a pSurfaceEvo could be in multiple clients' 5318 * lists. So, we loop over all surfaces on all clients 5319 * twice: the first time we print unique surfaces and set 5320 * 'procFsFlag' to recognize duplicates. The second time, 5321 * we clear 'procFsFlag'. 5322 */ 5323 if (i == 0) { 5324 if (pSurfaceEvo->procFsFlag) { 5325 continue; 5326 } 5327 5328 nvInitInfoString(&infoString, buffer, size); 5329 PrintSurface(&infoString, pSurfaceEvo, 5330 pOpenDev->pDevEvo); 5331 outString(data, buffer); 5332 5333 pSurfaceEvo->procFsFlag = TRUE; 5334 } else { 5335 pSurfaceEvo->procFsFlag = FALSE; 5336 } 5337 } 5338 } 5339 } 5340 } 5341 } 5342 5343 static void 5344 ProcFsPrintHeadSurface( 5345 void *data, 5346 char *buffer, 5347 size_t size, 5348 nvkms_procfs_out_string_func_t *outString) 5349 { 5350 NVDevEvoPtr pDevEvo; 5351 NVDispEvoPtr pDispEvo; 5352 NvU32 dispIndex, apiHead; 5353 NVEvoInfoStringRec infoString; 5354 5355 FOR_ALL_EVO_DEVS(pDevEvo) { 5356 5357 nvInitInfoString(&infoString, buffer, size); 5358 nvEvoLogInfoString(&infoString, 5359 "pDevEvo (deviceId:%02d) : %p", 5360 pDevEvo->deviceId, pDevEvo); 5361 outString(data, buffer); 5362 5363 FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { 5364 5365 nvInitInfoString(&infoString, buffer, size); 5366 
nvEvoLogInfoString(&infoString, 5367 " pDispEvo (dispIndex:%02d) : %p", 5368 dispIndex, pDispEvo); 5369 outString(data, buffer); 5370 5371 for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { 5372 nvInitInfoString(&infoString, buffer, size); 5373 nvHsProcFs(&infoString, pDevEvo, dispIndex, apiHead); 5374 nvEvoLogInfoString(&infoString, ""); 5375 outString(data, buffer); 5376 } 5377 } 5378 } 5379 } 5380 5381 static const char *SwapGroupPerEyeStereoString(const NvU32 request) 5382 { 5383 const NvU32 value = 5384 DRF_VAL(KMS, _DEFERRED_REQUEST, 5385 _SWAP_GROUP_READY_PER_EYE_STEREO, request); 5386 5387 switch (value) { 5388 5389 case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_PAIR: 5390 return "PerPair"; 5391 case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_EYE: 5392 return "PerEye"; 5393 } 5394 5395 return "Unknown"; 5396 } 5397 5398 static void ProcFsPrintOneDeferredRequestFifo( 5399 void *data, 5400 char *buffer, 5401 size_t size, 5402 nvkms_procfs_out_string_func_t *outString, 5403 const NVDeferredRequestFifoRec *pDeferredRequestFifo, 5404 const struct NvKmsPerOpen *pOpen, 5405 const struct NvKmsPerOpenDev *pOpenDev, 5406 const NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle) 5407 { 5408 NVEvoInfoStringRec infoString; 5409 5410 const struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo; 5411 NvU32 i, prevI; 5412 5413 nvInitInfoString(&infoString, buffer, size); 5414 5415 nvEvoLogInfoString(&infoString, 5416 "pDeferredRequestFifo : %p", pDeferredRequestFifo); 5417 5418 nvEvoLogInfoString(&infoString, 5419 " Client (pOpen) : %p", pOpen); 5420 5421 nvEvoLogInfoString(&infoString, 5422 " pOpenDev : %p", pOpenDev); 5423 5424 nvEvoLogInfoString(&infoString, 5425 " pSurfaceEvo : %p", pDeferredRequestFifo->pSurfaceEvo); 5426 5427 nvEvoLogInfoString(&infoString, 5428 " NvKms...RequestFifoHandle : %d", deferredRequestFifoHandle); 5429 5430 if (pDeferredRequestFifo->swapGroup.pSwapGroup != NULL) { 5431 5432 
nvEvoLogInfoString(&infoString, 5433 " swapGroup :"); 5434 nvEvoLogInfoString(&infoString, 5435 " pSwapGroup : %p", 5436 pDeferredRequestFifo->swapGroup.pSwapGroup); 5437 nvEvoLogInfoString(&infoString, 5438 " pOpenUnicastEvent : %p", 5439 pDeferredRequestFifo->swapGroup.pOpenUnicastEvent); 5440 nvEvoLogInfoString(&infoString, 5441 " ready : %d", 5442 pDeferredRequestFifo->swapGroup.ready); 5443 nvEvoLogInfoString(&infoString, 5444 " semaphoreIndex : 0x%02x", 5445 pDeferredRequestFifo->swapGroup.semaphoreIndex); 5446 } 5447 5448 nvEvoLogInfoString(&infoString, 5449 " put : %d", fifo->put); 5450 5451 nvEvoLogInfoString(&infoString, 5452 " get : %d", fifo->get); 5453 5454 outString(data, buffer); 5455 5456 for (i = 0; i < ARRAY_LEN(fifo->request); i++) { 5457 5458 const NvU32 request = fifo->request[i]; 5459 const NvU32 opcode = DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request); 5460 const NvU32 semaphoreIndex = 5461 DRF_VAL(KMS, _DEFERRED_REQUEST, _SEMAPHORE_INDEX, request); 5462 5463 switch (opcode) { 5464 5465 case NVKMS_DEFERRED_REQUEST_OPCODE_NOP: 5466 break; 5467 5468 case NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY: 5469 nvInitInfoString(&infoString, buffer, size); 5470 nvEvoLogInfoString(&infoString, 5471 " request[0x%02x] : " 5472 "opcode:SWAP_GROUP_READY, semaphoreIndex:0x%02x, " 5473 "perEyeStereo:%s", 5474 i, semaphoreIndex, 5475 SwapGroupPerEyeStereoString(request)); 5476 outString(data, buffer); 5477 break; 5478 5479 default: 5480 nvInitInfoString(&infoString, buffer, size); 5481 nvEvoLogInfoString(&infoString, 5482 " request[0x%02x] : opcode:INVALID", i); 5483 outString(data, buffer); 5484 break; 5485 } 5486 } 5487 5488 /* 5489 * Print the fifo->semaphore[] array, but collapse multiple lines with 5490 * duplicate values. 5491 * 5492 * To collapse duplicates, loop over all semaphore[] elements. If the 5493 * current element is the same as semaphore[prev], continue. If they 5494 * differ, print the value in semaphore[prev .. i-1], and update prev. 
5495 */ 5496 prevI = 0; 5497 5498 for (i = 1; i <= ARRAY_LEN(fifo->semaphore); i++) { 5499 5500 const NvU32 prevValue = fifo->semaphore[prevI].data[0]; 5501 5502 if (i != ARRAY_LEN(fifo->semaphore)) { 5503 const NvU32 currentValue = fifo->semaphore[i].data[0]; 5504 5505 /* 5506 * If the value in this element matches the previous element, don't 5507 * print anything, yet. 5508 */ 5509 if (currentValue == prevValue) { 5510 continue; 5511 } 5512 } 5513 5514 nvInitInfoString(&infoString, buffer, size); 5515 5516 if (prevI == (i - 1)) { 5517 nvEvoLogInfoString(&infoString, 5518 " semaphore[0x%02x] : 0x%08x", 5519 prevI, prevValue); 5520 } else { 5521 nvEvoLogInfoString(&infoString, 5522 " semaphore[0x%02x..0x%02x] : 0x%08x", 5523 prevI, i - 1, prevValue); 5524 } 5525 5526 outString(data, buffer); 5527 5528 prevI = i; 5529 } 5530 5531 nvInitInfoString(&infoString, buffer, size); 5532 nvEvoLogInfoString(&infoString, ""); 5533 outString(data, buffer); 5534 } 5535 5536 static void 5537 ProcFsPrintDeferredRequestFifos( 5538 void *data, 5539 char *buffer, 5540 size_t size, 5541 nvkms_procfs_out_string_func_t *outString) 5542 { 5543 struct NvKmsPerOpen *pOpen; 5544 5545 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { 5546 5547 struct NvKmsPerOpenDev *pOpenDev; 5548 NvKmsGenericHandle devHandle; 5549 5550 FOR_ALL_POINTERS_IN_EVO_API_HANDLES( 5551 &pOpen->ioctl.devHandles, 5552 pOpenDev, devHandle) { 5553 5554 NVDeferredRequestFifoRec *pDeferredRequestFifo; 5555 NvKmsGenericHandle fifoHandle; 5556 5557 FOR_ALL_POINTERS_IN_EVO_API_HANDLES( 5558 &pOpenDev->deferredRequestFifoHandles, 5559 pDeferredRequestFifo, fifoHandle) { 5560 5561 ProcFsPrintOneDeferredRequestFifo( 5562 data, buffer, size, outString, 5563 pDeferredRequestFifo, 5564 pOpen, 5565 pOpenDev, 5566 fifoHandle); 5567 } 5568 } 5569 } 5570 } 5571 5572 static void 5573 ProcFsPrintDpyCrcs( 5574 void *data, 5575 char *buffer, 5576 size_t size, 5577 nvkms_procfs_out_string_func_t *outString) 5578 { 5579 
    NVDevEvoPtr pDevEvo;
    NVDispEvoPtr pDispEvo;
    NvU32 dispIndex, head;
    NVEvoInfoStringRec infoString;

    FOR_ALL_EVO_DEVS(pDevEvo) {

        nvInitInfoString(&infoString, buffer, size);
        nvEvoLogInfoString(&infoString,
                           "pDevEvo (deviceId:%02d) : %p",
                           pDevEvo->deviceId, pDevEvo);
        outString(data, buffer);

        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {

            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                               " pDispEvo (dispIndex:%02d) : %p",
                               dispIndex, pDispEvo);
            outString(data, buffer);

            for (head = 0; head < pDevEvo->numHeads; head++) {
                const NVDispHeadStateEvoRec *pHeadState =
                    &pDispEvo->headState[head];
                /* Per-head output slots filled in by nvReadCRC32Evo(). */
                struct NvKmsDpyCRC32 compCrc;
                struct NvKmsDpyCRC32 rgCrc;
                struct NvKmsDpyCRC32 outputCrc;
                CRC32NotifierCrcOut crcOut;
                crcOut.compositorCrc32 = &compCrc;
                crcOut.rasterGeneratorCrc32 = &rgCrc;
                crcOut.outputCrc32 = &outputCrc;

                /* Inactive head: nothing to read. */
                if (pHeadState->pConnectorEvo == NULL) {
                    continue;
                }

                nvInitInfoString(&infoString, buffer, size);
                if (nvReadCRC32Evo(pDispEvo, head, &crcOut)) {
                    nvEvoLogInfoString(&infoString,
                                       " head %d :",
                                       head);
                    /* Each CRC type may be unsupported on a given config. */
                    if (compCrc.supported) {
                        nvEvoLogInfoString(&infoString,
                                           " compositor CRC : 0x%08x",
                                           compCrc.value);
                    } else {
                        nvEvoLogInfoString(&infoString,
                                           " compositor CRC : unsupported");
                    }
                    if (rgCrc.supported) {
                        nvEvoLogInfoString(&infoString,
                                           " raster generator CRC : 0x%08x",
                                           rgCrc.value);
                    } else {
                        nvEvoLogInfoString(&infoString,
                                           " raster generator CRC : unsupported");
                    }
                    if (outputCrc.supported) {
                        nvEvoLogInfoString(&infoString,
                                           " output CRC : 0x%08x",
                                           outputCrc.value);
                    } else {
                        nvEvoLogInfoString(&infoString,
                                           " output CRC : unsupported");
                    }
                } else {
                    nvEvoLogInfoString(&infoString,
                                       " head %d : error",
                                       head);
                }
                outString(data, buffer);
            }
        }
    }
}

/*!
 * Return a human-readable name for the given connector signal format.
 */
static const char *
SignalFormatString(NvKmsConnectorSignalFormat signalFormat)
{
    switch (signalFormat) {
    case NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA: return "VGA";
    case NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS: return "LVDS";
    case NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS: return "TMDS";
    case NVKMS_CONNECTOR_SIGNAL_FORMAT_DP: return "DP";
    case NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI: return "DSI";
    case NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN: break;
    }

    return "unknown";
}

/*!
 * Return a human-readable name for the given pixel depth.
 */
static const char *
PixelDepthString(enum nvKmsPixelDepth pixelDepth)
{
    switch (pixelDepth) {
    case NVKMS_PIXEL_DEPTH_18_444: return "18bpp 4:4:4";
    case NVKMS_PIXEL_DEPTH_24_444: return "24bpp 4:4:4";
    case NVKMS_PIXEL_DEPTH_30_444: return "30bpp 4:4:4";
    case NVKMS_PIXEL_DEPTH_20_422: return "20bpp 4:2:2";
    case NVKMS_PIXEL_DEPTH_16_422: return "16bpp 4:2:2";
    }

    return "unknown";
}

/*!
 * Print, through the procfs outString callback, the connector, protocol,
 * mode, and depth of each head of each disp of each device.
 */
static void
ProcFsPrintHeads(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString)
{
    NVDevEvoPtr pDevEvo;
    NVDispEvoPtr pDispEvo;
    NvU32 dispIndex, head;
    NVEvoInfoStringRec infoString;

    FOR_ALL_EVO_DEVS(pDevEvo) {

        nvInitInfoString(&infoString, buffer, size);
        nvEvoLogInfoString(&infoString,
                           "pDevEvo (deviceId:%02d) : %p",
                           pDevEvo->deviceId, pDevEvo);
        outString(data, buffer);

        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {

            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                               " pDispEvo (dispIndex:%02d) : %p",
                               dispIndex, pDispEvo);
            outString(data, buffer);

            if (pDevEvo->coreInitMethodsPending) {
                /* If the core channel has been allocated but no mode has yet
                 * been set, pConnectorEvo will be non-NULL for heads being
                 * driven by the console,
                 * but data like the mode timings will
                 * be bogus. */
                nvInitInfoString(&infoString, buffer, size);
                nvEvoLogInfoString(&infoString, " (not yet initialized)");
                outString(data, buffer);
                continue;
            }

            for (head = 0; head < pDevEvo->numHeads; head++) {
                const NVDispHeadStateEvoRec *pHeadState =
                    &pDispEvo->headState[head];
                const NVConnectorEvoRec *pConnectorEvo =
                    pHeadState->pConnectorEvo;
                const NVHwModeTimingsEvo *pHwModeTimings =
                    &pHeadState->timings;

                nvInitInfoString(&infoString, buffer, size);
                /* A head with no connector is inactive. */
                if (pConnectorEvo == NULL) {
                    nvEvoLogInfoString(&infoString,
                                       " head %d : inactive",
                                       head);
                } else {
                    const NvU32 refreshRate10kHz =
                        nvGetRefreshRate10kHz(pHwModeTimings);

                    nvEvoLogInfoString(&infoString,
                                       " head %d : %s",
                                       head, pConnectorEvo->name);

                    nvEvoLogInfoString(&infoString,
                                       " protocol : %s",
                                       SignalFormatString(pConnectorEvo->signalFormat));

                    /* refreshRate10kHz is in units of 1/10000 Hz. */
                    nvEvoLogInfoString(&infoString,
                                       " mode : %u x %u @ %u.%04u Hz",
                                       nvEvoVisibleWidth(pHwModeTimings),
                                       nvEvoVisibleHeight(pHwModeTimings),
                                       refreshRate10kHz / 10000,
                                       refreshRate10kHz % 10000);

                    nvEvoLogInfoString(&infoString,
                                       " depth : %s",
                                       PixelDepthString(pHeadState->pixelDepth));
                }
                outString(data, buffer);
            }
        }
    }
}

#endif /* NVKMS_PROCFS_ENABLE */

/*!
 * Return the table of NVKMS procfs files and their print callbacks, or
 * NULL when procfs support is compiled out.
 */
void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles)
{
#if NVKMS_PROCFS_ENABLE
    /* NULL-terminated table; static so the pointer stays valid. */
    static const nvkms_procfs_file_t procFiles[] = {
        { "clients", ProcFsPrintClients },
        { "surfaces", ProcFsPrintSurfaces },
        { "headsurface", ProcFsPrintHeadSurface },
        { "deferred-request-fifos", ProcFsPrintDeferredRequestFifos },
        { "crcs", ProcFsPrintDpyCrcs },
        { "heads", ProcFsPrintHeads },
        { NULL, NULL },
    };

    *ppProcFiles = procFiles;
#else
    *ppProcFiles = NULL;
#endif
}

/*!
 * Tear down the global NVKMS state created by nvKmsModuleLoad():
 * the internal per-open, the RM client handle, and dpy overrides.
 * Safe to call on a partially initialized state (used on the load
 * failure path as well as at unload).
 */
static void FreeGlobalState(void)
{
    nvInvalidateRasterLockGroupsEvo();

    nvKmsClose(nvEvoGlobal.nvKmsPerOpen);
    nvEvoGlobal.nvKmsPerOpen = NULL;

    if (nvEvoGlobal.clientHandle != 0) {
        nvRmApiFree(nvEvoGlobal.clientHandle, nvEvoGlobal.clientHandle,
                    nvEvoGlobal.clientHandle);
        nvEvoGlobal.clientHandle = 0;
    }

    nvClearDpyOverrides();
}

/*!
 * Initialize global NVKMS state at module load: allocate the RM root
 * client and an internal kernel-space per-open used for ioctls.
 *
 * \return  TRUE on success; FALSE on failure (all partial state freed).
 */
NvBool nvKmsModuleLoad(void)
{
    NvU32 ret = NVOS_STATUS_ERROR_GENERIC;

    nvEvoLog(EVO_LOG_INFO, "Loading %s", pNV_KMS_ID);

    /* Allocate the RM root (NV01_ROOT) client handle. */
    ret = nvRmApiAlloc(NV01_NULL_OBJECT,
                       NV01_NULL_OBJECT,
                       NV01_NULL_OBJECT,
                       NV01_ROOT,
                       &nvEvoGlobal.clientHandle);

    if (ret != NVOS_STATUS_SUCCESS) {
        nvEvoLog(EVO_LOG_ERROR, "Failed to initialize client");
        goto fail;
    }

    /* Internal kernel-space client used by NVKMS itself. */
    nvEvoGlobal.nvKmsPerOpen = nvKmsOpen(0, NVKMS_CLIENT_KERNEL_SPACE, NULL);
    if (!nvEvoGlobal.nvKmsPerOpen) {
        nvEvoLog(EVO_LOG_ERROR, "Failed to initialize internal modeset client");
        goto fail;
    }

    if (!AssignNvKmsPerOpenType(nvEvoGlobal.nvKmsPerOpen,
                                NvKmsPerOpenTypeIoctl, FALSE)) {
        goto fail;
    }

    return TRUE;
fail:
    FreeGlobalState();

    return FALSE;
}


/*!
 * Free global NVKMS state at module unload; by this point all devices
 * and framelock objects are expected to have been freed.
 */
void nvKmsModuleUnload(void)
{
    FreeGlobalState();

    nvAssert(nvListIsEmpty(&nvEvoGlobal.frameLockList));
    nvAssert(nvListIsEmpty(&nvEvoGlobal.devList));
#if defined(DEBUG)
    nvReportUnfreedAllocations();
#endif
    nvEvoLog(EVO_LOG_INFO, "Unloading");
}


/*!
 * Queue a copy of the event on the ioctl per-open's event list and
 * notify the kernel interface layer.  Allocation failure silently
 * drops the event (best effort).
 */
static void SendEvent(struct NvKmsPerOpen *pOpen,
                      const struct NvKmsEvent *pEvent)
{
    struct NvKmsPerOpenEventListEntry *pEntry = nvAlloc(sizeof(*pEntry));

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    if (pEntry == NULL) {
        return;
    }

    pEntry->event = *pEvent;
    nvListAppend(&pEntry->eventListEntry, &pOpen->ioctl.eventList);

    nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE);
}

/*!
 * Timer callback: restore the console if, by the time the timer fires,
 * there is still no modeset owner and console hotplug handling is on.
 */
static void ConsoleRestoreTimerFired(void *dataPtr, NvU32 dataU32)
{
    NVDevEvoPtr pDevEvo = dataPtr;

    if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) {
        pDevEvo->skipConsoleRestore = FALSE;
        nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */);
    }
}

/*!
 * Generate a dpy event.
 *
 * \param[in]  pDpyEvo    The dpy for which the event should be generated.
 * \param[in]  eventType  The NVKMS_EVENT_TYPE_
 * \param[in]  attribute  The NvKmsDpyAttribute; only used for
 *                        NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED.
 * \param[in]  value      The NvKmsDpyAttribute value; only used for
 *                        NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED.
 */
static void SendDpyEventEvo(const NVDpyEvoRec *pDpyEvo,
                            const NvU32 eventType,
                            const enum NvKmsDpyAttribute attribute,
                            const NvS64 value)
{
    struct NvKmsPerOpen *pOpen;
    const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;

    /* Deliver the event to every interested ioctl client. */
    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {

        struct NvKmsEvent event = { 0 };
        NvKmsDeviceHandle deviceHandle;
        NvKmsDispHandle dispHandle;

        /* Skip clients that do not have this disp's device open. */
        if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo,
                                        &deviceHandle, &dispHandle)) {
            continue;
        }

        /* Skip clients that did not subscribe to this event type. */
        if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) {
            continue;
        }

        event.eventType = eventType;

        switch (eventType) {

        case NVKMS_EVENT_TYPE_DPY_CHANGED:
            event.u.dpyChanged.deviceHandle = deviceHandle;
            event.u.dpyChanged.dispHandle = dispHandle;
            event.u.dpyChanged.dpyId = pDpyEvo->id;
            break;

        case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED:
            event.u.dynamicDpyConnected.deviceHandle = deviceHandle;
            event.u.dynamicDpyConnected.dispHandle = dispHandle;
            event.u.dynamicDpyConnected.dpyId = pDpyEvo->id;
            break;

        case
             NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED:
            event.u.dynamicDpyDisconnected.deviceHandle = deviceHandle;
            event.u.dynamicDpyDisconnected.dispHandle = dispHandle;
            event.u.dynamicDpyDisconnected.dpyId = pDpyEvo->id;
            break;

        case NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED:
            event.u.dpyAttributeChanged.deviceHandle = deviceHandle;
            event.u.dpyAttributeChanged.dispHandle = dispHandle;
            event.u.dpyAttributeChanged.dpyId = pDpyEvo->id;
            event.u.dpyAttributeChanged.attribute = attribute;
            event.u.dpyAttributeChanged.value = value;
            break;

        default:
            nvAssert(!"Bad eventType");
            return;
        }

        SendEvent(pOpen, &event);
    }

    /*
     * On a dpy change with no modeset owner, (re)arm a 500 ms timer to
     * restore the console; re-arming coalesces bursts of hotplug events.
     */
    if (eventType == NVKMS_EVENT_TYPE_DPY_CHANGED) {
        NVDevEvoPtr pDevEvo = pDpyEvo->pDispEvo->pDevEvo;

        if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) {
            nvkms_free_timer(pDevEvo->consoleRestoreTimer);
            pDevEvo->consoleRestoreTimer =
                nvkms_alloc_timer(ConsoleRestoreTimerFired, pDevEvo, 0, 500);
        }
    }
}

/*!
 * Generate a non-attribute dpy event (see SendDpyEventEvo()).
 */
void nvSendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, const NvU32 eventType)
{
    nvAssert(eventType != NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED);
    SendDpyEventEvo(pDpyEvo, eventType,
                    0 /* attribute (unused) */,
                    0 /* value (unused) */ );
}

/*!
 * Generate an NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED event.
 */
void nvSendDpyAttributeChangedEventEvo(const NVDpyEvoRec *pDpyEvo,
                                       const enum NvKmsDpyAttribute attribute,
                                       const NvS64 value)
{
    SendDpyEventEvo(pDpyEvo,
                    NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED,
                    attribute, value);
}

/*!
 * Generate an NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED event for
 * every interested ioctl client that has this framelock device open.
 */
void nvSendFrameLockAttributeChangedEventEvo(
    const NVFrameLockEvoRec *pFrameLockEvo,
    const enum NvKmsFrameLockAttribute attribute,
    const NvS64 value)
{
    struct NvKmsPerOpen *pOpen;
    const NvU32 eventType = NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {

        struct NvKmsEvent
                           event = { 0 };
        NvKmsFrameLockHandle frameLockHandle;

        /* Skip clients that did not subscribe to this event type. */
        if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) {
            continue;
        }

        /* Skip clients that do not have this framelock device open. */
        if (!FrameLockEvoToFrameLockHandle(pOpen, pFrameLockEvo,
                                           &frameLockHandle)) {
            continue;
        }

        event.eventType = eventType;
        event.u.frameLockAttributeChanged.frameLockHandle = frameLockHandle;
        event.u.frameLockAttributeChanged.attribute = attribute;
        event.u.frameLockAttributeChanged.value = value;

        SendEvent(pOpen, &event);
    }
}


/*!
 * Generate an NVKMS_EVENT_TYPE_FLIP_OCCURRED event for every ioctl
 * client that subscribed to it and holds flip permission for the given
 * apiHead/layer.
 */
void nvSendFlipOccurredEventEvo(const NVDispEvoRec *pDispEvo,
                                const NvU32 apiHead, const NvU32 layer)
{
    struct NvKmsPerOpen *pOpen;
    const NvU32 eventType = NVKMS_EVENT_TYPE_FLIP_OCCURRED;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {

        struct NvKmsEvent event = { 0 };
        NvKmsDeviceHandle deviceHandle;
        NvKmsDispHandle dispHandle;

        struct NvKmsPerOpenDev *pOpenDev;
        const struct NvKmsFlipPermissions *pFlipPermissions;

        pOpenDev = DevEvoToOpenDev(pOpen, pDispEvo->pDevEvo);

        /* Skip clients that do not have this device open. */
        if (pOpenDev == NULL) {
            continue;
        }

        if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) {
            continue;
        }

        pFlipPermissions = &pOpenDev->flipPermissions;

        /* Skip clients without flip permission for this layer. */
        if ((pFlipPermissions->disp[pDispEvo->displayOwner].
             head[apiHead].layerMask & NVBIT(layer)) == 0x0) {
            continue;
        }

        if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo,
                                        &deviceHandle, &dispHandle)) {
            continue;
        }

        event.eventType = eventType;
        event.u.flipOccurred.deviceHandle = deviceHandle;
        event.u.flipOccurred.dispHandle = dispHandle;
        event.u.flipOccurred.head = apiHead;
        event.u.flipOccurred.layer = layer;

        SendEvent(pOpen, &event);
    }
}

/*!
 * Signal a unicast-event per-open: wake the kernel interface so the
 * client's poll/read on that fd completes.  No-op for NULL.
 */
void nvSendUnicastEvent(struct NvKmsPerOpen *pOpen)
{
    if (pOpen == NULL) {
        return;
    }

    nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent);
    nvAssert(pOpen->unicastEvent.type != NvKmsUnicastEventTypeUndefined);

    nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE);
}

/*!
 * Detach a unicast-event per-open from whatever it is bound to
 * (deferred request fifo or vblank callback) and reset its type to
 * Undefined.  No-op for NULL.
 */
void nvRemoveUnicastEvent(struct NvKmsPerOpen *pOpen)
{
    NVDeferredRequestFifoPtr pDeferredRequestFifo;
    NvKmsGenericHandle callbackHandle;
    NVVBlankCallbackPtr pCallbackData;
    struct NvKmsPerOpenDisp *pOpenDisp;
    NvU32 apiHead;

    if (pOpen == NULL) {
        return;
    }

    nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent);

    switch(pOpen->unicastEvent.type)
    {
        case NvKmsUnicastEventTypeDeferredRequest:
            /* Break the two-way link between fifo and per-open. */
            pDeferredRequestFifo =
                pOpen->unicastEvent.e.deferred.pDeferredRequestFifo;

            pDeferredRequestFifo->swapGroup.pOpenUnicastEvent = NULL;
            pOpen->unicastEvent.e.deferred.pDeferredRequestFifo = NULL;
            break;
        case NvKmsUnicastEventTypeVblankNotification:
            /* grab fields from the unicast fd */
            callbackHandle =
                pOpen->unicastEvent.e.vblankNotification.hCallback;
            pOpenDisp =
                pOpen->unicastEvent.e.vblankNotification.pOpenDisp;
            apiHead = pOpen->unicastEvent.e.vblankNotification.apiHead;

            /* Unregister the vblank callback */
            pCallbackData =
                nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead],
                                             callbackHandle);

nvApiHeadUnregisterVBlankCallback(pOpenDisp->pDispEvo, 6105 pCallbackData); 6106 6107 nvEvoDestroyApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead], 6108 callbackHandle); 6109 6110 /* invalidate the pOpen data */ 6111 pOpen->unicastEvent.e.vblankNotification.hCallback = 0; 6112 pOpen->unicastEvent.e.vblankNotification.pOpenDisp = NULL; 6113 pOpen->unicastEvent.e.vblankNotification.apiHead = NV_INVALID_HEAD; 6114 break; 6115 default: 6116 nvAssert("Invalid Unicast Event Type!"); 6117 break; 6118 } 6119 6120 pOpen->unicastEvent.type = NvKmsUnicastEventTypeUndefined; 6121 } 6122 6123 static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo) 6124 { 6125 struct NvKmsPerOpen *pOpen; 6126 6127 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { 6128 struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); 6129 6130 NvKmsGenericHandle surfaceHandle; 6131 NVSurfaceEvoPtr pSurfaceEvo; 6132 6133 if (pOpenDev == NULL) { 6134 continue; 6135 } 6136 6137 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, 6138 pSurfaceEvo, surfaceHandle) { 6139 6140 NvU8 planeIndex; 6141 6142 if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { 6143 continue; 6144 } 6145 6146 if (!pSurfaceEvo->requireDisplayHardwareAccess) { 6147 nvAssert(pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle == 0); 6148 continue; 6149 } 6150 6151 /* 6152 * Orphan surfaces should not get this far: they should 6153 * fail the owner check above. 
             */
            nvAssert(pSurfaceEvo->rmRefCnt > 0);

            /* Re-bind a descriptor for each plane of the surface. */
            FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) {
                NvU32 ret =
                    nvRmAllocAndBindSurfaceDescriptor(
                        pDevEvo,
                        pSurfaceEvo->planes[planeIndex].rmHandle,
                        pSurfaceEvo->layout,
                        pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes - 1,
                        &pSurfaceEvo->planes[planeIndex].surfaceDesc);
                if (ret != NVOS_STATUS_SUCCESS) {
                    /* On failure, unwind everything allocated so far. */
                    FreeSurfaceCtxDmasForAllOpens(pDevEvo);
                    nvAssert(!"Failed to re-allocate surface descriptor");
                    return;
                }
            }
        }
    }
}


/*!
 * Free the display-hardware surface descriptors of every owned surface
 * of every ioctl client on the device.  Counterpart of
 * AllocSurfaceCtxDmasForAllOpens(); called on suspend and on alloc
 * failure unwind.
 */
static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo)
{
    struct NvKmsPerOpen *pOpen;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
        struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);

        NvKmsGenericHandle surfaceHandle;
        NVSurfaceEvoPtr pSurfaceEvo;

        /* This client does not have this device open. */
        if (pOpenDev == NULL) {
            continue;
        }

        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles,
                                            pSurfaceEvo, surfaceHandle) {

            NvU8 planeIndex;

            /* Only the owning open frees the descriptors. */
            if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) {
                continue;
            }

            /*
             * Orphan surfaces should not get this far: they should
             * fail the owner check above.
             */
            nvAssert(pSurfaceEvo->rmRefCnt > 0);

            /* Surfaces without display hardware access have no ctx dma. */
            if (!pSurfaceEvo->requireDisplayHardwareAccess) {
                nvAssert(pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle == 0);
                continue;
            }

            FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) {
                pDevEvo->hal->FreeSurfaceDescriptor(
                    pDevEvo,
                    nvEvoGlobal.clientHandle,
                    &pSurfaceEvo->planes[planeIndex].surfaceDesc);
            }
        }
    }
}

#if defined(DEBUG)
/*!
 * Debug-only: return TRUE if any per-open still references the given
 * surface, either through an ioctl client's surface handles or through
 * a grant-surface per-open.
 */
NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo)
{
    struct NvKmsPerOpen *pOpen;

    /* Note: walks the full perOpenList, not just ioctl opens. */
    nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) {

        if (pOpen->type == NvKmsPerOpenTypeIoctl) {
            struct NvKmsPerOpenDev *pOpenDev;
            NvKmsGenericHandle dev;

            FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
                                                pOpenDev, dev) {

                NvKmsGenericHandle surfaceHandleUnused;
                NVSurfaceEvoPtr pSurfaceEvoTmp;

                if (pOpenDev == NULL) {
                    continue;
                }

                FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles,
                                                    pSurfaceEvoTmp,
                                                    surfaceHandleUnused) {
                    if (pSurfaceEvoTmp == pSurfaceEvo) {
                        return TRUE;
                    }
                }
            }
        } else if ((pOpen->type == NvKmsPerOpenTypeGrantSurface) &&
                   (pOpen->grantSurface.pSurfaceEvo == pSurfaceEvo)) {
            return TRUE;
        }
    }

    return FALSE;
}
#endif

/*!
 * Return the flip permissions of the given per-open device.
 * pOpenDev must be non-NULL.
 */
const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev(
    const struct NvKmsPerOpenDev *pOpenDev)
{
    nvAssert(pOpenDev != NULL);
    return &pOpenDev->flipPermissions;
}

/*!
 * Return the modeset permissions of the given per-open device.
 * pOpenDev must be non-NULL.
 */
const struct NvKmsModesetPermissions *nvGetModesetPermissionsFromOpenDev(
    const struct NvKmsPerOpenDev *pOpenDev)
{
    nvAssert(pOpenDev != NULL);
    return &pOpenDev->modesetPermissions;
}

/*!
 * Return the surface handle table of the given per-open device, or
 * NULL if pOpenDev is NULL.
 */
NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev(
    struct NvKmsPerOpenDev *pOpenDev)
{
    if (pOpenDev == NULL) {
        return NULL;
    }

    return &pOpenDev->surfaceHandles;
}

/*!
 * Const variant of nvGetSurfaceHandlesFromOpenDev(); returns NULL if
 * pOpenDev is NULL.
 */
const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst(
    const struct NvKmsPerOpenDev *pOpenDev)
{
    if (pOpenDev == NULL) {
        return NULL;
    }

    return &pOpenDev->surfaceHandles;
}

/* Number of outstanding nvKmsSuspend() calls (one per GPU). */
static int suspendCounter = 0;

/*
 * Suspend NVKMS.
 *
 * This function is called by RM once per GPU, but NVKMS just counts the number
 * of suspend calls so that it can deallocate the core channels on the first
 * call to suspend(), and reallocate them on the last call to resume().
 */
void nvKmsSuspend(NvU32 gpuId)
{
    /* Only the first suspend call tears down hardware state. */
    if (suspendCounter == 0) {
        NVDevEvoPtr pDevEvo;

        FOR_ALL_EVO_DEVS(pDevEvo) {
            nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Suspending");

            /*
             * Shut down all heads and skip console restore.
             *
             * This works around an RM bug where it fails to train DisplayPort
             * links during resume if the system was suspended while heads were
             * active.
             *
             * XXX TODO bug 1850734: In addition to fixing the above
             * RM bug, NVKMS should clear pDispEvo head and connector state
             * that becomes stale after suspend.  Shutting the heads down here
             * clears the relevant state explicitly.
             */
            nvShutDownApiHeads(pDevEvo, pDevEvo->pNvKmsOpenDev,
                               NULL /* pTestFunc, shut down all heads */,
                               NULL /* pData */,
                               TRUE /* doRasterLock */);
            pDevEvo->skipConsoleRestore = TRUE;

            DisableAndCleanVblankSyncObjectForAllOpens(pDevEvo);

            FreeSurfaceCtxDmasForAllOpens(pDevEvo);

            nvSuspendDevEvo(pDevEvo);
        }
    }

    suspendCounter++;
}

/*!
 * Resume NVKMS.  Counterpart of nvKmsSuspend(): called once per GPU,
 * but only the call that brings suspendCounter back to zero restores
 * hardware state.
 */
void nvKmsResume(NvU32 gpuId)
{
    suspendCounter--;

    if (suspendCounter == 0) {
        NVDevEvoPtr pDevEvo;

        FOR_ALL_EVO_DEVS(pDevEvo) {
            nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Resuming");

            if (nvResumeDevEvo(pDevEvo)) {
                nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
                EnableAndSetupVblankSyncObjectForAllOpens(pDevEvo);
                AllocSurfaceCtxDmasForAllOpens(pDevEvo);
            }

            if (pDevEvo->modesetOwner == NULL) {
                // Hardware state was lost, so we need to force a console
                // restore.
                pDevEvo->skipConsoleRestore = FALSE;
                RestoreConsole(pDevEvo);
            }
        }
    }
}

/*!
 * Drain one deferred request fifo: process every request between the
 * client-written 'get' and 'put' offsets, then publish the new 'get'.
 */
static void ServiceOneDeferredRequestFifo(
    NVDevEvoPtr pDevEvo,
    NVDeferredRequestFifoRec *pDeferredRequestFifo)
{
    struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo;
    NvU32 get, put;

    nvAssert(fifo != NULL);

    get = fifo->get;
    put = fifo->put;

    /* Empty fifo: nothing to do. */
    if (put == get) {
        return;
    }

    /*
     * The fifo is in client-writable memory; ignore out-of-range
     * offsets rather than indexing out of bounds.
     */
    if ((get >= ARRAY_LEN(fifo->request)) ||
        (put >= ARRAY_LEN(fifo->request))) {
        return;
    }

    while (get != put) {

        const NvU32 request = fifo->request[get];
        const NvU32 opcode =
            DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request);

        switch (opcode) {

        case NVKMS_DEFERRED_REQUEST_OPCODE_NOP:
            break;

        case NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY:
            nvHsSwapGroupReady(
                pDevEvo,
                pDeferredRequestFifo,
                request);
            break;

        default:
            nvAssert(!"Invalid NVKMS deferred request opcode");
            break;
        }

        /* Advance 'get', wrapping at the end of the request array. */
        get = (get + 1) % ARRAY_LEN(fifo->request);
    }

    fifo->get = put;
}

/*!
 * Respond to a non-stall interrupt: service every deferred request
 * fifo of every client that has this device open, then process any
 * pending headsurface viewport flips.  (dataU32 is unused.)
 */
void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32)
{
    NVDevEvoPtr pDevEvo = dataPtr;
    struct NvKmsPerOpen *pOpen;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {

        struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);
        NVDeferredRequestFifoRec *pDeferredRequestFifo;
        NvKmsGenericHandle handle;

        if (pOpenDev == NULL) {
            continue;
        }

        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(
            &pOpenDev->deferredRequestFifoHandles,
            pDeferredRequestFifo,
            handle) {

            ServiceOneDeferredRequestFifo(pDevEvo, pDeferredRequestFifo);
        }
    }

    nvHsProcessPendingViewportFlips(pDevEvo);
}

/*!
 * Query the backlight brightness of the given display via the RM
 * display-common control.  drv_priv is the NVDispEvoRec.
 *
 * \return  TRUE and *brightness set on success; FALSE otherwise.
 */
NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness)
{
    NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 };
    NV_STATUS status = NV_ERR_INVALID_STATE;
    NVDispEvoRec *pDispEvo = drv_priv;
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;

    params.subDeviceInstance = pDispEvo->displayOwner;
    params.displayId = display_id;

    status = nvRmApiControl(nvEvoGlobal.clientHandle,
                            pDevEvo->displayCommonHandle,
                            NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
                            &params, sizeof(params));

    if (status == NV_OK) {
        *brightness = params.brightness;
    }

    return status == NV_OK;
}

/*!
 * Set the backlight brightness of the given display via the RM
 * display-common control.  drv_priv is the NVDispEvoRec.
 *
 * \return  TRUE on success; FALSE otherwise.
 */
NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness)
{
    NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 };
    NV_STATUS status = NV_ERR_INVALID_STATE;
    NVDispEvoRec *pDispEvo = drv_priv;
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;

    params.subDeviceInstance =
        pDispEvo->displayOwner;
    params.displayId = display_id;
    params.brightness = brightness;

    status = nvRmApiControl(nvEvoGlobal.clientHandle,
                            pDevEvo->displayCommonHandle,
                            NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
                            &params, sizeof(params));

    return status == NV_OK;
}

/*!
 * Return TRUE if the per-open device has at least sub-owner level
 * permission: it is privileged, the modeset owner, or the modeset
 * sub-owner of its device.
 */
NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *pOpenDev)
{
    return pOpenDev->isPrivileged ||
           pOpenDev->pDevEvo->modesetOwner == pOpenDev ||
           pOpenDev->pDevEvo->modesetSubOwner == pOpenDev;
}