1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 */

#include "nvkms.h"
#include "nvkms-private.h"
#include "nvkms-api.h"

#include "nvkms-types.h"
#include "nvkms-utils.h"
#include "nvkms-console-restore.h"
#include "nvkms-dpy.h"
#include "nvkms-dma.h"
#include "nvkms-evo.h"
#include "nvkms-rm.h"
#include "nvkms-rmapi.h"
#include "nvkms-modepool.h"
#include "nvkms-modeset.h"
#include "nvkms-attributes.h"
#include "nvkms-dpy-override.h"
#include "nvkms-framelock.h"
#include "nvkms-surface.h"
#include "nvkms-3dvision.h"
#include "nvkms-ioctl.h"
#include "nvkms-headsurface.h"
#include "nvkms-headsurface-ioctl.h"
#include "nvkms-headsurface-swapgroup.h"
#include "nvkms-flip.h" /* nvFlipEvo */
#include "nvkms-vrr.h"

#include "dp/nvdp-connector.h"

#include "nvUnixVersion.h" /* NV_VERSION_STRING */
#include <class/cl0000.h> /* NV01_NULL_OBJECT/NV01_ROOT */

#include "nv_list.h"


/*! \file
 *
 * This source file implements the API of NVKMS, built around open,
 * close, and ioctl file operations.
 *
 * An NvKmsPerOpen is stored "per-open"; all API handles are specific
 * to a per-open instance.  The NvKmsPerOpen is allocated during each
 * nvKmsOpen() call, and freed during the corresponding nvKmsClose()
 * call.
 *
 * An NvKmsPerOpenDev stores the API handles for the device and all
 * the disps and connectors on the device.  It is allocated during
 * nvKmsIoctl(ALLOC_DEVICE), and freed during nvKmsIoctl(FREE_DEVICE).
 */


/*
 * When the NVKMS device file is opened, the per-open structure could
 * be used for one of several actions, denoted by its "type".  The
 * per-open type starts as Undefined.  The per-open's first use
 * defines its type.  Once the type transitions from Undefined to
 * anything, it can never transition to any other type.
 */
enum NvKmsPerOpenType {
    /*
     * The per-open is used for making ioctl calls to make requests of
     * NVKMS.
     */
    NvKmsPerOpenTypeIoctl,

    /*
     * The per-open is used for granting access to a NVKMS registered
     * surface.
     */
    NvKmsPerOpenTypeGrantSurface,

    /*
     * The per-open is used for granting permissions.
     */
    NvKmsPerOpenTypeGrantPermissions,

    /*
     * The per-open is used for granting access to a swap group.
     */
    NvKmsPerOpenTypeGrantSwapGroup,

    /*
     * The per-open is used to unicast a specific event.
     */
    NvKmsPerOpenTypeUnicastEvent,

    /*
     * The per-open is currently undefined (this is the initial
     * state).
     */
    NvKmsPerOpenTypeUndefined,
};

enum NvKmsUnicastEventType {
    /* Used by:
     *  NVKMS_IOCTL_JOIN_SWAP_GROUP */
    NvKmsUnicastEventTypeDeferredRequest,

    /* Used by:
     *  NVKMS_IOCTL_NOTIFY_VBLANK */
    NvKmsUnicastEventTypeVblankNotification,

    /* Undefined, this indicates the unicast fd is available for use. */
    NvKmsUnicastEventTypeUndefined,
};

/* One connector on a disp, paired with its per-open API handle. */
struct NvKmsPerOpenConnector {
    NVConnectorEvoPtr pConnectorEvo;
    NvKmsConnectorHandle nvKmsApiHandle;
};

/*
 * Per-open view of a framelock object.  Reference counted because
 * multiple disps can be assigned to the same framelock object; see
 * AllocPerOpenFrameLock()/FreePerOpenFrameLock().
 */
struct NvKmsPerOpenFrameLock {
    NVFrameLockEvoPtr pFrameLockEvo;
    int refCnt;
    NvKmsFrameLockHandle nvKmsApiHandle;
};

/* Per-open state for one disp: its connectors and per-head handle tables. */
struct NvKmsPerOpenDisp {
    NVDispEvoPtr pDispEvo;
    NvKmsDispHandle nvKmsApiHandle;
    NvKmsFrameLockHandle frameLockHandle;
    NVEvoApiHandlesRec connectorHandles;
    struct NvKmsPerOpenConnector connector[NVKMS_MAX_CONNECTORS_PER_DISP];
    NVEvoApiHandlesRec vblankSyncObjectHandles[NVKMS_MAX_HEADS_PER_DISP];
    NVEvoApiHandlesRec vblankCallbackHandles[NVKMS_MAX_HEADS_PER_DISP];
};

/*
 * Per-open state for one device: handle tables for its disps,
 * surfaces, deferred request FIFOs and swap groups, plus the flip and
 * modeset permissions granted to this open.
 */
struct NvKmsPerOpenDev {
    NVDevEvoPtr pDevEvo;
    NvKmsDeviceHandle nvKmsApiHandle;
    NVEvoApiHandlesRec dispHandles;
    NVEvoApiHandlesRec surfaceHandles;
    struct NvKmsFlipPermissions flipPermissions;
    struct NvKmsModesetPermissions modesetPermissions;
    struct NvKmsPerOpenDisp disp[NVKMS_MAX_SUBDEVICES];
    /* A privileged open may perform a modeset at any time. */
    NvBool isPrivileged;
    NVEvoApiHandlesRec deferredRequestFifoHandles;
    NVEvoApiHandlesRec swapGroupHandles;
};

/* One queued event, linked on NvKmsPerOpen::ioctl.eventList. */
struct NvKmsPerOpenEventListEntry {
    NVListRec eventListEntry;
    struct NvKmsEvent event;
};

struct NvKmsPerOpen {
    nvkms_per_open_handle_t *pOpenKernel;
    NvU32 pid;
    enum NvKmsClientType clientType;
    NVListRec perOpenListEntry;
    NVListRec perOpenIoctlListEntry;
    enum NvKmsPerOpenType type;

    /* Only the member matching 'type' is valid. */
    union {
        struct {
            NVListRec eventList;
            NvU32 eventInterestMask;
            NVEvoApiHandlesRec devHandles;
            NVEvoApiHandlesRec frameLockHandles;
        } ioctl;

        struct {
            NVSurfaceEvoPtr pSurfaceEvo;
        } grantSurface;

        struct {
            NVDevEvoPtr pDevEvo;
            NVSwapGroupPtr pSwapGroup;
        } grantSwapGroup;

        struct {
            NVDevEvoPtr pDevEvo;
            struct NvKmsPermissions permissions;
        } grantPermissions;

        struct {
            /*
             * A unicast event NvKmsPerOpen is assigned to an object, so that
             * that object can generate events on the unicast event.  Store a
             * pointer to that object, so that we can clear the pointer when the
             * unicast event NvKmsPerOpen is closed.
             */
            enum NvKmsUnicastEventType type;
            union {
                struct {
                    NVDeferredRequestFifoPtr pDeferredRequestFifo;
                } deferred;

                struct {
                    NvKmsGenericHandle hCallback;
                    struct NvKmsPerOpenDisp *pOpenDisp;
                    NvU32 apiHead;
                } vblankNotification;
            } e;
        } unicastEvent;
    };
};

static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo);
static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo);

/* All NvKmsPerOpens, and the subset whose type is Ioctl. */
static NVListRec perOpenList = NV_LIST_INIT(&perOpenList);
static NVListRec perOpenIoctlList = NV_LIST_INIT(&perOpenIoctlList);

/*!
 * Check if there is an NvKmsPerOpenDev on this NvKmsPerOpen that has
 * the specified deviceId.
230 */ 231 static NvBool DeviceIdAlreadyPresent(struct NvKmsPerOpen *pOpen, NvU32 deviceId) 232 { 233 struct NvKmsPerOpenDev *pOpenDev; 234 NvKmsGenericHandle dev; 235 236 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 237 238 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 239 pOpenDev, dev) { 240 if (pOpenDev->pDevEvo->usesTegraDevice && 241 (deviceId == NVKMS_DEVICE_ID_TEGRA)) { 242 return TRUE; 243 } else if (pOpenDev->pDevEvo->deviceId == deviceId) { 244 return TRUE; 245 } 246 } 247 248 return FALSE; 249 } 250 251 252 /*! 253 * Get the NvKmsPerOpenDev described by NvKmsPerOpen + deviceHandle. 254 */ 255 static struct NvKmsPerOpenDev *GetPerOpenDev( 256 const struct NvKmsPerOpen *pOpen, 257 const NvKmsDeviceHandle deviceHandle) 258 { 259 if (pOpen == NULL) { 260 return NULL; 261 } 262 263 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 264 265 return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.devHandles, deviceHandle); 266 } 267 268 269 /*! 270 * Get the NvKmsPerOpenDev and NvKmsPerOpenDisp described by 271 * NvKmsPerOpen + deviceHandle + dispHandle. 272 */ 273 static NvBool GetPerOpenDevAndDisp( 274 const struct NvKmsPerOpen *pOpen, 275 const NvKmsDeviceHandle deviceHandle, 276 const NvKmsDispHandle dispHandle, 277 struct NvKmsPerOpenDev **ppOpenDev, 278 struct NvKmsPerOpenDisp **ppOpenDisp) 279 { 280 struct NvKmsPerOpenDev *pOpenDev; 281 struct NvKmsPerOpenDisp *pOpenDisp; 282 283 pOpenDev = GetPerOpenDev(pOpen, deviceHandle); 284 285 if (pOpenDev == NULL) { 286 return FALSE; 287 } 288 289 pOpenDisp = nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, 290 dispHandle); 291 292 if (pOpenDisp == NULL) { 293 return FALSE; 294 } 295 296 *ppOpenDev = pOpenDev; 297 *ppOpenDisp = pOpenDisp; 298 299 return TRUE; 300 } 301 302 303 /*! 304 * Get the NvKmsPerOpenDisp described by NvKmsPerOpen + deviceHandle + 305 * dispHandle. 
306 */ 307 static struct NvKmsPerOpenDisp *GetPerOpenDisp( 308 const struct NvKmsPerOpen *pOpen, 309 const NvKmsDeviceHandle deviceHandle, 310 const NvKmsDispHandle dispHandle) 311 { 312 struct NvKmsPerOpenDev *pOpenDev; 313 314 pOpenDev = GetPerOpenDev(pOpen, deviceHandle); 315 316 if (pOpenDev == NULL) { 317 return NULL; 318 } 319 320 return nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, dispHandle); 321 } 322 323 324 /*! 325 * Get the NvKmsPerOpenConnector described by NvKmsPerOpen + 326 * deviceHandle + dispHandle + connectorHandle. 327 */ 328 static struct NvKmsPerOpenConnector *GetPerOpenConnector( 329 const struct NvKmsPerOpen *pOpen, 330 const NvKmsDeviceHandle deviceHandle, 331 const NvKmsDispHandle dispHandle, 332 const NvKmsConnectorHandle connectorHandle) 333 { 334 struct NvKmsPerOpenDisp *pOpenDisp; 335 336 pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); 337 338 if (pOpenDisp == NULL) { 339 return NULL; 340 } 341 342 return nvEvoGetPointerFromApiHandle(&pOpenDisp->connectorHandles, 343 connectorHandle); 344 } 345 346 347 /*! 348 * Get the NVDpyEvoRec described by NvKmsPerOpen + deviceHandle + 349 * dispHandle + dpyId. 350 */ 351 static NVDpyEvoRec *GetPerOpenDpy( 352 const struct NvKmsPerOpen *pOpen, 353 const NvKmsDeviceHandle deviceHandle, 354 const NvKmsDispHandle dispHandle, 355 const NVDpyId dpyId) 356 { 357 struct NvKmsPerOpenDisp *pOpenDisp; 358 359 pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); 360 361 if (pOpenDisp == NULL) { 362 return NULL; 363 } 364 365 return nvGetDpyEvoFromDispEvo(pOpenDisp->pDispEvo, dpyId); 366 } 367 368 369 /*! 370 * Get the NvKmsPerOpenFrameLock described by pOpen + frameLockHandle. 
371 */ 372 static struct NvKmsPerOpenFrameLock *GetPerOpenFrameLock( 373 const struct NvKmsPerOpen *pOpen, 374 NvKmsFrameLockHandle frameLockHandle) 375 { 376 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 377 378 return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, 379 frameLockHandle); 380 } 381 382 383 /*! 384 * Free the NvKmsPerOpenFrameLock associated with this NvKmsPerOpenDisp. 385 * 386 * Multiple disps can be assigned to the same framelock object, so 387 * NvKmsPerOpenFrameLock is reference counted: the object is freed 388 * once all NvKmsPerOpenDisps remove their reference to it. 389 * 390 * \param[in,out] pOpen The per-open data, to which the 391 * NvKmsPerOpenFrameLock is assigned. 392 * \param[in,out] pOpenDisp The NvKmsPerOpenDisp whose corresponding 393 * NvKmsPerOpenFrameLock should be freed. 394 */ 395 static void FreePerOpenFrameLock(struct NvKmsPerOpen *pOpen, 396 struct NvKmsPerOpenDisp *pOpenDisp) 397 { 398 struct NvKmsPerOpenFrameLock *pOpenFrameLock; 399 400 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 401 402 pOpenFrameLock = 403 nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, 404 pOpenDisp->frameLockHandle); 405 if (pOpenFrameLock == NULL) { 406 return; 407 } 408 409 pOpenDisp->frameLockHandle = 0; 410 411 pOpenFrameLock->refCnt--; 412 413 if (pOpenFrameLock->refCnt != 0) { 414 return; 415 } 416 417 nvEvoDestroyApiHandle(&pOpen->ioctl.frameLockHandles, 418 pOpenFrameLock->nvKmsApiHandle); 419 nvFree(pOpenFrameLock); 420 } 421 422 423 /*! 424 * Allocate and initialize an NvKmsPerOpenFrameLock. 425 * 426 * If the disp described by the specified NvKmsPerOpenDisp has a 427 * framelock object, allocate an NvKmsPerOpenFrameLock for it. 428 * 429 * Multiple disps can be assigned to the same framelock object, so 430 * NvKmsPerOpenFrameLock is reference counted: we first look to see if 431 * an NvKmsPerOpenFrameLock for this disp's framelock object already 432 * exists. If so, we increment its reference count. 
 * Otherwise, we
 * allocate a new NvKmsPerOpenFrameLock.
 *
 * \param[in,out]  pOpen      The per-open data, to which the
 *                            new NvKmsPerOpenFrameLock should be assigned.
 * \param[in,out]  pOpenDisp  The NvKmsPerOpenDisp whose corresponding
 *                            NvKmsPerOpenFrameLock should be allocated.
 */
static NvBool AllocPerOpenFrameLock(
    struct NvKmsPerOpen *pOpen,
    struct NvKmsPerOpenDisp *pOpenDisp)
{
    struct NvKmsPerOpenFrameLock *pOpenFrameLock;
    NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo;
    NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
    NvKmsGenericHandle handle;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    /* No framelock object on this disp: trivially succeed. */
    if (pFrameLockEvo == NULL) {
        return TRUE;
    }

    /*
     * If an NvKmsPerOpenFrameLock already exists for this framelock
     * object, just take another reference on it.
     */
    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles,
                                        pOpenFrameLock, handle) {
        if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) {
            goto done;
        }
    }

    pOpenFrameLock = nvCalloc(1, sizeof(*pOpenFrameLock));

    if (pOpenFrameLock == NULL) {
        return FALSE;
    }

    pOpenFrameLock->pFrameLockEvo = pFrameLockEvo;
    pOpenFrameLock->nvKmsApiHandle =
        nvEvoCreateApiHandle(&pOpen->ioctl.frameLockHandles, pOpenFrameLock);

    if (pOpenFrameLock->nvKmsApiHandle == 0) {
        /* Could not register the handle; undo the allocation. */
        nvFree(pOpenFrameLock);
        return FALSE;
    }

done:
    /* Common path for both the found and newly-allocated object. */
    pOpenDisp->frameLockHandle = pOpenFrameLock->nvKmsApiHandle;
    pOpenFrameLock->refCnt++;
    return TRUE;
}


/*!
 * Get the NvKmsConnectorHandle that corresponds to the given
 * NVConnectorEvoRec on the NvKmsPerOpen + deviceHandle + dispHandle.
487 */ 488 static NvKmsConnectorHandle ConnectorEvoToConnectorHandle( 489 const struct NvKmsPerOpen *pOpen, 490 const NvKmsDeviceHandle deviceHandle, 491 const NvKmsDispHandle dispHandle, 492 const NVConnectorEvoRec *pConnectorEvo) 493 { 494 struct NvKmsPerOpenDisp *pOpenDisp; 495 struct NvKmsPerOpenConnector *pOpenConnector; 496 NvKmsGenericHandle connector; 497 498 pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); 499 500 if (pOpenDisp == NULL) { 501 return 0; 502 } 503 504 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles, 505 pOpenConnector, connector) { 506 if (pOpenConnector->pConnectorEvo == pConnectorEvo) { 507 return pOpenConnector->nvKmsApiHandle; 508 } 509 } 510 511 return 0; 512 } 513 514 515 /*! 516 * Get the NvKmsDeviceHandle and NvKmsDispHandle that corresponds to 517 * the given NVDispEvoRec on the NvKmsPerOpen. 518 */ 519 static NvBool DispEvoToDevAndDispHandles( 520 const struct NvKmsPerOpen *pOpen, 521 const NVDispEvoRec *pDispEvo, 522 NvKmsDeviceHandle *pDeviceHandle, 523 NvKmsDispHandle *pDispHandle) 524 { 525 NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; 526 struct NvKmsPerOpenDev *pOpenDev; 527 NvKmsGenericHandle dev; 528 529 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 530 531 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 532 pOpenDev, dev) { 533 534 struct NvKmsPerOpenDisp *pOpenDisp; 535 NvKmsGenericHandle disp; 536 537 if (pOpenDev->pDevEvo != pDevEvo) { 538 continue; 539 } 540 541 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, 542 pOpenDisp, disp) { 543 if (pOpenDisp->pDispEvo != pDispEvo) { 544 continue; 545 } 546 547 *pDeviceHandle = pOpenDev->nvKmsApiHandle; 548 *pDispHandle = pOpenDisp->nvKmsApiHandle; 549 550 return TRUE; 551 } 552 } 553 554 return FALSE; 555 } 556 557 558 /*! 559 * Get the NvKmsPerOpenDev that corresponds to the given NVDevEvoRec 560 * on the NvKmsPerOpen. 
561 */ 562 static struct NvKmsPerOpenDev *DevEvoToOpenDev( 563 const struct NvKmsPerOpen *pOpen, 564 const NVDevEvoRec *pDevEvo) 565 { 566 struct NvKmsPerOpenDev *pOpenDev; 567 NvKmsGenericHandle dev; 568 569 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 570 571 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 572 pOpenDev, dev) { 573 if (pOpenDev->pDevEvo == pDevEvo) { 574 return pOpenDev; 575 } 576 } 577 578 return NULL; 579 } 580 581 582 /*! 583 * Get the NvKmsFrameLockHandle that corresponds to the given 584 * NVFrameLockEvoRec on the NvKmsPerOpen. 585 */ 586 static NvBool FrameLockEvoToFrameLockHandle( 587 const struct NvKmsPerOpen *pOpen, 588 const NVFrameLockEvoRec *pFrameLockEvo, 589 NvKmsFrameLockHandle *pFrameLockHandle) 590 { 591 struct NvKmsPerOpenFrameLock *pOpenFrameLock; 592 NvKmsGenericHandle handle; 593 594 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 595 596 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, 597 pOpenFrameLock, handle) { 598 599 if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { 600 *pFrameLockHandle = pOpenFrameLock->nvKmsApiHandle; 601 return TRUE; 602 } 603 } 604 605 return FALSE; 606 } 607 608 609 /*! 610 * Clear the specified NvKmsPerOpenConnector. 611 * 612 * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to which the 613 * NvKmsPerOpenConnector is assigned. 614 * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to be cleared. 615 */ 616 static void ClearPerOpenConnector( 617 struct NvKmsPerOpenDisp *pOpenDisp, 618 struct NvKmsPerOpenConnector *pOpenConnector) 619 { 620 nvEvoDestroyApiHandle(&pOpenDisp->connectorHandles, 621 pOpenConnector->nvKmsApiHandle); 622 nvkms_memset(pOpenConnector, 0, sizeof(*pOpenConnector)); 623 } 624 625 626 /*! 627 * Initialize an NvKmsPerOpenConnector. 628 * 629 * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to which the 630 * NvKmsPerOpenConnector is assigned. 631 * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to initialize. 
 * \param[in]     pConnectorEvo  The connector that the NvKmsPerOpenConnector
 *                               corresponds to.
 *
 * \return  If the NvKmsPerOpenConnector is successfully initialized,
 *          return TRUE.  Otherwise, return FALSE.
 */
static NvBool InitPerOpenConnector(
    struct NvKmsPerOpenDisp *pOpenDisp,
    struct NvKmsPerOpenConnector *pOpenConnector,
    NVConnectorEvoPtr pConnectorEvo)
{
    pOpenConnector->nvKmsApiHandle =
        nvEvoCreateApiHandle(&pOpenDisp->connectorHandles, pOpenConnector);

    if (pOpenConnector->nvKmsApiHandle == 0) {
        goto fail;
    }

    pOpenConnector->pConnectorEvo = pConnectorEvo;

    return TRUE;

fail:
    /* ClearPerOpenConnector() tolerates the partially-initialized entry. */
    ClearPerOpenConnector(pOpenDisp, pOpenConnector);
    return FALSE;
}

/*!
 * Clear the specified NvKmsPerOpenDisp.
 *
 * Tears down, in order: the framelock reference, all connectors and
 * their handle table, the per-head vblank sync object and vblank
 * callback handle tables (removing any unicast events still attached
 * to callbacks), and finally the disp's own handle on pOpenDev.
 *
 * \param[in,out]  pOpen      The per-open data owning pOpenDev.
 * \param[in,out]  pOpenDev   The NvKmsPerOpenDev to which the
 *                            NvKmsPerOpenDisp is assigned.
 * \param[in,out]  pOpenDisp  The NvKmsPerOpenDisp to be cleared.
 */
static void ClearPerOpenDisp(
    struct NvKmsPerOpen *pOpen,
    struct NvKmsPerOpenDev *pOpenDev,
    struct NvKmsPerOpenDisp *pOpenDisp)
{
    struct NvKmsPerOpenConnector *pOpenConnector;
    NvKmsGenericHandle connector;

    NVVBlankCallbackPtr pCallbackData;
    NvKmsGenericHandle callback;

    FreePerOpenFrameLock(pOpen, pOpenDisp);

    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles,
                                        pOpenConnector, connector) {
        ClearPerOpenConnector(pOpenDisp, pOpenConnector);
    }

    /* Destroy the API handle structures. */
    nvEvoDestroyApiHandles(&pOpenDisp->connectorHandles);

    for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
        nvEvoDestroyApiHandles(&pOpenDisp->vblankSyncObjectHandles[i]);

        /* Detach any unicast events still bound to vblank callbacks. */
        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankCallbackHandles[i],
                                            pCallbackData, callback) {
            nvRemoveUnicastEvent(pCallbackData->pUserData);
        }
        nvEvoDestroyApiHandles(&pOpenDisp->vblankCallbackHandles[i]);
    }

    nvEvoDestroyApiHandle(&pOpenDev->dispHandles, pOpenDisp->nvKmsApiHandle);

    nvkms_memset(pOpenDisp, 0, sizeof(*pOpenDisp));
}


/*!
 * Initialize an NvKmsPerOpenDisp.
 *
 * \param[in,out]  pOpenDev   The NvKmsPerOpenDev to which the NvKmsPerOpenDisp
 *                            is assigned.
 * \param[in,out]  pOpenDisp  The NvKmsPerOpenDisp to initialize.
 * \param[in]      pDispEvo   The disp that the NvKmsPerOpenDisp corresponds to.
 *
 * \return  If the NvKmsPerOpenDisp is successfully initialized, return TRUE.
 *          Otherwise, return FALSE.
 */
static NvBool InitPerOpenDisp(
    struct NvKmsPerOpen *pOpen,
    struct NvKmsPerOpenDev *pOpenDev,
    struct NvKmsPerOpenDisp *pOpenDisp,
    NVDispEvoPtr pDispEvo)
{
    NVConnectorEvoPtr pConnectorEvo;
    NvU32 connector;

    pOpenDisp->nvKmsApiHandle =
        nvEvoCreateApiHandle(&pOpenDev->dispHandles, pOpenDisp);

    if (pOpenDisp->nvKmsApiHandle == 0) {
        goto fail;
    }

    pOpenDisp->pDispEvo = pDispEvo;

    if (nvListCount(&pDispEvo->connectorList) >=
        ARRAY_LEN(pOpenDisp->connector)) {
        nvAssert(!"More connectors on this disp than NVKMS can handle.");
        goto fail;
    }

    if (!nvEvoInitApiHandles(&pOpenDisp->connectorHandles,
                             ARRAY_LEN(pOpenDisp->connector))) {
        goto fail;
    }

    connector = 0;
    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
        if (!InitPerOpenConnector(pOpenDisp, &pOpenDisp->connector[connector],
                                  pConnectorEvo)) {
            goto fail;
        }
        connector++;
    }

    /* Initialize the vblankSyncObjectHandles for each head. */
    for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
        if (!nvEvoInitApiHandles(&pOpenDisp->vblankSyncObjectHandles[i],
                                 NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) {
            goto fail;
        }
    }

    /* Initialize the vblankCallbackHandles for each head.
     *
     * The limit of VBLANK_SYNC_OBJECTS_PER_HEAD doesn't really apply here, but
     * we need something. */
    for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
        if (!nvEvoInitApiHandles(&pOpenDisp->vblankCallbackHandles[i],
                                 NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) {
            goto fail;
        }
    }

    if (!AllocPerOpenFrameLock(pOpen, pOpenDisp)) {
        goto fail;
    }

    return TRUE;

fail:
    /* ClearPerOpenDisp() unwinds whatever was initialized above. */
    ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp);
    return FALSE;
}

/*!
 * Free any SwapGroups tracked by this pOpenDev.
 *
 * The modeset owner frees the swap group outright; any other client
 * only drops its reference.
 */
static void FreeSwapGroups(struct NvKmsPerOpenDev *pOpenDev)
{
    NVSwapGroupRec *pSwapGroup;
    NvKmsSwapGroupHandle handle;
    NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;

    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->swapGroupHandles,
                                        pSwapGroup,
                                        handle) {
        nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle);

        if (pDevEvo->modesetOwner == pOpenDev) {
            nvHsFreeSwapGroup(pDevEvo, pSwapGroup);
        } else {
            nvHsDecrementSwapGroupRefCnt(pSwapGroup);
        }
    }
}

/*!
 * Check that the NvKmsPermissions make sense.
806 */ 807 static NvBool ValidateNvKmsPermissions( 808 const NVDevEvoRec *pDevEvo, 809 const struct NvKmsPermissions *pPermissions) 810 { 811 if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { 812 NvU32 d, h; 813 814 for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { 815 for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { 816 817 NvU8 layerMask = pPermissions->flip.disp[d].head[h].layerMask; 818 819 if (layerMask == 0) { 820 continue; 821 } 822 823 if (nvHasBitAboveMax(layerMask, pDevEvo->apiHead[h].numLayers)) { 824 return FALSE; 825 } 826 827 /* 828 * If the above blocks didn't 'continue', then there 829 * are permissions specified for this disp+head. Is 830 * the specified disp+head in range for the current 831 * configuration? 832 */ 833 if (d >= pDevEvo->nDispEvo) { 834 return FALSE; 835 } 836 837 if (h >= pDevEvo->numApiHeads) { 838 return FALSE; 839 } 840 } 841 } 842 } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { 843 NvU32 d, h; 844 845 for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { 846 for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { 847 848 NVDpyIdList dpyIdList = 849 pPermissions->modeset.disp[d].head[h].dpyIdList; 850 851 if (nvDpyIdListIsEmpty(dpyIdList)) { 852 continue; 853 } 854 855 /* 856 * If the above blocks didn't 'continue', then there 857 * are permissions specified for this disp+head. Is 858 * the specified disp+head in range for the current 859 * configuration? 860 */ 861 if (d >= pDevEvo->nDispEvo) { 862 return FALSE; 863 } 864 865 if (h >= pDevEvo->numApiHeads) { 866 return FALSE; 867 } 868 } 869 } 870 } else { 871 return FALSE; 872 } 873 874 return TRUE; 875 } 876 877 /*! 878 * Assign pPermissions with the maximum permissions possible for 879 * the pDevEvo. 
880 */ 881 static void AssignFullNvKmsFlipPermissions( 882 const NVDevEvoRec *pDevEvo, 883 struct NvKmsFlipPermissions *pPermissions) 884 { 885 NvU32 dispIndex, apiHead; 886 887 nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); 888 889 for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { 890 for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { 891 pPermissions->disp[dispIndex].head[apiHead].layerMask = 892 NVBIT(pDevEvo->apiHead[apiHead].numLayers) - 1; 893 } 894 } 895 } 896 897 static void AssignFullNvKmsModesetPermissions( 898 const NVDevEvoRec *pDevEvo, 899 struct NvKmsModesetPermissions *pPermissions) 900 { 901 NvU32 dispIndex, apiHead; 902 903 nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); 904 905 for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { 906 for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { 907 pPermissions->disp[dispIndex].head[apiHead].dpyIdList = 908 nvAllDpyIdList(); 909 } 910 } 911 } 912 913 /*! 914 * Set the modeset owner to pOpenDev 915 * 916 * \param pOpenDev The per-open device structure for the new modeset owner. 917 * \return FALSE if there was already a modeset owner. TRUE otherwise. 918 */ 919 static NvBool GrabModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) 920 { 921 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; 922 923 if (pDevEvo->modesetOwner == pOpenDev) { 924 return TRUE; 925 } 926 927 if (pDevEvo->modesetOwner != NULL) { 928 return FALSE; 929 } 930 931 /* 932 * If claiming modeset ownership, undo any SST forcing imposed by 933 * console restore. 
934 */ 935 if (pOpenDev != pDevEvo->pNvKmsOpenDev) { 936 nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); 937 } 938 939 pDevEvo->modesetOwner = pOpenDev; 940 pDevEvo->modesetOwnerChanged = TRUE; 941 942 AssignFullNvKmsFlipPermissions(pDevEvo, &pOpenDev->flipPermissions); 943 AssignFullNvKmsModesetPermissions(pDevEvo, &pOpenDev->modesetPermissions); 944 945 return TRUE; 946 } 947 948 /* 949 * If not NULL, remove pRemoveFlip from pFlip. Returns true if there are still 950 * some remaining permissions. 951 */ 952 static NvBool RemoveFlipPermissions(struct NvKmsFlipPermissions *pFlip, 953 const struct NvKmsFlipPermissions *pRemoveFlip) 954 { 955 NvU32 d, h, dLen, hLen; 956 NvBool remainingPermissions = FALSE; 957 958 dLen = ARRAY_LEN(pFlip->disp); 959 for (d = 0; d < dLen; d++) { 960 hLen = ARRAY_LEN(pFlip->disp[d].head); 961 for (h = 0; h < hLen; h++) { 962 963 if (pRemoveFlip) { 964 pFlip->disp[d].head[h].layerMask &= 965 ~pRemoveFlip->disp[d].head[h].layerMask; 966 } 967 968 remainingPermissions |= (pFlip->disp[d].head[h].layerMask != 0); 969 } 970 } 971 972 return remainingPermissions; 973 } 974 975 /* 976 * If not NULL, remove pRemoveModeset from pModeset. Returns true if there are 977 * still some remaining permissions. 978 */ 979 static NvBool RemoveModesetPermissions(struct NvKmsModesetPermissions *pModeset, 980 const struct NvKmsModesetPermissions *pRemoveModeset) 981 { 982 NvU32 d, h, dLen, hLen; 983 NvBool remainingPermissions = FALSE; 984 985 dLen = ARRAY_LEN(pModeset->disp); 986 for (d = 0; d < dLen; d++) { 987 hLen = ARRAY_LEN(pModeset->disp[d].head); 988 for (h = 0; h < hLen; h++) { 989 990 if (pRemoveModeset) { 991 pModeset->disp[d].head[h].dpyIdList = nvDpyIdListMinusDpyIdList( 992 pModeset->disp[d].head[h].dpyIdList, 993 pRemoveModeset->disp[d].head[h].dpyIdList); 994 } 995 996 remainingPermissions |= 997 !nvDpyIdListIsEmpty(pModeset->disp[d].head[h].dpyIdList); 998 } 999 } 1000 1001 return remainingPermissions; 1002 } 1003 1004 /*! 
 * Clear permissions on the specified device for all NvKmsPerOpens.
 *
 * For NvKmsPerOpen::type==Ioctl, clear the permissions, except for the
 * specified pOpenDevExclude.
 *
 * For NvKmsPerOpen::type==GrantPermissions, clear
 * NvKmsPerOpen::grantPermissions and reset NvKmsPerOpen::type to
 * Undefined.
 */
static void RevokePermissionsInternal(
    const NvU32 typeBitmask,
    const NVDevEvoRec *pDevEvo,
    const struct NvKmsPerOpenDev *pOpenDevExclude)
{
    struct NvKmsPerOpen *pOpen;

    /* Walk every open of the device file, regardless of type. */
    nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) {

        if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) &&
            (pOpen->grantPermissions.pDevEvo == pDevEvo) &&
            (typeBitmask & NVBIT(pOpen->grantPermissions.permissions.type))) {
            /* Cancel a pending grant and return the fd to Undefined. */
            nvkms_memset(&pOpen->grantPermissions, 0,
                         sizeof(pOpen->grantPermissions));
            pOpen->type = NvKmsPerOpenTypeUndefined;
        }

        if (pOpen->type == NvKmsPerOpenTypeIoctl) {

            struct NvKmsPerOpenDev *pOpenDev =
                DevEvoToOpenDev(pOpen, pDevEvo);

            if (pOpenDev == NULL) {
                continue;
            }

            /* Privileged opens keep their permissions. */
            if (pOpenDev == pOpenDevExclude || pOpenDev->isPrivileged) {
                continue;
            }

            if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING)) {
                nvkms_memset(&pOpenDev->flipPermissions, 0,
                             sizeof(pOpenDev->flipPermissions));
            }

            if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET)) {
                nvkms_memset(&pOpenDev->modesetPermissions, 0,
                             sizeof(pOpenDev->modesetPermissions));
            }
        }
    }
}

/*!
 * Restore the framebuffer console, falling back to RM if needed.
 */
static void RestoreConsole(NVDevEvoPtr pDevEvo)
{
    // Try to issue a modeset and flip to the framebuffer console surface.
    if (!nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */)) {
        // If that didn't work, free the core channel to trigger RM's console
        // restore code.
        FreeSurfaceCtxDmasForAllOpens(pDevEvo);
        nvFreeCoreChannelEvo(pDevEvo);

        // Reallocate the core channel right after freeing it. This makes sure
        // that it's allocated and ready right away if another NVKMS client is
        // started.
        if (nvAllocCoreChannelEvo(pDevEvo)) {
            nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
            AllocSurfaceCtxDmasForAllOpens(pDevEvo);
        }
    }
}

/*!
 * Release modeset ownership previously set by GrabModesetOwnership.
 *
 * \param  pOpenDev  The per-open device structure relinquishing modeset
 *                   ownership.
 * \return FALSE if pOpenDev is not the modeset owner, TRUE otherwise.
 */
static NvBool ReleaseModesetOwnership(struct NvKmsPerOpenDev *pOpenDev)
{
    NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;

    if (pDevEvo->modesetOwner != pOpenDev) {
        // Only the current owner can release ownership.
        return FALSE;
    }

    FreeSwapGroups(pOpenDev);

    pDevEvo->modesetOwner = NULL;
    pDevEvo->modesetOwnerChanged = TRUE;
    pDevEvo->handleConsoleHotplugs = TRUE;

    /* Put the console back, then revoke all granted permissions. */
    RestoreConsole(pDevEvo);
    RevokePermissionsInternal(NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) |
                              NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET),
                              pDevEvo, NULL /* pOpenDevExclude */);
    return TRUE;
}

/*!
 * Free the specified NvKmsPerOpenDev.
 *
 * \param[in,out]  pOpen     The per-open data, to which the
 *                           NvKmsPerOpenDev is assigned.
 * \param[in,out]  pOpenDev  The NvKmsPerOpenDev to free.
 */
void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen,
                      struct NvKmsPerOpenDev *pOpenDev)
{
    struct NvKmsPerOpenDisp *pOpenDisp;
    NvKmsGenericHandle disp;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    /* Tolerate NULL so callers can pass a failed allocation. */
    if (pOpenDev == NULL) {
        return;
    }

    /* Surfaces first, then each disp's state, then the disp table itself. */
    nvEvoDestroyApiHandles(&pOpenDev->surfaceHandles);

    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles,
                                        pOpenDisp, disp) {
        ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp);
    }

    nvEvoDestroyApiHandles(&pOpenDev->dispHandles);

    /* Unregister this device from the per-open's device handle table. */
    nvEvoDestroyApiHandle(&pOpen->ioctl.devHandles, pOpenDev->nvKmsApiHandle);

    nvEvoDestroyApiHandles(&pOpenDev->deferredRequestFifoHandles);

    nvEvoDestroyApiHandles(&pOpenDev->swapGroupHandles);

    nvFree(pOpenDev);
}


/*!
 * Allocate and initialize an NvKmsPerOpenDev.
 *
 * \param[in,out]  pOpen     The per-open data, to which the
 *                           new NvKmsPerOpenDev should be assigned.
 * \param[in]  pDevEvo       The device to which the new NvKmsPerOpenDev
 *                           corresponds.
 * \param[in]  isPrivileged  The NvKmsPerOpenDev is privileged which can
 *                           do modeset anytime.
 *
 * \return  On success, return a pointer to the new NvKmsPerOpenDev.
 *          On failure, return NULL.
1155 */ 1156 struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen, 1157 NVDevEvoPtr pDevEvo, NvBool isPrivileged) 1158 { 1159 struct NvKmsPerOpenDev *pOpenDev = nvCalloc(1, sizeof(*pOpenDev)); 1160 NVDispEvoPtr pDispEvo; 1161 NvU32 disp; 1162 1163 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 1164 1165 if (pOpenDev == NULL) { 1166 goto fail; 1167 } 1168 1169 pOpenDev->nvKmsApiHandle = 1170 nvEvoCreateApiHandle(&pOpen->ioctl.devHandles, pOpenDev); 1171 1172 if (pOpenDev->nvKmsApiHandle == 0) { 1173 goto fail; 1174 } 1175 1176 pOpenDev->pDevEvo = pDevEvo; 1177 1178 if (!nvEvoInitApiHandles(&pOpenDev->dispHandles, 1179 ARRAY_LEN(pOpenDev->disp))) { 1180 goto fail; 1181 } 1182 1183 if (pDevEvo->nDispEvo > ARRAY_LEN(pOpenDev->disp)) { 1184 nvAssert(!"More disps on this device than NVKMS can handle."); 1185 goto fail; 1186 } 1187 1188 FOR_ALL_EVO_DISPLAYS(pDispEvo, disp, pDevEvo) { 1189 if (!InitPerOpenDisp(pOpen, pOpenDev, &pOpenDev->disp[disp], pDispEvo)) { 1190 goto fail; 1191 } 1192 } 1193 1194 if (!nvEvoInitApiHandles(&pOpenDev->surfaceHandles, 32)) { 1195 goto fail; 1196 } 1197 1198 pOpenDev->isPrivileged = isPrivileged; 1199 if (pOpenDev->isPrivileged) { 1200 AssignFullNvKmsFlipPermissions(pDevEvo, 1201 &pOpenDev->flipPermissions); 1202 AssignFullNvKmsModesetPermissions(pOpenDev->pDevEvo, 1203 &pOpenDev->modesetPermissions); 1204 } 1205 1206 if (!nvEvoInitApiHandles(&pOpenDev->deferredRequestFifoHandles, 4)) { 1207 goto fail; 1208 } 1209 1210 if (!nvEvoInitApiHandles(&pOpenDev->swapGroupHandles, 4)) { 1211 goto fail; 1212 } 1213 1214 return pOpenDev; 1215 1216 fail: 1217 nvFreePerOpenDev(pOpen, pOpenDev); 1218 return NULL; 1219 } 1220 1221 1222 /*! 1223 * Assign NvKmsPerOpen::type. 1224 * 1225 * This succeeds only if NvKmsPerOpen::type is Undefined, or already 1226 * has the requested type and allowRedundantAssignment is TRUE. 
1227 */ 1228 static NvBool AssignNvKmsPerOpenType(struct NvKmsPerOpen *pOpen, 1229 enum NvKmsPerOpenType type, 1230 NvBool allowRedundantAssignment) 1231 { 1232 if ((pOpen->type == type) && allowRedundantAssignment) { 1233 return TRUE; 1234 } 1235 1236 if (pOpen->type != NvKmsPerOpenTypeUndefined) { 1237 return FALSE; 1238 } 1239 1240 switch (type) { 1241 case NvKmsPerOpenTypeIoctl: 1242 nvListInit(&pOpen->ioctl.eventList); 1243 1244 if (!nvEvoInitApiHandles(&pOpen->ioctl.devHandles, NV_MAX_DEVICES)) { 1245 return FALSE; 1246 } 1247 1248 if (!nvEvoInitApiHandles(&pOpen->ioctl.frameLockHandles, 4)) { 1249 nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); 1250 return FALSE; 1251 } 1252 1253 nvListAppend(&pOpen->perOpenIoctlListEntry, &perOpenIoctlList); 1254 break; 1255 1256 case NvKmsPerOpenTypeGrantSurface: 1257 /* Nothing to do, here. */ 1258 break; 1259 1260 case NvKmsPerOpenTypeGrantSwapGroup: 1261 /* Nothing to do, here. */ 1262 break; 1263 1264 case NvKmsPerOpenTypeGrantPermissions: 1265 /* Nothing to do, here. */ 1266 break; 1267 1268 case NvKmsPerOpenTypeUnicastEvent: 1269 /* Nothing to do, here. */ 1270 break; 1271 1272 case NvKmsPerOpenTypeUndefined: 1273 nvAssert(!"unexpected NvKmsPerOpenType"); 1274 break; 1275 } 1276 1277 pOpen->type = type; 1278 return TRUE; 1279 } 1280 1281 /*! 1282 * Return whether the PerOpen can be used as a unicast event. 1283 */ 1284 static inline NvBool PerOpenIsValidForUnicastEvent( 1285 const struct NvKmsPerOpen *pOpen) 1286 { 1287 /* If the type is Undefined, it can be made a unicast event. */ 1288 1289 if (pOpen->type == NvKmsPerOpenTypeUndefined) { 1290 return TRUE; 1291 } 1292 1293 /* 1294 * If the type is already UnicastEvent but there is no active user, it can 1295 * be made a unicast event. 1296 */ 1297 if ((pOpen->type == NvKmsPerOpenTypeUnicastEvent) && 1298 (pOpen->unicastEvent.type == NvKmsUnicastEventTypeUndefined)) { 1299 return TRUE; 1300 } 1301 1302 return FALSE; 1303 } 1304 1305 /*! 
1306 * Allocate the specified device. 1307 */ 1308 static NvBool AllocDevice(struct NvKmsPerOpen *pOpen, 1309 void *pParamsVoid) 1310 { 1311 struct NvKmsAllocDeviceParams *pParams = pParamsVoid; 1312 NVDevEvoPtr pDevEvo; 1313 struct NvKmsPerOpenDev *pOpenDev; 1314 NvU32 disp, apiHead; 1315 NvU8 layer; 1316 1317 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 1318 1319 if (nvkms_strcmp(pParams->request.versionString, NV_VERSION_STRING) != 0) { 1320 pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH; 1321 return FALSE; 1322 } 1323 1324 /* 1325 * It is an error to call NVKMS_IOCTL_ALLOC_DEVICE multiple times 1326 * on the same device with the same fd. 1327 */ 1328 if (DeviceIdAlreadyPresent(pOpen, pParams->request.deviceId)) { 1329 pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; 1330 return FALSE; 1331 } 1332 1333 pDevEvo = nvFindDevEvoByDeviceId(pParams->request.deviceId); 1334 1335 if (pDevEvo == NULL) { 1336 pDevEvo = nvAllocDevEvo(&pParams->request, &pParams->reply.status); 1337 if (pDevEvo == NULL) { 1338 return FALSE; 1339 } 1340 } else { 1341 if (!pParams->request.tryInferSliMosaicFromExistingDevice && 1342 (pDevEvo->sli.mosaic != pParams->request.sliMosaic)) { 1343 pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; 1344 return FALSE; 1345 } 1346 1347 if (pDevEvo->usesTegraDevice && 1348 (pParams->request.deviceId != NVKMS_DEVICE_ID_TEGRA)) { 1349 pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; 1350 return FALSE; 1351 } 1352 pDevEvo->allocRefCnt++; 1353 } 1354 1355 pOpenDev = nvAllocPerOpenDev(pOpen, pDevEvo, FALSE /* isPrivileged */); 1356 1357 if (pOpenDev == NULL) { 1358 nvFreeDevEvo(pDevEvo); 1359 pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; 1360 return FALSE; 1361 } 1362 1363 /* Beyond this point, the function cannot fail. 
*/ 1364 1365 if (pParams->request.enableConsoleHotplugHandling) { 1366 pDevEvo->handleConsoleHotplugs = TRUE; 1367 } 1368 1369 pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; 1370 pParams->reply.subDeviceMask = 1371 NV_TWO_N_MINUS_ONE(pDevEvo->numSubDevices); 1372 pParams->reply.numHeads = pDevEvo->numApiHeads; 1373 pParams->reply.numDisps = pDevEvo->nDispEvo; 1374 1375 ct_assert(ARRAY_LEN(pParams->reply.dispHandles) == 1376 ARRAY_LEN(pOpenDev->disp)); 1377 1378 for (disp = 0; disp < ARRAY_LEN(pParams->reply.dispHandles); disp++) { 1379 pParams->reply.dispHandles[disp] = pOpenDev->disp[disp].nvKmsApiHandle; 1380 } 1381 1382 pParams->reply.inputLutAppliesToBase = pDevEvo->caps.inputLutAppliesToBase; 1383 1384 ct_assert(ARRAY_LEN(pParams->reply.layerCaps) == 1385 ARRAY_LEN(pDevEvo->caps.layerCaps)); 1386 1387 for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) { 1388 pParams->reply.numLayers[apiHead] = pDevEvo->apiHead[apiHead].numLayers; 1389 } 1390 1391 for (layer = 0; 1392 layer < ARRAY_LEN(pParams->reply.layerCaps); 1393 layer++) { 1394 pParams->reply.layerCaps[layer] = pDevEvo->caps.layerCaps[layer]; 1395 } 1396 1397 pParams->reply.surfaceAlignment = NV_EVO_SURFACE_ALIGNMENT; 1398 pParams->reply.requiresVrrSemaphores = !pDevEvo->hal->caps.supportsDisplayRate; 1399 1400 pParams->reply.nIsoSurfacesInVidmemOnly = 1401 !!NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits, 1402 NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY); 1403 1404 pParams->reply.requiresAllAllocationsInSysmem = 1405 pDevEvo->requiresAllAllocationsInSysmem; 1406 pParams->reply.supportsHeadSurface = pDevEvo->isHeadSurfaceSupported; 1407 1408 pParams->reply.validNIsoFormatMask = pDevEvo->caps.validNIsoFormatMask; 1409 1410 pParams->reply.maxWidthInBytes = pDevEvo->caps.maxWidthInBytes; 1411 pParams->reply.maxWidthInPixels = pDevEvo->caps.maxWidthInPixels; 1412 pParams->reply.maxHeightInPixels = pDevEvo->caps.maxHeight; 1413 pParams->reply.cursorCompositionCaps = 
pDevEvo->caps.cursorCompositionCaps; 1414 pParams->reply.genericPageKind = pDevEvo->caps.genericPageKind; 1415 1416 pParams->reply.maxCursorSize = pDevEvo->cursorHal->caps.maxSize; 1417 1418 /* NVKMS swap groups and warp&blend depends on headSurface functionality. */ 1419 pParams->reply.supportsSwapGroups = pDevEvo->isHeadSurfaceSupported; 1420 pParams->reply.supportsWarpAndBlend = pDevEvo->isHeadSurfaceSupported; 1421 1422 pParams->reply.validLayerRRTransforms = pDevEvo->caps.validLayerRRTransforms; 1423 1424 pParams->reply.isoIOCoherencyModes = pDevEvo->isoIOCoherencyModes; 1425 pParams->reply.nisoIOCoherencyModes = pDevEvo->nisoIOCoherencyModes; 1426 1427 /* 1428 * TODO: Replace the isSOCDisplay check with an RM query. See Bug 3689635. 1429 */ 1430 pParams->reply.displayIsGpuL2Coherent = !pDevEvo->isSOCDisplay; 1431 1432 pParams->reply.supportsSyncpts = pDevEvo->supportsSyncpts; 1433 1434 pParams->reply.supportsIndependentAcqRelSemaphore = 1435 pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore; 1436 1437 pParams->reply.supportsVblankSyncObjects = 1438 pDevEvo->hal->caps.supportsVblankSyncObjects; 1439 1440 pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; 1441 1442 return TRUE; 1443 } 1444 1445 static void UnregisterDeferredRequestFifos(struct NvKmsPerOpenDev *pOpenDev) 1446 { 1447 NVDeferredRequestFifoRec *pDeferredRequestFifo; 1448 NvKmsGenericHandle handle; 1449 1450 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->deferredRequestFifoHandles, 1451 pDeferredRequestFifo, 1452 handle) { 1453 1454 nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle); 1455 1456 nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, 1457 pDeferredRequestFifo); 1458 } 1459 } 1460 1461 /* 1462 * Forward declaration since this function is used by 1463 * DisableRemainingVblankSyncObjects(). 
1464 */ 1465 static void DisableAndCleanVblankSyncObject(struct NvKmsPerOpenDisp *pOpenDisp, 1466 NvU32 apiHead, 1467 NVVblankSyncObjectRec *pVblankSyncObject, 1468 NVEvoUpdateState *pUpdateState, 1469 NvKmsVblankSyncObjectHandle handle); 1470 1471 static void DisableRemainingVblankSyncObjects(struct NvKmsPerOpen *pOpen, 1472 struct NvKmsPerOpenDev *pOpenDev) 1473 { 1474 struct NvKmsPerOpenDisp *pOpenDisp; 1475 NvKmsGenericHandle disp; 1476 NVVblankSyncObjectRec *pVblankSyncObject; 1477 NvKmsVblankSyncObjectHandle handle; 1478 NvU32 apiHead = 0; 1479 1480 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); 1481 1482 if (pOpenDev == NULL) { 1483 return; 1484 } 1485 1486 /* For each pOpenDisp: */ 1487 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, 1488 pOpenDisp, disp) { 1489 /* 1490 * A single update state can handle changes across multiple heads on a 1491 * given Disp. 1492 */ 1493 NVEvoUpdateState updateState = { }; 1494 1495 /* For each head: */ 1496 for (apiHead = 0; apiHead < ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) { 1497 NVEvoApiHandlesRec *pHandles = 1498 &pOpenDisp->vblankSyncObjectHandles[apiHead]; 1499 1500 /* For each still-active vblank sync object: */ 1501 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles, 1502 pVblankSyncObject, handle) { 1503 DisableAndCleanVblankSyncObject(pOpenDisp, apiHead, 1504 pVblankSyncObject, 1505 &updateState, 1506 handle); 1507 } 1508 } 1509 1510 if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { 1511 /* 1512 * Instruct hardware to execute the staged commands from the 1513 * ConfigureVblankSyncObject() calls (inherent in 1514 * DisableAndCleanVblankSyncObject()) above. This will set up 1515 * and wait for a notification that the hardware execution 1516 * has completed. 
1517 */ 1518 nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, 1519 TRUE); 1520 } 1521 } 1522 } 1523 1524 static void FreeDeviceReference(struct NvKmsPerOpen *pOpen, 1525 struct NvKmsPerOpenDev *pOpenDev) 1526 { 1527 /* Disable all client-owned vblank sync objects that still exist. */ 1528 DisableRemainingVblankSyncObjects(pOpen, pOpenDev); 1529 1530 FreeSwapGroups(pOpenDev); 1531 1532 UnregisterDeferredRequestFifos(pOpenDev); 1533 1534 nvEvoFreeClientSurfaces(pOpenDev->pDevEvo, pOpenDev, 1535 &pOpenDev->surfaceHandles); 1536 1537 if (!nvFreeDevEvo(pOpenDev->pDevEvo)) { 1538 // If this pOpenDev is the modeset owner, implicitly release it. Does 1539 // nothing if this pOpenDev is not the modeset owner. 1540 // 1541 // If nvFreeDevEvo() freed the device, then it also implicitly released 1542 // ownership. 1543 ReleaseModesetOwnership(pOpenDev); 1544 1545 nvAssert(pOpenDev->pDevEvo->modesetOwner != pOpenDev); 1546 } 1547 1548 nvFreePerOpenDev(pOpen, pOpenDev); 1549 } 1550 1551 /*! 1552 * Free the specified device. 1553 */ 1554 static NvBool FreeDevice(struct NvKmsPerOpen *pOpen, 1555 void *pParamsVoid) 1556 { 1557 struct NvKmsFreeDeviceParams *pParams = pParamsVoid; 1558 struct NvKmsPerOpenDev *pOpenDev; 1559 1560 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 1561 1562 if (pOpenDev == NULL) { 1563 return FALSE; 1564 } 1565 1566 FreeDeviceReference(pOpen, pOpenDev); 1567 1568 return TRUE; 1569 } 1570 1571 1572 /*! 1573 * Get the disp data. This information should remain static for the 1574 * lifetime of the disp. 
1575 */ 1576 static NvBool QueryDisp(struct NvKmsPerOpen *pOpen, 1577 void *pParamsVoid) 1578 { 1579 struct NvKmsQueryDispParams *pParams = pParamsVoid; 1580 struct NvKmsPerOpenDisp *pOpenDisp; 1581 const NVEvoSubDeviceRec *pSubDevice; 1582 NVDispEvoPtr pDispEvo; 1583 NvU32 connector; 1584 1585 pOpenDisp = GetPerOpenDisp(pOpen, 1586 pParams->request.deviceHandle, 1587 pParams->request.dispHandle); 1588 if (pOpenDisp == NULL) { 1589 return FALSE; 1590 } 1591 1592 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 1593 1594 pDispEvo = pOpenDisp->pDispEvo; 1595 1596 // Don't include dynamic displays in validDpys. The data returned here is 1597 // supposed to be static for the lifetime of the pDispEvo. 1598 pParams->reply.validDpys = 1599 nvDpyIdListMinusDpyIdList(pDispEvo->validDisplays, 1600 pDispEvo->dynamicDpyIds); 1601 pParams->reply.bootDpys = pDispEvo->bootDisplays; 1602 pParams->reply.muxDpys = pDispEvo->muxDisplays; 1603 pParams->reply.frameLockHandle = pOpenDisp->frameLockHandle; 1604 pParams->reply.numConnectors = nvListCount(&pDispEvo->connectorList); 1605 1606 ct_assert(ARRAY_LEN(pParams->reply.connectorHandles) == 1607 ARRAY_LEN(pOpenDisp->connector)); 1608 1609 for (connector = 0; connector < ARRAY_LEN(pParams->reply.connectorHandles); 1610 connector++) { 1611 pParams->reply.connectorHandles[connector] = 1612 pOpenDisp->connector[connector].nvKmsApiHandle; 1613 } 1614 1615 pSubDevice = pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner]; 1616 if (pSubDevice != NULL) { 1617 ct_assert(sizeof(pParams->reply.gpuString) >= 1618 sizeof(pSubDevice->gpuString)); 1619 nvkms_memcpy(pParams->reply.gpuString, pSubDevice->gpuString, 1620 sizeof(pSubDevice->gpuString)); 1621 } 1622 1623 return TRUE; 1624 } 1625 1626 1627 /*! 1628 * Get the connector static data. This information should remain static for the 1629 * lifetime of the connector. 
1630 */ 1631 static NvBool QueryConnectorStaticData(struct NvKmsPerOpen *pOpen, 1632 void *pParamsVoid) 1633 { 1634 struct NvKmsQueryConnectorStaticDataParams *pParams = pParamsVoid; 1635 struct NvKmsPerOpenConnector *pOpenConnector; 1636 NVConnectorEvoPtr pConnectorEvo; 1637 1638 pOpenConnector = GetPerOpenConnector(pOpen, 1639 pParams->request.deviceHandle, 1640 pParams->request.dispHandle, 1641 pParams->request.connectorHandle); 1642 if (pOpenConnector == NULL) { 1643 return FALSE; 1644 } 1645 1646 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 1647 1648 pConnectorEvo = pOpenConnector->pConnectorEvo; 1649 1650 pParams->reply.dpyId = pConnectorEvo->displayId; 1651 pParams->reply.isDP = nvConnectorUsesDPLib(pConnectorEvo) || 1652 nvConnectorIsDPSerializer(pConnectorEvo); 1653 pParams->reply.legacyTypeIndex = pConnectorEvo->legacyTypeIndex; 1654 pParams->reply.type = pConnectorEvo->type; 1655 pParams->reply.typeIndex = pConnectorEvo->typeIndex; 1656 pParams->reply.signalFormat = pConnectorEvo->signalFormat; 1657 pParams->reply.physicalIndex = pConnectorEvo->physicalIndex; 1658 pParams->reply.physicalLocation = pConnectorEvo->physicalLocation; 1659 pParams->reply.headMask = pConnectorEvo->validApiHeadMask; 1660 1661 pParams->reply.isLvds = 1662 (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) && 1663 (pConnectorEvo->or.protocol == 1664 NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM); 1665 1666 pParams->reply.locationOnChip = (pConnectorEvo->or.location == 1667 NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP); 1668 return TRUE; 1669 } 1670 1671 1672 /*! 1673 * Get the connector dynamic data. This information should reflects changes to 1674 * the connector over time (e.g. for DisplayPort MST devices). 
1675 */ 1676 static NvBool QueryConnectorDynamicData(struct NvKmsPerOpen *pOpen, 1677 void *pParamsVoid) 1678 { 1679 struct NvKmsQueryConnectorDynamicDataParams *pParams = pParamsVoid; 1680 struct NvKmsPerOpenConnector *pOpenConnector; 1681 NVConnectorEvoPtr pConnectorEvo; 1682 NVDispEvoPtr pDispEvo; 1683 NVDpyEvoPtr pDpyEvo; 1684 1685 pOpenConnector = GetPerOpenConnector(pOpen, 1686 pParams->request.deviceHandle, 1687 pParams->request.dispHandle, 1688 pParams->request.connectorHandle); 1689 if (pOpenConnector == NULL) { 1690 return FALSE; 1691 } 1692 1693 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 1694 1695 pConnectorEvo = pOpenConnector->pConnectorEvo; 1696 pDispEvo = pConnectorEvo->pDispEvo; 1697 1698 if (nvConnectorUsesDPLib(pConnectorEvo)) { 1699 pParams->reply.detectComplete = pConnectorEvo->detectComplete; 1700 } else { 1701 pParams->reply.detectComplete = TRUE; 1702 } 1703 1704 // Find the dynamic dpys on this connector. 1705 pParams->reply.dynamicDpyIdList = nvEmptyDpyIdList(); 1706 FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->dynamicDpyIds, pDispEvo) { 1707 if (pDpyEvo->pConnectorEvo == pConnectorEvo) { 1708 pParams->reply.dynamicDpyIdList = 1709 nvAddDpyIdToDpyIdList(pDpyEvo->id, 1710 pParams->reply.dynamicDpyIdList); 1711 } 1712 } 1713 1714 return TRUE; 1715 } 1716 1717 1718 /*! 1719 * Get the static data for the specified dpy. This information should 1720 * remain static for the lifetime of the dpy. 
1721 */ 1722 static NvBool QueryDpyStaticData(struct NvKmsPerOpen *pOpen, 1723 void *pParamsVoid) 1724 { 1725 struct NvKmsQueryDpyStaticDataParams *pParams = pParamsVoid; 1726 NVDpyEvoPtr pDpyEvo; 1727 1728 pDpyEvo = GetPerOpenDpy(pOpen, 1729 pParams->request.deviceHandle, 1730 pParams->request.dispHandle, 1731 pParams->request.dpyId); 1732 if (pDpyEvo == NULL) { 1733 return FALSE; 1734 } 1735 1736 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); 1737 1738 pParams->reply.connectorHandle = 1739 ConnectorEvoToConnectorHandle(pOpen, 1740 pParams->request.deviceHandle, 1741 pParams->request.dispHandle, 1742 pDpyEvo->pConnectorEvo); 1743 /* 1744 * All pConnectorEvos should have corresponding pOpenConnectors, 1745 * so we should always be able to find the NvKmsConnectorHandle. 1746 */ 1747 nvAssert(pParams->reply.connectorHandle != 0); 1748 1749 pParams->reply.type = pDpyEvo->pConnectorEvo->legacyType; 1750 1751 if (pDpyEvo->dp.addressString != NULL) { 1752 const size_t len = nvkms_strlen(pDpyEvo->dp.addressString) + 1; 1753 nvkms_memcpy(pParams->reply.dpAddress, pDpyEvo->dp.addressString, 1754 NV_MIN(sizeof(pParams->reply.dpAddress), len)); 1755 pParams->reply.dpAddress[sizeof(pParams->reply.dpAddress) - 1] = '\0'; 1756 } 1757 1758 pParams->reply.mobileInternal = pDpyEvo->internal; 1759 pParams->reply.isDpMST = nvDpyEvoIsDPMST(pDpyEvo); 1760 1761 return TRUE; 1762 } 1763 1764 1765 /*! 1766 * Get the dynamic data for the specified dpy. This information can 1767 * change when a hotplug occurs. 
1768 */ 1769 static NvBool QueryDpyDynamicData(struct NvKmsPerOpen *pOpen, 1770 void *pParamsVoid) 1771 { 1772 struct NvKmsQueryDpyDynamicDataParams *pParams = pParamsVoid; 1773 NVDpyEvoPtr pDpyEvo; 1774 1775 pDpyEvo = GetPerOpenDpy(pOpen, 1776 pParams->request.deviceHandle, 1777 pParams->request.dispHandle, 1778 pParams->request.dpyId); 1779 if (pDpyEvo == NULL) { 1780 return FALSE; 1781 } 1782 1783 return nvDpyGetDynamicData(pDpyEvo, pParams); 1784 } 1785 1786 /* Store a copy of the user's infoString pointer, so we can copy out to it when 1787 * we're done. */ 1788 struct InfoStringExtraUserStateCommon 1789 { 1790 NvU64 userInfoString; 1791 }; 1792 1793 /* 1794 * Allocate a kernel buffer to populate the infoString which will be copied out 1795 * to userspace upon completion. 1796 */ 1797 static NvBool InfoStringPrepUserCommon( 1798 NvU32 infoStringSize, 1799 NvU64 *ppInfoString, 1800 struct InfoStringExtraUserStateCommon *pExtra) 1801 { 1802 char *kernelInfoString = NULL; 1803 1804 if (infoStringSize == 0) { 1805 *ppInfoString = 0; 1806 return TRUE; 1807 } 1808 1809 if (!nvKmsNvU64AddressIsSafe(*ppInfoString)) { 1810 return FALSE; 1811 } 1812 1813 if (infoStringSize > NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH) { 1814 return FALSE; 1815 } 1816 1817 kernelInfoString = nvCalloc(1, infoStringSize); 1818 if (kernelInfoString == NULL) { 1819 return FALSE; 1820 } 1821 1822 pExtra->userInfoString = *ppInfoString; 1823 *ppInfoString = nvKmsPointerToNvU64(kernelInfoString); 1824 1825 return TRUE; 1826 } 1827 1828 /* 1829 * Copy the infoString out to userspace and free the kernel-internal buffer. 
1830 */ 1831 static NvBool InfoStringDoneUserCommon( 1832 NvU32 infoStringSize, 1833 NvU64 pInfoString, 1834 NvU32 *infoStringLenWritten, 1835 struct InfoStringExtraUserStateCommon *pExtra) 1836 { 1837 char *kernelInfoString = nvKmsNvU64ToPointer(pInfoString); 1838 int status; 1839 NvBool ret; 1840 1841 if ((infoStringSize == 0) || (*infoStringLenWritten == 0)) { 1842 ret = TRUE; 1843 goto done; 1844 } 1845 1846 nvAssert(*infoStringLenWritten <= infoStringSize); 1847 1848 status = nvkms_copyout(pExtra->userInfoString, 1849 kernelInfoString, 1850 *infoStringLenWritten); 1851 if (status == 0) { 1852 ret = TRUE; 1853 } else { 1854 ret = FALSE; 1855 *infoStringLenWritten = 0; 1856 } 1857 1858 done: 1859 nvFree(kernelInfoString); 1860 1861 return ret; 1862 } 1863 1864 struct NvKmsValidateModeIndexExtraUserState 1865 { 1866 struct InfoStringExtraUserStateCommon common; 1867 }; 1868 1869 static NvBool ValidateModeIndexPrepUser( 1870 void *pParamsVoid, 1871 void *pExtraUserStateVoid) 1872 { 1873 struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; 1874 struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; 1875 1876 return InfoStringPrepUserCommon( 1877 pParams->request.infoStringSize, 1878 &pParams->request.pInfoString, 1879 &pExtra->common); 1880 } 1881 1882 static NvBool ValidateModeIndexDoneUser( 1883 void *pParamsVoid, 1884 void *pExtraUserStateVoid) 1885 { 1886 struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; 1887 struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; 1888 1889 return InfoStringDoneUserCommon( 1890 pParams->request.infoStringSize, 1891 pParams->request.pInfoString, 1892 &pParams->reply.infoStringLenWritten, 1893 &pExtra->common); 1894 } 1895 1896 /*! 1897 * Validate the requested mode. 
1898 */ 1899 static NvBool ValidateModeIndex(struct NvKmsPerOpen *pOpen, 1900 void *pParamsVoid) 1901 { 1902 struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; 1903 NVDpyEvoPtr pDpyEvo; 1904 1905 pDpyEvo = GetPerOpenDpy(pOpen, 1906 pParams->request.deviceHandle, 1907 pParams->request.dispHandle, 1908 pParams->request.dpyId); 1909 if (pDpyEvo == NULL) { 1910 return FALSE; 1911 } 1912 1913 nvValidateModeIndex(pDpyEvo, &pParams->request, &pParams->reply); 1914 1915 return TRUE; 1916 } 1917 1918 struct NvKmsValidateModeExtraUserState 1919 { 1920 struct InfoStringExtraUserStateCommon common; 1921 }; 1922 1923 static NvBool ValidateModePrepUser( 1924 void *pParamsVoid, 1925 void *pExtraUserStateVoid) 1926 { 1927 struct NvKmsValidateModeParams *pParams = pParamsVoid; 1928 struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; 1929 1930 return InfoStringPrepUserCommon( 1931 pParams->request.infoStringSize, 1932 &pParams->request.pInfoString, 1933 &pExtra->common); 1934 } 1935 1936 static NvBool ValidateModeDoneUser( 1937 void *pParamsVoid, 1938 void *pExtraUserStateVoid) 1939 { 1940 struct NvKmsValidateModeParams *pParams = pParamsVoid; 1941 struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; 1942 1943 return InfoStringDoneUserCommon( 1944 pParams->request.infoStringSize, 1945 pParams->request.pInfoString, 1946 &pParams->reply.infoStringLenWritten, 1947 &pExtra->common); 1948 } 1949 1950 /*! 1951 * Validate the requested mode. 
1952 */ 1953 static NvBool ValidateMode(struct NvKmsPerOpen *pOpen, 1954 void *pParamsVoid) 1955 { 1956 struct NvKmsValidateModeParams *pParams = pParamsVoid; 1957 NVDpyEvoPtr pDpyEvo; 1958 1959 pDpyEvo = GetPerOpenDpy(pOpen, 1960 pParams->request.deviceHandle, 1961 pParams->request.dispHandle, 1962 pParams->request.dpyId); 1963 if (pDpyEvo == NULL) { 1964 return FALSE; 1965 } 1966 1967 nvValidateModeEvo(pDpyEvo, &pParams->request, &pParams->reply); 1968 1969 return TRUE; 1970 } 1971 1972 static NvBool 1973 CopyInOneLut(NvU64 pRampsUser, struct NvKmsLutRamps **ppRampsKernel) 1974 { 1975 struct NvKmsLutRamps *pRampsKernel = NULL; 1976 int status; 1977 1978 if (pRampsUser == 0) { 1979 return TRUE; 1980 } 1981 1982 if (!nvKmsNvU64AddressIsSafe(pRampsUser)) { 1983 return FALSE; 1984 } 1985 1986 pRampsKernel = nvAlloc(sizeof(*pRampsKernel)); 1987 if (!pRampsKernel) { 1988 return FALSE; 1989 } 1990 1991 status = nvkms_copyin((char *)pRampsKernel, pRampsUser, 1992 sizeof(*pRampsKernel)); 1993 if (status != 0) { 1994 nvFree(pRampsKernel); 1995 return FALSE; 1996 } 1997 1998 *ppRampsKernel = pRampsKernel; 1999 2000 return TRUE; 2001 } 2002 2003 static NvBool 2004 CopyInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) 2005 { 2006 struct NvKmsLutRamps *pInputRamps = NULL; 2007 struct NvKmsLutRamps *pOutputRamps = NULL; 2008 2009 if (!CopyInOneLut(pCommonLutParams->input.pRamps, &pInputRamps)) { 2010 goto fail; 2011 } 2012 if (!CopyInOneLut(pCommonLutParams->output.pRamps, &pOutputRamps)) { 2013 goto fail; 2014 } 2015 2016 pCommonLutParams->input.pRamps = nvKmsPointerToNvU64(pInputRamps); 2017 pCommonLutParams->output.pRamps = nvKmsPointerToNvU64(pOutputRamps); 2018 2019 return TRUE; 2020 2021 fail: 2022 nvFree(pInputRamps); 2023 nvFree(pOutputRamps); 2024 return FALSE; 2025 } 2026 2027 static void 2028 FreeCopiedInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) 2029 { 2030 struct NvKmsLutRamps *pInputRamps = 2031 
nvKmsNvU64ToPointer(pCommonLutParams->input.pRamps); 2032 struct NvKmsLutRamps *pOutputRamps = 2033 nvKmsNvU64ToPointer(pCommonLutParams->output.pRamps); 2034 2035 nvFree(pInputRamps); 2036 nvFree(pOutputRamps); 2037 } 2038 2039 /* No extra user state needed for SetMode; although we lose the user pointers 2040 * for the LUT ramps after copying them in, that's okay because we don't need 2041 * to copy them back out again. */ 2042 struct NvKmsSetModeExtraUserState 2043 { 2044 }; 2045 2046 /*! 2047 * Copy in any data referenced by pointer for the SetMode request. Currently 2048 * this is only the LUT ramps. 2049 */ 2050 static NvBool SetModePrepUser( 2051 void *pParamsVoid, 2052 void *pExtraUserStateVoid) 2053 { 2054 struct NvKmsSetModeParams *pParams = pParamsVoid; 2055 struct NvKmsSetModeRequest *pReq = &pParams->request; 2056 NvU32 disp, apiHead, dispFailed, apiHeadFailed; 2057 2058 /* Iterate over all of the common LUT ramp pointers embedded in the SetMode 2059 * request, and copy in each one. */ 2060 for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { 2061 for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { 2062 struct NvKmsSetLutCommonParams *pCommonLutParams = 2063 &pReq->disp[disp].head[apiHead].lut; 2064 2065 if (!CopyInLutParams(pCommonLutParams)) { 2066 /* Remember how far we got through these loops before we 2067 * failed, so that we can undo everything up to this point. 
*/ 2068 dispFailed = disp; 2069 apiHeadFailed = apiHead; 2070 goto fail; 2071 } 2072 } 2073 } 2074 2075 return TRUE; 2076 2077 fail: 2078 for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { 2079 for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { 2080 struct NvKmsSetLutCommonParams *pCommonLutParams = 2081 &pReq->disp[disp].head[apiHead].lut; 2082 2083 if (disp > dispFailed || 2084 (disp == dispFailed && apiHead >= apiHeadFailed)) { 2085 break; 2086 } 2087 2088 FreeCopiedInLutParams(pCommonLutParams); 2089 } 2090 } 2091 2092 return FALSE; 2093 } 2094 2095 /*! 2096 * Free buffers allocated in SetModePrepUser. 2097 */ 2098 static NvBool SetModeDoneUser( 2099 void *pParamsVoid, 2100 void *pExtraUserStateVoid) 2101 { 2102 struct NvKmsSetModeParams *pParams = pParamsVoid; 2103 struct NvKmsSetModeRequest *pReq = &pParams->request; 2104 NvU32 disp, apiHead; 2105 2106 for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { 2107 for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) { 2108 struct NvKmsSetLutCommonParams *pCommonLutParams = 2109 &pReq->disp[disp].head[apiHead].lut; 2110 2111 FreeCopiedInLutParams(pCommonLutParams); 2112 } 2113 } 2114 2115 return TRUE; 2116 } 2117 2118 /*! 2119 * Perform a modeset on the device. 2120 */ 2121 static NvBool SetMode(struct NvKmsPerOpen *pOpen, 2122 void *pParamsVoid) 2123 { 2124 struct NvKmsSetModeParams *pParams = pParamsVoid; 2125 struct NvKmsPerOpenDev *pOpenDev; 2126 2127 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2128 2129 if (pOpenDev == NULL) { 2130 return FALSE; 2131 } 2132 2133 return nvSetDispModeEvo(pOpenDev->pDevEvo, pOpenDev, 2134 &pParams->request, &pParams->reply, 2135 FALSE /* bypassComposition */, 2136 TRUE /* doRasterLock */); 2137 } 2138 2139 /*! 2140 * Set the cursor image. 
2141 */ 2142 static NvBool SetCursorImage(struct NvKmsPerOpen *pOpen, 2143 void *pParamsVoid) 2144 { 2145 struct NvKmsSetCursorImageParams *pParams = pParamsVoid; 2146 struct NvKmsPerOpenDev *pOpenDev; 2147 struct NvKmsPerOpenDisp *pOpenDisp; 2148 NVDispEvoPtr pDispEvo; 2149 2150 if (!GetPerOpenDevAndDisp(pOpen, 2151 pParams->request.deviceHandle, 2152 pParams->request.dispHandle, 2153 &pOpenDev, 2154 &pOpenDisp)) { 2155 return FALSE; 2156 } 2157 2158 pDispEvo = pOpenDisp->pDispEvo; 2159 2160 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { 2161 return FALSE; 2162 } 2163 2164 return nvHsIoctlSetCursorImage(pDispEvo, 2165 pOpenDev, 2166 &pOpenDev->surfaceHandles, 2167 pParams->request.head, 2168 &pParams->request.common); 2169 } 2170 2171 /*! 2172 * Change the cursor position. 2173 */ 2174 static NvBool MoveCursor(struct NvKmsPerOpen *pOpen, 2175 void *pParamsVoid) 2176 { 2177 struct NvKmsMoveCursorParams *pParams = pParamsVoid; 2178 struct NvKmsPerOpenDisp *pOpenDisp; 2179 NVDispEvoPtr pDispEvo; 2180 2181 pOpenDisp = GetPerOpenDisp(pOpen, 2182 pParams->request.deviceHandle, 2183 pParams->request.dispHandle); 2184 if (pOpenDisp == NULL) { 2185 return FALSE; 2186 } 2187 2188 pDispEvo = pOpenDisp->pDispEvo; 2189 2190 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { 2191 return FALSE; 2192 } 2193 2194 return nvHsIoctlMoveCursor(pDispEvo, 2195 pParams->request.head, 2196 &pParams->request.common); 2197 } 2198 2199 /* No extra user state needed for SetLut; although we lose the user pointers 2200 * for the LUT ramps after copying them in, that's okay because we don't need 2201 * to copy them back out again. */ 2202 struct NvKmsSetLutExtraUserState 2203 { 2204 }; 2205 2206 /*! 2207 * Copy in any data referenced by pointer for the SetLut request. Currently 2208 * this is only the LUT ramps. 
2209 */ 2210 static NvBool SetLutPrepUser( 2211 void *pParamsVoid, 2212 void *pExtraUserStateVoid) 2213 { 2214 struct NvKmsSetLutParams *pParams = pParamsVoid; 2215 struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; 2216 2217 return CopyInLutParams(pCommonLutParams); 2218 } 2219 2220 /*! 2221 * Free buffers allocated in SetLutPrepUser. 2222 */ 2223 static NvBool SetLutDoneUser( 2224 void *pParamsVoid, 2225 void *pExtraUserStateVoid) 2226 { 2227 struct NvKmsSetLutParams *pParams = pParamsVoid; 2228 struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; 2229 2230 FreeCopiedInLutParams(pCommonLutParams); 2231 2232 return TRUE; 2233 } 2234 2235 /*! 2236 * Set the LUT on the specified head. 2237 */ 2238 static NvBool SetLut(struct NvKmsPerOpen *pOpen, 2239 void *pParamsVoid) 2240 { 2241 struct NvKmsSetLutParams *pParams = pParamsVoid; 2242 struct NvKmsPerOpenDisp *pOpenDisp; 2243 NVDispEvoPtr pDispEvo; 2244 2245 pOpenDisp = GetPerOpenDisp(pOpen, 2246 pParams->request.deviceHandle, 2247 pParams->request.dispHandle); 2248 if (pOpenDisp == NULL) { 2249 return FALSE; 2250 } 2251 2252 pDispEvo = pOpenDisp->pDispEvo; 2253 2254 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) { 2255 return FALSE; 2256 } 2257 2258 if (!nvValidateSetLutCommonParams(pDispEvo->pDevEvo, 2259 &pParams->request.common)) { 2260 return FALSE; 2261 } 2262 2263 nvEvoSetLut(pDispEvo, 2264 pParams->request.head, TRUE /* kickoff */, 2265 &pParams->request.common); 2266 2267 return TRUE; 2268 } 2269 2270 2271 /*! 2272 * Return whether the specified head is idle. 2273 */ 2274 static NvBool IdleMainLayerChannelCheckIdleOneApiHead( 2275 NVDispEvoPtr pDispEvo, 2276 NvU32 apiHead) 2277 { 2278 if (pDispEvo->pHsChannel[apiHead] != NULL) { 2279 return nvHsIdleFlipQueue(pDispEvo->pHsChannel[apiHead], 2280 FALSE /* force */); 2281 } 2282 return nvIdleMainLayerChannelCheckIdleOneApiHead(pDispEvo, apiHead); 2283 } 2284 2285 /*! 
 * Return whether all heads described in pRequest are idle.
 *
 * Note that we loop over all requested heads, rather than return FALSE once we
 * find the first non-idle head, because checking for idle has side effects: in
 * headSurface, checking for idle gives the headSurface flip queue the
 * opportunity to proceed another frame.
 */
static NvBool IdleBaseChannelCheckIdle(
    NVDevEvoPtr pDevEvo,
    const struct NvKmsIdleBaseChannelRequest *pRequest,
    struct NvKmsIdleBaseChannelReply *pReply)
{
    NvU32 apiHead, sd;
    NVDispEvoPtr pDispEvo;
    NvBool allIdle = TRUE;

    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {

        for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {

            NvBool idle;

            if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
                continue;
            }

            /* Skip (head, subdevice) pairs the client did not request. */
            if ((pRequest->subDevicesPerHead[apiHead] & NVBIT(sd)) == 0) {
                continue;
            }

            idle = IdleMainLayerChannelCheckIdleOneApiHead(pDispEvo, apiHead);

            if (!idle) {
                /* Report each still-busy (head, subdevice) pair in the reply. */
                pReply->stopSubDevicesPerHead[apiHead] |= NVBIT(sd);
            }
            allIdle = allIdle && idle;
        }
    }

    return allIdle;
}

/*!
 * Idle all requested heads.
 *
 * First, wait for the heads to idle naturally. If a timeout is exceeded, then
 * force the non-idle heads to idle, and record these in pReply.
 */
static NvBool IdleBaseChannelAll(
    NVDevEvoPtr pDevEvo,
    const struct NvKmsIdleBaseChannelRequest *pRequest,
    struct NvKmsIdleBaseChannelReply *pReply)
{
    NvU64 startTime = 0;

    /*
     * Each element in subDevicesPerHead[] must be large enough to hold one bit
     * per subdevice.
     */
    ct_assert(NVKMS_MAX_SUBDEVICES <=
              (sizeof(pRequest->subDevicesPerHead[0]) * 8));

    /* Loop until all head,sd pairs are idle, or we time out.
     */
    do {
        const NvU32 timeout = 2000000; /* 2 seconds */

        /*
         * Clear the pReply data,
         * IdleBaseChannelCheckIdle() will fill it afresh.
         */
        nvkms_memset(pReply, 0, sizeof(*pReply));

        /* If all heads are idle, we are done. */
        if (IdleBaseChannelCheckIdle(pDevEvo, pRequest, pReply)) {
            return TRUE;
        }

        /* Break out of the loop if we exceed the timeout. */
        if (nvExceedsTimeoutUSec(&startTime, timeout)) {
            break;
        }

        /* At least one head is not idle; yield, and try again. */
        nvkms_yield();

    } while (TRUE);

    /* Timed out: pReply retains the non-idle pairs from the last check. */
    return TRUE;
}


/*!
 * Wait for the requested base channels to be idle, returning whether
 * stopping the base channels was necessary.
 */
static NvBool IdleBaseChannel(struct NvKmsPerOpen *pOpen,
                              void *pParamsVoid)
{
    struct NvKmsIdleBaseChannelParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    /* Only the modesetOwner can idle base. */

    if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) {
        return FALSE;
    }

    return IdleBaseChannelAll(pOpenDev->pDevEvo,
                              &pParams->request, &pParams->reply);
}


struct NvKmsFlipExtraUserState
{
    // Nothing needed.
};

/*!
 * Copy in the array of per-head flip requests referenced by the Flip request.
 *
 * On success, the request's pFlipHead field is repointed at a kernel copy of
 * the array; FlipDoneUser() frees it after the ioctl completes.
 */
static NvBool FlipPrepUser(
    void *pParamsVoid,
    void *pExtraUserStateVoid)
{
    struct NvKmsFlipParams *pParams = pParamsVoid;
    struct NvKmsFlipRequest *pRequest = &pParams->request;
    struct NvKmsFlipRequestOneHead *pFlipHeadKernel = NULL;
    NvU64 pFlipHeadUser = pRequest->pFlipHead;
    size_t size;
    int status;

    /* Reject user pointers that cannot be safely dereferenced. */
    if (!nvKmsNvU64AddressIsSafe(pFlipHeadUser)) {
        return FALSE;
    }

    /* Bound the element count before computing the allocation size. */
    if (pRequest->numFlipHeads <= 0 ||
        pRequest->numFlipHeads > NV_MAX_FLIP_REQUEST_HEADS) {
        return FALSE;
    }

    size = sizeof(*pFlipHeadKernel) * pRequest->numFlipHeads;
    pFlipHeadKernel = nvAlloc(size);
    if (!pFlipHeadKernel) {
        return FALSE;
    }

    status = nvkms_copyin((char *)pFlipHeadKernel, pFlipHeadUser, size);
    if (status != 0) {
        nvFree(pFlipHeadKernel);
        return FALSE;
    }

    /* Point the request at the kernel copy for the rest of the ioctl. */
    pRequest->pFlipHead = nvKmsPointerToNvU64(pFlipHeadKernel);

    return TRUE;
}

/*!
 * Free the kernel copy of the flip-head array made in FlipPrepUser().
 */
static NvBool FlipDoneUser(
    void *pParamsVoid,
    void *pExtraUserStateVoid)
{
    struct NvKmsFlipParams *pParams = pParamsVoid;
    struct NvKmsFlipRequest *pRequest = &pParams->request;

    nvFree(nvKmsNvU64ToPointer(pRequest->pFlipHead));
    /* The request is not copied back out to userspace (only the reply is), so
     * we don't need to worry about restoring the user pointer */
    pRequest->pFlipHead = 0;

    return TRUE;
}

/*!
 * For each entry in the array pointed to by 'pFlipHead', of length
 * 'numFlipHeads', verify that the sd and head values specified are within
 * bounds and that there are no duplicates.
2466 */ 2467 static NvBool ValidateFlipHeads( 2468 NVDevEvoPtr pDevEvo, 2469 const struct NvKmsFlipRequestOneHead *pFlipHead, 2470 NvU32 numFlipHeads) 2471 { 2472 NvU32 i; 2473 ct_assert(NVKMS_MAX_HEADS_PER_DISP <= 8); 2474 NvU8 apiHeadsUsed[NVKMS_MAX_SUBDEVICES] = { }; 2475 2476 for (i = 0; i < numFlipHeads; i++) { 2477 const NvU32 sd = pFlipHead[i].sd; 2478 const NvU32 apiHead = pFlipHead[i].head; 2479 2480 if (sd >= pDevEvo->numSubDevices) { 2481 return FALSE; 2482 } 2483 if (apiHead >= pDevEvo->numApiHeads) { 2484 return FALSE; 2485 } 2486 if ((apiHeadsUsed[sd] & (1 << apiHead)) != 0) { 2487 return FALSE; 2488 } 2489 apiHeadsUsed[sd] |= (1 << apiHead); 2490 } 2491 2492 return TRUE; 2493 } 2494 2495 /*! 2496 * Flip the specified head. 2497 */ 2498 static NvBool Flip(struct NvKmsPerOpen *pOpen, 2499 void *pParamsVoid) 2500 { 2501 struct NvKmsFlipParams *pParams = pParamsVoid; 2502 struct NvKmsPerOpenDev *pOpenDev; 2503 NVDevEvoPtr pDevEvo = NULL; 2504 const struct NvKmsFlipRequest *pRequest = &pParams->request; 2505 const struct NvKmsFlipRequestOneHead *pFlipHead = 2506 nvKmsNvU64ToPointer(pRequest->pFlipHead); 2507 const NvU32 numFlipHeads = pRequest->numFlipHeads; 2508 2509 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2510 2511 if (pOpenDev == NULL) { 2512 return FALSE; 2513 } 2514 2515 pDevEvo = pOpenDev->pDevEvo; 2516 2517 if (!ValidateFlipHeads(pDevEvo, pFlipHead, numFlipHeads)) { 2518 return FALSE; 2519 } 2520 2521 return nvHsIoctlFlip(pDevEvo, pOpenDev, 2522 pFlipHead, numFlipHeads, 2523 pRequest->commit, pRequest->allowVrr, 2524 &pParams->reply); 2525 } 2526 2527 2528 /*! 2529 * Record whether this client is interested in the specified dynamic 2530 * dpy. 2531 */ 2532 static NvBool DeclareDynamicDpyInterest(struct NvKmsPerOpen *pOpen, 2533 void *pParamsVoid) 2534 { 2535 /* XXX NVKMS TODO: implement me. */ 2536 2537 return TRUE; 2538 } 2539 2540 2541 /*! 2542 * Register a surface with the specified per-open + device. 
2543 */ 2544 static NvBool RegisterSurface(struct NvKmsPerOpen *pOpen, 2545 void *pParamsVoid) 2546 { 2547 struct NvKmsRegisterSurfaceParams *pParams = pParamsVoid; 2548 struct NvKmsPerOpenDev *pOpenDev; 2549 2550 /* 2551 * Only allow userspace clients to specify memory objects by FD. 2552 * This prevents clients from specifying (hClient, hObject) tuples that 2553 * really belong to other clients. 2554 */ 2555 if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE && 2556 !pParams->request.useFd) { 2557 return FALSE; 2558 } 2559 2560 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2561 2562 if (pOpenDev == NULL) { 2563 return FALSE; 2564 } 2565 2566 nvEvoRegisterSurface(pOpenDev->pDevEvo, pOpenDev, pParams, 2567 NvHsMapPermissionsReadOnly); 2568 return TRUE; 2569 } 2570 2571 2572 /*! 2573 * Unregister a surface from the specified per-open + device. 2574 */ 2575 static NvBool UnregisterSurface(struct NvKmsPerOpen *pOpen, 2576 void *pParamsVoid) 2577 { 2578 struct NvKmsUnregisterSurfaceParams *pParams = pParamsVoid; 2579 struct NvKmsPerOpenDev *pOpenDev; 2580 2581 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2582 2583 if (pOpenDev == NULL) { 2584 return FALSE; 2585 } 2586 2587 nvEvoUnregisterSurface(pOpenDev->pDevEvo, pOpenDev, 2588 pParams->request.surfaceHandle, 2589 FALSE /* skipUpdate */); 2590 return TRUE; 2591 } 2592 2593 2594 /*! 2595 * Associate a surface with the NvKmsPerOpen specified by 2596 * NvKmsGrantSurfaceParams::request::fd. 
2597 */ 2598 static NvBool GrantSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 2599 { 2600 struct NvKmsGrantSurfaceParams *pParams = pParamsVoid; 2601 struct NvKmsPerOpenDev *pOpenDev; 2602 NVSurfaceEvoPtr pSurfaceEvo; 2603 struct NvKmsPerOpen *pOpenFd; 2604 2605 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2606 2607 if (pOpenDev == NULL) { 2608 return FALSE; 2609 } 2610 2611 pSurfaceEvo = 2612 nvEvoGetSurfaceFromHandleNoCtxDmaOk(pOpenDev->pDevEvo, 2613 &pOpenDev->surfaceHandles, 2614 pParams->request.surfaceHandle); 2615 if (pSurfaceEvo == NULL) { 2616 return FALSE; 2617 } 2618 2619 if (nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)) { 2620 return FALSE; 2621 } 2622 2623 /* Only the owner of the surface can grant it to other clients. */ 2624 2625 if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, 2626 pParams->request.surfaceHandle)) { 2627 return FALSE; 2628 } 2629 2630 pOpenFd = nvkms_get_per_open_data(pParams->request.fd); 2631 2632 if (pOpenFd == NULL) { 2633 return FALSE; 2634 } 2635 2636 if (!AssignNvKmsPerOpenType( 2637 pOpenFd, NvKmsPerOpenTypeGrantSurface, FALSE)) { 2638 return FALSE; 2639 } 2640 2641 nvEvoIncrementSurfaceStructRefCnt(pSurfaceEvo); 2642 pOpenFd->grantSurface.pSurfaceEvo = pSurfaceEvo; 2643 2644 return TRUE; 2645 } 2646 2647 2648 /*! 2649 * Retrieve the surface and device associated with 2650 * NvKmsAcquireSurfaceParams::request::fd, and give the client an 2651 * NvKmsSurfaceHandle to the surface. 
 */
static NvBool AcquireSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsAcquireSurfaceParams *pParams = pParamsVoid;
    struct NvKmsPerOpen *pOpenFd;
    struct NvKmsPerOpenDev *pOpenDev;
    NvKmsSurfaceHandle surfaceHandle = 0;

    pOpenFd = nvkms_get_per_open_data(pParams->request.fd);

    if (pOpenFd == NULL) {
        return FALSE;
    }

    /* The fd must have been set up by GrantSurface(). */
    if (pOpenFd->type != NvKmsPerOpenTypeGrantSurface) {
        return FALSE;
    }

    nvAssert(pOpenFd->grantSurface.pSurfaceEvo != NULL);

    if (pOpenFd->grantSurface.pSurfaceEvo->rmRefCnt == 0) { /* orphan */
        return FALSE;
    }

    if (nvEvoSurfaceRefCntsTooLarge(pOpenFd->grantSurface.pSurfaceEvo)) {
        return FALSE;
    }

    /* Since the surface isn't orphaned, it should have an owner, with a
     * pOpenDev and a pDevEvo. Get the pOpenDev for the acquiring client that
     * matches the owner's pDevEvo. */
    nvAssert(pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo != NULL);
    pOpenDev = DevEvoToOpenDev(pOpen,
        pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    surfaceHandle =
        nvEvoCreateApiHandle(&pOpenDev->surfaceHandles,
                             pOpenFd->grantSurface.pSurfaceEvo);

    if (surfaceHandle == 0) {
        return FALSE;
    }

    /* The acquiring client holds its own struct reference on the surface. */
    nvEvoIncrementSurfaceStructRefCnt(pOpenFd->grantSurface.pSurfaceEvo);

    pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle;
    pParams->reply.surfaceHandle = surfaceHandle;

    return TRUE;
}

/*!
 * Release the client's handle to the specified surface.
 */
static NvBool ReleaseSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsReleaseSurfaceParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    nvEvoReleaseSurface(pOpenDev->pDevEvo,
                        pOpenDev,
                        pParams->request.surfaceHandle);
    return TRUE;
}


/*!
 * Associate a swap group with the NvKmsPerOpen specified by
 * NvKmsGrantSwapGroupParams::request::fd.
 */
static NvBool GrantSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsGrantSwapGroupParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;
    NVSwapGroupRec *pSwapGroup;
    struct NvKmsPerOpen *pOpenFd;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    /* Only the modeset owner may grant a swap group. */
    if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) {
        return FALSE;
    }

    pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles,
                                  pParams->request.swapGroupHandle);

    if (pSwapGroup == NULL) {
        return FALSE;
    }

    pOpenFd = nvkms_get_per_open_data(pParams->request.fd);

    if (pOpenFd == NULL) {
        return FALSE;
    }

    /*
     * Increment the swap group refcnt while granting it so the SwapGroup
     * won't be freed out from under the grant fd. To complement this,
     * nvKmsClose() on NvKmsPerOpenTypeGrantSwapGroup calls
     * DecrementSwapGroupRefCnt().
     */
    if (!nvHsIncrementSwapGroupRefCnt(pSwapGroup)) {
        return FALSE;
    }

    if (!AssignNvKmsPerOpenType(
            pOpenFd, NvKmsPerOpenTypeGrantSwapGroup, FALSE)) {
        /* Roll back the reference taken above. */
        nvHsDecrementSwapGroupRefCnt(pSwapGroup);
        return FALSE;
    }

    /* we must not fail beyond this point */

    pOpenFd->grantSwapGroup.pSwapGroup = pSwapGroup;

    pOpenFd->grantSwapGroup.pDevEvo = pOpenDev->pDevEvo;

    return TRUE;
}


/*!
 * Retrieve the swap group and device associated with
 * NvKmsAcquireSwapGroupParams::request::fd, give the client an
 * NvKmsSwapGroupHandle to the swap group, and increment the
 * swap group's reference count.
2789 */ 2790 static NvBool AcquireSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 2791 { 2792 struct NvKmsAcquireSwapGroupParams *pParams = pParamsVoid; 2793 struct NvKmsPerOpen *pOpenFd; 2794 struct NvKmsPerOpenDev *pOpenDev; 2795 NvKmsSwapGroupHandle swapGroupHandle = 0; 2796 2797 pOpenFd = nvkms_get_per_open_data(pParams->request.fd); 2798 2799 if (pOpenFd == NULL) { 2800 return FALSE; 2801 } 2802 2803 if (pOpenFd->type != NvKmsPerOpenTypeGrantSwapGroup) { 2804 return FALSE; 2805 } 2806 2807 /* 2808 * pSwapGroup is only freed when its last reference goes away; if pOpenFd 2809 * hasn't yet been closed, then its reference incremented in 2810 * GrantSwapGroup() couldn't have been decremented in nvKmsClose() 2811 */ 2812 nvAssert(pOpenFd->grantSwapGroup.pSwapGroup != NULL); 2813 nvAssert(pOpenFd->grantSwapGroup.pDevEvo != NULL); 2814 2815 if (pOpenFd->grantSwapGroup.pSwapGroup->zombie) { 2816 return FALSE; 2817 } 2818 2819 pOpenDev = DevEvoToOpenDev(pOpen, pOpenFd->grantSwapGroup.pDevEvo); 2820 2821 if (pOpenDev == NULL) { 2822 return FALSE; 2823 } 2824 2825 if (nvEvoApiHandlePointerIsPresent(&pOpenDev->swapGroupHandles, 2826 pOpenFd->grantSwapGroup.pSwapGroup)) { 2827 return FALSE; 2828 } 2829 2830 if (!nvHsIncrementSwapGroupRefCnt(pOpenFd->grantSwapGroup.pSwapGroup)) { 2831 return FALSE; 2832 } 2833 2834 swapGroupHandle = 2835 nvEvoCreateApiHandle(&pOpenDev->swapGroupHandles, 2836 pOpenFd->grantSwapGroup.pSwapGroup); 2837 2838 if (swapGroupHandle == 0) { 2839 nvHsDecrementSwapGroupRefCnt(pOpenFd->grantSwapGroup.pSwapGroup); 2840 return FALSE; 2841 } 2842 2843 /* we must not fail beyond this point */ 2844 2845 pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; 2846 pParams->reply.swapGroupHandle = swapGroupHandle; 2847 2848 return TRUE; 2849 } 2850 2851 2852 /*! 2853 * Free this client's reference to the swap group. 2854 * 2855 * This is meant to be called by clients that have acquired the swap group 2856 * handle through AcquireSwapGroup(). 
2857 */ 2858 static NvBool ReleaseSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid) 2859 { 2860 struct NvKmsReleaseSwapGroupParams *pParams = pParamsVoid; 2861 struct NvKmsPerOpenDev *pOpenDev; 2862 NVSwapGroupRec *pSwapGroup; 2863 NvKmsSwapGroupHandle handle = pParams->request.swapGroupHandle; 2864 2865 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 2866 2867 if (pOpenDev == NULL) { 2868 return FALSE; 2869 } 2870 2871 /* 2872 * This may operate on a swap group that has already been freed 2873 * (pSwapGroup->zombie is TRUE). 2874 */ 2875 pSwapGroup = nvHsGetSwapGroupStruct(&pOpenDev->swapGroupHandles, 2876 handle); 2877 if (pSwapGroup == NULL) { 2878 return FALSE; 2879 } 2880 2881 nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle); 2882 2883 nvHsDecrementSwapGroupRefCnt(pSwapGroup); 2884 2885 return TRUE; 2886 } 2887 2888 /*! 2889 * Change the value of the specified attribute. 2890 */ 2891 static NvBool SetDpyAttribute(struct NvKmsPerOpen *pOpen, 2892 void *pParamsVoid) 2893 { 2894 struct NvKmsSetDpyAttributeParams *pParams = pParamsVoid; 2895 NVDpyEvoPtr pDpyEvo; 2896 2897 pDpyEvo = GetPerOpenDpy(pOpen, 2898 pParams->request.deviceHandle, 2899 pParams->request.dispHandle, 2900 pParams->request.dpyId); 2901 if (pDpyEvo == NULL) { 2902 return FALSE; 2903 } 2904 2905 return nvSetDpyAttributeEvo(pDpyEvo, pParams); 2906 } 2907 2908 2909 /*! 2910 * Get the value of the specified attribute. 2911 */ 2912 static NvBool GetDpyAttribute(struct NvKmsPerOpen *pOpen, 2913 void *pParamsVoid) 2914 { 2915 struct NvKmsGetDpyAttributeParams *pParams = pParamsVoid; 2916 NVDpyEvoPtr pDpyEvo; 2917 2918 pDpyEvo = GetPerOpenDpy(pOpen, 2919 pParams->request.deviceHandle, 2920 pParams->request.dispHandle, 2921 pParams->request.dpyId); 2922 if (pDpyEvo == NULL) { 2923 return FALSE; 2924 } 2925 2926 return nvGetDpyAttributeEvo(pDpyEvo, pParams); 2927 } 2928 2929 2930 /*! 2931 * Get the valid values of the specified attribute. 
2932 */ 2933 static NvBool GetDpyAttributeValidValues(struct NvKmsPerOpen *pOpen, 2934 void *pParamsVoid) 2935 { 2936 struct NvKmsGetDpyAttributeValidValuesParams *pParams = pParamsVoid; 2937 NVDpyEvoPtr pDpyEvo; 2938 2939 pDpyEvo = GetPerOpenDpy(pOpen, 2940 pParams->request.deviceHandle, 2941 pParams->request.dispHandle, 2942 pParams->request.dpyId); 2943 if (pDpyEvo == NULL) { 2944 return FALSE; 2945 } 2946 2947 return nvGetDpyAttributeValidValuesEvo(pDpyEvo, pParams); 2948 } 2949 2950 2951 /*! 2952 * Set the value of the specified attribute. 2953 */ 2954 static NvBool SetDispAttribute(struct NvKmsPerOpen *pOpen, 2955 void *pParamsVoid) 2956 { 2957 struct NvKmsSetDispAttributeParams *pParams = pParamsVoid; 2958 struct NvKmsPerOpenDisp *pOpenDisp; 2959 2960 pOpenDisp = GetPerOpenDisp(pOpen, 2961 pParams->request.deviceHandle, 2962 pParams->request.dispHandle); 2963 if (pOpenDisp == NULL) { 2964 return FALSE; 2965 } 2966 2967 return nvSetDispAttributeEvo(pOpenDisp->pDispEvo, pParams); 2968 } 2969 2970 2971 /*! 2972 * Get the value of the specified attribute. 2973 */ 2974 static NvBool GetDispAttribute(struct NvKmsPerOpen *pOpen, 2975 void *pParamsVoid) 2976 { 2977 struct NvKmsGetDispAttributeParams *pParams = pParamsVoid; 2978 struct NvKmsPerOpenDisp *pOpenDisp; 2979 2980 pOpenDisp = GetPerOpenDisp(pOpen, 2981 pParams->request.deviceHandle, 2982 pParams->request.dispHandle); 2983 if (pOpenDisp == NULL) { 2984 return FALSE; 2985 } 2986 2987 return nvGetDispAttributeEvo(pOpenDisp->pDispEvo, pParams); 2988 } 2989 2990 2991 /*! 2992 * Get the valid values of the specified attribute. 
 */
static NvBool GetDispAttributeValidValues(struct NvKmsPerOpen *pOpen,
                                          void *pParamsVoid)
{
    struct NvKmsGetDispAttributeValidValuesParams *pParams = pParamsVoid;

    struct NvKmsPerOpenDisp *pOpenDisp;

    pOpenDisp = GetPerOpenDisp(pOpen,
                               pParams->request.deviceHandle,
                               pParams->request.dispHandle);
    if (pOpenDisp == NULL) {
        return FALSE;
    }

    return nvGetDispAttributeValidValuesEvo(pOpenDisp->pDispEvo, pParams);
}


/*!
 * Get information about the specified framelock device.
 */
static NvBool QueryFrameLock(struct NvKmsPerOpen *pOpen,
                             void *pParamsVoid)
{
    struct NvKmsQueryFrameLockParams *pParams = pParamsVoid;
    struct NvKmsPerOpenFrameLock *pOpenFrameLock;
    const NVFrameLockEvoRec *pFrameLockEvo;
    NvU32 gpu;

    nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));

    pOpenFrameLock =
        GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle);

    if (pOpenFrameLock == NULL) {
        return FALSE;
    }

    pFrameLockEvo = pOpenFrameLock->pFrameLockEvo;

    /* The reply's gpuIds[] array must be able to hold every GPU id. */
    ct_assert(ARRAY_LEN(pFrameLockEvo->gpuIds) <=
              ARRAY_LEN(pParams->reply.gpuIds));

    for (gpu = 0; gpu < pFrameLockEvo->nGpuIds; gpu++) {
        pParams->reply.gpuIds[gpu] = pFrameLockEvo->gpuIds[gpu];
    }

    return TRUE;
}


/*!
 * Change the value of the specified framelock attribute.
 */
static NvBool SetFrameLockAttribute(struct NvKmsPerOpen *pOpen,
                                    void *pParamsVoid)
{
    struct NvKmsSetFrameLockAttributeParams *pParams = pParamsVoid;
    struct NvKmsPerOpenFrameLock *pOpenFrameLock;
    NVFrameLockEvoRec *pFrameLockEvo;

    pOpenFrameLock =
        GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle);

    if (pOpenFrameLock == NULL) {
        return FALSE;
    }

    pFrameLockEvo = pOpenFrameLock->pFrameLockEvo;

    return nvSetFrameLockAttributeEvo(pFrameLockEvo, pParams);
}


/*!
 * Get the value of the specified framelock attribute.
 */
static NvBool
GetFrameLockAttribute(struct NvKmsPerOpen *pOpen,
                      void *pParamsVoid)
{
    struct NvKmsGetFrameLockAttributeParams *pParams = pParamsVoid;
    struct NvKmsPerOpenFrameLock *pOpenFrameLock;
    const NVFrameLockEvoRec *pFrameLockEvo;

    nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));

    pOpenFrameLock =
        GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle);

    if (pOpenFrameLock == NULL) {
        return FALSE;
    }

    pFrameLockEvo = pOpenFrameLock->pFrameLockEvo;

    return nvGetFrameLockAttributeEvo(pFrameLockEvo, pParams);
}


/*!
 * Get the valid values of the specified framelock attribute.
 */
static NvBool GetFrameLockAttributeValidValues(struct NvKmsPerOpen *pOpen,
                                               void *pParamsVoid)
{
    struct NvKmsGetFrameLockAttributeValidValuesParams *pParams = pParamsVoid;
    struct NvKmsPerOpenFrameLock *pOpenFrameLock;
    const NVFrameLockEvoRec *pFrameLockEvo;

    nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));

    pOpenFrameLock =
        GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle);

    if (pOpenFrameLock == NULL) {
        return FALSE;
    }

    pFrameLockEvo = pOpenFrameLock->pFrameLockEvo;

    return nvGetFrameLockAttributeValidValuesEvo(pFrameLockEvo, pParams);
}


/*!
 * Pop the next event off of the client's event queue.
 */
static NvBool GetNextEvent(struct NvKmsPerOpen *pOpen,
                           void *pParamsVoid)
{
    struct NvKmsGetNextEventParams *pParams = pParamsVoid;
    struct NvKmsPerOpenEventListEntry *pEntry;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    if (nvListIsEmpty(&pOpen->ioctl.eventList)) {
        /* An empty queue is reported as "no event", not as failure. */
        pParams->reply.valid = FALSE;
        return TRUE;
    }

    pEntry = nvListFirstEntry(&pOpen->ioctl.eventList,
                              struct NvKmsPerOpenEventListEntry,
                              eventListEntry);

    pParams->reply.valid = TRUE;
    pParams->reply.event = pEntry->event;

    nvListDel(&pEntry->eventListEntry);

    nvFree(pEntry);

    if (nvListIsEmpty(&pOpen->ioctl.eventList)) {
        /* The queue just became empty; notify the kernel interface layer. */
        nvkms_event_queue_changed(pOpen->pOpenKernel, FALSE);
    }

    return TRUE;
}


/*!
 * Record the client's event interest for the specified device.
 */
static NvBool DeclareEventInterest(struct NvKmsPerOpen *pOpen,
                                   void *pParamsVoid)
{
    struct NvKmsDeclareEventInterestParams *pParams = pParamsVoid;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    pOpen->ioctl.eventInterestMask = pParams->request.interestMask;

    return TRUE;
}

/*!
 * Clear the pending-event notification state of the unicast event fd.
 */
static NvBool ClearUnicastEvent(struct NvKmsPerOpen *pOpen,
                                void *pParamsVoid)
{
    struct NvKmsClearUnicastEventParams *pParams = pParamsVoid;
    struct NvKmsPerOpen *pOpenFd = NULL;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    pOpenFd = nvkms_get_per_open_data(pParams->request.unicastEventFd);

    if (pOpenFd == NULL) {
        return FALSE;
    }

    /* The fd must be a unicast-event per-open. */
    if (pOpenFd->type != NvKmsPerOpenTypeUnicastEvent) {
        return FALSE;
    }

    nvkms_event_queue_changed(pOpenFd->pOpenKernel, FALSE);

    return TRUE;
}

/*!
 * Set the position of a layer on a head.
 */
static NvBool SetLayerPosition(struct NvKmsPerOpen *pOpen,
                               void *pParamsVoid)
{
    struct NvKmsSetLayerPositionParams *pParams = pParamsVoid;
    struct
NvKmsPerOpenDev *pOpenDev;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    /* XXX NVKMS HEADSURFACE TODO: intercept */

    return nvLayerSetPositionEvo(pOpenDev->pDevEvo, &pParams->request);
}

/*!
 * Acquire ownership of the specified device.
 */
static NvBool GrabOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsGrabOwnershipParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    // The only kind of ownership right now is modeset ownership.
    return GrabModesetOwnership(pOpenDev);
}

/*!
 * Release ownership of the specified device.
 */
static NvBool ReleaseOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsReleaseOwnershipParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    // The only kind of ownership right now is modeset ownership.
    return ReleaseModesetOwnership(pOpenDev);
}

/*!
 * Grant flip or modeset permissions to the NvKmsPerOpen behind
 * NvKmsGrantPermissionsParams::request::fd.
 */
static NvBool GrantPermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsGrantPermissionsParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;
    struct NvKmsPerOpen *pOpenFd;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    /* Only the modesetOwner can grant permissions.
     */

    if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) {
        return FALSE;
    }

    if (!ValidateNvKmsPermissions(pOpenDev->pDevEvo,
                                  &pParams->request.permissions)) {
        return FALSE;
    }

    pOpenFd = nvkms_get_per_open_data(pParams->request.fd);

    if (pOpenFd == NULL) {
        return FALSE;
    }

    if (!AssignNvKmsPerOpenType(
            pOpenFd, NvKmsPerOpenTypeGrantPermissions, FALSE)) {
        return FALSE;
    }

    /* Stash the granted permissions and device on the grant fd. */
    pOpenFd->grantPermissions.permissions = pParams->request.permissions;

    pOpenFd->grantPermissions.pDevEvo = pOpenDev->pDevEvo;

    return TRUE;
}

/*!
 * Merge the permissions granted on NvKmsAcquirePermissionsParams::request::fd
 * into the acquiring client's permission set.
 */
static NvBool AcquirePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsAcquirePermissionsParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;
    struct NvKmsPerOpen *pOpenFd;
    const struct NvKmsPermissions *pPermissionsNew;
    enum NvKmsPermissionsType type;

    pOpenFd = nvkms_get_per_open_data(pParams->request.fd);

    if (pOpenFd == NULL) {
        return FALSE;
    }

    /* The fd must have been set up by GrantPermissions(). */
    if (pOpenFd->type != NvKmsPerOpenTypeGrantPermissions) {
        return FALSE;
    }

    pOpenDev = DevEvoToOpenDev(pOpen, pOpenFd->grantPermissions.pDevEvo);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    type = pOpenFd->grantPermissions.permissions.type;

    pPermissionsNew = &pOpenFd->grantPermissions.permissions;

    if (type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) {
        NvU32 d, h;

        /* Merge the granted per-layer flip masks into this client's set. */
        for (d = 0; d < ARRAY_LEN(pOpenDev->flipPermissions.disp); d++) {
            for (h = 0; h < ARRAY_LEN(pOpenDev->flipPermissions.
disp[d].head); h++) {
                pOpenDev->flipPermissions.disp[d].head[h].layerMask |=
                    pPermissionsNew->flip.disp[d].head[h].layerMask;
            }
        }

        pParams->reply.permissions.flip = pOpenDev->flipPermissions;

    } else if (type == NV_KMS_PERMISSIONS_TYPE_MODESET) {
        NvU32 d, h;

        /* Merge the granted dpy lists into this client's modeset set. */
        for (d = 0; d < ARRAY_LEN(pOpenDev->modesetPermissions.disp); d++) {
            for (h = 0; h < ARRAY_LEN(pOpenDev->modesetPermissions.
                                      disp[d].head); h++) {
                pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList =
                    nvAddDpyIdListToDpyIdList(
                        pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList,
                        pPermissionsNew->modeset.disp[d].head[h].dpyIdList);
            }
        }

        pParams->reply.permissions.modeset = pOpenDev->modesetPermissions;

    } else {
        /*
         * GrantPermissions() should ensure that
         * pOpenFd->grantPermissions.permissions.type is always valid.
         */
        nvAssert(!"AcquirePermissions validation failure");
        return FALSE;
    }

    pParams->reply.permissions.type = type;
    pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle;

    return TRUE;
}

/*!
 * Clear the set of permissions from pRevokingOpenDev.
 *
 * For NvKmsPerOpen::type==Ioctl, clear from permissions. It doesn't clear
 * itself or privileged.
 *
 * For NvKmsPerOpen::type==GrantPermissions, clear from
 * NvKmsPerOpen::grantPermissions, and reset NvKmsPerOpen::type to Undefined
 * if it is empty.
 */
static NvBool RevokePermissionsSet(
    struct NvKmsPerOpenDev *pRevokingOpenDev,
    const struct NvKmsPermissions *pRevokingPermissions)
{
    const NVDevEvoRec *pDevEvo;
    struct NvKmsPerOpen *pOpen;
    const struct NvKmsFlipPermissions *pRemoveFlip;
    const struct NvKmsModesetPermissions *pRemoveModeset;

    // Only process valid permissions.
    if (pRevokingPermissions->type != NV_KMS_PERMISSIONS_TYPE_FLIPPING &&
        pRevokingPermissions->type != NV_KMS_PERMISSIONS_TYPE_MODESET) {
        return FALSE;
    }

    pDevEvo = pRevokingOpenDev->pDevEvo;
    pRemoveFlip =
        (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING)
        ? &pRevokingPermissions->flip
        : NULL;
    pRemoveModeset =
        (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET)
        ? &pRevokingPermissions->modeset
        : NULL;

    /* Walk every per-open in the system and strip the revoked permissions. */
    nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) {
        if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) &&
            (pOpen->grantPermissions.pDevEvo == pDevEvo)) {
            NvBool remainingPermissions = FALSE;
            struct NvKmsPermissions *pFdPermissions =
                &pOpen->grantPermissions.permissions;

            if (pFdPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) {
                remainingPermissions =
                    RemoveFlipPermissions(&pFdPermissions->flip, pRemoveFlip);
            } else {
                remainingPermissions = RemoveModesetPermissions(
                    &pFdPermissions->modeset, pRemoveModeset);
            }

            // Reset if it is empty.
            if (!remainingPermissions) {
                nvkms_memset(&pOpen->grantPermissions, 0,
                             sizeof(pOpen->grantPermissions));
                pOpen->type = NvKmsPerOpenTypeUndefined;
            }

        } else if (pOpen->type == NvKmsPerOpenTypeIoctl) {

            struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);
            if (pOpenDev == NULL) {
                continue;
            }

            /* Never strip the revoker itself, nor privileged opens. */
            if (pOpenDev == pRevokingOpenDev || pOpenDev->isPrivileged) {
                continue;
            }

            if (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) {
                RemoveFlipPermissions(&pOpenDev->flipPermissions, pRemoveFlip);
            } else {
                RemoveModesetPermissions(&pOpenDev->modesetPermissions,
                                         pRemoveModeset);
            }
        }
    }

    return TRUE;
}

/*!
 * Revoke permissions previously granted by this device's modeset owner.
 */
static NvBool RevokePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsRevokePermissionsParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev =
        GetPerOpenDev(pOpen, pParams->request.deviceHandle);
    const NvU32 validBitmask =
        NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) |
        NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    /* Only the modeset owner can revoke permissions. */
    if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) {
        return FALSE;
    }

    /* Reject invalid bitmasks. */

    if ((pParams->request.permissionsTypeBitmask & ~validBitmask) != 0) {
        return FALSE;
    }

    if (pParams->request.permissionsTypeBitmask > 0) {
        // Old behavior, revoke all permissions of a type.

        /* Revoke permissions for everyone except the caller. */
        RevokePermissionsInternal(pParams->request.permissionsTypeBitmask,
                                  pOpenDev->pDevEvo,
                                  pOpenDev /* pOpenDevExclude */);
    } else {
        /* If not using bitmask, revoke using the set.
*/ 3454 return RevokePermissionsSet(pOpenDev, &pParams->request.permissions); 3455 } 3456 3457 return TRUE; 3458 } 3459 3460 static NvBool RegisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen, 3461 void *pParamsVoid) 3462 { 3463 struct NvKmsRegisterDeferredRequestFifoParams *pParams = pParamsVoid; 3464 struct NvKmsPerOpenDev *pOpenDev; 3465 NVSurfaceEvoPtr pSurfaceEvo; 3466 NVDeferredRequestFifoRec *pDeferredRequestFifo; 3467 NvKmsDeferredRequestFifoHandle handle; 3468 3469 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3470 3471 if (pOpenDev == NULL) { 3472 return FALSE; 3473 } 3474 3475 pSurfaceEvo = nvEvoGetSurfaceFromHandleNoCtxDmaOk( 3476 pOpenDev->pDevEvo, 3477 &pOpenDev->surfaceHandles, 3478 pParams->request.surfaceHandle); 3479 3480 if (pSurfaceEvo == NULL) { 3481 return FALSE; 3482 } 3483 3484 /* 3485 * WAR Bug 2050970: If a surface is unregistered and it wasn't registered 3486 * with NvKmsRegisterSurfaceRequest::noDisplayHardwareAccess, then the call 3487 * to nvRMSyncEvoChannel() in nvEvoDecrementSurfaceRefCnts() may hang 3488 * if any flips in flight acquire on semaphore releases that haven't 3489 * occurred yet. 3490 * 3491 * Since a ctxdma is not necessary for the deferred request fifo surface, 3492 * we work around this by forcing all surfaces that will be registered as 3493 * a deferred request fifo to be registered with 3494 * noDisplayHardwareAccess==TRUE, then skip the idle in 3495 * nvEvoDecrementSurfaceRefCnts() for these surfaces. 
3496 */ 3497 if (pSurfaceEvo->requireCtxDma) { 3498 return FALSE; 3499 } 3500 3501 pDeferredRequestFifo = 3502 nvEvoRegisterDeferredRequestFifo(pOpenDev->pDevEvo, pSurfaceEvo); 3503 3504 if (pDeferredRequestFifo == NULL) { 3505 return FALSE; 3506 } 3507 3508 handle = nvEvoCreateApiHandle(&pOpenDev->deferredRequestFifoHandles, 3509 pDeferredRequestFifo); 3510 3511 if (handle == 0) { 3512 nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, 3513 pDeferredRequestFifo); 3514 return FALSE; 3515 } 3516 3517 pParams->reply.deferredRequestFifoHandle = handle; 3518 3519 return TRUE; 3520 } 3521 3522 static NvBool UnregisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen, 3523 void *pParamsVoid) 3524 { 3525 struct NvKmsUnregisterDeferredRequestFifoParams *pParams = pParamsVoid; 3526 NvKmsDeferredRequestFifoHandle handle = 3527 pParams->request.deferredRequestFifoHandle; 3528 NVDeferredRequestFifoRec *pDeferredRequestFifo; 3529 struct NvKmsPerOpenDev *pOpenDev = 3530 GetPerOpenDev(pOpen, pParams->request.deviceHandle); 3531 3532 if (pOpenDev == NULL) { 3533 return FALSE; 3534 } 3535 3536 pDeferredRequestFifo = 3537 nvEvoGetPointerFromApiHandle( 3538 &pOpenDev->deferredRequestFifoHandles, handle); 3539 3540 if (pDeferredRequestFifo == NULL) { 3541 return FALSE; 3542 } 3543 3544 nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle); 3545 3546 nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, pDeferredRequestFifo); 3547 3548 return TRUE; 3549 } 3550 3551 /*! 3552 * Get the CRC32 data for the specified dpy. 
 */
static NvBool QueryDpyCRC32(struct NvKmsPerOpen *pOpen,
                            void *pParamsVoid)
{
    struct NvKmsQueryDpyCRC32Params *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;
    struct NvKmsPerOpenDisp *pOpenDisp;
    NVDispEvoPtr pDispEvo;
    CRC32NotifierCrcOut crcOut;

    if (!GetPerOpenDevAndDisp(pOpen,
                              pParams->request.deviceHandle,
                              pParams->request.dispHandle,
                              &pOpenDev,
                              &pOpenDisp)) {
        return FALSE;
    }

    if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) {
        // Only the current owner can query CRC32 values.
        return FALSE;
    }

    pDispEvo = pOpenDisp->pDispEvo;

    if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) {
        return FALSE;
    }

    nvkms_memset(&(pParams->reply), 0, sizeof(pParams->reply));

    // Since will only read 1 frame of CRCs, point to single reply struct vals
    crcOut.rasterGeneratorCrc32 = &(pParams->reply.rasterGeneratorCrc32);
    crcOut.compositorCrc32 = &(pParams->reply.compositorCrc32);
    crcOut.outputCrc32 = &(pParams->reply.outputCrc32);

    {
        /*
         * XXX[2Heads1OR] Is it sufficient to query CRC only for the primary
         * hardware head?
         */
        NvU32 head = nvGetPrimaryHwHead(pDispEvo, pParams->request.head);

        /* NOTE(review): nvAssert is compiled out in release builds; the api
         * head was verified active above, so a primary hw head is assumed to
         * exist here. */
        nvAssert(head != NV_INVALID_HEAD);

        if (!nvReadCRC32Evo(pDispEvo, head, &crcOut)) {
            return FALSE;
        }
    }

    return TRUE;
}

/*!
 * Handle NVKMS_IOCTL_ALLOC_SWAP_GROUP: create a swap group on the device
 * and return a handle to it.  Only the modeset owner may allocate swap
 * groups.
 */
static NvBool AllocSwapGroup(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsAllocSwapGroupParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;
    NVSwapGroupRec *pSwapGroup;
    NvKmsSwapGroupHandle handle;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) {
        return FALSE;
    }

    pSwapGroup = nvHsAllocSwapGroup(pOpenDev->pDevEvo, &pParams->request);

    if (pSwapGroup == NULL) {
        return FALSE;
    }

    handle = nvEvoCreateApiHandle(&pOpenDev->swapGroupHandles, pSwapGroup);

    if (handle == 0) {
        /* Handle allocation failed: free the swap group created above. */
        nvHsFreeSwapGroup(pOpenDev->pDevEvo, pSwapGroup);
        return FALSE;
    }

    pParams->reply.swapGroupHandle = handle;

    return TRUE;
}

/*!
 * Handle NVKMS_IOCTL_FREE_SWAP_GROUP: look up the swap group by handle,
 * remove the handle, and free the swap group.  Only the modeset owner may
 * free swap groups.
 */
static NvBool FreeSwapGroup(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsFreeSwapGroupParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;
    NVSwapGroupRec *pSwapGroup;
    NvKmsSwapGroupHandle handle = pParams->request.swapGroupHandle;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) {
        return FALSE;
    }

    pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles,
                                  handle);
    if (pSwapGroup == NULL) {
        return FALSE;
    }

    nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle);

    nvHsFreeSwapGroup(pOpenDev->pDevEvo, pSwapGroup);

    return TRUE;
}

static
NvBool JoinSwapGroup(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsJoinSwapGroupParams *pParams = pParamsVoid;
    const struct NvKmsJoinSwapGroupRequestOneMember *pMember =
        pParams->request.member;
    NvU32 i;
    NvBool anySwapGroupsPending = FALSE;
    NVHsJoinSwapGroupWorkArea *pJoinSwapGroupWorkArea;

    /* Validate the member count before sizing the work area from it. */
    if ((pParams->request.numMembers == 0) ||
        (pParams->request.numMembers >
         ARRAY_LEN(pParams->request.member))) {
        return FALSE;
    }

    pJoinSwapGroupWorkArea = nvCalloc(pParams->request.numMembers,
                                      sizeof(NVHsJoinSwapGroupWorkArea));

    if (!pJoinSwapGroupWorkArea) {
        return FALSE;
    }

    /*
     * When a client is joining multiple swap groups simultaneously, all of its
     * deferred request fifos must enter the pendingJoined state if any of the
     * swap groups it's joining have pending flips. Otherwise, this sequence
     * can lead to a deadlock:
     *
     * - Client 0 joins DRF 0 to SG 0, DRF 1 to SG 1, with SG 0 and SG 1
     *   fliplocked
     * - Client 0 submits DRF 0 ready, SG 0 flips, but the flip won't complete
     *   and [Client 0.DRF 0] won't be released until SG 1 flips due to
     *   fliplock
     * - Client 1 joins DRF 0 to SG 0, DRF 1 to SG 1
     * - Client 0 submits DRF 1 ready, but SG 1 doesn't flip because
     *   [Client 1.DRF 0] has joined.
     *
     * With the pendingJoined behavior, this sequence works as follows:
     *
     * - Client 0 joins DRF 0 to SG 0, DRF 1 to SG 1, with SG 0 and SG 1
     *   fliplocked
     * - Client 0 submits DRF 0 ready, SG 0 flips, but the flip won't complete
     *   and [Client 0.DRF 0] won't be released until SG 1 flips due to
     *   fliplock
     * - Client 1 joins DRF 0 to SG 0, DRF 1 to SG 1, but both enter the
     *   pendingJoined state because [Client 0.DRF 0] has a pending flip.
     * - Client 0 submits DRF 1 ready, both swap groups flip, Client 0's
     *   DRFs are both released, and Client 1's DRFs both leave the
     *   pendingJoined state.
     */

    /* Pass 1: validate every member; nothing is mutated until all pass. */
    for (i = 0; i < pParams->request.numMembers; i++) {
        struct NvKmsPerOpenDev *pOpenDev;
        NVSwapGroupRec *pSwapGroup;
        NVDeferredRequestFifoRec *pDeferredRequestFifo;
        struct NvKmsPerOpen *pEventOpenFd = NULL;
        NvKmsDeviceHandle deviceHandle = pMember[i].deviceHandle;
        NvKmsSwapGroupHandle swapGroupHandle = pMember[i].swapGroupHandle;
        NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle =
            pMember[i].deferredRequestFifoHandle;

        pOpenDev = GetPerOpenDev(pOpen, deviceHandle);

        if (pOpenDev == NULL) {
            goto fail;
        }

        pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles,
                                      swapGroupHandle);

        if (pSwapGroup == NULL) {
            goto fail;
        }

        if (pSwapGroup->pendingFlip) {
            anySwapGroupsPending = TRUE;
        }

        /*
         * In addition to the check for pending swap groups above, validate
         * the remainder of the request now.
         */

        /*
         * Prevent pSwapGroup->nMembers from overflowing NV_U32_MAX.
         *
         * Ideally we would want to count how many members are being added to
         * each swap group in the request, but as an optimization, just verify
         * that the number of {fifo, swapgroup} tuples joining would not
         * overflow any swapgroup even if every one was joining the same
         * swapgroup.
         */
        if (NV_U32_MAX - pSwapGroup->nMembers < pParams->request.numMembers) {
            goto fail;
        }

        pDeferredRequestFifo =
            nvEvoGetPointerFromApiHandle(
                &pOpenDev->deferredRequestFifoHandles,
                deferredRequestFifoHandle);

        if (pDeferredRequestFifo == NULL) {
            goto fail;
        }

        /*
         * If the pDeferredRequestFifo is already a member of a SwapGroup, then
         * fail.
         */
        if (pDeferredRequestFifo->swapGroup.pSwapGroup != NULL) {
            goto fail;
        }

        if (pMember[i].unicastEvent.specified) {
            pEventOpenFd = nvkms_get_per_open_data(pMember[i].unicastEvent.fd);

            if (pEventOpenFd == NULL) {
                goto fail;
            }

            if (!PerOpenIsValidForUnicastEvent(pEventOpenFd)) {
                goto fail;
            }
        }

        pJoinSwapGroupWorkArea[i].pDevEvo = pOpenDev->pDevEvo;
        pJoinSwapGroupWorkArea[i].pSwapGroup = pSwapGroup;
        pJoinSwapGroupWorkArea[i].pDeferredRequestFifo = pDeferredRequestFifo;
        pJoinSwapGroupWorkArea[i].pEventOpenFd = pEventOpenFd;
        pJoinSwapGroupWorkArea[i].enabledHeadSurface = FALSE;
    }

    if (!nvHsJoinSwapGroup(pJoinSwapGroupWorkArea,
                           pParams->request.numMembers,
                           anySwapGroupsPending)) {
        goto fail;
    }

    /* Beyond this point, the function cannot fail. */

    /* Pass 2: commit -- bind each requested unicast event fd to its fifo. */
    for (i = 0; i < pParams->request.numMembers; i++) {
        struct NvKmsPerOpen *pEventOpenFd =
            pJoinSwapGroupWorkArea[i].pEventOpenFd;
        NVDeferredRequestFifoRec *pDeferredRequestFifo =
            pJoinSwapGroupWorkArea[i].pDeferredRequestFifo;

        if (pEventOpenFd) {
            pDeferredRequestFifo->swapGroup.pOpenUnicastEvent = pEventOpenFd;

            pEventOpenFd->unicastEvent.type =
                NvKmsUnicastEventTypeDeferredRequest;
            pEventOpenFd->unicastEvent.e.deferred.pDeferredRequestFifo =
                pDeferredRequestFifo;

            pEventOpenFd->type = NvKmsPerOpenTypeUnicastEvent;
        }
    }

    nvFree(pJoinSwapGroupWorkArea);
    return TRUE;

fail:
    nvFree(pJoinSwapGroupWorkArea);
    return FALSE;
}

/*!
 * Handle NVKMS_IOCTL_LEAVE_SWAP_GROUP: remove each listed deferred request
 * fifo from the swap group it has joined.
 */
static NvBool LeaveSwapGroup(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsLeaveSwapGroupParams *pParams = pParamsVoid;
    const struct NvKmsLeaveSwapGroupRequestOneMember *pMember =
        pParams->request.member;
    NvU32 i;

    if ((pParams->request.numMembers == 0) ||
        (pParams->request.numMembers >
         ARRAY_LEN(pParams->request.member))) {
        return FALSE;
    }

    /*
     * Validate all handles passed by the caller and fail if any are invalid.
     */
    for (i = 0; i < pParams->request.numMembers; i++) {
        struct NvKmsPerOpenDev *pOpenDev;
        NVDeferredRequestFifoRec *pDeferredRequestFifo;
        NvKmsDeviceHandle deviceHandle =
            pMember[i].deviceHandle;
        NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle =
            pMember[i].deferredRequestFifoHandle;

        pOpenDev = GetPerOpenDev(pOpen, deviceHandle);

        if (pOpenDev == NULL) {
            return FALSE;
        }

        pDeferredRequestFifo =
            nvEvoGetPointerFromApiHandle(
                &pOpenDev->deferredRequestFifoHandles,
                deferredRequestFifoHandle);

        if (pDeferredRequestFifo == NULL) {
            return FALSE;
        }

        /* The fifo must currently be a member of some swap group. */
        if (pDeferredRequestFifo->swapGroup.pSwapGroup == NULL) {
            return FALSE;
        }
    }

    /* Beyond this point, the function cannot fail. */

    for (i = 0; i < pParams->request.numMembers; i++) {
        struct NvKmsPerOpenDev *pOpenDev;
        NVDeferredRequestFifoRec *pDeferredRequestFifo;
        NvKmsDeviceHandle deviceHandle =
            pMember[i].deviceHandle;
        NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle =
            pMember[i].deferredRequestFifoHandle;

        /* All handles were validated above, so these lookups cannot fail. */
        pOpenDev = GetPerOpenDev(pOpen, deviceHandle);

        pDeferredRequestFifo =
            nvEvoGetPointerFromApiHandle(
                &pOpenDev->deferredRequestFifoHandles,
                deferredRequestFifoHandle);

        nvHsLeaveSwapGroup(pOpenDev->pDevEvo, pDeferredRequestFifo,
                           FALSE /* teardown */);
    }

    return TRUE;
}

/*!
 * Handle NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST: copy the client-provided
 * clip list into a kernel allocation and hand it to headSurface.  Only the
 * modeset owner may set the clip list.
 */
static NvBool SetSwapGroupClipList(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsSetSwapGroupClipListParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;
    NVSwapGroupRec *pSwapGroup;
    struct NvKmsRect *pClipList;
    NvBool ret;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) {
        return FALSE;
    }

    pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles,
                                  pParams->request.swapGroupHandle);

    if (pSwapGroup == NULL) {
        return FALSE;
    }

    /*
     * Create a copy of the passed-in pClipList, to be stored in pSwapGroup.
     * Copy from the client using nvkms_copyin() or nvkms_memcpy(), depending on
     * the clientType.
     *
     * We do not use the nvKmsIoctl() prepUser/doneUser infrastructure here
     * because that would require creating two copies of pClipList in the
     * user-space client case: one allocated in prepUser and freed in doneUser,
     * and a second in nvHsSetSwapGroupClipList().
3948 */ 3949 if (pParams->request.nClips == 0) { 3950 pClipList = NULL; 3951 } else { 3952 const size_t len = sizeof(struct NvKmsRect) * pParams->request.nClips; 3953 3954 if ((pParams->request.pClipList == 0) || 3955 !nvKmsNvU64AddressIsSafe(pParams->request.pClipList)) { 3956 return FALSE; 3957 } 3958 3959 pClipList = nvAlloc(len); 3960 3961 if (pClipList == NULL) { 3962 return FALSE; 3963 } 3964 3965 if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) { 3966 int status = 3967 nvkms_copyin(pClipList, pParams->request.pClipList, len); 3968 3969 if (status != 0) { 3970 nvFree(pClipList); 3971 return FALSE; 3972 } 3973 } else { 3974 const void *pKernelPointer = 3975 nvKmsNvU64ToPointer(pParams->request.pClipList); 3976 3977 nvkms_memcpy(pClipList, pKernelPointer, len); 3978 } 3979 } 3980 3981 ret = nvHsSetSwapGroupClipList( 3982 pOpenDev->pDevEvo, 3983 pSwapGroup, 3984 pParams->request.nClips, 3985 pClipList); 3986 3987 if (!ret) { 3988 nvFree(pClipList); 3989 } 3990 3991 return ret; 3992 } 3993 3994 static NvBool SwitchMux( 3995 struct NvKmsPerOpen *pOpen, 3996 void *pParamsVoid) 3997 { 3998 struct NvKmsSwitchMuxParams *pParams = pParamsVoid; 3999 const struct NvKmsSwitchMuxRequest *r = &pParams->request; 4000 NVDpyEvoPtr pDpyEvo; 4001 NVDevEvoPtr pDevEvo; 4002 4003 pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId); 4004 if (pDpyEvo == NULL) { 4005 return FALSE; 4006 } 4007 4008 pDevEvo = pDpyEvo->pDispEvo->pDevEvo; 4009 if (pDevEvo->modesetOwner != GetPerOpenDev(pOpen, r->deviceHandle)) { 4010 return FALSE; 4011 } 4012 4013 switch (pParams->request.operation) { 4014 case NVKMS_SWITCH_MUX_PRE: 4015 return nvRmMuxPre(pDpyEvo, r->state); 4016 case NVKMS_SWITCH_MUX: 4017 return nvRmMuxSwitch(pDpyEvo, r->state); 4018 case NVKMS_SWITCH_MUX_POST: 4019 return nvRmMuxPost(pDpyEvo, r->state); 4020 default: 4021 return FALSE; 4022 } 4023 } 4024 4025 static NvBool GetMuxState( 4026 struct NvKmsPerOpen *pOpen, 4027 void *pParamsVoid) 4028 { 4029 struct 
NvKmsGetMuxStateParams *pParams = pParamsVoid;
    const struct NvKmsGetMuxStateRequest *r = &pParams->request;
    NVDpyEvoPtr pDpyEvo;

    pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId);
    if (pDpyEvo == NULL) {
        return FALSE;
    }

    pParams->reply.state = nvRmMuxState(pDpyEvo);

    /* NOTE(review): MUX_STATE_GET appears to serve as the "unknown/failed"
     * sentinel returned by nvRmMuxState() -- confirm in its definition. */
    return pParams->reply.state != MUX_STATE_GET;
}

/*!
 * Handle NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE.
 */
static NvBool ExportVrrSemaphoreSurface(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsExportVrrSemaphoreSurfaceParams *pParams = pParamsVoid;
    const struct NvKmsExportVrrSemaphoreSurfaceRequest *req = &pParams->request;
    const struct NvKmsPerOpenDev *pOpenDev =
        GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    return nvExportVrrSemaphoreSurface(pOpenDev->pDevEvo, req->memFd);
}

/*!
 * Handle NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT: find a free vblank sync
 * object on the requested head, create an API handle for it, and (if the
 * head is active) program the hardware semaphore for it.
 */
static NvBool EnableVblankSyncObject(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsEnableVblankSyncObjectParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDisp* pOpenDisp = NULL;
    NVDispApiHeadStateEvoRec *pApiHeadState = NULL;
    NVDevEvoPtr pDevEvo = NULL;
    NvKmsVblankSyncObjectHandle vblankHandle = 0;
    int freeVblankSyncObjectIdx = 0;
    NvU32 apiHead = pParams->request.head;
    NVVblankSyncObjectRec *vblankSyncObjects = NULL;
    NVDispEvoPtr pDispEvo = NULL;
    NVEvoUpdateState updateState = { };

    /* Obtain the Head State. */
    pOpenDisp = GetPerOpenDisp(pOpen, pParams->request.deviceHandle,
                               pParams->request.dispHandle);
    if (pOpenDisp == NULL) {
        nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp.");
        return FALSE;
    }

    pDispEvo = pOpenDisp->pDispEvo;
    pDevEvo = pDispEvo->pDevEvo;

    /* Ensure Vblank Sync Object API is supported on this chip. */
    if (!pDevEvo->supportsSyncpts ||
        !pDevEvo->hal->caps.supportsVblankSyncObjects) {
        nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not "
                      "supported on this chip.");
        return FALSE;
    }

    /* Validate requested head because it comes from user input. */
    if (apiHead >= ARRAY_LEN(pDispEvo->apiHeadState)) {
        nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", apiHead);
        return FALSE;
    }
    pApiHeadState = &pDispEvo->apiHeadState[apiHead];
    vblankSyncObjects = pApiHeadState->vblankSyncObjects;
    pDevEvo = pDispEvo->pDevEvo;

    /*
     * Find the available sync object. Sync Objects with handle=0 are not in
     * use.
     */
    for (freeVblankSyncObjectIdx = 0;
         freeVblankSyncObjectIdx < pApiHeadState->numVblankSyncObjectsCreated;
         freeVblankSyncObjectIdx++) {
        if (!vblankSyncObjects[freeVblankSyncObjectIdx].inUse) {
            break;
        }
    }
    if (freeVblankSyncObjectIdx == pApiHeadState->numVblankSyncObjectsCreated) {
        /* All sync objects on this head are already in use. */
        return FALSE;
    }

    /* Save the created vblank handle if it is valid. */
    vblankHandle =
        nvEvoCreateApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead],
                             &vblankSyncObjects[freeVblankSyncObjectIdx]);
    if (vblankHandle == 0) {
        nvEvoLogDebug(EVO_LOG_ERROR, "Unable to create vblank handle.");
        return FALSE;
    }

    if (nvApiHeadIsActive(pDispEvo, apiHead)) {
        NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead);

        nvAssert(head != NV_INVALID_HEAD);

        /*
         * Instruct the hardware to enable a semaphore corresponding to this
         * syncpt. The Update State will be populated.
         */
        pDevEvo->hal->ConfigureVblankSyncObject(
                    pDevEvo,
                    pDispEvo->headState[head].timings.rasterBlankStart.y,
                    head,
                    freeVblankSyncObjectIdx,
                    vblankSyncObjects[freeVblankSyncObjectIdx].evoSyncpt.hCtxDma,
                    &updateState);

        /*
         * Instruct hardware to execute the staged commands from the
         * ConfigureVblankSyncObject() call above. This will set up and wait for a
         * notification that the hardware execution actually completed.
         */
        nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE);

        vblankSyncObjects[freeVblankSyncObjectIdx].enabled = TRUE;
    }

    /* Populate the vblankSyncObjects array. */
    vblankSyncObjects[freeVblankSyncObjectIdx].inUse = TRUE;

    /* Populate the reply field. */
    pParams->reply.vblankHandle = vblankHandle;
    /* Note: the syncpt ID is NOT the same as the vblank handle. */
    pParams->reply.syncptId =
        pApiHeadState->vblankSyncObjects[freeVblankSyncObjectIdx].evoSyncpt.id;

    return TRUE;
}

/*!
 * Disable the given vblank sync object (programming hardware if the head
 * is active), mark it free, and destroy its API handle.  The caller is
 * responsible for kicking off pUpdateState.
 */
static void DisableAndCleanVblankSyncObject(struct NvKmsPerOpenDisp *pOpenDisp,
                                            NvU32 apiHead,
                                            NVVblankSyncObjectRec *pVblankSyncObject,
                                            NVEvoUpdateState *pUpdateState,
                                            NvKmsVblankSyncObjectHandle handle)
{
    NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo;

    if (nvApiHeadIsActive(pDispEvo, apiHead)) {
        NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead);

        nvAssert(head != NV_INVALID_HEAD);

        /*
         * Instruct the hardware to disable the semaphore corresponding to this
         * syncpt. The Update State will be populated.
         *
         * Note: Using dummy zero value for rasterLine because the disable
         * codepath in ConfigureVblankSyncObject() does not use that argument.
         */
        pDispEvo->pDevEvo->hal->ConfigureVblankSyncObject(pDispEvo->pDevEvo,
                                                          0, /* rasterLine */
                                                          head,
                                                          pVblankSyncObject->index,
                                                          0, /* hCtxDma */
                                                          pUpdateState);
        /*
         * Note: it is the caller's responsibility to call
         * nvEvoUpdateAndKickOff().
         */
    }

    pVblankSyncObject->inUse = FALSE;
    pVblankSyncObject->enabled = FALSE;

    /* Remove the handle from the map. */
    nvEvoDestroyApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead], handle);
}

/*!
 * Handle NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT: look up the sync object
 * by handle, disable and free it, then kick off any staged hardware
 * commands.
 */
static NvBool DisableVblankSyncObject(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsDisableVblankSyncObjectParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDisp* pOpenDisp =
        GetPerOpenDisp(pOpen, pParams->request.deviceHandle,
                       pParams->request.dispHandle);
    NVVblankSyncObjectRec *pVblankSyncObject = NULL;
    NvU32 apiHead = pParams->request.head;
    NVDevEvoPtr pDevEvo = NULL;
    NVEvoUpdateState updateState = { };

    if (pOpenDisp == NULL) {
        nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp.");
        return FALSE;
    }

    pDevEvo = pOpenDisp->pDispEvo->pDevEvo;

    /* Ensure Vblank Sync Object API is supported on this chip. */
    if (!pDevEvo->supportsSyncpts ||
        !pDevEvo->hal->caps.supportsVblankSyncObjects) {
        nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not "
                      "supported on this chip.");
        return FALSE;
    }

    /* Validate requested head because it comes from user input. */
    if (apiHead >= ARRAY_LEN(pOpenDisp->pDispEvo->apiHeadState)) {
        nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", apiHead);
        return FALSE;
    }

    /* Mark the indicated object as free. */
    pVblankSyncObject =
        nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead],
                                     pParams->request.vblankHandle);
    if (pVblankSyncObject == NULL) {
        nvEvoLogDebug(EVO_LOG_ERROR, "unable to find object with provided "
                      "handle.");
        return FALSE;
    }

    DisableAndCleanVblankSyncObject(pOpenDisp, apiHead, pVblankSyncObject,
                                    &updateState, pParams->request.vblankHandle);

    if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) {
        /*
         * Instruct hardware to execute the staged commands from the
         * ConfigureVblankSyncObject() call inside of the
         * DisableAndCleanVblankSyncObject() call above. This will set up and
         * wait for a notification that the hardware execution has completed.
         */
        nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, TRUE);
    }

    return TRUE;
}

/*!
 * VBlank callback used by NotifyVblank(): deliver the one-shot unicast
 * event and tear the event registration back down.
 */
static void NotifyVblankCallback(NVDispEvoRec *pDispEvo,
                                 NVVBlankCallbackPtr pCallbackData)
{
    struct NvKmsPerOpen *pEventOpenFd = pCallbackData->pUserData;

    /*
     * NOTIFY_VBLANK events are single-shot so notify the unicast FD, then
     * immediately unregister the callback. The unregister step is done in
     * nvRemoveUnicastEvent which resets the unicast event data.
4273 */ 4274 nvSendUnicastEvent(pEventOpenFd); 4275 nvRemoveUnicastEvent(pEventOpenFd); 4276 } 4277 4278 static NvBool NotifyVblank( 4279 struct NvKmsPerOpen *pOpen, 4280 void *pParamsVoid) 4281 { 4282 struct NvKmsNotifyVblankParams *pParams = pParamsVoid; 4283 struct NvKmsPerOpen *pEventOpenFd = NULL; 4284 NVVBlankCallbackPtr pCallbackData = NULL; 4285 struct NvKmsPerOpenDisp* pOpenDisp = 4286 GetPerOpenDisp(pOpen, pParams->request.deviceHandle, 4287 pParams->request.dispHandle); 4288 const NvU32 apiHead = pParams->request.head; 4289 4290 pEventOpenFd = nvkms_get_per_open_data(pParams->request.unicastEvent.fd); 4291 4292 if (pEventOpenFd == NULL) { 4293 return NV_FALSE; 4294 } 4295 4296 if (!PerOpenIsValidForUnicastEvent(pEventOpenFd)) { 4297 return NV_FALSE; 4298 } 4299 4300 pEventOpenFd->type = NvKmsPerOpenTypeUnicastEvent; 4301 4302 pCallbackData = nvApiHeadRegisterVBlankCallback(pOpenDisp->pDispEvo, 4303 apiHead, 4304 NotifyVblankCallback, 4305 pEventOpenFd); 4306 if (pCallbackData == NULL) { 4307 return NV_FALSE; 4308 } 4309 4310 pEventOpenFd->unicastEvent.type = NvKmsUnicastEventTypeVblankNotification; 4311 pEventOpenFd->unicastEvent.e.vblankNotification.pOpenDisp = pOpenDisp; 4312 pEventOpenFd->unicastEvent.e.vblankNotification.apiHead = apiHead; 4313 pEventOpenFd->unicastEvent.e.vblankNotification.hCallback 4314 = nvEvoCreateApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead], 4315 pCallbackData); 4316 4317 if (pEventOpenFd->unicastEvent.e.vblankNotification.hCallback == 0) { 4318 nvApiHeadUnregisterVBlankCallback(pOpenDisp->pDispEvo, pCallbackData); 4319 return NV_FALSE; 4320 } 4321 4322 return NV_TRUE; 4323 } 4324 4325 /*! 4326 * Perform the ioctl operation requested by the client. 4327 * 4328 * \param[in,out] pOpenVoid The per-open data, allocated by 4329 * nvKmsOpen(). 4330 * \param[in] cmdOpaque The NVKMS_IOCTL_ operation to perform. 
 * \param[in,out]  paramsAddress  A pointer, in the client process's
 *                                address space, to the parameter
 *                                structure.  This is cmd-specific.
 * \param[in]      paramSize      The client-specified size of the params.
 *
 * \return  Return TRUE if the ioctl operation was successfully
 *          performed.  Otherwise, return FALSE.
 */
NvBool nvKmsIoctl(
    void *pOpenVoid,
    const NvU32 cmdOpaque,
    const NvU64 paramsAddress,
    const size_t paramSize)
{
    /* Per-command dispatch table, indexed by NVKMS_IOCTL_ command value. */
    static const struct {

        NvBool (*proc)(struct NvKmsPerOpen *pOpen, void *pParamsVoid);
        NvBool (*prepUser)(void *pParamsVoid, void *pExtraStateVoid);
        NvBool (*doneUser)(void *pParamsVoid, void *pExtraStateVoid);
        const size_t paramSize;
        /* Size of extra state tracked for user parameters */
        const size_t extraSize;

        const size_t requestSize;
        const size_t requestOffset;

        const size_t replySize;
        const size_t replyOffset;

    } dispatch[] = {

#define _ENTRY_WITH_USER(_cmd, _func, _prepUser, _doneUser, _extraSize)     \
    [_cmd] = {                                                              \
        .proc          = _func,                                             \
        .prepUser      = _prepUser,                                         \
        .doneUser      = _doneUser,                                         \
        .paramSize     = sizeof(struct NvKms##_func##Params),               \
        .requestSize   = sizeof(struct NvKms##_func##Request),              \
        .requestOffset = offsetof(struct NvKms##_func##Params, request),    \
        .replySize     = sizeof(struct NvKms##_func##Reply),                \
        .replyOffset   = offsetof(struct NvKms##_func##Params, reply),      \
        .extraSize     = _extraSize,                                        \
    }

#define ENTRY(_cmd, _func)                                                  \
    _ENTRY_WITH_USER(_cmd, _func, NULL, NULL, 0)

#define ENTRY_CUSTOM_USER(_cmd, _func)                                      \
    _ENTRY_WITH_USER(_cmd, _func,                                           \
                     _func##PrepUser, _func##DoneUser,                      \
                     sizeof(struct NvKms##_func##ExtraUserState))

        ENTRY(NVKMS_IOCTL_ALLOC_DEVICE, AllocDevice),
        ENTRY(NVKMS_IOCTL_FREE_DEVICE, FreeDevice),
        ENTRY(NVKMS_IOCTL_QUERY_DISP, QueryDisp),
        ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, QueryConnectorStaticData),
        ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA, QueryConnectorDynamicData),
        ENTRY(NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, QueryDpyStaticData),
        ENTRY(NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, QueryDpyDynamicData),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE_INDEX, ValidateModeIndex),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE, ValidateMode),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_MODE, SetMode),
        ENTRY(NVKMS_IOCTL_SET_CURSOR_IMAGE, SetCursorImage),
        ENTRY(NVKMS_IOCTL_MOVE_CURSOR, MoveCursor),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_LUT, SetLut),
        ENTRY(NVKMS_IOCTL_IDLE_BASE_CHANNEL, IdleBaseChannel),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_FLIP, Flip),
        ENTRY(NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST,
              DeclareDynamicDpyInterest),
        ENTRY(NVKMS_IOCTL_REGISTER_SURFACE, RegisterSurface),
        ENTRY(NVKMS_IOCTL_UNREGISTER_SURFACE, UnregisterSurface),
        ENTRY(NVKMS_IOCTL_GRANT_SURFACE, GrantSurface),
        ENTRY(NVKMS_IOCTL_ACQUIRE_SURFACE, AcquireSurface),
        ENTRY(NVKMS_IOCTL_RELEASE_SURFACE, ReleaseSurface),
        ENTRY(NVKMS_IOCTL_SET_DPY_ATTRIBUTE, SetDpyAttribute),
        ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE, GetDpyAttribute),
        ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES,
              GetDpyAttributeValidValues),
        ENTRY(NVKMS_IOCTL_SET_DISP_ATTRIBUTE, SetDispAttribute),
        ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE, GetDispAttribute),
        ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES,
              GetDispAttributeValidValues),
        ENTRY(NVKMS_IOCTL_QUERY_FRAMELOCK, QueryFrameLock),
        ENTRY(NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE, SetFrameLockAttribute),
        ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE, GetFrameLockAttribute),
        ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES,
              GetFrameLockAttributeValidValues),
        ENTRY(NVKMS_IOCTL_GET_NEXT_EVENT, GetNextEvent),
        ENTRY(NVKMS_IOCTL_DECLARE_EVENT_INTEREST, DeclareEventInterest),
        ENTRY(NVKMS_IOCTL_CLEAR_UNICAST_EVENT, ClearUnicastEvent),
        ENTRY(NVKMS_IOCTL_SET_LAYER_POSITION, SetLayerPosition),
        ENTRY(NVKMS_IOCTL_GRAB_OWNERSHIP, GrabOwnership),
        ENTRY(NVKMS_IOCTL_RELEASE_OWNERSHIP, ReleaseOwnership),
        ENTRY(NVKMS_IOCTL_GRANT_PERMISSIONS, GrantPermissions),
        ENTRY(NVKMS_IOCTL_ACQUIRE_PERMISSIONS, AcquirePermissions),
        ENTRY(NVKMS_IOCTL_REVOKE_PERMISSIONS, RevokePermissions),
        ENTRY(NVKMS_IOCTL_QUERY_DPY_CRC32, QueryDpyCRC32),
        ENTRY(NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO,
              RegisterDeferredRequestFifo),
        ENTRY(NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO,
              UnregisterDeferredRequestFifo),
        ENTRY(NVKMS_IOCTL_ALLOC_SWAP_GROUP, AllocSwapGroup),
        ENTRY(NVKMS_IOCTL_FREE_SWAP_GROUP, FreeSwapGroup),
        ENTRY(NVKMS_IOCTL_JOIN_SWAP_GROUP, JoinSwapGroup),
        ENTRY(NVKMS_IOCTL_LEAVE_SWAP_GROUP, LeaveSwapGroup),
        ENTRY(NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST, SetSwapGroupClipList),
        ENTRY(NVKMS_IOCTL_GRANT_SWAP_GROUP, GrantSwapGroup),
        ENTRY(NVKMS_IOCTL_ACQUIRE_SWAP_GROUP, AcquireSwapGroup),
        ENTRY(NVKMS_IOCTL_RELEASE_SWAP_GROUP, ReleaseSwapGroup),
        ENTRY(NVKMS_IOCTL_SWITCH_MUX, SwitchMux),
        ENTRY(NVKMS_IOCTL_GET_MUX_STATE, GetMuxState),
        ENTRY(NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE, ExportVrrSemaphoreSurface),
        ENTRY(NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT, EnableVblankSyncObject),
        ENTRY(NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT, DisableVblankSyncObject),
        ENTRY(NVKMS_IOCTL_NOTIFY_VBLANK, NotifyVblank),
    };

    struct NvKmsPerOpen *pOpen = pOpenVoid;
    void *pParamsKernelPointer;
    NvBool ret;
    enum NvKmsIoctlCommand cmd = cmdOpaque;
    void *pExtraUserState = NULL;

    if (!AssignNvKmsPerOpenType(pOpen, NvKmsPerOpenTypeIoctl, TRUE)) {
        return FALSE;
    }

    /* Reject unknown commands and sparse table gaps (proc == NULL). */
    if (cmd >= ARRAY_LEN(dispatch)) {
        return FALSE;
    }

    if (dispatch[cmd].proc == NULL) {
        return FALSE;
    }

    /* Client-provided size must exactly match the command's params size. */
    if (paramSize != dispatch[cmd].paramSize) {
        return FALSE;
    }

    if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) {
        /* User-space client: marshal the request into a kernel copy. */
        pParamsKernelPointer = nvCalloc(1, paramSize + dispatch[cmd].extraSize);
        if (pParamsKernelPointer == NULL) {
            return FALSE;
        }

        if (dispatch[cmd].requestSize > 0) {
            int status =
                nvkms_copyin((char *) pParamsKernelPointer +
                             dispatch[cmd].requestOffset,
                             paramsAddress + dispatch[cmd].requestOffset,
                             dispatch[cmd].requestSize);
            if (status != 0) {
                nvFree(pParamsKernelPointer);
                return FALSE;
            }
        }

        if (dispatch[cmd].prepUser) {
            /* Extra per-command user state lives after the params copy. */
            pExtraUserState = (char *)pParamsKernelPointer + paramSize;

            if (!dispatch[cmd].prepUser(pParamsKernelPointer,
                                        pExtraUserState)) {
                nvFree(pParamsKernelPointer);
                return FALSE;
            }
        }
    } else {
        /* Kernel-space client: the params address is directly usable. */
        pParamsKernelPointer = nvKmsNvU64ToPointer(paramsAddress);
    }

    ret = dispatch[cmd].proc(pOpen, pParamsKernelPointer);

    if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) {

        /* doneUser and the reply copyout run even if proc failed. */
        if (dispatch[cmd].doneUser) {
            pExtraUserState = (char *)pParamsKernelPointer + paramSize;

            if (!dispatch[cmd].doneUser(pParamsKernelPointer,
                                        pExtraUserState)) {
                ret = FALSE;
            }
        }

        if (dispatch[cmd].replySize > 0) {
            int status =
                nvkms_copyout(paramsAddress + dispatch[cmd].replyOffset,
                              (char *) pParamsKernelPointer +
                              dispatch[cmd].replyOffset,
                              dispatch[cmd].replySize);
            if (status != 0) {
                ret = FALSE;
            }
        }

        nvFree(pParamsKernelPointer);
    }

    return ret;
}


/*!
 * Close callback.
 *
 * \param[in,out]  pOpenVoid  The per-open data, allocated by nvKmsOpen().
 */
void nvKmsClose(void *pOpenVoid)
{
    struct NvKmsPerOpen *pOpen = pOpenVoid;

    if (pOpen == NULL) {
        return;
    }

    /*
     * First remove the pOpen from global tracking. 
Otherwise, assertions can 4547 * fail in the free paths below -- the assertions check that the object 4548 * being freed is not tracked by any pOpen. 4549 */ 4550 nvListDel(&pOpen->perOpenListEntry); 4551 4552 if (pOpen->type == NvKmsPerOpenTypeIoctl) { 4553 4554 struct NvKmsPerOpenEventListEntry *pEntry, *pEntryTmp; 4555 struct NvKmsPerOpenDev *pOpenDev; 4556 NvKmsGenericHandle dev; 4557 4558 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 4559 pOpenDev, dev) { 4560 FreeDeviceReference(pOpen, pOpenDev); 4561 } 4562 4563 nvEvoDestroyApiHandles(&pOpen->ioctl.frameLockHandles); 4564 4565 nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); 4566 4567 nvListForEachEntry_safe(pEntry, pEntryTmp, 4568 &pOpen->ioctl.eventList, eventListEntry) { 4569 nvListDel(&pEntry->eventListEntry); 4570 nvFree(pEntry); 4571 } 4572 4573 nvListDel(&pOpen->perOpenIoctlListEntry); 4574 } 4575 4576 if (pOpen->type == NvKmsPerOpenTypeGrantSurface) { 4577 nvAssert(pOpen->grantSurface.pSurfaceEvo != NULL); 4578 nvEvoDecrementSurfaceStructRefCnt(pOpen->grantSurface.pSurfaceEvo); 4579 } 4580 4581 if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) { 4582 nvAssert(pOpen->grantSwapGroup.pSwapGroup != NULL); 4583 nvHsDecrementSwapGroupRefCnt(pOpen->grantSwapGroup.pSwapGroup); 4584 } 4585 4586 if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) { 4587 nvRemoveUnicastEvent(pOpen); 4588 } 4589 4590 nvFree(pOpen); 4591 } 4592 4593 4594 /*! 4595 * Open callback. 4596 * 4597 * Allocate, initialize, and return an opaque pointer to an NvKmsPerOpen. 4598 * 4599 * \return If successful, return an NvKmsPerOpen pointer. Otherwise, 4600 * return NULL. 
 */
void *nvKmsOpen(
    NvU32 pid,
    enum NvKmsClientType clientType,
    nvkms_per_open_handle_t *pOpenKernel)
{
    struct NvKmsPerOpen *pOpen = nvCalloc(1, sizeof(*pOpen));

    if (pOpen == NULL) {
        goto fail;
    }

    /* The type starts Undefined; the per-open's first use defines it. */
    pOpen->pid = pid;
    pOpen->clientType = clientType;
    pOpen->type = NvKmsPerOpenTypeUndefined;
    pOpen->pOpenKernel = pOpenKernel;

    nvListAppend(&pOpen->perOpenListEntry, &perOpenList);

    return pOpen;

fail:
    /* nvKmsClose() tolerates a NULL pOpen. */
    nvKmsClose(pOpen);
    return NULL;
}

extern const char *const pNV_KMS_ID;

#if NVKMS_PROCFS_ENABLE

/* Human-readable name for a per-open type, for procfs output. */
static const char *ProcFsPerOpenTypeString(
    enum NvKmsPerOpenType type)
{
    switch (type) {
    case NvKmsPerOpenTypeIoctl:            return "ioctl";
    case NvKmsPerOpenTypeGrantSurface:     return "grantSurface";
    case NvKmsPerOpenTypeGrantSwapGroup:   return "grantSwapGroup";
    case NvKmsPerOpenTypeGrantPermissions: return "grantPermissions";
    case NvKmsPerOpenTypeUnicastEvent:     return "unicastEvent";
    case NvKmsPerOpenTypeUndefined:        return "undefined";
    }

    /* Unreachable for valid enum values; placates the compiler. */
    return "unknown";
}

/* Human-readable name for a unicast event type, for procfs output. */
static const char *ProcFsUnicastEventTypeString(
    enum NvKmsUnicastEventType type)
{
    switch (type) {
    case NvKmsUnicastEventTypeDeferredRequest:    return "DeferredRequest";
    case NvKmsUnicastEventTypeVblankNotification: return "VblankNotification";
    case NvKmsUnicastEventTypeUndefined:          return "undefined";
    }

    return "unknown";
}

/* Human-readable name for a client type, for procfs output. */
static const char *ProcFsPerOpenClientTypeString(
    enum NvKmsClientType clientType)
{
    switch (clientType) {
    case NVKMS_CLIENT_USER_SPACE:   return "user-space";
    case NVKMS_CLIENT_KERNEL_SPACE: return "kernel-space";
    }

    return "unknown";
}

/* Human-readable name for a permissions type, for procfs output. */
static const char *ProcFsPermissionsTypeString(
    enum NvKmsPermissionsType permissionsType)
{
    switch (permissionsType) {
    case
NV_KMS_PERMISSIONS_TYPE_FLIPPING: return "flipping"; 4674 case NV_KMS_PERMISSIONS_TYPE_MODESET: return "modeset"; 4675 } 4676 4677 return "unknown"; 4678 } 4679 4680 static void 4681 ProcFsPrintClients( 4682 void *data, 4683 char *buffer, 4684 size_t size, 4685 nvkms_procfs_out_string_func_t *outString) 4686 { 4687 struct NvKmsPerOpen *pOpen; 4688 NVEvoInfoStringRec infoString; 4689 4690 nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { 4691 4692 const char *extra = ""; 4693 4694 nvInitInfoString(&infoString, buffer, size); 4695 4696 if (pOpen == nvEvoGlobal.nvKmsPerOpen) { 4697 extra = " (NVKMS-internal client)"; 4698 } 4699 4700 nvEvoLogInfoString(&infoString, 4701 "Client (pOpen) : %p", pOpen); 4702 nvEvoLogInfoString(&infoString, 4703 " pid : %d%s", pOpen->pid, extra); 4704 nvEvoLogInfoString(&infoString, 4705 " clientType : %s", 4706 ProcFsPerOpenClientTypeString(pOpen->clientType)); 4707 nvEvoLogInfoString(&infoString, 4708 " type : %s", 4709 ProcFsPerOpenTypeString(pOpen->type)); 4710 4711 if (pOpen->type == NvKmsPerOpenTypeIoctl) { 4712 4713 NvKmsGenericHandle deviceHandle; 4714 struct NvKmsPerOpenDev *pOpenDev; 4715 4716 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 4717 pOpenDev, deviceHandle) { 4718 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; 4719 4720 nvEvoLogInfoString(&infoString, 4721 " pDevEvo (deviceId:%02d) : %p", 4722 pDevEvo->deviceId, pDevEvo); 4723 nvEvoLogInfoString(&infoString, 4724 " NvKmsDeviceHandle : %d", deviceHandle); 4725 } 4726 4727 } else if (pOpen->type == NvKmsPerOpenTypeGrantSurface) { 4728 4729 NVSurfaceEvoPtr pSurfaceEvo = pOpen->grantSurface.pSurfaceEvo; 4730 4731 nvEvoLogInfoString(&infoString, 4732 " pSurfaceEvo : %p", pSurfaceEvo); 4733 4734 } else if (pOpen->type == NvKmsPerOpenTypeGrantPermissions) { 4735 4736 NVDevEvoPtr pDevEvo = pOpen->grantPermissions.pDevEvo; 4737 const struct NvKmsPermissions *pPerms = 4738 &pOpen->grantPermissions.permissions; 4739 4740 nvEvoLogInfoString(&infoString, 4741 " 
pDevEvo (deviceId:%02d) : %p", 4742 pDevEvo->deviceId, pDevEvo); 4743 4744 nvEvoLogInfoString(&infoString, 4745 " PermissionsType : %s", 4746 ProcFsPermissionsTypeString(pPerms->type)); 4747 4748 if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { 4749 NvU32 d, h; 4750 4751 for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) { 4752 for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) { 4753 4754 const NvU8 layerMask = 4755 pPerms->flip.disp[d].head[h].layerMask; 4756 4757 if (layerMask == 0) { 4758 continue; 4759 } 4760 4761 nvEvoLogInfoString(&infoString, 4762 " disp:%02d, head:%02d : 0x%08x", d, h, 4763 layerMask); 4764 } 4765 } 4766 } else if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { 4767 NvU32 d, h; 4768 4769 for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) { 4770 for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) { 4771 4772 NVDpyIdList dpyIdList = 4773 pPerms->modeset.disp[d].head[h].dpyIdList; 4774 NVDispEvoPtr pDispEvo; 4775 char *dpys; 4776 4777 if (nvDpyIdListIsEmpty(dpyIdList)) { 4778 continue; 4779 } 4780 4781 pDispEvo = pDevEvo->pDispEvo[d]; 4782 4783 dpys = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); 4784 4785 if (dpys == NULL) { 4786 continue; 4787 } 4788 4789 nvEvoLogInfoString(&infoString, 4790 " disp:%02d, head:%02d : %s", d, h, dpys); 4791 4792 nvFree(dpys); 4793 } 4794 } 4795 } 4796 } else if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) { 4797 4798 NVDevEvoPtr pDevEvo = pOpen->grantSwapGroup.pDevEvo; 4799 4800 nvEvoLogInfoString(&infoString, 4801 " pDevEvo (deviceId:%02d) : %p", 4802 pDevEvo->deviceId, pDevEvo); 4803 nvEvoLogInfoString(&infoString, 4804 " pSwapGroup : %p", 4805 pOpen->grantSwapGroup.pSwapGroup); 4806 4807 } else if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) { 4808 nvEvoLogInfoString(&infoString, 4809 " unicastEvent type : %s", 4810 ProcFsUnicastEventTypeString(pOpen->unicastEvent.type)); 4811 switch(pOpen->unicastEvent.type) { 4812 case NvKmsUnicastEventTypeDeferredRequest: 4813 
nvEvoLogInfoString(&infoString, 4814 " pDeferredRequestFifo : %p", 4815 pOpen->unicastEvent.e.deferred.pDeferredRequestFifo); 4816 break; 4817 case NvKmsUnicastEventTypeVblankNotification: 4818 nvEvoLogInfoString(&infoString, 4819 " head : %x", 4820 pOpen->unicastEvent.e.vblankNotification.apiHead); 4821 break; 4822 default: 4823 break; 4824 } 4825 } 4826 4827 nvEvoLogInfoString(&infoString, ""); 4828 outString(data, buffer); 4829 } 4830 } 4831 4832 static void PrintSurfacePlanes( 4833 NVEvoInfoStringRec *pInfoString, 4834 const NVSurfaceEvoRec *pSurfaceEvo) 4835 { 4836 NvU8 planeIndex; 4837 4838 FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { 4839 nvEvoLogInfoString(pInfoString, 4840 "plane[%u] disp ctxDma:0x%08x pitch:%u offset:%" NvU64_fmtu 4841 " rmObjectSizeInBytes:%" NvU64_fmtu, 4842 planeIndex, 4843 pSurfaceEvo->planes[planeIndex].ctxDma, 4844 pSurfaceEvo->planes[planeIndex].pitch, 4845 pSurfaceEvo->planes[planeIndex].offset, 4846 pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes); 4847 } 4848 } 4849 4850 static void PrintSurfaceClients( 4851 NVEvoInfoStringRec *pInfoString, 4852 const NVSurfaceEvoRec *pSurfaceEvo, 4853 const NVDevEvoRec *pDevEvo) 4854 { 4855 struct NvKmsPerOpen *pOpen; 4856 4857 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { 4858 NvKmsGenericHandle deviceHandle; 4859 struct NvKmsPerOpenDev *pOpenDev; 4860 4861 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 4862 pOpenDev, deviceHandle) { 4863 NvKmsGenericHandle surfaceHandle; 4864 NVSurfaceEvoPtr pTmpSurfaceEvo; 4865 4866 if (pOpenDev->pDevEvo != pDevEvo) { 4867 continue; 4868 } 4869 4870 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, 4871 pTmpSurfaceEvo, surfaceHandle) { 4872 if (pTmpSurfaceEvo != pSurfaceEvo) { 4873 continue; 4874 } 4875 4876 nvEvoLogInfoString(pInfoString, 4877 " pOpen : %p", pOpen); 4878 nvEvoLogInfoString(pInfoString, 4879 " pOpenDev : %p", pOpenDev); 4880 nvEvoLogInfoString(pInfoString, 4881 " NvKmsSurfaceHandle : 
%d", surfaceHandle); 4882 } 4883 } 4884 } 4885 } 4886 4887 static void PrintSurface( 4888 NVEvoInfoStringRec *pInfoString, 4889 const NVSurfaceEvoRec *pSurfaceEvo, 4890 const NVDevEvoRec *pDevEvo) 4891 { 4892 NvU32 sd; 4893 4894 nvEvoLogInfoString(pInfoString, 4895 "pSurfaceEvo : %p", pSurfaceEvo); 4896 nvEvoLogInfoString(pInfoString, 4897 " pDevEvo (deviceId:%02d) : %p", pDevEvo->deviceId, pDevEvo); 4898 nvEvoLogInfoString(pInfoString, 4899 " owner : " 4900 "pOpenDev:%p, NvKmsSurfaceHandle:%d", 4901 pSurfaceEvo->owner.pOpenDev, 4902 pSurfaceEvo->owner.surfaceHandle); 4903 nvEvoLogInfoString(pInfoString, 4904 " {width,height}InPixels : %d x %d", 4905 pSurfaceEvo->widthInPixels, 4906 pSurfaceEvo->heightInPixels); 4907 nvEvoLogInfoString(pInfoString, 4908 " misc : " 4909 "log2GobsPerBlockY:%d", 4910 pSurfaceEvo->log2GobsPerBlockY); 4911 nvEvoLogInfoString(pInfoString, 4912 " gpuAddress : 0x%016" NvU64_fmtx, 4913 pSurfaceEvo->gpuAddress); 4914 nvEvoLogInfoString(pInfoString, 4915 " memory : layout:%s format:%s", 4916 NvKmsSurfaceMemoryLayoutToString(pSurfaceEvo->layout), 4917 nvKmsSurfaceMemoryFormatToString(pSurfaceEvo->format)); 4918 nvEvoLogInfoString(pInfoString, 4919 " refCnts : " 4920 "rmRefCnt:%" NvU64_fmtx" structRefCnt:%" NvU64_fmtx, 4921 pSurfaceEvo->rmRefCnt, 4922 pSurfaceEvo->structRefCnt); 4923 4924 PrintSurfacePlanes(pInfoString, pSurfaceEvo); 4925 4926 nvEvoLogInfoString(pInfoString, 4927 " clients :"); 4928 4929 PrintSurfaceClients(pInfoString, pSurfaceEvo, pDevEvo); 4930 4931 for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { 4932 if (pSurfaceEvo->cpuAddress[sd] != NULL) { 4933 nvEvoLogInfoString(pInfoString, 4934 " cpuAddress[%02d] : %p", 4935 sd, pSurfaceEvo->cpuAddress[sd]); 4936 } 4937 } 4938 4939 nvEvoLogInfoString(pInfoString, ""); 4940 } 4941 4942 static void 4943 ProcFsPrintSurfaces( 4944 void *data, 4945 char *buffer, 4946 size_t size, 4947 nvkms_procfs_out_string_func_t *outString) 4948 { 4949 struct NvKmsPerOpen *pOpen; 4950 
    NVEvoInfoStringRec infoString;
    NvU32 i;

    for (i = 0; i < 2; i++) {

        nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
            NvKmsGenericHandle deviceHandle;
            struct NvKmsPerOpenDev *pOpenDev;

            FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
                                                pOpenDev, deviceHandle) {

                NvKmsGenericHandle surfaceHandle;
                NVSurfaceEvoPtr pSurfaceEvo;

                FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles,
                                                    pSurfaceEvo,
                                                    surfaceHandle) {
                    /*
                     * Because clients can grant surfaces between each
                     * other, a pSurfaceEvo could be in multiple clients'
                     * lists.  So, we loop over all surfaces on all clients
                     * twice: the first time we print unique surfaces and set
                     * 'procFsFlag' to recognize duplicates.  The second time,
                     * we clear 'procFsFlag'.
                     */
                    if (i == 0) {
                        if (pSurfaceEvo->procFsFlag) {
                            continue;
                        }

                        nvInitInfoString(&infoString, buffer, size);
                        PrintSurface(&infoString, pSurfaceEvo,
                                     pOpenDev->pDevEvo);
                        outString(data, buffer);

                        pSurfaceEvo->procFsFlag = TRUE;
                    } else {
                        /* Second pass: reset the flag for the next dump. */
                        pSurfaceEvo->procFsFlag = FALSE;
                    }
                }
            }
        }
    }
}

/* Print per-device, per-disp, per-api-head headsurface state to procfs. */
static void
ProcFsPrintHeadSurface(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString)
{
    NVDevEvoPtr pDevEvo;
    NVDispEvoPtr pDispEvo;
    NvU32 dispIndex, apiHead;
    NVEvoInfoStringRec infoString;

    FOR_ALL_EVO_DEVS(pDevEvo) {

        nvInitInfoString(&infoString, buffer, size);
        nvEvoLogInfoString(&infoString,
            "pDevEvo (deviceId:%02d) : %p",
            pDevEvo->deviceId, pDevEvo);
        outString(data, buffer);

        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {

            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                " pDispEvo (dispIndex:%02d) : %p",
                dispIndex, pDispEvo);
            outString(data,
buffer);

            for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
                nvInitInfoString(&infoString, buffer, size);
                nvHsProcFs(&infoString, pDevEvo, dispIndex, apiHead);
                nvEvoLogInfoString(&infoString, "");
                outString(data, buffer);
            }
        }
    }
}

/* Decode the per-eye-stereo field of a SWAP_GROUP_READY request word. */
static const char *SwapGroupPerEyeStereoString(const NvU32 request)
{
    const NvU32 value =
        DRF_VAL(KMS, _DEFERRED_REQUEST,
                _SWAP_GROUP_READY_PER_EYE_STEREO, request);

    switch (value) {

    case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_PAIR:
        return "PerPair";
    case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_EYE:
        return "PerEye";
    }

    return "Unknown";
}

/*
 * Print one deferred request fifo to procfs: its identity, swap group
 * association (if any), put/get pointers, request entries, and semaphores.
 */
static void ProcFsPrintOneDeferredRequestFifo(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString,
    const NVDeferredRequestFifoRec *pDeferredRequestFifo,
    const struct NvKmsPerOpen *pOpen,
    const struct NvKmsPerOpenDev *pOpenDev,
    const NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle)
{
    NVEvoInfoStringRec infoString;

    const struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo;
    NvU32 i, prevI;

    nvInitInfoString(&infoString, buffer, size);

    nvEvoLogInfoString(&infoString,
        "pDeferredRequestFifo : %p", pDeferredRequestFifo);

    nvEvoLogInfoString(&infoString,
        " Client (pOpen) : %p", pOpen);

    nvEvoLogInfoString(&infoString,
        " pOpenDev : %p", pOpenDev);

    nvEvoLogInfoString(&infoString,
        " pSurfaceEvo : %p", pDeferredRequestFifo->pSurfaceEvo);

    nvEvoLogInfoString(&infoString,
        " NvKms...RequestFifoHandle : %d", deferredRequestFifoHandle);

    if (pDeferredRequestFifo->swapGroup.pSwapGroup != NULL) {

        nvEvoLogInfoString(&infoString,
            " swapGroup :");
        nvEvoLogInfoString(&infoString,
            " pSwapGroup : %p",
            pDeferredRequestFifo->swapGroup.pSwapGroup);
        nvEvoLogInfoString(&infoString,
            " pOpenUnicastEvent : %p",
            pDeferredRequestFifo->swapGroup.pOpenUnicastEvent);
        nvEvoLogInfoString(&infoString,
            " ready : %d",
            pDeferredRequestFifo->swapGroup.ready);
        nvEvoLogInfoString(&infoString,
            " semaphoreIndex : 0x%02x",
            pDeferredRequestFifo->swapGroup.semaphoreIndex);
    }

    nvEvoLogInfoString(&infoString,
        " put : %d", fifo->put);

    nvEvoLogInfoString(&infoString,
        " get : %d", fifo->get);

    outString(data, buffer);

    /* Decode each request slot; NOPs are skipped silently. */
    for (i = 0; i < ARRAY_LEN(fifo->request); i++) {

        const NvU32 request = fifo->request[i];
        const NvU32 opcode = DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request);
        const NvU32 semaphoreIndex =
            DRF_VAL(KMS, _DEFERRED_REQUEST, _SEMAPHORE_INDEX, request);

        switch (opcode) {

        case NVKMS_DEFERRED_REQUEST_OPCODE_NOP:
            break;

        case NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY:
            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                " request[0x%02x] : "
                "opcode:SWAP_GROUP_READY, semaphoreIndex:0x%02x, "
                "perEyeStereo:%s",
                i, semaphoreIndex,
                SwapGroupPerEyeStereoString(request));
            outString(data, buffer);
            break;

        default:
            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                " request[0x%02x] : opcode:INVALID", i);
            outString(data, buffer);
            break;
        }
    }

    /*
     * Print the fifo->semaphore[] array, but collapse multiple lines with
     * duplicate values.
     *
     * To collapse duplicates, loop over all semaphore[] elements.  If the
     * current element is the same as semaphore[prev], continue.  If they
     * differ, print the value in semaphore[prev .. i-1], and update prev.
 */
    prevI = 0;

    /* Iterate one past the end so the final run of duplicates is flushed. */
    for (i = 1; i <= ARRAY_LEN(fifo->semaphore); i++) {

        const NvU32 prevValue = fifo->semaphore[prevI].data[0];

        if (i != ARRAY_LEN(fifo->semaphore)) {
            const NvU32 currentValue = fifo->semaphore[i].data[0];

            /*
             * If the value in this element matches the previous element, don't
             * print anything, yet.
             */
            if (currentValue == prevValue) {
                continue;
            }
        }

        nvInitInfoString(&infoString, buffer, size);

        if (prevI == (i - 1)) {
            /* A run of length one: print a single index. */
            nvEvoLogInfoString(&infoString,
                " semaphore[0x%02x] : 0x%08x",
                prevI, prevValue);
        } else {
            /* A longer run: print the inclusive index range. */
            nvEvoLogInfoString(&infoString,
                " semaphore[0x%02x..0x%02x] : 0x%08x",
                prevI, i - 1, prevValue);
        }

        outString(data, buffer);

        prevI = i;
    }

    nvInitInfoString(&infoString, buffer, size);
    nvEvoLogInfoString(&infoString, "");
    outString(data, buffer);
}

/* Walk every ioctl client's devices and print each deferred request fifo. */
static void
ProcFsPrintDeferredRequestFifos(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString)
{
    struct NvKmsPerOpen *pOpen;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {

        struct NvKmsPerOpenDev *pOpenDev;
        NvKmsGenericHandle devHandle;

        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(
            &pOpen->ioctl.devHandles,
            pOpenDev, devHandle) {

            NVDeferredRequestFifoRec *pDeferredRequestFifo;
            NvKmsGenericHandle fifoHandle;

            FOR_ALL_POINTERS_IN_EVO_API_HANDLES(
                &pOpenDev->deferredRequestFifoHandles,
                pDeferredRequestFifo, fifoHandle) {

                ProcFsPrintOneDeferredRequestFifo(
                    data, buffer, size, outString,
                    pDeferredRequestFifo,
                    pOpen,
                    pOpenDev,
                    fifoHandle);
            }
        }
    }
}

/*
 * Print compositor, raster generator, and output CRC32 values for each
 * head of each disp of each device.
 */
static void
ProcFsPrintDpyCrcs(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString)
{
    NVDevEvoPtr pDevEvo;
    NVDispEvoPtr pDispEvo;
    NvU32 dispIndex, head;
    NVEvoInfoStringRec infoString;

    FOR_ALL_EVO_DEVS(pDevEvo) {

        nvInitInfoString(&infoString, buffer, size);
        nvEvoLogInfoString(&infoString,
            "pDevEvo (deviceId:%02d) : %p",
            pDevEvo->deviceId, pDevEvo);
        outString(data, buffer);

        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {

            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                " pDispEvo (dispIndex:%02d) : %p",
                dispIndex, pDispEvo);
            outString(data, buffer);

            for (head = 0; head < pDevEvo->numHeads; head++) {
                const NVDispHeadStateEvoRec *pHeadState =
                    &pDispEvo->headState[head];
                struct NvKmsDpyCRC32 compCrc;
                struct NvKmsDpyCRC32 rgCrc;
                struct NvKmsDpyCRC32 outputCrc;
                CRC32NotifierCrcOut crcOut;
                crcOut.compositorCrc32 = &compCrc;
                crcOut.rasterGeneratorCrc32 = &rgCrc;
                crcOut.outputCrc32 = &outputCrc;

                /*
                 * NOTE(review): heads without a connector are skipped —
                 * presumably inactive heads; confirm against headState use.
                 */
                if (pHeadState->pConnectorEvo == NULL) {
                    continue;
                }

                nvInitInfoString(&infoString, buffer, size);
                if (nvReadCRC32Evo(pDispEvo, head, &crcOut)) {
                    nvEvoLogInfoString(&infoString,
                        " head %d :",
                        head);
                    if (compCrc.supported) {
                        nvEvoLogInfoString(&infoString,
                            " compositor CRC : 0x%08x",
                            compCrc.value);
                    } else {
                        nvEvoLogInfoString(&infoString,
                            " compositor CRC : unsupported");
                    }
                    if (rgCrc.supported) {
                        nvEvoLogInfoString(&infoString,
                            " raster generator CRC : 0x%08x",
                            rgCrc.value);
                    } else {
                        nvEvoLogInfoString(&infoString,
                            " raster generator CRC : unsupported");
                    }
                    if (outputCrc.supported) {
                        nvEvoLogInfoString(&infoString,
                            " output CRC : 0x%08x",
                            outputCrc.value);
                    } else {
                        nvEvoLogInfoString(&infoString,
                            " output CRC : unsupported");
                    }
                } else {
                    nvEvoLogInfoString(&infoString,
                        " head %d : error",
                        head);
                }
                outString(data, buffer);
            }
        }
    }
}

#endif /* NVKMS_PROCFS_ENABLE */

/*
 * Report the NVKMS procfs file table (NULL-terminated), or NULL when
 * procfs support is compiled out.
 */
void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles)
{
#if NVKMS_PROCFS_ENABLE
    static const nvkms_procfs_file_t procFiles[] = {
        { "clients", ProcFsPrintClients },
        { "surfaces", ProcFsPrintSurfaces },
        { "headsurface", ProcFsPrintHeadSurface },
        { "deferred-request-fifos", ProcFsPrintDeferredRequestFifos },
        { "crcs", ProcFsPrintDpyCrcs },
        { NULL, NULL },
    };

    *ppProcFiles = procFiles;
#else
    *ppProcFiles = NULL;
#endif
}

/* Tear down module-global state: internal client, RM client handle, dpys. */
static void FreeGlobalState(void)
{
    nvKmsClose(nvEvoGlobal.nvKmsPerOpen);
    nvEvoGlobal.nvKmsPerOpen = NULL;

    if (nvEvoGlobal.clientHandle != 0) {
        nvRmApiFree(nvEvoGlobal.clientHandle, nvEvoGlobal.clientHandle,
                    nvEvoGlobal.clientHandle);
        nvEvoGlobal.clientHandle = 0;
    }

    nvClearDpyOverrides();
}

/*
 * Module-load entry point: allocate the RM client handle and the
 * NVKMS-internal kernel-space client.  On any failure, unwind through
 * FreeGlobalState() and return FALSE.
 */
NvBool nvKmsModuleLoad(void)
{
    NvU32 ret = NVOS_STATUS_ERROR_GENERIC;

    nvEvoLog(EVO_LOG_INFO, "Loading %s", pNV_KMS_ID);

    ret = nvRmApiAlloc(NV01_NULL_OBJECT,
                       NV01_NULL_OBJECT,
                       NV01_NULL_OBJECT,
                       NV01_ROOT,
                       &nvEvoGlobal.clientHandle);

    if (ret != NVOS_STATUS_SUCCESS) {
        nvEvoLog(EVO_LOG_ERROR, "Failed to initialize client");
        goto fail;
    }

    nvEvoGlobal.nvKmsPerOpen = nvKmsOpen(0, NVKMS_CLIENT_KERNEL_SPACE, NULL);
    if (!nvEvoGlobal.nvKmsPerOpen) {
        nvEvoLog(EVO_LOG_ERROR, "Failed to initialize internal modeset client");
        goto fail;
    }

    if (!AssignNvKmsPerOpenType(nvEvoGlobal.nvKmsPerOpen,
                                NvKmsPerOpenTypeIoctl, FALSE)) {
        goto fail;
    }

    return TRUE;
fail:
    FreeGlobalState();

    return FALSE;
}


/* Module-unload entry point; all devices and framelocks must be gone. */
void nvKmsModuleUnload(void)
{
    FreeGlobalState();

    nvAssert(nvListIsEmpty(&nvEvoGlobal.frameLockList));
    nvAssert(nvListIsEmpty(&nvEvoGlobal.devList));
#if defined(DEBUG)
    nvReportUnfreedAllocations();
#endif
    nvEvoLog(EVO_LOG_INFO, "Unloading");
}


/*
 * Queue an event on an ioctl-type per-open and wake its client.  Best
 * effort: if the list entry cannot be allocated, the event is dropped.
 */
static void SendEvent(struct NvKmsPerOpen *pOpen,
                      const struct NvKmsEvent *pEvent)
{
    struct NvKmsPerOpenEventListEntry *pEntry = nvAlloc(sizeof(*pEntry));

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    if (pEntry == NULL) {
        return;
    }

    pEntry->event = *pEvent;
    nvListAppend(&pEntry->eventListEntry, &pOpen->ioctl.eventList);

    nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE);
}

/*
 * Timer callback: restore the console if, by the time the timer fires,
 * still no client owns modeset and console hotplugs are being handled.
 */
static void ConsoleRestoreTimerFired(void *dataPtr, NvU32 dataU32)
{
    NVDevEvoPtr pDevEvo = dataPtr;

    if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) {
        pDevEvo->skipConsoleRestore = FALSE;
        nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */);
    }
}

/*!
 * Generate a dpy event.
 *
 * \param[in]  pDpyEvo    The dpy for which the event should be generated.
 * \param[in]  eventType  The NVKMS_EVENT_TYPE_
 * \param[in]  attribute  The NvKmsDpyAttribute; only used for
 *                        NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED.
 * \param[in]  value      The NvKmsDpyAttribute value; only used for
 *                        NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED.
 */
static void SendDpyEventEvo(const NVDpyEvoRec *pDpyEvo,
                            const NvU32 eventType,
                            const enum NvKmsDpyAttribute attribute,
                            const NvS64 value)
{
    struct NvKmsPerOpen *pOpen;
    const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;

    /* Deliver the event to every ioctl client that can see this disp. */
    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {

        struct NvKmsEvent event = { 0 };
        NvKmsDeviceHandle deviceHandle;
        NvKmsDispHandle dispHandle;

        /* Skip clients with no handles for this disp's device/disp. */
        if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo,
                                        &deviceHandle, &dispHandle)) {
            continue;
        }

        /* Skip clients that did not declare interest in this event type. */
        if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) {
            continue;
        }

        event.eventType = eventType;

        switch (eventType) {

        case NVKMS_EVENT_TYPE_DPY_CHANGED:
            event.u.dpyChanged.deviceHandle = deviceHandle;
            event.u.dpyChanged.dispHandle = dispHandle;
            event.u.dpyChanged.dpyId = pDpyEvo->id;
            break;

        case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED:
            event.u.dynamicDpyConnected.deviceHandle = deviceHandle;
            event.u.dynamicDpyConnected.dispHandle = dispHandle;
            event.u.dynamicDpyConnected.dpyId = pDpyEvo->id;
            break;

        case NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED:
            event.u.dynamicDpyDisconnected.deviceHandle = deviceHandle;
            event.u.dynamicDpyDisconnected.dispHandle = dispHandle;
            event.u.dynamicDpyDisconnected.dpyId = pDpyEvo->id;
            break;

        case NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED:
            event.u.dpyAttributeChanged.deviceHandle = deviceHandle;
            event.u.dpyAttributeChanged.dispHandle = dispHandle;
            event.u.dpyAttributeChanged.dpyId = pDpyEvo->id;
            event.u.dpyAttributeChanged.attribute = attribute;
            event.u.dpyAttributeChanged.value = value;
            break;

        default:
            nvAssert(!"Bad eventType");
            return;
        }

        SendEvent(pOpen, &event);
    }

    if (eventType == NVKMS_EVENT_TYPE_DPY_CHANGED) {
        NVDevEvoPtr pDevEvo =
            pDpyEvo->pDispEvo->pDevEvo;

        /*
         * No modeset owner while console hotplugs are handled: (re)arm the
         * console restore timer (500 is the timeout passed to
         * nvkms_alloc_timer; units per that API -- TODO confirm).
         */
        if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) {
            nvkms_free_timer(pDevEvo->consoleRestoreTimer);
            pDevEvo->consoleRestoreTimer =
                nvkms_alloc_timer(ConsoleRestoreTimerFired, pDevEvo, 0, 500);
        }
    }
}

/* Send a non-attribute dpy event to all interested ioctl clients. */
void nvSendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, const NvU32 eventType)
{
    nvAssert(eventType != NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED);
    SendDpyEventEvo(pDpyEvo, eventType,
                    0 /* attribute (unused) */,
                    0 /* value (unused) */ );
}

/* Send a DPY_ATTRIBUTE_CHANGED event to all interested ioctl clients. */
void nvSendDpyAttributeChangedEventEvo(const NVDpyEvoRec *pDpyEvo,
                                       const enum NvKmsDpyAttribute attribute,
                                       const NvS64 value)
{
    SendDpyEventEvo(pDpyEvo,
                    NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED,
                    attribute, value);
}

/* Send a FRAMELOCK_ATTRIBUTE_CHANGED event to all interested clients. */
void nvSendFrameLockAttributeChangedEventEvo(
    const NVFrameLockEvoRec *pFrameLockEvo,
    const enum NvKmsFrameLockAttribute attribute,
    const NvS64 value)
{
    struct NvKmsPerOpen *pOpen;
    const NvU32 eventType = NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {

        struct NvKmsEvent event = { 0 };
        NvKmsFrameLockHandle frameLockHandle;

        if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) {
            continue;
        }

        /* Skip clients with no handle for this framelock device. */
        if (!FrameLockEvoToFrameLockHandle(pOpen, pFrameLockEvo,
                                           &frameLockHandle)) {
            continue;
        }

        event.eventType = eventType;
        event.u.frameLockAttributeChanged.frameLockHandle = frameLockHandle;
        event.u.frameLockAttributeChanged.attribute = attribute;
        event.u.frameLockAttributeChanged.value = value;

        SendEvent(pOpen, &event);
    }
}


/* Notify interested, flip-permitted ioctl clients that a flip occurred. */
void nvSendFlipOccurredEventEvo(const NVDispEvoRec *pDispEvo,
                                const NvU32 apiHead, const NvU32 layer)
{
    struct NvKmsPerOpen *pOpen;
    const NvU32 eventType = NVKMS_EVENT_TYPE_FLIP_OCCURRED;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {

        struct NvKmsEvent event = { 0 };
        NvKmsDeviceHandle deviceHandle;
        NvKmsDispHandle dispHandle;

        struct NvKmsPerOpenDev *pOpenDev;
        const struct NvKmsFlipPermissions *pFlipPermissions;

        pOpenDev = DevEvoToOpenDev(pOpen, pDispEvo->pDevEvo);

        if (pOpenDev == NULL) {
            continue;
        }

        if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) {
            continue;
        }

        pFlipPermissions = &pOpenDev->flipPermissions;

        /* Only clients permitted to flip this layer get the event. */
        if ((pFlipPermissions->disp[pDispEvo->displayOwner].
             head[apiHead].layerMask & NVBIT(layer)) == 0x0) {
            continue;
        }

        if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo,
                                        &deviceHandle, &dispHandle)) {
            continue;
        }

        event.eventType = eventType;
        event.u.flipOccurred.deviceHandle = deviceHandle;
        event.u.flipOccurred.dispHandle = dispHandle;
        event.u.flipOccurred.head = apiHead;
        event.u.flipOccurred.layer = layer;

        SendEvent(pOpen, &event);
    }
}

/* Wake the client blocked on a unicast-event per-open. */
void nvSendUnicastEvent(struct NvKmsPerOpen *pOpen)
{
    if (pOpen == NULL) {
        return;
    }

    nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent);
    nvAssert(pOpen->unicastEvent.type != NvKmsUnicastEventTypeUndefined);

    nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE);
}

/*
 * Detach a unicast-event per-open from whatever object it is bound to
 * (deferred request fifo or vblank callback) and reset it to Undefined.
 */
void nvRemoveUnicastEvent(struct NvKmsPerOpen *pOpen)
{
    NVDeferredRequestFifoPtr pDeferredRequestFifo;
    NvKmsGenericHandle callbackHandle;
    NVVBlankCallbackPtr pCallbackData;
    struct NvKmsPerOpenDisp *pOpenDisp;
    NvU32 apiHead;

    if (pOpen == NULL) {
        return;
    }

    nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent);

    switch (pOpen->unicastEvent.type)
    {
        case NvKmsUnicastEventTypeDeferredRequest:
            pDeferredRequestFifo =
                pOpen->unicastEvent.e.deferred.pDeferredRequestFifo;

pDeferredRequestFifo->swapGroup.pOpenUnicastEvent = NULL; 5629 pOpen->unicastEvent.e.deferred.pDeferredRequestFifo = NULL; 5630 break; 5631 case NvKmsUnicastEventTypeVblankNotification: 5632 /* grab fields from the unicast fd */ 5633 callbackHandle = 5634 pOpen->unicastEvent.e.vblankNotification.hCallback; 5635 pOpenDisp = 5636 pOpen->unicastEvent.e.vblankNotification.pOpenDisp; 5637 apiHead = pOpen->unicastEvent.e.vblankNotification.apiHead; 5638 5639 /* Unregister the vblank callback */ 5640 pCallbackData = 5641 nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead], 5642 callbackHandle); 5643 5644 nvApiHeadUnregisterVBlankCallback(pOpenDisp->pDispEvo, 5645 pCallbackData); 5646 5647 nvEvoDestroyApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead], 5648 callbackHandle); 5649 5650 /* invalidate the pOpen data */ 5651 pOpen->unicastEvent.e.vblankNotification.hCallback = 0; 5652 pOpen->unicastEvent.e.vblankNotification.pOpenDisp = NULL; 5653 pOpen->unicastEvent.e.vblankNotification.apiHead = NV_INVALID_HEAD; 5654 break; 5655 default: 5656 nvAssert("Invalid Unicast Event Type!"); 5657 break; 5658 } 5659 5660 pOpen->unicastEvent.type = NvKmsUnicastEventTypeUndefined; 5661 } 5662 5663 static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo) 5664 { 5665 struct NvKmsPerOpen *pOpen; 5666 5667 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { 5668 struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); 5669 5670 NvKmsGenericHandle surfaceHandle; 5671 NVSurfaceEvoPtr pSurfaceEvo; 5672 5673 if (pOpenDev == NULL) { 5674 continue; 5675 } 5676 5677 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, 5678 pSurfaceEvo, surfaceHandle) { 5679 5680 NvU8 planeIndex; 5681 5682 if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { 5683 continue; 5684 } 5685 5686 if (!pSurfaceEvo->requireCtxDma) { 5687 nvAssert(pSurfaceEvo->planes[0].ctxDma == 0); 5688 continue; 5689 } 5690 5691 /* 5692 * Orphan surfaces 
should not get this far: they should 5693 * fail the owner check above. 5694 */ 5695 nvAssert(pSurfaceEvo->rmRefCnt > 0); 5696 5697 FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { 5698 5699 pSurfaceEvo->planes[planeIndex].ctxDma = 5700 nvRmEvoAllocateAndBindDispContextDMA( 5701 pDevEvo, 5702 pSurfaceEvo->planes[planeIndex].rmHandle, 5703 pSurfaceEvo->layout, 5704 pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes - 1); 5705 if (!pSurfaceEvo->planes[planeIndex].ctxDma) { 5706 FreeSurfaceCtxDmasForAllOpens(pDevEvo); 5707 nvAssert(!"Failed to re-allocate surface ctx dma"); 5708 return; 5709 } 5710 } 5711 } 5712 } 5713 } 5714 5715 5716 static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo) 5717 { 5718 struct NvKmsPerOpen *pOpen; 5719 5720 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { 5721 struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); 5722 5723 NvKmsGenericHandle surfaceHandle; 5724 NVSurfaceEvoPtr pSurfaceEvo; 5725 5726 if (pOpenDev == NULL) { 5727 continue; 5728 } 5729 5730 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, 5731 pSurfaceEvo, surfaceHandle) { 5732 5733 NvU8 planeIndex; 5734 5735 if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { 5736 continue; 5737 } 5738 5739 /* 5740 * Orphan surfaces should not get this far: they should 5741 * fail the owner check above. 
5742 */ 5743 nvAssert(pSurfaceEvo->rmRefCnt > 0); 5744 5745 if (!pSurfaceEvo->requireCtxDma) { 5746 nvAssert(pSurfaceEvo->planes[0].ctxDma == 0); 5747 continue; 5748 } 5749 5750 FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { 5751 nvRmEvoFreeDispContextDMA( 5752 pDevEvo, 5753 &pSurfaceEvo->planes[planeIndex].ctxDma); 5754 } 5755 } 5756 } 5757 } 5758 5759 #if defined(DEBUG) 5760 NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo) 5761 { 5762 struct NvKmsPerOpen *pOpen; 5763 5764 nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { 5765 5766 if (pOpen->type == NvKmsPerOpenTypeIoctl) { 5767 struct NvKmsPerOpenDev *pOpenDev; 5768 NvKmsGenericHandle dev; 5769 5770 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, 5771 pOpenDev, dev) { 5772 5773 NvKmsGenericHandle surfaceHandleUnused; 5774 NVSurfaceEvoPtr pSurfaceEvoTmp; 5775 5776 if (pOpenDev == NULL) { 5777 continue; 5778 } 5779 5780 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, 5781 pSurfaceEvoTmp, 5782 surfaceHandleUnused) { 5783 if (pSurfaceEvoTmp == pSurfaceEvo) { 5784 return TRUE; 5785 } 5786 } 5787 } 5788 } else if ((pOpen->type == NvKmsPerOpenTypeGrantSurface) && 5789 (pOpen->grantSurface.pSurfaceEvo == pSurfaceEvo)) { 5790 return TRUE; 5791 } 5792 } 5793 5794 return FALSE; 5795 } 5796 #endif 5797 5798 NVDevEvoPtr nvGetDevEvoFromOpenDev( 5799 const struct NvKmsPerOpenDev *pOpenDev) 5800 { 5801 nvAssert(pOpenDev != NULL); 5802 return pOpenDev->pDevEvo; 5803 } 5804 5805 const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev( 5806 const struct NvKmsPerOpenDev *pOpenDev) 5807 { 5808 nvAssert(pOpenDev != NULL); 5809 return &pOpenDev->flipPermissions; 5810 } 5811 5812 const struct NvKmsModesetPermissions *nvGetModesetPermissionsFromOpenDev( 5813 const struct NvKmsPerOpenDev *pOpenDev) 5814 { 5815 nvAssert(pOpenDev != NULL); 5816 return &pOpenDev->modesetPermissions; 5817 } 5818 5819 NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev( 5820 struct 
NvKmsPerOpenDev *pOpenDev) 5821 { 5822 if (pOpenDev == NULL) { 5823 return NULL; 5824 } 5825 5826 return &pOpenDev->surfaceHandles; 5827 } 5828 5829 const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst( 5830 const struct NvKmsPerOpenDev *pOpenDev) 5831 { 5832 if (pOpenDev == NULL) { 5833 return NULL; 5834 } 5835 5836 return &pOpenDev->surfaceHandles; 5837 } 5838 5839 static int suspendCounter = 0; 5840 5841 /* 5842 * Suspend NVKMS. 5843 * 5844 * This function is called by RM once per GPU, but NVKMS just counts the number 5845 * of suspend calls so that it can deallocate the core channels on the first 5846 * call to suspend(), and reallocate them on the last call to resume(). 5847 */ 5848 void nvKmsSuspend(NvU32 gpuId) 5849 { 5850 if (suspendCounter == 0) { 5851 NVDevEvoPtr pDevEvo; 5852 5853 FOR_ALL_EVO_DEVS(pDevEvo) { 5854 nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Suspending"); 5855 5856 /* 5857 * Shut down all heads and skip console restore. 5858 * 5859 * This works around an RM bug where it fails to train DisplayPort 5860 * links during resume if the system was suspended while heads were 5861 * active. 5862 * 5863 * XXX TODO bug 1850734: In addition to fixing the above 5864 * RM bug, NVKMS should clear pDispEvo head and connector state 5865 * that becomes stale after suspend. Shutting the heads down here 5866 * clears the relevant state explicitly. 
5867 */ 5868 nvShutDownApiHeads(pDevEvo, 5869 NULL /* pTestFunc, shut down all heads */); 5870 pDevEvo->skipConsoleRestore = TRUE; 5871 5872 FreeSurfaceCtxDmasForAllOpens(pDevEvo); 5873 5874 nvSuspendDevEvo(pDevEvo); 5875 } 5876 } 5877 5878 suspendCounter++; 5879 } 5880 5881 void nvKmsResume(NvU32 gpuId) 5882 { 5883 suspendCounter--; 5884 5885 if (suspendCounter == 0) { 5886 NVDevEvoPtr pDevEvo; 5887 5888 FOR_ALL_EVO_DEVS(pDevEvo) { 5889 nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Resuming"); 5890 5891 if (nvResumeDevEvo(pDevEvo)) { 5892 nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); 5893 AllocSurfaceCtxDmasForAllOpens(pDevEvo); 5894 } 5895 5896 if (pDevEvo->modesetOwner == NULL) { 5897 // Hardware state was lost, so we need to force a console 5898 // restore. 5899 pDevEvo->skipConsoleRestore = FALSE; 5900 RestoreConsole(pDevEvo); 5901 } 5902 } 5903 } 5904 } 5905 5906 static void ServiceOneDeferredRequestFifo( 5907 NVDevEvoPtr pDevEvo, 5908 NVDeferredRequestFifoRec *pDeferredRequestFifo) 5909 { 5910 struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo; 5911 NvU32 get, put; 5912 5913 nvAssert(fifo != NULL); 5914 5915 get = fifo->get; 5916 put = fifo->put; 5917 5918 if (put == get) { 5919 return; 5920 } 5921 5922 if ((get >= ARRAY_LEN(fifo->request)) || 5923 (put >= ARRAY_LEN(fifo->request))) { 5924 return; 5925 } 5926 5927 while (get != put) { 5928 5929 const NvU32 request = fifo->request[get]; 5930 const NvU32 opcode = 5931 DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request); 5932 5933 switch (opcode) { 5934 5935 case NVKMS_DEFERRED_REQUEST_OPCODE_NOP: 5936 break; 5937 5938 case NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY: 5939 nvHsSwapGroupReady( 5940 pDevEvo, 5941 pDeferredRequestFifo, 5942 request); 5943 break; 5944 5945 default: 5946 nvAssert(!"Invalid NVKMS deferred request opcode"); 5947 break; 5948 } 5949 5950 get = (get + 1) % ARRAY_LEN(fifo->request); 5951 } 5952 5953 fifo->get = put; 5954 } 5955 5956 /*! 
5957 * Respond to a non-stall interrupt. 5958 */ 5959 void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32) 5960 { 5961 NVDevEvoPtr pDevEvo = dataPtr; 5962 struct NvKmsPerOpen *pOpen; 5963 5964 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { 5965 5966 struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); 5967 NVDeferredRequestFifoRec *pDeferredRequestFifo; 5968 NvKmsGenericHandle handle; 5969 5970 if (pOpenDev == NULL) { 5971 continue; 5972 } 5973 5974 FOR_ALL_POINTERS_IN_EVO_API_HANDLES( 5975 &pOpenDev->deferredRequestFifoHandles, 5976 pDeferredRequestFifo, 5977 handle) { 5978 5979 ServiceOneDeferredRequestFifo(pDevEvo, pDeferredRequestFifo); 5980 } 5981 } 5982 5983 nvHsProcessPendingViewportFlips(pDevEvo); 5984 } 5985 5986 NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness) 5987 { 5988 NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; 5989 NV_STATUS status = NV_ERR_INVALID_STATE; 5990 NVDispEvoRec *pDispEvo = drv_priv; 5991 NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; 5992 5993 params.subDeviceInstance = pDispEvo->displayOwner; 5994 params.displayId = display_id; 5995 5996 status = nvRmApiControl(nvEvoGlobal.clientHandle, 5997 pDevEvo->displayCommonHandle, 5998 NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, 5999 ¶ms, sizeof(params)); 6000 6001 if (status == NV_OK) { 6002 *brightness = params.brightness; 6003 } 6004 6005 return status == NV_OK; 6006 } 6007 6008 NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness) 6009 { 6010 NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; 6011 NV_STATUS status = NV_ERR_INVALID_STATE; 6012 NVDispEvoRec *pDispEvo = drv_priv; 6013 NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; 6014 6015 params.subDeviceInstance = pDispEvo->displayOwner; 6016 params.displayId = display_id; 6017 params.brightness = brightness; 6018 6019 status = nvRmApiControl(nvEvoGlobal.clientHandle, 6020 pDevEvo->displayCommonHandle, 6021 
NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, 6022 ¶ms, sizeof(params)); 6023 6024 return status == NV_OK; 6025 } 6026