/*
 * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvUnixVersion.h"

#include "nvidia-modeset-os-interface.h"

#include "nvkms-api.h"
#include "nvkms-rmapi.h"
#include "nvkms-vrr.h"

#include "nvkms-kapi.h"
#include "nvkms-kapi-private.h"
#include "nvkms-kapi-internal.h"
#include "nvkms-kapi-notifiers.h"

#include <class/cl0000.h> /* NV01_ROOT/NV01_NULL_OBJECT */
#include <class/cl003e.h> /* NV01_MEMORY_SYSTEM */
#include <class/cl0080.h> /* NV01_DEVICE */
#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
#include <class/cl0071.h> /* NV01_MEMORY_SYSTEM_OS_DESCRIPTOR */
#include <class/cl2080.h> /* NV20_SUBDEVICE_0 */

#include <ctrl/ctrl0000/ctrl0000gpu.h> /* NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 */
#include <ctrl/ctrl0000/ctrl0000unix.h> /* NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD */
#include <ctrl/ctrl0000/ctrl0000client.h> /* NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM */
#include <ctrl/ctrl0080/ctrl0080gpu.h> /* NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES */
#include <ctrl/ctrl0080/ctrl0080fb.h> /* NV0080_CTRL_CMD_FB_GET_CAPS_V2 */
#include <ctrl/ctrl2080/ctrl2080unix.h> /* NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT */

#include "ctrl/ctrl003e.h" /* NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES */
#include "ctrl/ctrl0041.h" /* NV0041_CTRL_SURFACE_INFO */


ct_assert(NVKMS_KAPI_LAYER_PRIMARY_IDX == NVKMS_MAIN_LAYER);
ct_assert(NVKMS_KAPI_LAYER_MAX == NVKMS_MAX_LAYERS_PER_HEAD);

/* XXX Move to NVKMS */
#define NV_EVO_PITCH_ALIGNMENT 0x100

#define NVKMS_KAPI_SUPPORTED_EVENTS_MASK \
    ((1 << NVKMS_EVENT_TYPE_DPY_CHANGED) | \
     (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) | \
     (1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED))

static NvU32 EnumerateGpus(nv_gpu_info_t *gpuInfo)
{
    return nvkms_enumerate_gpus(gpuInfo);
}
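/*
 * The RM object hierarchy used throughout this file is, in outline:
 *
 *   hRmClient (NV01_ROOT)
 *     `-- hRmDevice (NV01_DEVICE_0, one per GPU)
 *           |-- hRmSubDevice (NV20_SUBDEVICE_0; exactly one subdevice is
 *           |                 supported, see RmAllocateDevice())
 *           `-- memory objects (NV01_MEMORY_SYSTEM, NV01_MEMORY_LOCAL_USER,
 *                               NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, ...)
 *
 * RmAllocateDevice() builds this hierarchy top-down; RmFreeDevice() tears it
 * down bottom-up.
 */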
/*
 * Helper function to free RM objects allocated for NvKmsKapiDevice.
 */
static void RmFreeDevice(struct NvKmsKapiDevice *device)
{
    if (device->hRmSubDevice != 0x0) {
        nvRmApiFree(device->hRmClient,
                    device->hRmDevice,
                    device->hRmSubDevice);
        nvKmsKapiFreeRmHandle(device, device->hRmSubDevice);
        device->hRmSubDevice = 0x0;
    }

    /* Free RM device object */

    if (device->hRmDevice != 0x0) {
        nvRmApiFree(device->hRmClient,
                    device->hRmClient,
                    device->hRmDevice);
        nvKmsKapiFreeRmHandle(device, device->hRmDevice);

        device->hRmDevice = 0x0;
    }

    nvTearDownUnixRmHandleAllocator(&device->handleAllocator);

    device->deviceInstance = 0;

    /* Free RM client */

    if (device->hRmClient != 0x0) {
        nvRmApiFree(device->hRmClient,
                    device->hRmClient,
                    device->hRmClient);

        device->hRmClient = 0x0;
    }
}

/*
 * Helper function to allocate RM objects for NvKmsKapiDevice.
 */
static NvBool RmAllocateDevice(struct NvKmsKapiDevice *device)
{
    NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS getNumSubDevicesParams = { 0 };
    NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS idInfoParams = { };
    NV2080_ALLOC_PARAMETERS subdevAllocParams = { 0 };
    NV0080_ALLOC_PARAMETERS allocParams = { };
    NV0080_CTRL_FB_GET_CAPS_V2_PARAMS fbCapsParams = { 0 };

    NvU32 hRmDevice, hRmSubDevice;
    NvBool supportsGenericPageKind;
    NvU32 ret;

    /* Allocate RM client */

    ret = nvRmApiAlloc(NV01_NULL_OBJECT,
                       NV01_NULL_OBJECT,
                       NV01_NULL_OBJECT,
                       NV01_ROOT,
                       &device->hRmClient);

    if (ret != NVOS_STATUS_SUCCESS || device->hRmClient == 0x0) {
        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM client");
        goto failed;
    }

    /* Query device instance */

    idInfoParams.gpuId = device->gpuId;

    ret = nvRmApiControl(device->hRmClient,
                         device->hRmClient,
                         NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2,
                         &idInfoParams,
                         sizeof(idInfoParams));

    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(device, "Failed to query device instance");
        goto failed;
    }

    device->deviceInstance = idInfoParams.deviceInstance;
    device->isSOC =
        FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE,
                     idInfoParams.gpuFlags);

    /* Initialize RM handle allocator */

    if (!nvInitUnixRmHandleAllocator(&device->handleAllocator,
                                     device->hRmClient,
                                     device->deviceInstance + 1)) {
        nvKmsKapiLogDeviceDebug(device, "Failed to initialize RM handle allocator");
        goto failed;
    }

    /* Allocate RM device object */

    hRmDevice = nvKmsKapiGenerateRmHandle(device);

    if (hRmDevice == 0x0) {
        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM handle");
        goto failed;
    }

    allocParams.deviceId = device->deviceInstance;

    allocParams.hClientShare = device->hRmClient;

    ret = nvRmApiAlloc(device->hRmClient,
                       device->hRmClient,
                       hRmDevice,
                       NV01_DEVICE_0,
                       &allocParams);

    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM device object");
        nvKmsKapiFreeRmHandle(device, hRmDevice);
        goto failed;
    }

    device->hRmDevice = hRmDevice;

    ret = nvRmApiControl(device->hRmClient,
                         device->hRmDevice,
                         NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES,
                         &getNumSubDevicesParams,
                         sizeof(getNumSubDevicesParams));

    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(device, "Failed to determine number of GPUs");
        goto failed;
    }

    if (getNumSubDevicesParams.numSubDevices != 1) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Unsupported number of GPUs: %d",
            getNumSubDevicesParams.numSubDevices);
        goto failed;
    }

    hRmSubDevice = nvKmsKapiGenerateRmHandle(device);

    if (hRmSubDevice == 0x0) {
        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM handle");
        goto failed;
    }

    subdevAllocParams.subDeviceId = 0;

    ret = nvRmApiAlloc(device->hRmClient,
                       device->hRmDevice,
                       hRmSubDevice,
                       NV20_SUBDEVICE_0,
                       &subdevAllocParams);

    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(device, "Failed to initialize subDevice");
        nvKmsKapiFreeRmHandle(device, hRmSubDevice);
        goto failed;
    }

    device->hRmSubDevice = hRmSubDevice;

    if (device->isSOC) {
        /* NVKMS is only used on T23X and later chips,
         * which all support generic memory. */
        supportsGenericPageKind = NV_TRUE;
    } else {
        ret = nvRmApiControl(device->hRmClient,
                             device->hRmDevice,
                             NV0080_CTRL_CMD_FB_GET_CAPS_V2,
                             &fbCapsParams,
                             sizeof (fbCapsParams));
        if (ret != NVOS_STATUS_SUCCESS) {
            nvKmsKapiLogDeviceDebug(device, "Failed to query framebuffer capabilities");
            goto failed;
        }
        supportsGenericPageKind =
            NV0080_CTRL_FB_GET_CAP(fbCapsParams.capsTbl,
                                   NV0080_CTRL_FB_CAPS_GENERIC_PAGE_KIND);
    }

    device->caps.genericPageKind =
        supportsGenericPageKind ?
        0x06 /* NV_MMU_PTE_KIND_GENERIC_MEMORY */ :
        0xfe /* NV_MMU_PTE_KIND_GENERIC_16BX2 */;

    return NV_TRUE;

failed:

    RmFreeDevice(device);

    return NV_FALSE;
}

/*
 * Helper function to free NVKMS objects allocated for NvKmsKapiDevice.
 */
static void KmsFreeDevice(struct NvKmsKapiDevice *device)
{
    /* Free notifier memory */

    nvKmsKapiFreeNotifiers(device);

    /* Free NVKMS device */

    if (device->hKmsDevice != 0x0) {
        struct NvKmsFreeDeviceParams paramsFree = { };

        paramsFree.request.deviceHandle = device->hKmsDevice;

        nvkms_ioctl_from_kapi(device->pKmsOpen,
                              NVKMS_IOCTL_FREE_DEVICE,
                              &paramsFree, sizeof(paramsFree));

        device->hKmsDevice = device->hKmsDisp = 0x0;
    }

    /* Close NVKMS */

    if (device->pKmsOpen != NULL) {
        nvkms_close_from_kapi(device->pKmsOpen);
        device->pKmsOpen = NULL;
    }
}
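/*
 * Note that KmsAllocateDevice() can succeed even without usable display
 * hardware: if NVKMS reports NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE,
 * the device is left in "displayless" mode with hKmsDevice == 0x0, and most
 * entry points below check for that and degrade gracefully.
 */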
/*
 * Helper function to allocate NVKMS objects for NvKmsKapiDevice.
 */
static NvBool KmsAllocateDevice(struct NvKmsKapiDevice *device)
{
    struct NvKmsAllocDeviceParams *paramsAlloc;
    NvBool status;
    NvBool inVideoMemory = FALSE;
    NvU32 head;
    NvBool ret = FALSE;
    NvU32 layer;

    paramsAlloc = nvKmsKapiCalloc(1, sizeof(*paramsAlloc));
    if (paramsAlloc == NULL) {
        return FALSE;
    }

    /* Open NVKMS */

    device->pKmsOpen = nvkms_open_from_kapi(device);

    if (device->pKmsOpen == NULL) {
        nvKmsKapiLogDeviceDebug(device, "Failed to open NVKMS");
        goto done;
    }

    /* Allocate NVKMS device */

    nvkms_strncpy(
        paramsAlloc->request.versionString,
        NV_VERSION_STRING,
        sizeof(paramsAlloc->request.versionString));

    if (device->isSOC) {
        paramsAlloc->request.deviceId = NVKMS_DEVICE_ID_TEGRA;
    } else {
        paramsAlloc->request.deviceId = device->deviceInstance;
    }
    paramsAlloc->request.sliMosaic = NV_FALSE;
    paramsAlloc->request.enableConsoleHotplugHandling = NV_TRUE;

    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
                                   NVKMS_IOCTL_ALLOC_DEVICE,
                                   paramsAlloc, sizeof(*paramsAlloc));

    if (!status ||
        paramsAlloc->reply.status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) {

        if (paramsAlloc->reply.status ==
            NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE) {
            nvKmsKapiLogDeviceDebug(
                device,
                "Display hardware is not available; falling back to "
                "displayless mode");

            ret = TRUE;
            goto done;
        }

        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to allocate NVKMS device %u(%u): %d %d\n",
            device->gpuId,
            paramsAlloc->request.deviceId,
            status,
            paramsAlloc->reply.status);

        goto done;
    }

    device->hKmsDevice = paramsAlloc->reply.deviceHandle;

    device->caps.cursorCompositionCaps =
        paramsAlloc->reply.cursorCompositionCaps;

    device->caps.overlayCompositionCaps =
        paramsAlloc->reply.layerCaps[NVKMS_OVERLAY_LAYER].composition;

    device->caps.validLayerRRTransforms =
        paramsAlloc->reply.validLayerRRTransforms;

    device->caps.maxWidthInPixels = paramsAlloc->reply.maxWidthInPixels;
    device->caps.maxHeightInPixels = paramsAlloc->reply.maxHeightInPixels;
    device->caps.maxCursorSizeInPixels = paramsAlloc->reply.maxCursorSize;
    device->caps.requiresVrrSemaphores = paramsAlloc->reply.requiresVrrSemaphores;

    /* The generic page kind was determined during RM device allocation,
     * but it should match what NVKMS reports. */
    nvAssert(device->caps.genericPageKind == paramsAlloc->reply.genericPageKind);

    /* XXX Add LUT support */

    device->numHeads = paramsAlloc->reply.numHeads;

    for (head = 0; head < device->numHeads; head++) {
        if (paramsAlloc->reply.numLayers[head] < 2) {
            goto done;
        }
        device->numLayers[head] = paramsAlloc->reply.numLayers[head];
    }

    for (layer = 0; layer < NVKMS_KAPI_LAYER_MAX; layer++) {
        device->supportedSurfaceMemoryFormats[layer] =
            paramsAlloc->reply.layerCaps[layer].supportedSurfaceMemoryFormats;
        device->supportsHDR[layer] = paramsAlloc->reply.layerCaps[layer].supportsHDR;
    }
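    /*
     * Pick the notifier (NISO) surface format from what NVKMS reports as
     * valid, preferring NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY, then
     * NVKMS_NISO_FORMAT_FOUR_WORD, and falling back to
     * NVKMS_NISO_FORMAT_LEGACY, which is asserted to always be available.
     */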
    if (paramsAlloc->reply.validNIsoFormatMask &
        (1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) {
        device->notifier.format = NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY;
    } else if (paramsAlloc->reply.validNIsoFormatMask &
               (1 << NVKMS_NISO_FORMAT_FOUR_WORD)) {
        device->notifier.format = NVKMS_NISO_FORMAT_FOUR_WORD;
    } else {
        nvAssert(paramsAlloc->reply.validNIsoFormatMask &
                 (1 << NVKMS_NISO_FORMAT_LEGACY));
        device->notifier.format = NVKMS_NISO_FORMAT_LEGACY;
    }

    /* XXX Add support for SLI/multiple display engines per device */
    if (paramsAlloc->reply.numDisps != 1)
    {
        nvKmsKapiLogDeviceDebug(device, "Found unsupported SLI configuration");
        goto done;
    }

    device->hKmsDisp = paramsAlloc->reply.dispHandles[0];
    device->dispIdx = 0;

    device->subDeviceMask = paramsAlloc->reply.subDeviceMask;

    device->isoIOCoherencyModes = paramsAlloc->reply.isoIOCoherencyModes;
    device->nisoIOCoherencyModes = paramsAlloc->reply.nisoIOCoherencyModes;

    device->supportsSyncpts = paramsAlloc->reply.supportsSyncpts;

    if (paramsAlloc->reply.nIsoSurfacesInVidmemOnly) {
        inVideoMemory = TRUE;
    }

    /* Allocate notifier memory */
    if (!nvKmsKapiAllocateNotifiers(device, inVideoMemory)) {
        nvKmsKapiLogDebug(
            "Failed to allocate Notifier objects for GPU ID 0x%08x",
            device->gpuId);
        goto done;
    }

    ret = NV_TRUE;

done:
    if (!ret) {
        KmsFreeDevice(device);
    }

    nvKmsKapiFree(paramsAlloc);

    return ret;
}

static void FreeDevice(struct NvKmsKapiDevice *device)
{
    /* Free NVKMS objects allocated for NvKmsKapiDevice */

    KmsFreeDevice(device);

    /* Free RM objects allocated for NvKmsKapiDevice */

    RmFreeDevice(device);

    /* Lower the reference count of gpu. */

    nvkms_close_gpu(device->gpuId);

    if (device->pSema != NULL) {
        nvkms_sema_free(device->pSema);
    }

    nvKmsKapiFree(device);
}

NvBool nvKmsKapiAllocateSystemMemory(struct NvKmsKapiDevice *device,
                                     NvU32 hRmHandle,
                                     enum NvKmsSurfaceMemoryLayout layout,
                                     NvU64 size,
                                     enum NvKmsKapiAllocationType type,
                                     NvU8 *compressible)
{
    NvU32 ret;
    NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
    const NvKmsDispIOCoherencyModes *pIOCoherencyModes = NULL;

    memAllocParams.owner = NVKMS_RM_HEAP_ID;
    memAllocParams.size = size;

    switch (layout) {
        case NvKmsSurfaceMemoryLayoutBlockLinear:
            memAllocParams.attr =
                FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR,
                            memAllocParams.attr);
            if (*compressible) {
                /*
                 * RM will choose a compressed page kind and hence allocate
                 * comptags for color surfaces >= 32bpp. The actual kind
                 * chosen isn't important, as it can be overridden by creating
                 * a virtual alloc with a different kind when mapping the
                 * memory into the GPU.
                 */
                memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32,
                                                  memAllocParams.attr);
                memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COMPR, _ANY,
                                                  memAllocParams.attr);
            } else {
                memAllocParams.attr =
                    FLD_SET_DRF(OS32, _ATTR, _DEPTH, _UNKNOWN,
                                memAllocParams.attr);
            }
            break;

        case NvKmsSurfaceMemoryLayoutPitch:
            memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH,
                                              memAllocParams.attr);
            break;

        default:
            nvKmsKapiLogDeviceDebug(device, "Unknown Memory Layout");
            return NV_FALSE;
    }

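    /*
     * The allocation type decides which attributes are layered on top of the
     * format chosen above: SCANOUT allocations follow the display (ISO) I/O
     * coherency mode, NOTIFIER allocations must be pitch, are tagged as NISO
     * display memory and follow the NISO coherency mode, and OFFSCREEN
     * allocations are simply marked as never scanned out.
     */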
    switch (type) {
        case NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT:
            /* XXX Note compression and scanout do not work together on
             * any current GPUs. However, some use cases do involve scanning
             * out a compression-capable surface:
             *
             * 1) Mapping the compressible surface as non-compressed when
             *    generating its content.
             *
             * 2) Using decompress-in-place to decompress the surface content
             *    before scanning it out.
             *
             * Hence creating compressed allocations of TYPE_SCANOUT is allowed.
             */

            pIOCoherencyModes = &device->isoIOCoherencyModes;

            break;
        case NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER:
            if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
                nvKmsKapiLogDeviceDebug(device,
                    "Attempting creation of BlockLinear notifier memory");
                return NV_FALSE;
            }

            memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _NISO_DISPLAY,
                                               _YES, memAllocParams.attr2);

            pIOCoherencyModes = &device->nisoIOCoherencyModes;

            break;
        case NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN:
            memAllocParams.flags |= NVOS32_ALLOC_FLAGS_NO_SCANOUT;
            break;
        default:
            nvKmsKapiLogDeviceDebug(device, "Unknown Allocation Type");
            return NV_FALSE;
    }

    memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI,
                                      memAllocParams.attr);
    memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO,
                                       memAllocParams.attr2);

    if (pIOCoherencyModes == NULL || !pIOCoherencyModes->coherent) {
        memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY,
                                          _WRITE_COMBINE, memAllocParams.attr);
    } else {
        memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY,
                                          _WRITE_BACK, memAllocParams.attr);
    }

    memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS,
                                      memAllocParams.attr);

    ret = nvRmApiAlloc(device->hRmClient,
                       device->hRmDevice,
                       hRmHandle,
                       NV01_MEMORY_SYSTEM,
                       &memAllocParams);
    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(
            device,
            "nvRmApiAlloc failed with error code 0x%08x",
            ret);

        return NV_FALSE;
    }

    if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE,
                     memAllocParams.attr)) {
        *compressible = 0;
    } else {
        *compressible = 1;
    }

    return TRUE;
}
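/*
 * Video memory allocation mirrors the system memory path above, but places
 * the allocation in vidmem and, for scanout surfaces, additionally requests
 * EVO surface alignment and physically contiguous memory.
 */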
NvBool nvKmsKapiAllocateVideoMemory(struct NvKmsKapiDevice *device,
                                    NvU32 hRmHandle,
                                    enum NvKmsSurfaceMemoryLayout layout,
                                    NvU64 size,
                                    enum NvKmsKapiAllocationType type,
                                    NvU8 *compressible)
{
    NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
    NvU32 ret;

    memAllocParams.owner = NVKMS_RM_HEAP_ID;
    memAllocParams.size = size;

    switch (layout) {
        case NvKmsSurfaceMemoryLayoutBlockLinear:
            memAllocParams.attr =
                FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR,
                            memAllocParams.attr);

            if (*compressible) {
                /*
                 * RM will choose a compressed page kind and hence allocate
                 * comptags for color surfaces >= 32bpp. The actual kind
                 * chosen isn't important, as it can be overridden by creating
                 * a virtual alloc with a different kind when mapping the
                 * memory into the GPU.
                 */
                memAllocParams.attr =
                    FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32,
                                memAllocParams.attr);
                memAllocParams.attr =
                    FLD_SET_DRF(OS32, _ATTR, _COMPR, _ANY,
                                memAllocParams.attr);
            } else {
                memAllocParams.attr =
                    FLD_SET_DRF(OS32, _ATTR, _DEPTH, _UNKNOWN,
                                memAllocParams.attr);
            }
            break;

        case NvKmsSurfaceMemoryLayoutPitch:
            memAllocParams.attr =
                FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH,
                            memAllocParams.attr);
            break;

        default:
            nvKmsKapiLogDeviceDebug(device, "Unknown Memory Layout");
            return NV_FALSE;
    }


    memAllocParams.attr =
        FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM,
                    memAllocParams.attr);
    memAllocParams.attr2 =
        FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO,
                    memAllocParams.attr2);

    switch (type) {
        case NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT:
            /* XXX [JRJ] Not quite right. This can also be used to allocate
             * cursor images. The stuff RM does with this field is kind of
             * black magic, and I can't tell if it actually matters.
             */
            memAllocParams.type = NVOS32_TYPE_PRIMARY;

            memAllocParams.alignment = NV_EVO_SURFACE_ALIGNMENT;
            memAllocParams.flags |=
                NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE |   /* Pick up above EVO alignment */
                NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP; /* X sets this for cursors */
            memAllocParams.attr =
                FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS,
                            memAllocParams.attr);

            /* XXX [JRJ] Note compression and scanout do not work together on
             * any current GPUs. However, some use cases do involve scanning
             * out a compression-capable surface:
             *
             * 1) Mapping the compressible surface as non-compressed when
             *    generating its content.
             *
             * 2) Using decompress-in-place to decompress the surface content
             *    before scanning it out.
             *
             * Hence creating compressed allocations of TYPE_SCANOUT is allowed.
             */

            break;
        case NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER:
            if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
                nvKmsKapiLogDeviceDebug(device,
                    "Attempting creation of BlockLinear notifier memory");
                return NV_FALSE;
            }

            memAllocParams.type = NVOS32_TYPE_DMA;

            memAllocParams.attr =
                FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB,
                            memAllocParams.attr);
            memAllocParams.attr =
                FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED,
                            memAllocParams.attr);

            break;
        case NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN:
            memAllocParams.type = NVOS32_TYPE_IMAGE;
            memAllocParams.flags |=
                NVOS32_ALLOC_FLAGS_NO_SCANOUT |
                NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP;
            memAllocParams.attr =
                FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS,
                            memAllocParams.attr);
            break;
        default:
            nvKmsKapiLogDeviceDebug(device, "Unknown Allocation Type");
            return NV_FALSE;
    }

    ret = nvRmApiAlloc(device->hRmClient,
                       device->hRmDevice,
                       hRmHandle,
                       NV01_MEMORY_LOCAL_USER,
                       &memAllocParams);

    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(
            device,
            "VidHeapControl failed with error code 0x%08x",
            ret);

        return NV_FALSE;
    }

    if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE,
                     memAllocParams.attr)) {
        *compressible = 0;
    } else {
        *compressible = 1;
    }

    return NV_TRUE;
}
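/*
 * AllocateDevice() brings up a full NvKmsKapiDevice: it allocates the
 * container and its semaphore, takes a reference on the GPU, then allocates
 * the RM objects (RmAllocateDevice) and the NVKMS objects
 * (KmsAllocateDevice). Any failure unwinds through FreeDevice().
 */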
static struct NvKmsKapiDevice* AllocateDevice
(
    const struct NvKmsKapiAllocateDeviceParams *params
)
{
    struct NvKmsKapiDevice *device = NULL;

    device = nvKmsKapiCalloc(1, sizeof(*device));

    if (device == NULL) {
        nvKmsKapiLogDebug(
            "Failed to allocate memory for NvKmsKapiDevice of GPU ID 0x%08x",
            params->gpuId);
        goto failed;
    }

    device->pSema = nvkms_sema_alloc();

    if (device->pSema == NULL) {
        nvKmsKapiLogDebug(
            "Failed to allocate semaphore for NvKmsKapiDevice of GPU ID 0x%08x",
            params->gpuId);
        goto failed;
    }

    /* Raise the reference count of gpu. */

    if (!nvkms_open_gpu(params->gpuId)) {
        nvKmsKapiLogDebug("Failed to open GPU ID 0x%08x", params->gpuId);
        goto failed;
    }

    device->gpuId = params->gpuId;

    nvKmsKapiLogDebug(
        "Allocating NvKmsKapiDevice 0x%p for GPU ID 0x%08x",
        device,
        device->gpuId);

    /* Allocate RM object for NvKmsKapiDevice */

    if (!RmAllocateDevice(device)) {
        nvKmsKapiLogDebug(
            "Failed to allocate RM objects for GPU ID 0x%08x",
            device->gpuId);
        goto failed;
    }

    /* Allocate NVKMS objects for NvKmsKapiDevice */

    if (!KmsAllocateDevice(device)) {
        nvKmsKapiLogDebug(
            "Failed to allocate NVKMS objects for GPU ID 0x%08x",
            device->gpuId);
        goto failed;
    }

    device->privateData = params->privateData;
    device->eventCallback = params->eventCallback;

    return device;

failed:

    FreeDevice(device);

    return NULL;
}

static NvBool GrabOwnership(struct NvKmsKapiDevice *device)
{
    struct NvKmsGrabOwnershipParams paramsGrab = { };

    if (device->hKmsDevice == 0x0) {
        return NV_TRUE;
    }

    paramsGrab.request.deviceHandle = device->hKmsDevice;

    return nvkms_ioctl_from_kapi(device->pKmsOpen,
                                 NVKMS_IOCTL_GRAB_OWNERSHIP,
                                 &paramsGrab, sizeof(paramsGrab));

}

static void ReleaseOwnership(struct NvKmsKapiDevice *device)
{
    struct NvKmsReleaseOwnershipParams paramsRelease = { };

    if (device->hKmsDevice == 0x0) {
        return;
    }

    paramsRelease.request.deviceHandle = device->hKmsDevice;

    nvkms_ioctl_from_kapi(device->pKmsOpen,
                          NVKMS_IOCTL_RELEASE_OWNERSHIP,
                          &paramsRelease, sizeof(paramsRelease));
}

static NvBool GrantPermissions
(
    NvS32 fd,
    struct NvKmsKapiDevice *device,
    NvU32 head,
    NvKmsKapiDisplay display
)
{
    struct NvKmsGrantPermissionsParams paramsGrant = { };
    struct NvKmsPermissions *perm = &paramsGrant.request.permissions;
    NvU32 dispIdx;

    /* Check the device pointer before dereferencing it. */
    if (device == NULL) {
        return NV_FALSE;
    }

    dispIdx = device->dispIdx;

    if (dispIdx >= ARRAY_LEN(perm->modeset.disp) ||
        head >= ARRAY_LEN(perm->modeset.disp[0].head)) {
        return NV_FALSE;
    }

    if (device->hKmsDevice == 0x0) {
        return NV_TRUE;
    }

    perm->type = NV_KMS_PERMISSIONS_TYPE_MODESET;
    perm->modeset.disp[dispIdx].head[head].dpyIdList =
        nvAddDpyIdToEmptyDpyIdList(nvNvU32ToDpyId(display));

    paramsGrant.request.fd = fd;
    paramsGrant.request.deviceHandle = device->hKmsDevice;

    return nvkms_ioctl_from_kapi(device->pKmsOpen,
                                 NVKMS_IOCTL_GRANT_PERMISSIONS, &paramsGrant,
                                 sizeof(paramsGrant));
}

static NvBool RevokePermissions
(
    struct NvKmsKapiDevice *device,
    NvU32 head,
    NvKmsKapiDisplay display
)
{
    struct NvKmsRevokePermissionsParams paramsRevoke = { };
    struct NvKmsPermissions *perm = &paramsRevoke.request.permissions;
    NvU32 dispIdx;

    /* Check the device pointer before dereferencing it. */
    if (device == NULL) {
        return NV_FALSE;
    }

    dispIdx = device->dispIdx;

    if (dispIdx >= ARRAY_LEN(perm->modeset.disp) ||
        head >= ARRAY_LEN(perm->modeset.disp[0].head)) {
        return NV_FALSE;
    }

    if (device->hKmsDevice == 0x0) {
        return NV_TRUE;
    }

    perm->type = NV_KMS_PERMISSIONS_TYPE_MODESET;
    perm->modeset.disp[dispIdx].head[head].dpyIdList =
        nvAddDpyIdToEmptyDpyIdList(nvNvU32ToDpyId(display));

    paramsRevoke.request.deviceHandle = device->hKmsDevice;

    return nvkms_ioctl_from_kapi(device->pKmsOpen,
                                 NVKMS_IOCTL_REVOKE_PERMISSIONS, &paramsRevoke,
                                 sizeof(paramsRevoke));
}
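/*
 * DeclareEventInterest() below silently filters the requested interest mask
 * down to NVKMS_KAPI_SUPPORTED_EVENTS_MASK (dpy changed, dynamic dpy
 * connected, and flip occurred).
 */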
static NvBool DeclareEventInterest
(
    const struct NvKmsKapiDevice *device,
    const NvU32 interestMask
)
{
    struct NvKmsDeclareEventInterestParams kmsEventParams = { };

    if (device->hKmsDevice == 0x0 || device->eventCallback == NULL) {
        return NV_TRUE;
    }

    kmsEventParams.request.interestMask =
        interestMask & NVKMS_KAPI_SUPPORTED_EVENTS_MASK;

    return nvkms_ioctl_from_kapi(device->pKmsOpen,
                                 NVKMS_IOCTL_DECLARE_EVENT_INTEREST,
                                 &kmsEventParams, sizeof(kmsEventParams));
}

static NvBool GetDeviceResourcesInfo
(
    struct NvKmsKapiDevice *device,
    struct NvKmsKapiDeviceResourcesInfo *info
)
{
    struct NvKmsQueryDispParams paramsDisp = { };
    NvBool status = NV_FALSE;

    NvU32 i;

    nvkms_memset(info, 0, sizeof(*info));

    info->caps.hasVideoMemory = !device->isSOC;
    info->caps.genericPageKind = device->caps.genericPageKind;

    if (device->hKmsDevice == 0x0) {
        info->caps.pitchAlignment = 0x1;
        return NV_TRUE;
    }

    paramsDisp.request.deviceHandle = device->hKmsDevice;
    paramsDisp.request.dispHandle = device->hKmsDisp;

    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
                                   NVKMS_IOCTL_QUERY_DISP,
                                   &paramsDisp, sizeof(paramsDisp));

    if (!status)
    {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to query display engine information");

        goto done;
    }

    info->numHeads = device->numHeads;

    ct_assert(sizeof(info->numLayers) == sizeof(device->numLayers));
    nvkms_memcpy(info->numLayers, device->numLayers, sizeof(device->numLayers));

    ct_assert(ARRAY_LEN(info->connectorHandles) >=
              ARRAY_LEN(paramsDisp.reply.connectorHandles));

    info->numConnectors = paramsDisp.reply.numConnectors;

    for (i = 0; i < paramsDisp.reply.numConnectors; i++) {
        info->connectorHandles[i] = paramsDisp.reply.connectorHandles[i];
    }

    {
        const struct NvKmsCompositionCapabilities *pCaps =
            &device->caps.cursorCompositionCaps;

        info->caps.validCursorCompositionModes =
            pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE].
            supportedBlendModes[1];
    }
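    /*
     * Composition capabilities are reported per layer: the primary layer is
     * limited to opaque blending, while overlay layers advertise whatever
     * blend modes NVKMS reported in overlayCompositionCaps.
     */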
    for (i = 0; i < NVKMS_KAPI_LAYER_MAX; i++) {
        if (i == NVKMS_KAPI_LAYER_PRIMARY_IDX) {
            info->caps.layer[i].validCompositionModes =
                NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE);
        } else {
            const struct NvKmsCompositionCapabilities *pCaps =
                &device->caps.overlayCompositionCaps;

            info->caps.layer[i].validCompositionModes =
                pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE].
                supportedBlendModes[1];
        }
    }

    for (i = 0; i < NVKMS_KAPI_LAYER_MAX; i++) {
        info->caps.layer[i].validRRTransforms =
            device->caps.validLayerRRTransforms;
    }

    info->caps.maxWidthInPixels = device->caps.maxWidthInPixels;
    info->caps.maxHeightInPixels = device->caps.maxHeightInPixels;
    info->caps.maxCursorSizeInPixels = device->caps.maxCursorSizeInPixels;

    info->caps.pitchAlignment = NV_EVO_PITCH_ALIGNMENT;

    info->caps.supportsSyncpts = device->supportsSyncpts;

    info->caps.supportedCursorSurfaceMemoryFormats =
        NVBIT(NvKmsSurfaceMemoryFormatA8R8G8B8);

    ct_assert(sizeof(info->supportedSurfaceMemoryFormats) ==
              sizeof(device->supportedSurfaceMemoryFormats));

    nvkms_memcpy(info->supportedSurfaceMemoryFormats,
                 device->supportedSurfaceMemoryFormats,
                 sizeof(device->supportedSurfaceMemoryFormats));

    ct_assert(sizeof(info->supportsHDR) ==
              sizeof(device->supportsHDR));

    nvkms_memcpy(info->supportsHDR,
                 device->supportsHDR,
                 sizeof(device->supportsHDR));

done:

    return status;
}
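/*
 * GetDisplays() below supports the usual two-call pattern: call it with
 * *numDisplays == 0 to learn how many displays exist, then call it again
 * with an array large enough to hold them. In both cases *numDisplays is
 * written back with the total count.
 */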
/*
 * XXX Make it per-connector, query valid dpyId list as dynamic data of
 * connector.
 */
static NvBool GetDisplays
(
    struct NvKmsKapiDevice *device,
    NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles
)
{
    struct NvKmsQueryDispParams paramsDisp = { };
    NvBool status = NV_FALSE;

    NVDpyId dpyId;

    if (device->hKmsDevice == 0x0) {
        *numDisplays = 0;
        return NV_TRUE;
    }

    paramsDisp.request.deviceHandle = device->hKmsDevice;
    paramsDisp.request.dispHandle = device->hKmsDisp;

    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
                                   NVKMS_IOCTL_QUERY_DISP,
                                   &paramsDisp, sizeof(paramsDisp));

    if (!status)
    {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to query display engine information");

        return NV_FALSE;
    }

    if (*numDisplays == 0) {
        goto done;
    }

    if (*numDisplays < nvCountDpyIdsInDpyIdList(paramsDisp.reply.validDpys)) {
        nvKmsKapiLogDebug(
            "Size of display handle array is less than number of displays");
        goto done;
    }

    FOR_ALL_DPY_IDS(dpyId, paramsDisp.reply.validDpys) {
        *(displayHandles++) = nvDpyIdToNvU32(dpyId);
    }

done:

    *numDisplays = nvCountDpyIdsInDpyIdList(paramsDisp.reply.validDpys);

    return NV_TRUE;
}

static NvBool GetConnectorInfo
(
    struct NvKmsKapiDevice *device,
    NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info
)
{
    struct NvKmsQueryConnectorStaticDataParams paramsConnector = { };
    NvBool status = NV_FALSE;

    if (device == NULL || info == NULL) {
        goto done;
    }

    paramsConnector.request.deviceHandle = device->hKmsDevice;
    paramsConnector.request.dispHandle = device->hKmsDisp;
    paramsConnector.request.connectorHandle = connector;

    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
                                   NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA,
                                   &paramsConnector, sizeof(paramsConnector));

    if (!status) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to query static data of connector 0x%08x",
            connector);

        goto done;
    }

    info->handle = connector;

    info->physicalIndex = paramsConnector.reply.physicalIndex;

    info->signalFormat = paramsConnector.reply.signalFormat;

    info->type = paramsConnector.reply.type;

done:

    return status;
}

static NvBool GetStaticDisplayInfo
(
    struct NvKmsKapiDevice *device,
    NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info
)
{
    struct NvKmsQueryDpyStaticDataParams paramsDpyStatic = { };
    NvBool status = NV_FALSE;

    if (device == NULL || info == NULL) {
        goto done;
    }

    /* Query static data of display */

    paramsDpyStatic.request.deviceHandle = device->hKmsDevice;
    paramsDpyStatic.request.dispHandle = device->hKmsDisp;

    paramsDpyStatic.request.dpyId = nvNvU32ToDpyId(display);

    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
                                   NVKMS_IOCTL_QUERY_DPY_STATIC_DATA,
                                   &paramsDpyStatic, sizeof(paramsDpyStatic));

    if (!status) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to query static data of dpy 0x%08x",
            display);

        goto done;
    }

    info->handle = display;

    info->connectorHandle = paramsDpyStatic.reply.connectorHandle;

    ct_assert(sizeof(info->dpAddress) ==
              sizeof(paramsDpyStatic.reply.dpAddress));

    nvkms_memcpy(info->dpAddress,
                 paramsDpyStatic.reply.dpAddress,
                 sizeof(paramsDpyStatic.reply.dpAddress));
    info->dpAddress[sizeof(paramsDpyStatic.reply.dpAddress) - 1] = '\0';

    info->internal = paramsDpyStatic.reply.mobileInternal;
    info->headMask = paramsDpyStatic.reply.headMask;
done:

    return status;
}
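/*
 * GetDynamicDisplayInfo() below both queries and optionally overrides
 * dynamic dpy state: when params->overrideEdid is set, the caller-provided
 * EDID is pushed to NVKMS; otherwise the detected EDID and VRR capability
 * are copied back to the caller.
 */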
static NvBool GetDynamicDisplayInfo(
    struct NvKmsKapiDevice *device,
    struct NvKmsKapiDynamicDisplayParams *params)
{
    struct NvKmsQueryDpyDynamicDataParams *pParamsDpyDynamic = NULL;
    NvBool status = NV_FALSE;

    if (device == NULL || params == NULL) {
        goto done;
    }

    pParamsDpyDynamic = nvKmsKapiCalloc(1, sizeof(*pParamsDpyDynamic));

    if (pParamsDpyDynamic == NULL) {
        goto done;
    }

    pParamsDpyDynamic->request.deviceHandle = device->hKmsDevice;
    pParamsDpyDynamic->request.dispHandle = device->hKmsDisp;

    pParamsDpyDynamic->request.dpyId = nvNvU32ToDpyId(params->handle);

    if (params->overrideEdid) {
        ct_assert(sizeof(params->edid.buffer) ==
                  sizeof(pParamsDpyDynamic->reply.edid.buffer));
        nvkms_memcpy(
            pParamsDpyDynamic->request.edid.buffer,
            params->edid.buffer,
            sizeof(pParamsDpyDynamic->request.edid.buffer));

        pParamsDpyDynamic->request.edid.bufferSize = params->edid.bufferSize;

        pParamsDpyDynamic->request.overrideEdid = NV_TRUE;
    }

    pParamsDpyDynamic->request.forceConnected = params->forceConnected;

    pParamsDpyDynamic->request.forceDisconnected = params->forceDisconnected;

    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
                                   NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA,
                                   pParamsDpyDynamic, sizeof(*pParamsDpyDynamic));

    if (!status) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to query dynamic data of dpy 0x%08x",
            params->handle);

        goto done;
    }

    params->connected = pParamsDpyDynamic->reply.connected;

    if (pParamsDpyDynamic->reply.connected && !params->overrideEdid) {
        NvBool vrrSupported =
            (pParamsDpyDynamic->reply.vrrType != NVKMS_DPY_VRR_TYPE_NONE) ?
            NV_TRUE : NV_FALSE;

        nvkms_memcpy(
            params->edid.buffer,
            pParamsDpyDynamic->reply.edid.buffer,
            sizeof(params->edid.buffer));

        params->edid.bufferSize = pParamsDpyDynamic->reply.edid.bufferSize;
        params->vrrSupported =
            (vrrSupported && !device->caps.requiresVrrSemaphores) ?
            NV_TRUE : NV_FALSE;
    }

done:

    if (pParamsDpyDynamic != NULL) {
        nvKmsKapiFree(pParamsDpyDynamic);
    }

    return status;
}

static void FreeMemory
(
    struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory
)
{
    if (device == NULL || memory == NULL) {
        return;
    }

    if (memory->hRmHandle != 0x0) {
        NvU32 ret;

        ret = nvRmApiFree(device->hRmClient,
                          device->hRmDevice,
                          memory->hRmHandle);

        if (ret != NVOS_STATUS_SUCCESS) {
            nvKmsKapiLogDeviceDebug(
                device,
                "Failed to free RM memory object 0x%08x allocated for "
                "NvKmsKapiMemory 0x%p",
                memory->hRmHandle, memory);
        }

        nvKmsKapiFreeRmHandle(device, memory->hRmHandle);
    }

    nvKmsKapiFree(memory);
}

static struct NvKmsKapiMemory *AllocMemoryObjectAndHandle(
    struct NvKmsKapiDevice *device,
    NvU32 *handleOut
)
{
    struct NvKmsKapiMemory *memory;

    /* Allocate the container object */

    memory = nvKmsKapiCalloc(1, sizeof(*memory));

    if (memory == NULL) {
        nvKmsKapiLogDebug(
            "Failed to allocate memory for NVKMS memory object on "
            "NvKmsKapiDevice 0x%p",
            device);
        return NULL;
    }

    /* Generate RM handle for memory object */

    *handleOut = nvKmsKapiGenerateRmHandle(device);

    if (*handleOut == 0x0) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to allocate RM handle for memory");
        nvKmsKapiFree(memory);
        return NULL;
    }

    return memory;
}

static struct NvKmsKapiMemory* AllocateVideoMemory
(
    struct NvKmsKapiDevice *device,
    enum NvKmsSurfaceMemoryLayout layout,
    enum NvKmsKapiAllocationType type,
    NvU64 size,
    NvU8 *compressible
)
{
    struct NvKmsKapiMemory *memory = NULL;
    NvU32 hRmHandle;

    memory = AllocMemoryObjectAndHandle(device, &hRmHandle);

    if (!memory) {
        return NULL;
    }

    if (!nvKmsKapiAllocateVideoMemory(device,
                                      hRmHandle,
                                      layout,
                                      size,
                                      type,
                                      compressible)) {
        nvKmsKapiFreeRmHandle(device, hRmHandle);
        FreeMemory(device, memory);
        return NULL;
    }

    memory->hRmHandle = hRmHandle;
    memory->size = size;
    memory->surfaceParams.layout = layout;

    if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
        memory->surfaceParams.blockLinear.genericMemory = NV_TRUE;
    }

    return memory;
}
static struct NvKmsKapiMemory* AllocateSystemMemory
(
    struct NvKmsKapiDevice *device,
    enum NvKmsSurfaceMemoryLayout layout,
    enum NvKmsKapiAllocationType type,
    NvU64 size,
    NvU8 *compressible
)
{
    struct NvKmsKapiMemory *memory = NULL;
    NvU32 hRmHandle;

    memory = AllocMemoryObjectAndHandle(device, &hRmHandle);

    if (!memory) {
        return NULL;
    }

    if (!nvKmsKapiAllocateSystemMemory(device,
                                       hRmHandle,
                                       layout,
                                       size,
                                       type,
                                       compressible)) {
        nvKmsKapiFreeRmHandle(device, hRmHandle);
        FreeMemory(device, memory);
        return NULL;
    }

    memory->hRmHandle = hRmHandle;
    memory->size = size;
    memory->surfaceParams.layout = layout;

    if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
        memory->surfaceParams.blockLinear.genericMemory = NV_TRUE;
    }

    return memory;
}

static struct NvKmsKapiMemory* ImportMemory
(
    struct NvKmsKapiDevice *device,
    NvU64 memorySize,
    NvU64 nvKmsParamsUser,
    NvU64 nvKmsParamsSize
)
{
    struct NvKmsKapiPrivImportMemoryParams nvKmsParams, *pNvKmsParams = NULL;
    NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS importParams = { };
    struct NvKmsKapiMemory *memory = NULL;
    NvU32 hMemory;
    NvU32 ret;
    int status;

    /* Verify the driver-private params size and copy it in from userspace */

    if (nvKmsParamsSize != sizeof(nvKmsParams)) {
        nvKmsKapiLogDebug(
            "NVKMS private memory import parameter size mismatch - "
            "expected: 0x%llx, caller specified: 0x%llx",
            (NvU64)sizeof(nvKmsParams), nvKmsParamsSize);
        return NULL;
    }

    /*
     * Use a heap allocation as the destination pointer passed to
     * nvkms_copyin; stack allocations created within core NVKMS may not
     * be recognizable to the Linux kernel's CONFIG_HARDENED_USERCOPY
     * checker, triggering false errors. But then save the result to a
     * variable on the stack, so that we can free the heap memory
     * immediately and not worry about its lifetime.
     */

    pNvKmsParams = nvKmsKapiCalloc(1, sizeof(*pNvKmsParams));

    if (pNvKmsParams == NULL) {
        nvKmsKapiLogDebug("Failed to allocate memory for ImportMemory");
        return NULL;
    }

    status = nvkms_copyin(pNvKmsParams, nvKmsParamsUser, sizeof(*pNvKmsParams));

    nvKmsParams = *pNvKmsParams;

    nvKmsKapiFree(pNvKmsParams);

    if (status != 0) {
        nvKmsKapiLogDebug(
            "NVKMS private memory import parameters could not be read from "
            "userspace");
        return NULL;
    }

    memory = AllocMemoryObjectAndHandle(device, &hMemory);

    if (!memory) {
        return NULL;
    }

    importParams.fd = nvKmsParams.memFd;
    importParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
    importParams.object.data.rmObject.hDevice = device->hRmDevice;
    importParams.object.data.rmObject.hParent = device->hRmDevice;
    importParams.object.data.rmObject.hObject = hMemory;

    ret = nvRmApiControl(device->hRmClient,
                         device->hRmClient,
                         NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD,
                         &importParams,
                         sizeof(importParams));

    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to import RM memory object (%d) of size %llu bytes",
            nvKmsParams.memFd, memorySize);

        nvKmsKapiFreeRmHandle(device, hMemory);
        goto failed;
    }

    memory->hRmHandle = hMemory;
    memory->size = memorySize;
    memory->surfaceParams = nvKmsParams.surfaceParams;

    return memory;

failed:

    FreeMemory(device, memory);

    return NULL;
}
static struct NvKmsKapiMemory* DupMemory
(
    struct NvKmsKapiDevice *device,
    const struct NvKmsKapiDevice *srcDevice,
    const struct NvKmsKapiMemory *srcMemory
)
{
    struct NvKmsKapiMemory *memory;
    NvU32 hMemory;
    NvU32 ret;

    memory = AllocMemoryObjectAndHandle(device, &hMemory);

    if (!memory) {
        return NULL;
    }

    ret = nvRmApiDupObject(device->hRmClient,
                           device->hRmDevice,
                           hMemory,
                           srcDevice->hRmClient,
                           srcMemory->hRmHandle,
                           0);

    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to dup NVKMS memory object 0x%p (0x%08x, 0x%08x) "
            "of size %llu bytes",
            srcMemory, srcDevice->hRmClient, srcMemory->hRmHandle,
            srcMemory->size);

        nvKmsKapiFreeRmHandle(device, hMemory);
        goto failed;
    }

    memory->hRmHandle = hMemory;
    memory->size = srcMemory->size;
    memory->surfaceParams = srcMemory->surfaceParams;

    return memory;

failed:
    FreeMemory(device, memory);

    return NULL;
}
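/*
 * ExportMemory() below is the counterpart of ImportMemory(): it pushes an
 * existing RM memory object out through the caller-supplied file descriptor
 * using NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD, following the same
 * heap-buffer copyin convention for the driver-private parameters.
 */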
static NvBool ExportMemory
(
    const struct NvKmsKapiDevice *device,
    const struct NvKmsKapiMemory *memory,
    NvU64 nvKmsParamsUser,
    NvU64 nvKmsParamsSize
)
{
    struct NvKmsKapiPrivExportMemoryParams nvKmsParams, *pNvKmsParams = NULL;
    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { };
    int status;
    NvU32 ret;

    if (device == NULL || memory == NULL) {
        nvKmsKapiLogDebug(
            "Invalid device or memory parameter while exporting memory");
        return NV_FALSE;
    }

    /* Verify the driver-private params size and copy it in from userspace */

    if (nvKmsParamsSize != sizeof(nvKmsParams)) {
        nvKmsKapiLogDebug(
            "NVKMS private memory export parameter size mismatch - "
            "expected: 0x%llx, caller specified: 0x%llx",
            (NvU64)sizeof(nvKmsParams), nvKmsParamsSize);
        return NV_FALSE;
    }

    /*
     * Use a heap allocation as the destination pointer passed to
     * nvkms_copyin; stack allocations created within core NVKMS may not
     * be recognizable to the Linux kernel's CONFIG_HARDENED_USERCOPY
     * checker, triggering false errors. But then save the result to a
     * variable on the stack, so that we can free the heap memory
     * immediately and not worry about its lifetime.
     */

    pNvKmsParams = nvKmsKapiCalloc(1, sizeof(*pNvKmsParams));

    if (pNvKmsParams == NULL) {
        nvKmsKapiLogDebug("Failed to allocate scratch memory for ExportMemory");
        return NV_FALSE;
    }

    status = nvkms_copyin(pNvKmsParams, nvKmsParamsUser, sizeof(*pNvKmsParams));

    nvKmsParams = *pNvKmsParams;
    nvKmsKapiFree(pNvKmsParams);

    if (status != 0) {
        nvKmsKapiLogDebug(
            "NVKMS private memory export parameters could not be read from "
            "userspace");
        return NV_FALSE;
    }

    exportParams.fd = nvKmsParams.memFd;
    exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
    exportParams.object.data.rmObject.hDevice = device->hRmDevice;
    exportParams.object.data.rmObject.hParent = device->hRmDevice;
    exportParams.object.data.rmObject.hObject = memory->hRmHandle;

    ret = nvRmApiControl(device->hRmClient,
                         device->hRmClient,
                         NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD,
                         &exportParams,
                         sizeof(exportParams));

    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to export RM memory object of size %llu bytes "
            "to (%d)", memory->size, nvKmsParams.memFd);
        return NV_FALSE;
    }

    return NV_TRUE;
}
static struct NvKmsKapiMemory*
GetSystemMemoryHandleFromDmaBufSgtHelper(struct NvKmsKapiDevice *device,
                                         NvU32 descriptorType,
                                         NvP64 descriptor,
                                         NvU32 limit)
{
    NvU32 ret;
    NV_OS_DESC_MEMORY_ALLOCATION_PARAMS memAllocParams = {0};
    struct NvKmsKapiMemory *memory = NULL;
    NvU32 hRmHandle;

    memory = AllocMemoryObjectAndHandle(device, &hRmHandle);

    if (!memory) {
        return NULL;
    }

    memAllocParams.type = NVOS32_TYPE_PRIMARY;
    memAllocParams.descriptorType = descriptorType;
    memAllocParams.descriptor = descriptor;
    memAllocParams.limit = limit;

    memAllocParams.attr =
        FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, memAllocParams.attr);

    memAllocParams.attr2 =
        FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, memAllocParams.attr2);

    /* dmabuf import is currently only used for ISO memory. */
    if (!device->isoIOCoherencyModes.coherent) {
        memAllocParams.attr =
            FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE,
                        memAllocParams.attr);
    } else {
        memAllocParams.attr =
            FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK,
                        memAllocParams.attr);
    }

    ret = nvRmApiAlloc(device->hRmClient,
                       device->hRmDevice,
                       hRmHandle,
                       NV01_MEMORY_SYSTEM_OS_DESCRIPTOR,
                       &memAllocParams);
    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(
            device,
            "nvRmApiAlloc failed with error code 0x%08x",
            ret);
        nvKmsKapiFreeRmHandle(device, hRmHandle);
        FreeMemory(device, memory);
        return NULL;
    }

    memory->hRmHandle = hRmHandle;
    memory->size = limit + 1;
    memory->surfaceParams.layout = NvKmsSurfaceMemoryLayoutPitch;

    return memory;
}

static struct NvKmsKapiMemory*
GetSystemMemoryHandleFromSgt(struct NvKmsKapiDevice *device,
                             NvP64 sgt,
                             NvP64 gem,
                             NvU32 limit)
{
    NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS params = {
        .sgt = sgt,
        .gem = gem
    };

    return GetSystemMemoryHandleFromDmaBufSgtHelper(
        device, NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR, &params, limit);
}

static struct NvKmsKapiMemory*
GetSystemMemoryHandleFromDmaBuf(struct NvKmsKapiDevice *device,
                                NvP64 dmaBuf,
                                NvU32 limit)
{
    return GetSystemMemoryHandleFromDmaBufSgtHelper(
        device, NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR, dmaBuf, limit);
}

static NvBool RmGc6BlockerRefCntAction(const struct NvKmsKapiDevice *device,
                                       NvU32 action)
{
    NV_STATUS status;
    NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params = { 0 };

    nvAssert((action == NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC) ||
             (action == NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC));

    params.action = action;

    status = nvRmApiControl(device->hRmClient,
                            device->hRmSubDevice,
                            NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT,
                            &params,
                            sizeof(params));
    if (status != NV_OK) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to modify GC6 blocker refcount for 0x%x, status: 0x%x",
            device->hRmSubDevice, status);
        return NV_FALSE;
    }

    return NV_TRUE;
}

static NvBool RmGc6BlockerRefCntInc(const struct NvKmsKapiDevice *device)
{
    return RmGc6BlockerRefCntAction(
        device,
        NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC);
}

static NvBool RmGc6BlockerRefCntDec(const struct NvKmsKapiDevice *device)
{
    return RmGc6BlockerRefCntAction(
        device,
        NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC);
}
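/*
 * The GC6 blocker reference count is used below to keep the GPU out of GC6
 * while user-space mappings exist: MapMemory() takes a reference before
 * creating a user mapping and UnmapMemory() drops it again.
 */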
static NvBool GetMemoryPages
(
    const struct NvKmsKapiDevice *device,
    const struct NvKmsKapiMemory *memory,
    NvU64 **pPages,
    NvU32 *pNumPages
)
{
    NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS paramsGetNumPages = {};
    NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS paramsGetPages = {};
    NvU64 *pages;
    NV_STATUS status;

    if (device == NULL || memory == NULL) {
        return NV_FALSE;
    }

    status = nvRmApiControl(device->hRmClient,
                            memory->hRmHandle,
                            NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES,
                            &paramsGetNumPages,
                            sizeof(paramsGetNumPages));
    if (status != NV_OK) {
        nvKmsKapiLogDeviceDebug(device,
            "Failed to get number of physical allocation pages for RM "
            "memory object 0x%x", memory->hRmHandle);
        return NV_FALSE;
    }

    if (!paramsGetNumPages.numPages) {
        return NV_FALSE;
    }

    pages = nvKmsKapiCalloc(paramsGetNumPages.numPages, sizeof(*pages));
    if (!pages) {
        nvKmsKapiLogDeviceDebug(device, "Failed to allocate memory");
        return NV_FALSE;
    }

    paramsGetPages.pPages = NV_PTR_TO_NvP64(pages);
    paramsGetPages.numPages = paramsGetNumPages.numPages;

    status = nvRmApiControl(device->hRmClient,
                            memory->hRmHandle,
                            NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES,
                            &paramsGetPages,
                            sizeof(paramsGetPages));
    if (status != NV_OK) {
        nvKmsKapiFree(pages);
        nvKmsKapiLogDeviceDebug(device,
            "Failed to get physical allocation pages for RM "
            "memory object 0x%x", memory->hRmHandle);
        return NV_FALSE;
    }

    nvAssert(paramsGetPages.numPages == paramsGetNumPages.numPages);

    *pPages = pages;
    *pNumPages = paramsGetPages.numPages;

    return NV_TRUE;
}

/*
 * Check if the memory we are creating this framebuffer with is valid. We
 * cannot scan out sysmem or compressed buffers.
 *
 * If we cannot use this memory for display it may be resident in sysmem
 * or may belong to another GPU.
 */
static NvBool IsMemoryValidForDisplay
(
    const struct NvKmsKapiDevice *device,
    const struct NvKmsKapiMemory *memory
)
{
    NV_STATUS status;
    NV0041_CTRL_SURFACE_INFO surfaceInfo = {};
    NV0041_CTRL_GET_SURFACE_INFO_PARAMS surfaceInfoParams = {};

    if (device == NULL || memory == NULL) {
        return NV_FALSE;
    }

    /*
     * Don't do these checks on Tegra. Tegra has different capabilities.
     * Here we always say display is possible so we never fail framebuffer
     * creation.
     */
    if (device->isSOC) {
        return NV_TRUE;
    }

    /* Get the type of address space this memory is in, i.e. vidmem or sysmem */
    surfaceInfo.index = NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE;

    surfaceInfoParams.surfaceInfoListSize = 1;
    surfaceInfoParams.surfaceInfoList = (NvP64)&surfaceInfo;

    status = nvRmApiControl(device->hRmClient,
                            memory->hRmHandle,
                            NV0041_CTRL_CMD_GET_SURFACE_INFO,
                            &surfaceInfoParams,
                            sizeof(surfaceInfoParams));
    if (status != NV_OK) {
        nvKmsKapiLogDeviceDebug(device,
            "Failed to get memory location of RM memory object 0x%x",
            memory->hRmHandle);
        return NV_FALSE;
    }

    return surfaceInfo.data == NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM;
}

static void FreeMemoryPages
(
    NvU64 *pPages
)
{
    nvKmsKapiFree(pPages);
}
static NvBool MapMemory
(
    const struct NvKmsKapiDevice *device,
    const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
    void **ppLinearAddress
)
{
    NV_STATUS status;
    NvU32 flags = 0;

    if (device == NULL || memory == NULL) {
        return NV_FALSE;
    }

    switch (type) {
        case NVKMS_KAPI_MAPPING_TYPE_USER:
            /*
             * Usermode clients can't be trusted not to access mappings while
             * the GPU is in GC6.
             *
             * TODO: Revoke/restore mappings rather than blocking GC6
             */
            if (!RmGc6BlockerRefCntInc(device)) {
                return NV_FALSE;
            }
            flags |= DRF_DEF(OS33, _FLAGS, _MEM_SPACE, _USER);
            break;
        case NVKMS_KAPI_MAPPING_TYPE_KERNEL:
            /*
             * Kernel clients should ensure on their own that the GPU isn't in
             * GC6 before making accesses to mapped vidmem surfaces.
             */
            break;
    }

    status = nvRmApiMapMemory(
        device->hRmClient,
        device->hRmSubDevice,
        memory->hRmHandle,
        0,
        memory->size,
        ppLinearAddress,
        flags);

    if (status != NV_OK) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to map RM memory object 0x%x allocated for NvKmsKapiMemory 0x%p",
            memory->hRmHandle, memory);
        if (type == NVKMS_KAPI_MAPPING_TYPE_USER) {
            RmGc6BlockerRefCntDec(device); // XXX Can't handle failure.
        }
        return NV_FALSE;
    }

    return NV_TRUE;
}

static void UnmapMemory
(
    const struct NvKmsKapiDevice *device,
    const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
    const void *pLinearAddress
)
{
    NV_STATUS status;
    NvU32 flags = 0;

    if (device == NULL || memory == NULL) {
        return;
    }

    switch (type) {
        case NVKMS_KAPI_MAPPING_TYPE_USER:
            flags |= DRF_DEF(OS33, _FLAGS, _MEM_SPACE, _USER);
            break;
        case NVKMS_KAPI_MAPPING_TYPE_KERNEL:
            break;
    }

    status =
        nvRmApiUnmapMemory(device->hRmClient,
                           device->hRmSubDevice,
                           memory->hRmHandle,
                           pLinearAddress,
                           flags);

    if (status != NV_OK) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to unmap RM memory object 0x%x allocated for NvKmsKapiMemory 0x%p",
            memory->hRmHandle, memory);
    }

    if (type == NVKMS_KAPI_MAPPING_TYPE_USER) {
        RmGc6BlockerRefCntDec(device); // XXX Can't handle failure.
    }
}
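/*
 * GetSurfaceParams() validates the per-plane parameters of a surface about
 * to be registered with NVKMS: every plane must have backing memory, all
 * planes must agree on layout (and, for block-linear, on gobs-per-block),
 * and block-linear pitches are expressed in blocks, either converted from
 * the caller's byte pitch or taken from the imported surface parameters.
 */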
static NvBool GetSurfaceParams(
    struct NvKmsKapiCreateSurfaceParams *params,
    NvU32 *pNumPlanes,
    enum NvKmsSurfaceMemoryLayout *pLayout,
    NvU32 *pLog2GobsPerBlockY,
    NvU32 pitch[])
{
    const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
        nvKmsGetSurfaceMemoryFormatInfo(params->format);
    enum NvKmsSurfaceMemoryLayout layout = NvKmsSurfaceMemoryLayoutPitch;
    NvU32 log2GobsPerBlockY = 0;
    NvU32 i;

    if (pFormatInfo->numPlanes == 0)
    {
        nvKmsKapiLogDebug("Unknown surface format");
        return NV_FALSE;
    }

    for (i = 0; i < pFormatInfo->numPlanes; i++) {
        struct NvKmsKapiMemory *memory =
            params->planes[i].memory;

        if (memory == NULL) {
            return FALSE;
        }

        pitch[i] = params->planes[i].pitch;

        if (i == 0) {
            if (params->explicit_layout) {
                layout = params->layout;
            } else {
                layout = memory->surfaceParams.layout;
            }

            switch (layout) {
                case NvKmsSurfaceMemoryLayoutBlockLinear:
                    if (params->explicit_layout) {
                        log2GobsPerBlockY = params->log2GobsPerBlockY;
                    } else {
                        log2GobsPerBlockY =
                            memory->surfaceParams.blockLinear.log2GobsPerBlock.y;
                    }
                    break;

                case NvKmsSurfaceMemoryLayoutPitch:
                    log2GobsPerBlockY = 0;
                    break;

                default:
                    nvKmsKapiLogDebug("Invalid surface layout: %u", layout);
                    return NV_FALSE;
            }
        } else {
            if (!params->explicit_layout) {
                if (layout != memory->surfaceParams.layout) {
                    nvKmsKapiLogDebug("All planes are not of same layout");
                    return FALSE;
                }

                if (layout == NvKmsSurfaceMemoryLayoutBlockLinear &&
                    log2GobsPerBlockY !=
                    memory->surfaceParams.blockLinear.log2GobsPerBlock.y) {

                    nvKmsKapiLogDebug(
                        "All planes do not have the same blocklinear parameters");
                    return FALSE;
                }
            }
        }

        if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
            if (params->explicit_layout) {
                if (pitch[i] & 63) {
                    nvKmsKapiLogDebug(
                        "Invalid block-linear pitch alignment: %u", pitch[i]);
                    return NV_FALSE;
                }

                pitch[i] = pitch[i] >> 6;
            } else {
                /*
                 * The caller (nvidia-drm) is not blocklinear-aware, so the
                 * passed-in pitch cannot accurately reflect block information.
                 * Override the pitch with what was specified when the surface
                 * was imported.
2074 */ 2075 pitch[i] = memory->surfaceParams.blockLinear.pitchInBlocks; 2076 } 2077 } else { 2078 pitch[i] = params->planes[i].pitch; 2079 } 2080 2081 } 2082 2083 *pNumPlanes = pFormatInfo->numPlanes; 2084 *pLayout = layout; 2085 *pLog2GobsPerBlockY = log2GobsPerBlockY; 2086 2087 return NV_TRUE; 2088 } 2089 static struct NvKmsKapiSurface* CreateSurface 2090 ( 2091 struct NvKmsKapiDevice *device, 2092 struct NvKmsKapiCreateSurfaceParams *params 2093 ) 2094 { 2095 struct NvKmsRegisterSurfaceParams paramsReg = { }; 2096 NvBool status; 2097 2098 struct NvKmsKapiSurface *surface = NULL; 2099 2100 enum NvKmsSurfaceMemoryLayout layout = NvKmsSurfaceMemoryLayoutPitch; 2101 NvU32 log2GobsPerBlockY = 0; 2102 NvU32 numPlanes = 0; 2103 NvU32 pitch[NVKMS_MAX_PLANES_PER_SURFACE] = { 0 }; 2104 NvU32 i; 2105 2106 if (!GetSurfaceParams(params, 2107 &numPlanes, 2108 &layout, 2109 &log2GobsPerBlockY, 2110 pitch)) 2111 { 2112 goto failed; 2113 } 2114 2115 surface = nvKmsKapiCalloc(1, sizeof(*surface)); 2116 2117 if (surface == NULL) { 2118 nvKmsKapiLogDebug( 2119 "Failed to allocate memory for NVKMS surface object on " 2120 "NvKmsKapiDevice 0x%p", 2121 device); 2122 goto failed; 2123 } 2124 2125 if (device->hKmsDevice == 0x0) { 2126 goto done; 2127 } 2128 2129 /* Create NVKMS surface */ 2130 2131 paramsReg.request.deviceHandle = device->hKmsDevice; 2132 2133 paramsReg.request.useFd = FALSE; 2134 paramsReg.request.rmClient = device->hRmClient; 2135 2136 paramsReg.request.widthInPixels = params->width; 2137 paramsReg.request.heightInPixels = params->height; 2138 2139 paramsReg.request.format = params->format; 2140 2141 paramsReg.request.layout = layout; 2142 paramsReg.request.log2GobsPerBlockY = log2GobsPerBlockY; 2143 2144 for (i = 0; i < numPlanes; i++) { 2145 struct NvKmsKapiMemory *memory = 2146 params->planes[i].memory; 2147 2148 paramsReg.request.planes[i].u.rmObject = memory->hRmHandle; 2149 paramsReg.request.planes[i].rmObjectSizeInBytes = memory->size; 2150 paramsReg.request.planes[i].offset = params->planes[i].offset; 2151 paramsReg.request.planes[i].pitch = pitch[i]; 2152 } 2153 2154 status = nvkms_ioctl_from_kapi(device->pKmsOpen, 2155 NVKMS_IOCTL_REGISTER_SURFACE, 2156 ¶msReg, sizeof(paramsReg)); 2157 if (!status) { 2158 nvKmsKapiLogDeviceDebug( 2159 device, 2160 "Failed to register NVKMS surface of dimensions %ux%u " 2161 "and %s format", 2162 params->width, 2163 params->height, 2164 nvKmsSurfaceMemoryFormatToString(params->format)); 2165 2166 goto failed; 2167 } 2168 2169 surface->hKmsHandle = paramsReg.reply.surfaceHandle; 2170 2171 done: 2172 return surface; 2173 2174 failed: 2175 nvKmsKapiFree(surface); 2176 2177 return NULL; 2178 } 2179 2180 static void DestroySurface 2181 ( 2182 struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface 2183 ) 2184 { 2185 struct NvKmsUnregisterSurfaceParams paramsUnreg = { }; 2186 NvBool status; 2187 2188 if (device->hKmsDevice == 0x0) { 2189 goto done; 2190 } 2191 2192 paramsUnreg.request.deviceHandle = device->hKmsDevice; 2193 paramsUnreg.request.surfaceHandle = surface->hKmsHandle; 2194 2195 status = nvkms_ioctl_from_kapi(device->pKmsOpen, 2196 NVKMS_IOCTL_UNREGISTER_SURFACE, 2197 ¶msUnreg, sizeof(paramsUnreg)); 2198 2199 if (!status) { 2200 nvKmsKapiLogDeviceDebug( 2201 device, 2202 "Failed to unregister NVKMS surface registered for " 2203 "NvKmsKapiSurface 0x%p", 2204 surface); 2205 } 2206 2207 done: 2208 nvKmsKapiFree(surface); 2209 } 2210 2211 /* 2212 * Helper function to convert NvKmsMode to NvKmsKapiDisplayMode. 
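 *
 * Both NvModeTimings::RRx1k and the KAPI timings' refreshRate field store
 * the refresh rate in units of 0.001 Hz.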
 */
static void NvKmsModeToKapi
(
    const struct NvKmsMode *kmsMode,
    struct NvKmsKapiDisplayMode *mode
)
{
    const NvModeTimings *timings = &kmsMode->timings;

    nvkms_memset(mode, 0, sizeof(*mode));

    mode->timings.refreshRate = timings->RRx1k;
    mode->timings.pixelClockHz = timings->pixelClockHz;
    mode->timings.hVisible = timings->hVisible;
    mode->timings.hSyncStart = timings->hSyncStart;
    mode->timings.hSyncEnd = timings->hSyncEnd;
    mode->timings.hTotal = timings->hTotal;
    mode->timings.hSkew = timings->hSkew;
    mode->timings.vVisible = timings->vVisible;
    mode->timings.vSyncStart = timings->vSyncStart;
    mode->timings.vSyncEnd = timings->vSyncEnd;
    mode->timings.vTotal = timings->vTotal;

    mode->timings.flags.interlaced = timings->interlaced;
    mode->timings.flags.doubleScan = timings->doubleScan;
    mode->timings.flags.hSyncPos = timings->hSyncPos;
    mode->timings.flags.hSyncNeg = timings->hSyncNeg;
    mode->timings.flags.vSyncPos = timings->vSyncPos;
    mode->timings.flags.vSyncNeg = timings->vSyncNeg;

    mode->timings.widthMM = timings->sizeMM.w;
    mode->timings.heightMM = timings->sizeMM.h;

    ct_assert(sizeof(mode->name) == sizeof(kmsMode->name));

    nvkms_memcpy(mode->name, kmsMode->name, sizeof(mode->name));
}

static void InitNvKmsModeValidationParams(
    const struct NvKmsKapiDevice *device,
    struct NvKmsModeValidationParams *params)
{
    /*
     * The mode-timings structures used by KAPI clients may not carry a
     * field like RRx1k, so the RRx1k value computed during the conversion
     *
     *   KAPI client's mode-timings structure
     *     -> NvKmsKapiDisplayMode -> NvModeTimings
     *
     * is not guaranteed to match the value derived from the EDID, and the
     * mismatch could cause a modeset to fail.
     *
     * The RRx1k field does not affect the hardware mode timings, so
     * override the RRx1k check.
     *
     * XXX NVKMS TODO: Bug 200156338 is filed to delete NvModeTimings::RRx1k
     * if possible.
     */
    params->overrides = NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK;
}

static int GetDisplayMode
(
    struct NvKmsKapiDevice *device,
    NvKmsKapiDisplay display, NvU32 modeIndex,
    struct NvKmsKapiDisplayMode *mode, NvBool *valid,
    NvBool *preferredMode
)
{
    struct NvKmsValidateModeIndexParams paramsValidate = { };
    NvBool status;

    if (device == NULL) {
        return -1;
    }

    paramsValidate.request.deviceHandle = device->hKmsDevice;
    paramsValidate.request.dispHandle = device->hKmsDisp;

    paramsValidate.request.dpyId = nvNvU32ToDpyId(display);

    InitNvKmsModeValidationParams(device,
                                  &paramsValidate.request.modeValidation);

    paramsValidate.request.modeIndex = modeIndex;

    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
                                   NVKMS_IOCTL_VALIDATE_MODE_INDEX,
                                   &paramsValidate, sizeof(paramsValidate));

    if (!status) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to get validated mode index 0x%x for NvKmsKapiDisplay 0x%08x",
            modeIndex, display);
        return -1;
    }

    if (mode != NULL) {
        NvKmsModeToKapi(&paramsValidate.reply.mode, mode);
    }

    if (valid != NULL) {
        *valid = paramsValidate.reply.valid;
    }

    if (preferredMode != NULL) {
        *preferredMode = paramsValidate.reply.preferredMode;
    }

    return paramsValidate.reply.end ? 0 : 1;
}
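/*
 * Illustrative sketch (not part of this file): a KAPI client would typically
 * enumerate the modes of a display by calling the getDisplayMode entry point
 * with increasing mode indices until it returns 0 (last mode) or a negative
 * value (error).  The names funcs, dev, display and handleMode below are
 * hypothetical.
 *
 *     struct NvKmsKapiDisplayMode kapiMode;
 *     NvBool valid, preferred;
 *     NvU32 idx = 0;
 *     int more;
 *
 *     do {
 *         more = funcs->getDisplayMode(dev, display, idx++,
 *                                      &kapiMode, &valid, &preferred);
 *         if (more < 0) {
 *             break;   // query failed
 *         }
 *         if (valid) {
 *             handleMode(&kapiMode, preferred);
 *         }
 *     } while (more == 1);
 */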

/*
 * Helper function to convert NvKmsKapiDisplayMode to NvKmsMode.
 */
static void NvKmsKapiDisplayModeToKapi
(
    const struct NvKmsKapiDisplayMode *mode,
    struct NvKmsMode *kmsMode
)
{
    NvModeTimings *timings = &kmsMode->timings;

    nvkms_memset(kmsMode, 0, sizeof(*kmsMode));

    nvkms_memcpy(kmsMode->name, mode->name, sizeof(mode->name));

    timings->RRx1k = mode->timings.refreshRate;
    timings->pixelClockHz = mode->timings.pixelClockHz;
    timings->hVisible = mode->timings.hVisible;
    timings->hSyncStart = mode->timings.hSyncStart;
    timings->hSyncEnd = mode->timings.hSyncEnd;
    timings->hTotal = mode->timings.hTotal;
    timings->hSkew = mode->timings.hSkew;
    timings->vVisible = mode->timings.vVisible;
    timings->vSyncStart = mode->timings.vSyncStart;
    timings->vSyncEnd = mode->timings.vSyncEnd;
    timings->vTotal = mode->timings.vTotal;

    timings->interlaced = mode->timings.flags.interlaced;
    timings->doubleScan = mode->timings.flags.doubleScan;
    timings->hSyncPos = mode->timings.flags.hSyncPos;
    timings->hSyncNeg = mode->timings.flags.hSyncNeg;
    timings->vSyncPos = mode->timings.flags.vSyncPos;
    timings->vSyncNeg = mode->timings.flags.vSyncNeg;

    timings->sizeMM.w = mode->timings.widthMM;
    timings->sizeMM.h = mode->timings.heightMM;
}

static NvBool ValidateDisplayMode
(
    struct NvKmsKapiDevice *device,
    NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode
)
{
    struct NvKmsValidateModeParams paramsValidate;
    NvBool status;

    if (device == NULL) {
        return NV_FALSE;
    }

    nvkms_memset(&paramsValidate, 0, sizeof(paramsValidate));

    paramsValidate.request.deviceHandle = device->hKmsDevice;
    paramsValidate.request.dispHandle = device->hKmsDisp;

    paramsValidate.request.dpyId = nvNvU32ToDpyId(display);

    InitNvKmsModeValidationParams(device,
                                  &paramsValidate.request.modeValidation);

    NvKmsKapiDisplayModeToKapi(mode, &paramsValidate.request.mode);

    status = nvkms_ioctl_from_kapi(device->pKmsOpen,
                                   NVKMS_IOCTL_VALIDATE_MODE,
                                   &paramsValidate, sizeof(paramsValidate));

    if (!status) {
        nvKmsKapiLogDeviceDebug(
            device,
            "Failed to get validated mode %ux%u@%uHz for NvKmsKapiDisplay 0x%08x of "
            "NvKmsKapiDevice 0x%p",
            mode->timings.hVisible, mode->timings.vVisible,
            mode->timings.refreshRate/1000, display,
            device);
        return NV_FALSE;
    }

    return paramsValidate.reply.valid;
}

static NvBool AssignSyncObjectConfig(
    struct NvKmsKapiDevice *device,
    const struct NvKmsKapiLayerConfig *pLayerConfig,
    struct NvKmsChannelSyncObjects *pSyncObject)
{
    if (!device->supportsSyncpts) {
        if (pLayerConfig->syncptParams.preSyncptSpecified ||
            pLayerConfig->syncptParams.postSyncptRequested) {
            return NV_FALSE;
        }
    }

    pSyncObject->useSyncpt = FALSE;

    if (pLayerConfig->syncptParams.preSyncptSpecified) {
        pSyncObject->useSyncpt = TRUE;

        pSyncObject->u.syncpts.pre.type = NVKMS_SYNCPT_TYPE_RAW;
        pSyncObject->u.syncpts.pre.u.raw.id = pLayerConfig->syncptParams.preSyncptId;
        pSyncObject->u.syncpts.pre.u.raw.value = pLayerConfig->syncptParams.preSyncptValue;
    }

    if (pLayerConfig->syncptParams.postSyncptRequested) {
        pSyncObject->useSyncpt = TRUE;

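        /*
         * The post-flip syncpt is requested as a file descriptor.  For a
         * committed flip, KmsFlip() returns it to the client through
         * NvKmsKapiLayerReplyConfig::postSyncptFd.
         */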
pSyncObject->u.syncpts.requestedPostType = NVKMS_SYNCPT_TYPE_FD; 2433 } 2434 return NV_TRUE; 2435 } 2436 2437 static void AssignHDRMetadataConfig( 2438 const struct NvKmsKapiLayerConfig *layerConfig, 2439 const NvU32 layer, 2440 struct NvKmsFlipCommonParams *params) 2441 { 2442 if (layerConfig->hdrMetadataSpecified) { 2443 params->layer[layer].hdr.enabled = TRUE; 2444 params->layer[layer].hdr.specified = TRUE; 2445 params->layer[layer].hdr.staticMetadata = layerConfig->hdrMetadata; 2446 } else { 2447 params->layer[layer].hdr.enabled = FALSE; 2448 params->layer[layer].hdr.specified = TRUE; 2449 } 2450 } 2451 2452 static void NvKmsKapiCursorConfigToKms( 2453 const struct NvKmsKapiCursorRequestedConfig *requestedConfig, 2454 struct NvKmsFlipCommonParams *params, 2455 NvBool bFromKmsSetMode) 2456 { 2457 if (requestedConfig->flags.surfaceChanged || bFromKmsSetMode) { 2458 params->cursor.imageSpecified = NV_TRUE; 2459 2460 if (requestedConfig->surface != NULL) { 2461 params->cursor.image.surfaceHandle[NVKMS_LEFT] = 2462 requestedConfig->surface->hKmsHandle; 2463 } 2464 2465 params->cursor.image.cursorCompParams.colorKeySelect = 2466 NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE; 2467 params->cursor.image.cursorCompParams.blendingMode[1] = 2468 requestedConfig->compParams.compMode; 2469 params->cursor.image.cursorCompParams.surfaceAlpha = 2470 requestedConfig->compParams.surfaceAlpha; 2471 } 2472 2473 if (requestedConfig->flags.dstXYChanged || bFromKmsSetMode) { 2474 params->cursor.position.x = requestedConfig->dstX; 2475 params->cursor.position.y = requestedConfig->dstY; 2476 2477 params->cursor.positionSpecified = NV_TRUE; 2478 } 2479 } 2480 2481 static NvBool NvKmsKapiOverlayLayerConfigToKms( 2482 struct NvKmsKapiDevice *device, 2483 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig, 2484 const NvU32 layer, 2485 const NvU32 head, 2486 struct NvKmsFlipCommonParams *params, 2487 NvBool commit, 2488 NvBool bFromKmsSetMode) 2489 { 2490 NvBool ret = NV_FALSE; 2491 const struct NvKmsKapiLayerConfig *layerConfig = 2492 &layerRequestedConfig->config; 2493 2494 if (layerRequestedConfig->flags.surfaceChanged || bFromKmsSetMode) { 2495 params->layer[layer].syncObjects.specified = NV_TRUE; 2496 params->layer[layer].completionNotifier.specified = NV_TRUE; 2497 params->layer[layer].surface.specified = NV_TRUE; 2498 2499 if (layerConfig->surface != NULL) { 2500 params->layer[layer].surface.handle[NVKMS_LEFT] = 2501 layerConfig->surface->hKmsHandle; 2502 } 2503 2504 params->layer[layer].surface.rrParams = 2505 layerConfig->rrParams; 2506 2507 params->layer[layer].compositionParams.val.colorKeySelect = 2508 NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE; 2509 params->layer[layer].compositionParams.val.blendingMode[1] = 2510 layerConfig->compParams.compMode; 2511 params->layer[layer].compositionParams.val.surfaceAlpha = 2512 layerConfig->compParams.surfaceAlpha; 2513 params->layer[layer].compositionParams.specified = TRUE; 2514 params->layer[layer].minPresentInterval = 2515 layerConfig->minPresentInterval; 2516 } 2517 2518 params->layer[layer].sizeIn.val.width = layerConfig->srcWidth; 2519 params->layer[layer].sizeIn.val.height = layerConfig->srcHeight; 2520 params->layer[layer].sizeIn.specified = TRUE; 2521 2522 params->layer[layer].sizeOut.val.width = layerConfig->dstWidth; 2523 params->layer[layer].sizeOut.val.height = layerConfig->dstHeight; 2524 params->layer[layer].sizeOut.specified = TRUE; 2525 2526 if (layerRequestedConfig->flags.dstXYChanged || bFromKmsSetMode) { 2527 
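        /* Reposition the overlay within the head's output; only needed when
         * the destination origin changed or on a full modeset. */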
params->layer[layer].outputPosition.val.x = layerConfig->dstX; 2528 params->layer[layer].outputPosition.val.y = layerConfig->dstY; 2529 2530 params->layer[layer].outputPosition.specified = NV_TRUE; 2531 } 2532 2533 params->layer[layer].colorspace.val = layerConfig->inputColorSpace; 2534 params->layer[layer].colorspace.specified = TRUE; 2535 2536 AssignHDRMetadataConfig(layerConfig, layer, params); 2537 2538 if (commit) { 2539 NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX( 2540 device->layerState[head][layer]. 2541 currFlipNotifierIndex); 2542 2543 if (layerConfig->surface != NULL) { 2544 NvU32 nextIndexOffsetInBytes = 2545 NVKMS_KAPI_NOTIFIER_OFFSET(head, 2546 layer, nextIndex); 2547 2548 params->layer[layer].completionNotifier.val. 2549 surface.surfaceHandle = device->notifier.hKmsHandle; 2550 2551 params->layer[layer].completionNotifier.val. 2552 surface.format = device->notifier.format; 2553 2554 params->layer[layer].completionNotifier.val. 2555 surface.offsetInWords = nextIndexOffsetInBytes >> 2; 2556 2557 params->layer[layer].completionNotifier.val.awaken = NV_TRUE; 2558 } 2559 2560 ret = AssignSyncObjectConfig(device, 2561 layerConfig, 2562 ¶ms->layer[layer].syncObjects.val); 2563 if (ret == NV_FALSE) { 2564 return ret; 2565 } 2566 2567 /* 2568 * XXX Should this be done after commit? 2569 * What if commit fail? 2570 * 2571 * It is not expected to fail any commit in KAPI layer, 2572 * only validated configuration is expected 2573 * to commit. 2574 */ 2575 device->layerState[head][layer]. 2576 currFlipNotifierIndex = nextIndex; 2577 } 2578 2579 return NV_TRUE; 2580 } 2581 2582 static NvBool NvKmsKapiPrimaryLayerConfigToKms( 2583 struct NvKmsKapiDevice *device, 2584 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig, 2585 const NvU32 head, 2586 struct NvKmsFlipCommonParams *params, 2587 NvBool commit, 2588 NvBool bFromKmsSetMode) 2589 { 2590 NvBool ret = NV_FALSE; 2591 const struct NvKmsKapiLayerConfig *layerConfig = 2592 &layerRequestedConfig->config; 2593 2594 NvBool changed = FALSE; 2595 2596 if (layerRequestedConfig->flags.surfaceChanged || bFromKmsSetMode) { 2597 params->layer[NVKMS_MAIN_LAYER].surface.specified = NV_TRUE; 2598 params->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = NV_TRUE; 2599 params->layer[NVKMS_MAIN_LAYER].syncObjects.specified = NV_TRUE; 2600 2601 2602 params->layer[NVKMS_MAIN_LAYER].minPresentInterval = 2603 layerConfig->minPresentInterval; 2604 params->layer[NVKMS_MAIN_LAYER].tearing = layerConfig->tearing; 2605 params->layer[NVKMS_MAIN_LAYER].surface.rrParams = layerConfig->rrParams; 2606 2607 if (layerConfig->surface != NULL) { 2608 params->layer[NVKMS_MAIN_LAYER].surface.handle[0] = 2609 layerConfig->surface->hKmsHandle; 2610 2611 if (params->layer[NVKMS_MAIN_LAYER].surface.handle[0] != 0) { 2612 params->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = layerConfig->srcWidth; 2613 params->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = layerConfig->srcHeight; 2614 params->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE; 2615 2616 params->layer[NVKMS_MAIN_LAYER].sizeOut.val.width = layerConfig->dstWidth; 2617 params->layer[NVKMS_MAIN_LAYER].sizeOut.val.height = layerConfig->dstHeight; 2618 params->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE; 2619 } 2620 } 2621 2622 changed = TRUE; 2623 } 2624 2625 if (layerRequestedConfig->flags.srcXYChanged || bFromKmsSetMode) { 2626 params->viewPortIn.point.x = layerConfig->srcX; 2627 params->viewPortIn.point.y = layerConfig->srcY; 2628 params->viewPortIn.specified = NV_TRUE; 2629 2630 changed = TRUE; 
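        /*
         * Note: for the primary layer the source offset is applied through
         * the head's viewPortIn above rather than a per-layer parameter.
         */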
2631 } 2632 2633 params->layer[NVKMS_MAIN_LAYER].colorspace.val = layerConfig->inputColorSpace; 2634 params->layer[NVKMS_MAIN_LAYER].colorspace.specified = TRUE; 2635 2636 AssignHDRMetadataConfig(layerConfig, NVKMS_MAIN_LAYER, params); 2637 2638 if (commit && changed) { 2639 NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX( 2640 device->layerState[head][NVKMS_MAIN_LAYER]. 2641 currFlipNotifierIndex); 2642 2643 if (layerConfig->surface != NULL) { 2644 NvU32 nextIndexOffsetInBytes = 2645 NVKMS_KAPI_NOTIFIER_OFFSET(head, 2646 NVKMS_MAIN_LAYER, nextIndex); 2647 2648 params->layer[NVKMS_MAIN_LAYER].completionNotifier. 2649 val.surface.surfaceHandle = device->notifier.hKmsHandle; 2650 2651 params->layer[NVKMS_MAIN_LAYER].completionNotifier. 2652 val.surface.format = device->notifier.format; 2653 2654 params->layer[NVKMS_MAIN_LAYER].completionNotifier. 2655 val.surface.offsetInWords = nextIndexOffsetInBytes >> 2; 2656 2657 params->layer[NVKMS_MAIN_LAYER].completionNotifier.val.awaken = NV_TRUE; 2658 } 2659 2660 ret = AssignSyncObjectConfig(device, 2661 layerConfig, 2662 ¶ms->layer[NVKMS_MAIN_LAYER].syncObjects.val); 2663 if (ret == NV_FALSE) { 2664 return ret; 2665 } 2666 2667 /* 2668 * XXX Should this be done after commit? 2669 * What if commit fail? 2670 * 2671 * It is not expected to fail any commit in KAPI layer, 2672 * only validated configuration is expected 2673 * to commit. 2674 */ 2675 device->layerState[head][NVKMS_MAIN_LAYER]. 2676 currFlipNotifierIndex = nextIndex; 2677 } 2678 2679 return NV_TRUE; 2680 } 2681 2682 static NvBool NvKmsKapiLayerConfigToKms( 2683 struct NvKmsKapiDevice *device, 2684 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig, 2685 const NvU32 layer, 2686 const NvU32 head, 2687 struct NvKmsFlipCommonParams *params, 2688 NvBool commit, 2689 NvBool bFromKmsSetMode) 2690 { 2691 if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX) { 2692 return NvKmsKapiPrimaryLayerConfigToKms(device, 2693 layerRequestedConfig, 2694 head, 2695 params, 2696 commit, 2697 bFromKmsSetMode); 2698 2699 } 2700 2701 return NvKmsKapiOverlayLayerConfigToKms(device, 2702 layerRequestedConfig, 2703 layer, 2704 head, 2705 params, 2706 commit, 2707 bFromKmsSetMode); 2708 } 2709 2710 static NvBool GetOutputTransferFunction( 2711 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig, 2712 enum NvKmsOutputTf *tf) 2713 { 2714 NvBool found = NV_FALSE; 2715 NvU32 layer; 2716 2717 *tf = NVKMS_OUTPUT_TF_NONE; 2718 2719 for (layer = 0; 2720 layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); 2721 layer++) { 2722 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig = 2723 &headRequestedConfig->layerRequestedConfig[layer]; 2724 const struct NvKmsKapiLayerConfig *layerConfig = 2725 &layerRequestedConfig->config; 2726 2727 if (layerConfig->hdrMetadataSpecified) { 2728 if (!found) { 2729 *tf = layerConfig->tf; 2730 found = NV_TRUE; 2731 } else if (*tf != layerConfig->tf) { 2732 nvKmsKapiLogDebug( 2733 "Output transfer function should be the same for all layers on a head"); 2734 return NV_FALSE; 2735 } 2736 } 2737 } 2738 2739 return NV_TRUE; 2740 } 2741 2742 /* 2743 * Helper function to convert NvKmsKapiRequestedModeSetConfig 2744 * to NvKmsSetModeParams. 
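 *
 * Heads that are not selected in requestedConfig->headsMask, or that have
 * no displays attached, are skipped and left zero-initialized in the
 * request.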
2745 */ 2746 static NvBool NvKmsKapiRequestedModeSetConfigToKms( 2747 struct NvKmsKapiDevice *device, 2748 const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, 2749 struct NvKmsSetModeParams *params, 2750 NvBool commit) 2751 { 2752 NvU32 dispIdx = device->dispIdx; 2753 NvU32 head; 2754 2755 nvkms_memset(params, 0, sizeof(*params)); 2756 2757 params->request.commit = commit; 2758 params->request.deviceHandle = device->hKmsDevice; 2759 params->request.requestedDispsBitMask = 1 << dispIdx; 2760 2761 for (head = 0; 2762 head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) { 2763 2764 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = 2765 &requestedConfig->headRequestedConfig[head]; 2766 const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = 2767 &headRequestedConfig->modeSetConfig; 2768 struct NvKmsSetModeOneHeadRequest *paramsHead; 2769 enum NvKmsOutputTf tf; 2770 NvU32 layer; 2771 NvU32 i; 2772 2773 if ((requestedConfig->headsMask & (1 << head)) == 0x0) { 2774 continue; 2775 } 2776 2777 params->request.disp[dispIdx].requestedHeadsBitMask |= 1 << head; 2778 2779 if (headModeSetConfig->numDisplays == 0) { 2780 continue; 2781 } 2782 2783 if (params->request.commit && !headModeSetConfig->bActive) { 2784 continue; 2785 } 2786 2787 paramsHead = ¶ms->request.disp[dispIdx].head[head]; 2788 2789 InitNvKmsModeValidationParams(device, 2790 ¶msHead->modeValidationParams); 2791 2792 for (i = 0; i < headModeSetConfig->numDisplays; i++) { 2793 paramsHead->dpyIdList = nvAddDpyIdToDpyIdList( 2794 nvNvU32ToDpyId(headModeSetConfig->displays[i]), 2795 paramsHead->dpyIdList); 2796 } 2797 2798 NvKmsKapiDisplayModeToKapi(&headModeSetConfig->mode, ¶msHead->mode); 2799 2800 NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig, 2801 ¶msHead->flip, 2802 NV_TRUE /* bFromKmsSetMode */); 2803 for (layer = 0; 2804 layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); 2805 layer++) { 2806 2807 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig = 2808 &headRequestedConfig->layerRequestedConfig[layer]; 2809 2810 if (!NvKmsKapiLayerConfigToKms(device, 2811 layerRequestedConfig, 2812 layer, 2813 head, 2814 ¶msHead->flip, 2815 commit, 2816 NV_TRUE /* bFromKmsSetMode */)) { 2817 return NV_FALSE; 2818 } 2819 } 2820 2821 if (!GetOutputTransferFunction(headRequestedConfig, &tf)) { 2822 return NV_FALSE; 2823 } 2824 2825 paramsHead->flip.tf.val = tf; 2826 paramsHead->flip.tf.specified = NV_TRUE; 2827 2828 paramsHead->viewPortSizeIn.width = 2829 headModeSetConfig->mode.timings.hVisible; 2830 paramsHead->viewPortSizeIn.height = 2831 headModeSetConfig->mode.timings.vVisible; 2832 2833 if (device->caps.requiresVrrSemaphores) { 2834 paramsHead->allowGsync = NV_FALSE; 2835 paramsHead->allowAdaptiveSync = NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED; 2836 } else { 2837 paramsHead->allowGsync = NV_TRUE; 2838 paramsHead->allowAdaptiveSync = NVKMS_ALLOW_ADAPTIVE_SYNC_ALL; 2839 } 2840 } 2841 2842 return NV_TRUE; 2843 } 2844 2845 2846 static NvBool KmsSetMode( 2847 struct NvKmsKapiDevice *device, 2848 const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, 2849 const NvBool commit) 2850 { 2851 struct NvKmsSetModeParams *params = NULL; 2852 NvBool status = NV_FALSE; 2853 2854 params = nvKmsKapiCalloc(1, sizeof(*params)); 2855 2856 if (params == NULL) { 2857 goto done; 2858 } 2859 2860 if (!NvKmsKapiRequestedModeSetConfigToKms(device, 2861 requestedConfig, 2862 params, 2863 commit)) { 2864 goto done; 2865 } 2866 2867 status = nvkms_ioctl_from_kapi(device->pKmsOpen, 2868 
NVKMS_IOCTL_SET_MODE, 2869 params, sizeof(*params)); 2870 2871 if (!status) { 2872 nvKmsKapiLogDeviceDebug( 2873 device, 2874 "NVKMS_IOCTL_SET_MODE ioctl failed"); 2875 goto done; 2876 } 2877 2878 if (params->reply.status != NVKMS_SET_MODE_STATUS_SUCCESS) 2879 { 2880 int i; 2881 2882 nvKmsKapiLogDeviceDebug( 2883 device, 2884 "NVKMS_IOCTL_SET_MODE failed! Status:\n"); 2885 2886 nvKmsKapiLogDeviceDebug( 2887 device, 2888 " top-level status: %d\n", params->reply.status); 2889 2890 nvKmsKapiLogDeviceDebug( 2891 device, 2892 " disp0 status: %d\n", params->reply.disp[0].status); 2893 2894 for (i = 0; i < ARRAY_LEN(params->reply.disp[0].head); i++) 2895 { 2896 nvKmsKapiLogDeviceDebug( 2897 device, 2898 " head%d status: %d\n", 2899 i, params->reply.disp[0].head[i].status); 2900 } 2901 2902 status = NV_FALSE; 2903 } 2904 2905 done: 2906 2907 if (params != NULL) { 2908 nvKmsKapiFree(params); 2909 } 2910 2911 return status; 2912 } 2913 2914 static NvBool IsHeadConfigValid( 2915 const struct NvKmsFlipParams *params, 2916 const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, 2917 const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig, 2918 NvU32 head) 2919 { 2920 if ((requestedConfig->headsMask & (1 << head)) == 0x0) { 2921 return NV_FALSE; 2922 } 2923 2924 if (headModeSetConfig->numDisplays == 0) { 2925 return NV_FALSE; 2926 } 2927 2928 if (params->request.commit && !headModeSetConfig->bActive) { 2929 return NV_FALSE; 2930 } 2931 return NV_TRUE; 2932 } 2933 2934 static NvBool KmsFlip( 2935 struct NvKmsKapiDevice *device, 2936 const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, 2937 struct NvKmsKapiModeSetReplyConfig *replyConfig, 2938 const NvBool commit) 2939 { 2940 struct NvKmsFlipParams *params = NULL; 2941 struct NvKmsFlipRequestOneHead *pFlipHead = NULL; 2942 NvBool status = NV_TRUE; 2943 NvU32 i, head; 2944 2945 /* Allocate space for the params structure, plus space for each possible 2946 * head. */ 2947 params = nvKmsKapiCalloc(1, 2948 sizeof(*params) + sizeof(pFlipHead[0]) * NVKMS_KAPI_MAX_HEADS); 2949 2950 if (params == NULL) { 2951 return NV_FALSE; 2952 } 2953 2954 /* The flipHead array was allocated in the same block above. 
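 * Its address is passed to NVKMS in the request as an NvU64 via the
 * nvKmsPointerToNvU64() call below.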
*/ 2955 pFlipHead = (struct NvKmsFlipRequestOneHead *)(params + 1); 2956 2957 params->request.deviceHandle = device->hKmsDevice; 2958 params->request.commit = commit; 2959 params->request.allowVrr = NV_FALSE; 2960 params->request.pFlipHead = nvKmsPointerToNvU64(pFlipHead); 2961 params->request.numFlipHeads = 0; 2962 for (head = 0; 2963 head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) { 2964 2965 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = 2966 &requestedConfig->headRequestedConfig[head]; 2967 const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = 2968 &headRequestedConfig->modeSetConfig; 2969 enum NvKmsOutputTf tf; 2970 2971 struct NvKmsFlipCommonParams *flipParams = NULL; 2972 2973 NvU32 layer; 2974 2975 if (!IsHeadConfigValid(params, requestedConfig, headModeSetConfig, head)) { 2976 continue; 2977 } 2978 2979 pFlipHead[params->request.numFlipHeads].sd = 0; 2980 pFlipHead[params->request.numFlipHeads].head = head; 2981 flipParams = &pFlipHead[params->request.numFlipHeads].flip; 2982 params->request.numFlipHeads++; 2983 2984 NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig, 2985 flipParams, 2986 NV_FALSE /* bFromKmsSetMode */); 2987 2988 for (layer = 0; 2989 layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); 2990 layer++) { 2991 2992 const struct NvKmsKapiLayerRequestedConfig 2993 *layerRequestedConfig = 2994 &headRequestedConfig->layerRequestedConfig[layer]; 2995 2996 status = NvKmsKapiLayerConfigToKms(device, 2997 layerRequestedConfig, 2998 layer, 2999 head, 3000 flipParams, 3001 commit, 3002 NV_FALSE /* bFromKmsSetMode */); 3003 3004 if (status != NV_TRUE) { 3005 goto done; 3006 } 3007 } 3008 3009 status = GetOutputTransferFunction(headRequestedConfig, &tf); 3010 if (status != NV_TRUE) { 3011 goto done; 3012 } 3013 3014 flipParams->tf.val = tf; 3015 flipParams->tf.specified = NV_TRUE; 3016 3017 if (headModeSetConfig->vrrEnabled) { 3018 params->request.allowVrr = NV_TRUE; 3019 } 3020 } 3021 3022 if (params->request.numFlipHeads == 0) { 3023 goto done; 3024 } 3025 3026 status = nvkms_ioctl_from_kapi(device->pKmsOpen, 3027 NVKMS_IOCTL_FLIP, 3028 params, sizeof(*params)); 3029 3030 if (!status) { 3031 nvKmsKapiLogDeviceDebug( 3032 device, 3033 "NVKMS_IOCTL_FLIP ioctl failed"); 3034 goto done; 3035 } 3036 3037 if (!commit) { 3038 goto done; 3039 } 3040 3041 /*! fill back flip reply */ 3042 for (i = 0; i < params->request.numFlipHeads; i++) { 3043 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = 3044 &requestedConfig->headRequestedConfig[pFlipHead[i].head]; 3045 3046 struct NvKmsKapiHeadReplyConfig *headReplyConfig = 3047 &replyConfig->headReplyConfig[pFlipHead[i].head]; 3048 3049 const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = 3050 &headRequestedConfig->modeSetConfig; 3051 3052 struct NvKmsFlipCommonReplyOneHead *flipParams = ¶ms->reply.flipHead[i]; 3053 3054 NvU32 layer; 3055 3056 if (!IsHeadConfigValid(params, requestedConfig, headModeSetConfig, pFlipHead[i].head)) { 3057 continue; 3058 } 3059 3060 for (layer = 0; 3061 layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); 3062 layer++) { 3063 3064 const struct NvKmsKapiLayerConfig *layerRequestedConfig = 3065 &headRequestedConfig->layerRequestedConfig[layer].config; 3066 3067 struct NvKmsKapiLayerReplyConfig *layerReplyConfig = 3068 &headReplyConfig->layerReplyConfig[layer]; 3069 3070 /*! 
initialize explicitly to -1 as 0 is valid file descriptor */ 3071 layerReplyConfig->postSyncptFd = -1; 3072 if (layerRequestedConfig->syncptParams.postSyncptRequested) { 3073 layerReplyConfig->postSyncptFd = 3074 flipParams->layer[layer].postSyncpt.u.fd; 3075 } 3076 } 3077 } 3078 3079 done: 3080 3081 nvKmsKapiFree(params); 3082 3083 return status; 3084 } 3085 3086 static NvBool ApplyModeSetConfig( 3087 struct NvKmsKapiDevice *device, 3088 const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, 3089 struct NvKmsKapiModeSetReplyConfig *replyConfig, 3090 const NvBool commit) 3091 { 3092 NvBool bRequiredModeset = NV_FALSE; 3093 NvU32 head; 3094 3095 if (device == NULL || requestedConfig == NULL) { 3096 return NV_FALSE; 3097 } 3098 3099 for (head = 0; 3100 head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) { 3101 3102 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = 3103 &requestedConfig->headRequestedConfig[head]; 3104 const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = 3105 &headRequestedConfig->modeSetConfig; 3106 3107 if ((requestedConfig->headsMask & (1 << head)) == 0x0) { 3108 continue; 3109 } 3110 3111 bRequiredModeset = 3112 headRequestedConfig->flags.activeChanged || 3113 headRequestedConfig->flags.displaysChanged || 3114 headRequestedConfig->flags.modeChanged; 3115 3116 /* 3117 * NVKMS flip ioctl could not validate flip configuration for an 3118 * inactive head, therefore use modeset ioctl if configuration contain 3119 * any such head. 3120 */ 3121 if (!commit && 3122 headModeSetConfig->numDisplays != 0 && !headModeSetConfig->bActive) { 3123 bRequiredModeset = TRUE; 3124 } 3125 3126 if (bRequiredModeset) { 3127 break; 3128 } 3129 } 3130 3131 if (bRequiredModeset) { 3132 return KmsSetMode(device, requestedConfig, commit); 3133 } 3134 3135 return KmsFlip(device, requestedConfig, replyConfig, commit); 3136 } 3137 3138 void nvKmsKapiHandleEventQueueChange 3139 ( 3140 struct NvKmsKapiDevice *device 3141 ) 3142 { 3143 if (device == NULL) { 3144 return; 3145 } 3146 3147 /* 3148 * If the callback is NULL, event interest declaration should be 3149 * rejected, and no events would be reported. 3150 */ 3151 nvAssert(device->eventCallback != NULL); 3152 3153 do 3154 { 3155 struct NvKmsGetNextEventParams kmsEventParams = { }; 3156 struct NvKmsKapiEvent kapiEvent = { }; 3157 NvBool err = NV_FALSE; 3158 3159 if (!nvkms_ioctl_from_kapi(device->pKmsOpen, 3160 NVKMS_IOCTL_GET_NEXT_EVENT, 3161 &kmsEventParams, sizeof(kmsEventParams))) { 3162 break; 3163 } 3164 3165 if (!kmsEventParams.reply.valid) { 3166 break; 3167 } 3168 3169 kapiEvent.type = kmsEventParams.reply.event.eventType; 3170 3171 kapiEvent.device = device; 3172 kapiEvent.privateData = device->privateData; 3173 3174 switch (kmsEventParams.reply.event.eventType) { 3175 case NVKMS_EVENT_TYPE_DPY_CHANGED: 3176 kapiEvent.u.displayChanged.display = 3177 nvDpyIdToNvU32(kmsEventParams. 3178 reply.event.u.dpyChanged.dpyId); 3179 break; 3180 case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED: 3181 kapiEvent.u.dynamicDisplayConnected.display = 3182 nvDpyIdToNvU32(kmsEventParams. 
3183 reply.event.u.dynamicDpyConnected.dpyId); 3184 break; 3185 case NVKMS_EVENT_TYPE_FLIP_OCCURRED: 3186 kapiEvent.u.flipOccurred.head = 3187 kmsEventParams.reply.event.u.flipOccurred.head; 3188 kapiEvent.u.flipOccurred.layer = 3189 kmsEventParams.reply.event.u.flipOccurred.layer; 3190 break; 3191 default: 3192 continue; 3193 } 3194 3195 if (err) { 3196 nvKmsKapiLogDeviceDebug( 3197 device, 3198 "Error in conversion from " 3199 "NvKmsGetNextEventParams to NvKmsKapiEvent"); 3200 continue; 3201 } 3202 3203 device->eventCallback(&kapiEvent); 3204 3205 } while(1); 3206 } 3207 3208 /* 3209 * Helper function to convert NvKmsQueryDpyCRC32Reply to NvKmsKapiDpyCRC32. 3210 */ 3211 static void NvKmsCrcsToKapi 3212 ( 3213 const struct NvKmsQueryDpyCRC32Reply *crcs, 3214 struct NvKmsKapiCrcs *kmsCrcs 3215 ) 3216 { 3217 kmsCrcs->outputCrc32.value = crcs->outputCrc32.value; 3218 kmsCrcs->outputCrc32.supported = crcs->outputCrc32.supported; 3219 kmsCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value; 3220 kmsCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported; 3221 kmsCrcs->compositorCrc32.value = crcs->compositorCrc32.value; 3222 kmsCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported; 3223 } 3224 3225 static NvBool GetCRC32 3226 ( 3227 struct NvKmsKapiDevice *device, 3228 NvU32 head, 3229 struct NvKmsKapiCrcs *crc32 3230 ) 3231 { 3232 struct NvKmsQueryDpyCRC32Params params = { }; 3233 NvBool status; 3234 3235 if (device->hKmsDevice == 0x0) { 3236 return NV_TRUE; 3237 } 3238 3239 params.request.deviceHandle = device->hKmsDevice; 3240 params.request.dispHandle = device->hKmsDisp; 3241 params.request.head = head; 3242 3243 status = nvkms_ioctl_from_kapi(device->pKmsOpen, 3244 NVKMS_IOCTL_QUERY_DPY_CRC32, 3245 ¶ms, sizeof(params)); 3246 3247 if (!status) { 3248 nvKmsKapiLogDeviceDebug(device, "NVKMS QueryDpyCRC32Data failed."); 3249 return NV_FALSE; 3250 } 3251 NvKmsCrcsToKapi(¶ms.reply, crc32); 3252 return NV_TRUE; 3253 } 3254 3255 NvBool nvKmsKapiGetFunctionsTableInternal 3256 ( 3257 struct NvKmsKapiFunctionsTable *funcsTable 3258 ) 3259 { 3260 if (funcsTable == NULL) { 3261 return NV_FALSE; 3262 } 3263 3264 if (nvkms_strcmp(funcsTable->versionString, NV_VERSION_STRING) != 0) { 3265 funcsTable->versionString = NV_VERSION_STRING; 3266 return NV_FALSE; 3267 } 3268 3269 funcsTable->systemInfo.bAllowWriteCombining = 3270 nvkms_allow_write_combining(); 3271 3272 funcsTable->enumerateGpus = EnumerateGpus; 3273 3274 funcsTable->allocateDevice = AllocateDevice; 3275 funcsTable->freeDevice = FreeDevice; 3276 3277 funcsTable->grabOwnership = GrabOwnership; 3278 funcsTable->releaseOwnership = ReleaseOwnership; 3279 3280 funcsTable->grantPermissions = GrantPermissions; 3281 funcsTable->revokePermissions = RevokePermissions; 3282 3283 funcsTable->declareEventInterest = DeclareEventInterest; 3284 3285 funcsTable->getDeviceResourcesInfo = GetDeviceResourcesInfo; 3286 funcsTable->getDisplays = GetDisplays; 3287 funcsTable->getConnectorInfo = GetConnectorInfo; 3288 3289 funcsTable->getStaticDisplayInfo = GetStaticDisplayInfo; 3290 funcsTable->getDynamicDisplayInfo = GetDynamicDisplayInfo; 3291 3292 funcsTable->allocateVideoMemory = AllocateVideoMemory; 3293 funcsTable->allocateSystemMemory = AllocateSystemMemory; 3294 funcsTable->importMemory = ImportMemory; 3295 funcsTable->dupMemory = DupMemory; 3296 funcsTable->exportMemory = ExportMemory; 3297 funcsTable->freeMemory = FreeMemory; 3298 funcsTable->getSystemMemoryHandleFromSgt = GetSystemMemoryHandleFromSgt; 3299 
funcsTable->getSystemMemoryHandleFromDmaBuf = 3300 GetSystemMemoryHandleFromDmaBuf; 3301 3302 funcsTable->mapMemory = MapMemory; 3303 funcsTable->unmapMemory = UnmapMemory; 3304 3305 funcsTable->createSurface = CreateSurface; 3306 funcsTable->destroySurface = DestroySurface; 3307 3308 funcsTable->getDisplayMode = GetDisplayMode; 3309 funcsTable->validateDisplayMode = ValidateDisplayMode; 3310 3311 funcsTable->applyModeSetConfig = ApplyModeSetConfig; 3312 3313 funcsTable->allocateChannelEvent = nvKmsKapiAllocateChannelEvent; 3314 funcsTable->freeChannelEvent = nvKmsKapiFreeChannelEvent; 3315 3316 funcsTable->getCRC32 = GetCRC32; 3317 3318 funcsTable->getMemoryPages = GetMemoryPages; 3319 funcsTable->freeMemoryPages = FreeMemoryPages; 3320 3321 funcsTable->isMemoryValidForDisplay = IsMemoryValidForDisplay; 3322 3323 return NV_TRUE; 3324 } 3325
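/*
 * Illustrative sketch (not part of this file): a hypothetical caller of the
 * entry point above pre-fills versionString with its own NV_VERSION_STRING
 * and checks the return value.  On a mismatch the table is not populated and
 * versionString is overwritten with the version of this module, after which
 * the caller can report the discrepancy.
 *
 *     struct NvKmsKapiFunctionsTable funcs = {
 *         .versionString = NV_VERSION_STRING,
 *     };
 *
 *     if (!nvKmsKapiGetFunctionsTableInternal(&funcs)) {
 *         // Version mismatch: funcs.versionString now reports the
 *         // nvidia-modeset version actually loaded.
 *         return;
 *     }
 *
 *     // On success the callbacks (funcs.enumerateGpus, funcs.allocateDevice,
 *     // funcs.createSurface, funcs.applyModeSetConfig, ...) are usable.
 */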