/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


//***************************** Module Header **********************************
//
// This code is linked into the resource manager proper.  It receives the
// ioctl from the resource manager's customer, unbundles the args and
// calls the correct resman routines.
31 // 32 //****************************************************************************** 33 34 #include <core/prelude.h> 35 #include <core/locks.h> 36 #include <nv.h> 37 #include <nv_escape.h> 38 #include <osapi.h> 39 #include <rmapi/exports.h> 40 #include <nv-unix-nvos-params-wrappers.h> 41 42 #include <nvos.h> 43 #include <class/cl0000.h> // NV01_ROOT 44 #include <class/cl0001.h> // NV01_ROOT_NON_PRIV 45 #include <class/cl0005.h> // NV01_EVENT 46 #include <class/cl003e.h> // NV01_MEMORY_SYSTEM 47 #include <class/cl0071.h> // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR 48 49 #include "rmapi/client_resource.h" 50 #include "nvlog/nvlog.h" 51 #include <nv-ioctl-lockless-diag.h> 52 53 #include <ctrl/ctrl00fd.h> 54 55 #include <ctrl/ctrl00e0.h> 56 57 #define NV_CTL_DEVICE_ONLY(nv) \ 58 { \ 59 if (((nv)->flags & NV_FLAG_CONTROL) == 0) \ 60 { \ 61 rmStatus = NV_ERR_INVALID_ARGUMENT; \ 62 goto done; \ 63 } \ 64 } 65 66 #define NV_ACTUAL_DEVICE_ONLY(nv) \ 67 { \ 68 if (((nv)->flags & NV_FLAG_CONTROL) != 0) \ 69 { \ 70 rmStatus = NV_ERR_INVALID_ARGUMENT; \ 71 goto done; \ 72 } \ 73 } 74 75 static NV_STATUS RmGetDeviceFd(NVOS54_PARAMETERS *pApi, NvS32 *pFd, 76 NvBool *pSkipDeviceRef) 77 { 78 RMAPI_PARAM_COPY paramCopy; 79 void *pKernelParams; 80 NvU32 paramSize; 81 NV_STATUS status; 82 83 *pFd = -1; 84 *pSkipDeviceRef = NV_TRUE; 85 86 switch(pApi->cmd) 87 { 88 case NV00FD_CTRL_CMD_ATTACH_GPU: 89 paramSize = sizeof(NV00FD_CTRL_ATTACH_GPU_PARAMS); 90 break; 91 case NV00E0_CTRL_CMD_EXPORT_MEM: 92 paramSize = sizeof(NV00E0_CTRL_EXPORT_MEM_PARAMS); 93 break; 94 default: 95 return NV_OK; 96 } 97 98 RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, pApi->params, paramSize, 1); 99 100 status = rmapiParamsAcquire(¶mCopy, NV_TRUE); 101 if (status != NV_OK) 102 return status; 103 104 switch(pApi->cmd) 105 { 106 case NV00FD_CTRL_CMD_ATTACH_GPU: 107 { 108 NV00FD_CTRL_ATTACH_GPU_PARAMS *pAttachGpuParams = pKernelParams; 109 110 *pSkipDeviceRef = NV_FALSE; 111 *pFd = 
(NvS32)pAttachGpuParams->devDescriptor; 112 } 113 break; 114 case NV00E0_CTRL_CMD_EXPORT_MEM: 115 { 116 NV00E0_CTRL_EXPORT_MEM_PARAMS *pExportMemParams = pKernelParams; 117 118 // If hParent is client, no need to reference device. 119 *pSkipDeviceRef = (pExportMemParams->hParent == pApi->hClient); 120 *pFd = (NvS32)pExportMemParams->devDescriptor; 121 } 122 break; 123 default: 124 NV_ASSERT(0); 125 break; 126 } 127 128 NV_ASSERT(rmapiParamsRelease(¶mCopy) == NV_OK); 129 130 return status; 131 } 132 133 // Only return errors through pApi->status 134 static void RmCreateOsDescriptor(NVOS32_PARAMETERS *pApi, API_SECURITY_INFO secInfo) 135 { 136 NV_STATUS rmStatus; 137 NvBool writable; 138 NvU32 flags = 0; 139 NvU64 allocSize, pageCount, *pPteArray = NULL; 140 void *pDescriptor, *pPageArray = NULL; 141 142 pDescriptor = NvP64_VALUE(pApi->data.AllocOsDesc.descriptor); 143 if (((NvUPtr)pDescriptor & ~os_page_mask) != 0) 144 { 145 rmStatus = NV_ERR_NOT_SUPPORTED; 146 goto done; 147 } 148 149 // Check to prevent an NvU64 overflow 150 if ((pApi->data.AllocOsDesc.limit + 1) == 0) 151 { 152 rmStatus = NV_ERR_INVALID_LIMIT; 153 goto done; 154 } 155 156 allocSize = (pApi->data.AllocOsDesc.limit + 1); 157 pageCount = (1 + ((allocSize - 1) / os_page_size)); 158 159 writable = FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_WRITE, pApi->data.AllocOsDesc.attr2); 160 161 flags = FLD_SET_DRF_NUM(_LOCK_USER_PAGES, _FLAGS, _WRITE, writable, flags); 162 rmStatus = os_lock_user_pages(pDescriptor, pageCount, &pPageArray, flags); 163 if (rmStatus == NV_OK) 164 { 165 pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray; 166 pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY; 167 } 168 else if (rmStatus == NV_ERR_INVALID_ADDRESS) 169 { 170 rmStatus = os_lookup_user_io_memory(pDescriptor, pageCount, 171 &pPteArray, &pPageArray); 172 if (rmStatus == NV_OK) 173 { 174 if (pPageArray != NULL) 175 { 176 pApi->data.AllocOsDesc.descriptor = 
(NvP64)(NvUPtr)pPageArray; 177 pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY; 178 } 179 else if (pPteArray != NULL) 180 { 181 pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPteArray; 182 pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY; 183 } 184 else 185 { 186 NV_ASSERT_FAILED("unknown memory import type"); 187 rmStatus = NV_ERR_NOT_SUPPORTED; 188 } 189 } 190 } 191 if (rmStatus != NV_OK) 192 goto done; 193 194 Nv04VidHeapControlWithSecInfo(pApi, secInfo); 195 196 if (pApi->status != NV_OK) 197 { 198 switch (pApi->data.AllocOsDesc.descriptorType) 199 { 200 default: 201 break; 202 case NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY: 203 os_unlock_user_pages(pageCount, pPageArray); 204 break; 205 } 206 } 207 208 done: 209 if (rmStatus != NV_OK) 210 pApi->status = rmStatus; 211 } 212 213 // Only return errors through pApi->status 214 static void RmAllocOsDescriptor(NVOS02_PARAMETERS *pApi, API_SECURITY_INFO secInfo) 215 { 216 NV_STATUS rmStatus = NV_OK; 217 NvU32 flags, attr, attr2; 218 NVOS32_PARAMETERS *pVidHeapParams; 219 220 if (!FLD_TEST_DRF(OS02, _FLAGS, _LOCATION, _PCI, pApi->flags) || 221 !FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, pApi->flags)) 222 { 223 rmStatus = NV_ERR_INVALID_FLAGS; 224 goto done; 225 } 226 227 attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); 228 229 if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, pApi->flags) || 230 FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, pApi->flags)) 231 { 232 attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, attr); 233 } 234 else if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, pApi->flags)) 235 attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, attr); 236 else { 237 rmStatus = NV_ERR_INVALID_FLAGS; 238 goto done; 239 } 240 241 if (FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, pApi->flags)) 242 attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr); 243 else 244 attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, 
_NONCONTIGUOUS, attr); 245 246 if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, pApi->flags)) 247 attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES); 248 else 249 attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); 250 251 pVidHeapParams = portMemAllocNonPaged(sizeof(NVOS32_PARAMETERS)); 252 if (pVidHeapParams == NULL) 253 { 254 rmStatus = NV_ERR_NO_MEMORY; 255 goto done; 256 } 257 portMemSet(pVidHeapParams, 0, sizeof(NVOS32_PARAMETERS)); 258 259 pVidHeapParams->hRoot = pApi->hRoot; 260 pVidHeapParams->hObjectParent = pApi->hObjectParent; 261 pVidHeapParams->function = NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR; 262 263 flags = (NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED | 264 NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED); 265 266 if (DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags)) 267 attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2); 268 269 // Currently CPU-RO memory implies GPU-RO as well 270 if (DRF_VAL(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, pApi->flags) || 271 DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags)) 272 attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2); 273 274 pVidHeapParams->data.AllocOsDesc.hMemory = pApi->hObjectNew; 275 pVidHeapParams->data.AllocOsDesc.flags = flags; 276 pVidHeapParams->data.AllocOsDesc.attr = attr; 277 pVidHeapParams->data.AllocOsDesc.attr2 = attr2; 278 pVidHeapParams->data.AllocOsDesc.descriptor = pApi->pMemory; 279 pVidHeapParams->data.AllocOsDesc.limit = pApi->limit; 280 pVidHeapParams->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS; 281 282 RmCreateOsDescriptor(pVidHeapParams, secInfo); 283 284 pApi->status = pVidHeapParams->status; 285 286 portMemFree(pVidHeapParams); 287 288 done: 289 if (rmStatus != NV_OK) 290 pApi->status = rmStatus; 291 } 292 293 ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hRoot) == NV_OFFSETOF(NVOS64_PARAMETERS, hRoot)); 294 ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectParent) == NV_OFFSETOF(NVOS64_PARAMETERS, 
hObjectParent)); 295 ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectNew) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectNew)); 296 ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hClass) == NV_OFFSETOF(NVOS64_PARAMETERS, hClass)); 297 ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, pAllocParms) == NV_OFFSETOF(NVOS64_PARAMETERS, pAllocParms)); 298 299 NV_STATUS RmIoctl( 300 nv_state_t *nv, 301 nv_file_private_t *nvfp, 302 NvU32 cmd, 303 void *data, 304 NvU32 dataSize 305 ) 306 { 307 NV_STATUS rmStatus = NV_ERR_GENERIC; 308 API_SECURITY_INFO secInfo = { }; 309 310 secInfo.privLevel = osIsAdministrator() ? RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER; 311 secInfo.paramLocation = PARAM_LOCATION_USER; 312 secInfo.pProcessToken = NULL; 313 secInfo.gpuOsInfo = NULL; 314 secInfo.clientOSInfo = nvfp->ctl_nvfp; 315 if (secInfo.clientOSInfo == NULL) 316 secInfo.clientOSInfo = nvfp; 317 318 switch (cmd) 319 { 320 case NV_ESC_RM_ALLOC_MEMORY: 321 { 322 nv_ioctl_nvos02_parameters_with_fd *pApi; 323 NVOS02_PARAMETERS *pParms; 324 325 pApi = data; 326 pParms = &pApi->params; 327 328 NV_ACTUAL_DEVICE_ONLY(nv); 329 330 if (dataSize != sizeof(nv_ioctl_nvos02_parameters_with_fd)) 331 { 332 rmStatus = NV_ERR_INVALID_ARGUMENT; 333 goto done; 334 } 335 336 if (pParms->hClass == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) 337 RmAllocOsDescriptor(pParms, secInfo); 338 else 339 { 340 NvU32 flags = pParms->flags; 341 342 Nv01AllocMemoryWithSecInfo(pParms, secInfo); 343 344 // 345 // If the system memory is going to be mapped immediately, 346 // create the mmap context for it now. 
347 // 348 if ((pParms->hClass == NV01_MEMORY_SYSTEM) && 349 (!FLD_TEST_DRF(OS02, _FLAGS, _ALLOC, _NONE, flags)) && 350 (!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, flags)) && 351 (pParms->status == NV_OK)) 352 { 353 if (rm_create_mmap_context(pParms->hRoot, 354 pParms->hObjectParent, pParms->hObjectNew, 355 pParms->pMemory, pParms->limit + 1, 0, 356 NV_MEMORY_DEFAULT, 357 pApi->fd) != NV_OK) 358 { 359 NV_PRINTF(LEVEL_WARNING, 360 "could not create mmap context for %p\n", 361 NvP64_VALUE(pParms->pMemory)); 362 rmStatus = NV_ERR_INVALID_ARGUMENT; 363 goto done; 364 } 365 } 366 } 367 368 break; 369 } 370 371 case NV_ESC_RM_ALLOC_OBJECT: 372 { 373 NVOS05_PARAMETERS *pApi = data; 374 375 NV_CTL_DEVICE_ONLY(nv); 376 377 if (dataSize != sizeof(NVOS05_PARAMETERS)) 378 { 379 rmStatus = NV_ERR_INVALID_ARGUMENT; 380 goto done; 381 } 382 383 Nv01AllocObjectWithSecInfo(pApi, secInfo); 384 break; 385 } 386 387 case NV_ESC_RM_ALLOC: 388 { 389 NVOS21_PARAMETERS *pApi = data; 390 NVOS64_PARAMETERS *pApiAccess = data; 391 NvBool bAccessApi = (dataSize == sizeof(NVOS64_PARAMETERS)); 392 393 if ((dataSize != sizeof(NVOS21_PARAMETERS)) && 394 (dataSize != sizeof(NVOS64_PARAMETERS))) 395 { 396 rmStatus = NV_ERR_INVALID_ARGUMENT; 397 goto done; 398 } 399 400 switch (pApi->hClass) 401 { 402 case NV01_ROOT: 403 case NV01_ROOT_CLIENT: 404 case NV01_ROOT_NON_PRIV: 405 { 406 NV_CTL_DEVICE_ONLY(nv); 407 408 // Force userspace client allocations to be the _CLIENT class. 
409 pApi->hClass = NV01_ROOT_CLIENT; 410 break; 411 } 412 case NV01_EVENT: 413 case NV01_EVENT_OS_EVENT: 414 case NV01_EVENT_KERNEL_CALLBACK: 415 case NV01_EVENT_KERNEL_CALLBACK_EX: 416 { 417 break; 418 } 419 default: 420 { 421 NV_CTL_DEVICE_ONLY(nv); 422 break; 423 } 424 } 425 426 if (!bAccessApi) 427 { 428 Nv04AllocWithSecInfo(pApi, secInfo); 429 } 430 else 431 { 432 Nv04AllocWithAccessSecInfo(pApiAccess, secInfo); 433 } 434 435 break; 436 } 437 438 case NV_ESC_RM_FREE: 439 { 440 NVOS00_PARAMETERS *pApi = data; 441 442 NV_CTL_DEVICE_ONLY(nv); 443 444 if (dataSize != sizeof(NVOS00_PARAMETERS)) 445 { 446 rmStatus = NV_ERR_INVALID_ARGUMENT; 447 goto done; 448 } 449 450 Nv01FreeWithSecInfo(pApi, secInfo); 451 452 if (pApi->status == NV_OK && 453 pApi->hObjectOld == pApi->hRoot) 454 { 455 rm_client_free_os_events(pApi->hRoot); 456 } 457 458 break; 459 } 460 461 case NV_ESC_RM_VID_HEAP_CONTROL: 462 { 463 NVOS32_PARAMETERS *pApi = data; 464 465 NV_CTL_DEVICE_ONLY(nv); 466 467 if (dataSize != sizeof(NVOS32_PARAMETERS)) 468 { 469 rmStatus = NV_ERR_INVALID_ARGUMENT; 470 goto done; 471 } 472 473 if (pApi->function == NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR) 474 RmCreateOsDescriptor(pApi, secInfo); 475 else 476 Nv04VidHeapControlWithSecInfo(pApi, secInfo); 477 478 break; 479 } 480 481 case NV_ESC_RM_I2C_ACCESS: 482 { 483 NVOS_I2C_ACCESS_PARAMS *pApi = data; 484 485 NV_ACTUAL_DEVICE_ONLY(nv); 486 487 if (dataSize != sizeof(NVOS_I2C_ACCESS_PARAMS)) 488 { 489 rmStatus = NV_ERR_INVALID_ARGUMENT; 490 goto done; 491 } 492 493 Nv04I2CAccessWithSecInfo(pApi, secInfo); 494 break; 495 } 496 497 case NV_ESC_RM_IDLE_CHANNELS: 498 { 499 NVOS30_PARAMETERS *pApi = data; 500 501 NV_CTL_DEVICE_ONLY(nv); 502 503 if (dataSize != sizeof(NVOS30_PARAMETERS)) 504 { 505 rmStatus = NV_ERR_INVALID_ARGUMENT; 506 goto done; 507 } 508 509 Nv04IdleChannelsWithSecInfo(pApi, secInfo); 510 break; 511 } 512 513 case NV_ESC_RM_MAP_MEMORY: 514 { 515 nv_ioctl_nvos33_parameters_with_fd *pApi; 516 NVOS33_PARAMETERS 
*pParms; 517 518 pApi = data; 519 pParms = &pApi->params; 520 521 NV_CTL_DEVICE_ONLY(nv); 522 523 if (dataSize != sizeof(nv_ioctl_nvos33_parameters_with_fd)) 524 { 525 rmStatus = NV_ERR_INVALID_ARGUMENT; 526 goto done; 527 } 528 529 // Don't allow userspace to override the caching type 530 pParms->flags = FLD_SET_DRF(OS33, _FLAGS, _CACHING_TYPE, _DEFAULT, pParms->flags); 531 Nv04MapMemoryWithSecInfo(pParms, secInfo); 532 533 if (pParms->status == NV_OK) 534 { 535 pParms->status = rm_create_mmap_context(pParms->hClient, 536 pParms->hDevice, pParms->hMemory, 537 pParms->pLinearAddress, pParms->length, 538 pParms->offset, 539 DRF_VAL(OS33, _FLAGS, _CACHING_TYPE, pParms->flags), 540 pApi->fd); 541 if (pParms->status != NV_OK) 542 { 543 NVOS34_PARAMETERS params; 544 portMemSet(¶ms, 0, sizeof(NVOS34_PARAMETERS)); 545 params.hClient = pParms->hClient; 546 params.hDevice = pParms->hDevice; 547 params.hMemory = pParms->hMemory; 548 params.pLinearAddress = pParms->pLinearAddress; 549 params.flags = pParms->flags; 550 Nv04UnmapMemoryWithSecInfo(¶ms, secInfo); 551 } 552 } 553 break; 554 } 555 556 case NV_ESC_RM_UNMAP_MEMORY: 557 { 558 NVOS34_PARAMETERS *pApi = data; 559 560 NV_CTL_DEVICE_ONLY(nv); 561 562 if (dataSize != sizeof(NVOS34_PARAMETERS)) 563 { 564 rmStatus = NV_ERR_INVALID_ARGUMENT; 565 goto done; 566 } 567 568 Nv04UnmapMemoryWithSecInfo(pApi, secInfo); 569 break; 570 } 571 572 case NV_ESC_RM_ACCESS_REGISTRY: 573 { 574 NVOS38_PARAMETERS *pApi = data; 575 576 NV_CTL_DEVICE_ONLY(nv); 577 578 if (dataSize != sizeof(NVOS38_PARAMETERS)) 579 { 580 rmStatus = NV_ERR_INVALID_ARGUMENT; 581 goto done; 582 } 583 584 pApi->status = rm_access_registry(pApi->hClient, 585 pApi->hObject, 586 pApi->AccessType, 587 pApi->pDevNode, 588 pApi->DevNodeLength, 589 pApi->pParmStr, 590 pApi->ParmStrLength, 591 pApi->pBinaryData, 592 &pApi->BinaryDataLength, 593 &pApi->Data, 594 &pApi->Entry); 595 break; 596 } 597 598 case NV_ESC_RM_ALLOC_CONTEXT_DMA2: 599 { 600 NVOS39_PARAMETERS *pApi = 
data; 601 602 NV_CTL_DEVICE_ONLY(nv); 603 604 if (dataSize != sizeof(NVOS39_PARAMETERS)) 605 { 606 rmStatus = NV_ERR_INVALID_ARGUMENT; 607 goto done; 608 } 609 610 Nv04AllocContextDmaWithSecInfo(pApi, secInfo); 611 break; 612 } 613 614 case NV_ESC_RM_BIND_CONTEXT_DMA: 615 { 616 NVOS49_PARAMETERS *pApi = data; 617 618 NV_CTL_DEVICE_ONLY(nv); 619 620 if (dataSize != sizeof(NVOS49_PARAMETERS)) 621 { 622 rmStatus = NV_ERR_INVALID_ARGUMENT; 623 goto done; 624 } 625 626 Nv04BindContextDmaWithSecInfo(pApi, secInfo); 627 break; 628 } 629 630 case NV_ESC_RM_MAP_MEMORY_DMA: 631 { 632 NVOS46_PARAMETERS *pApi = data; 633 634 NV_CTL_DEVICE_ONLY(nv); 635 636 if (dataSize != sizeof(NVOS46_PARAMETERS)) 637 { 638 rmStatus = NV_ERR_INVALID_ARGUMENT; 639 goto done; 640 } 641 642 Nv04MapMemoryDmaWithSecInfo(pApi, secInfo); 643 break; 644 } 645 646 case NV_ESC_RM_UNMAP_MEMORY_DMA: 647 { 648 NVOS47_PARAMETERS *pApi = data; 649 650 NV_CTL_DEVICE_ONLY(nv); 651 652 if (dataSize != sizeof(NVOS47_PARAMETERS)) 653 { 654 rmStatus = NV_ERR_INVALID_ARGUMENT; 655 goto done; 656 } 657 658 Nv04UnmapMemoryDmaWithSecInfo(pApi, secInfo); 659 break; 660 } 661 662 case NV_ESC_RM_DUP_OBJECT: 663 { 664 NVOS55_PARAMETERS *pApi = data; 665 666 NV_CTL_DEVICE_ONLY(nv); 667 668 if (dataSize != sizeof(NVOS55_PARAMETERS)) 669 { 670 rmStatus = NV_ERR_INVALID_ARGUMENT; 671 goto done; 672 } 673 674 Nv04DupObjectWithSecInfo(pApi, secInfo); 675 break; 676 } 677 678 case NV_ESC_RM_SHARE: 679 { 680 NVOS57_PARAMETERS *pApi = data; 681 682 NV_CTL_DEVICE_ONLY(nv); 683 684 if (dataSize != sizeof(NVOS57_PARAMETERS)) 685 { 686 rmStatus = NV_ERR_INVALID_ARGUMENT; 687 goto done; 688 } 689 690 Nv04ShareWithSecInfo(pApi, secInfo); 691 break; 692 } 693 694 case NV_ESC_ALLOC_OS_EVENT: 695 { 696 nv_ioctl_alloc_os_event_t *pApi = data; 697 698 if (dataSize != sizeof(nv_ioctl_alloc_os_event_t)) 699 { 700 rmStatus = NV_ERR_INVALID_ARGUMENT; 701 goto done; 702 } 703 704 pApi->Status = rm_alloc_os_event(pApi->hClient, 705 nvfp, 706 
pApi->fd); 707 break; 708 } 709 710 case NV_ESC_FREE_OS_EVENT: 711 { 712 nv_ioctl_free_os_event_t *pApi = data; 713 714 if (dataSize != sizeof(nv_ioctl_free_os_event_t)) 715 { 716 rmStatus = NV_ERR_INVALID_ARGUMENT; 717 goto done; 718 } 719 720 pApi->Status = rm_free_os_event(pApi->hClient, pApi->fd); 721 break; 722 } 723 724 case NV_ESC_RM_GET_EVENT_DATA: 725 { 726 NVOS41_PARAMETERS *pApi = data; 727 728 if (dataSize != sizeof(NVOS41_PARAMETERS)) 729 { 730 rmStatus = NV_ERR_INVALID_ARGUMENT; 731 goto done; 732 } 733 734 pApi->status = rm_get_event_data(nvfp, 735 pApi->pEvent, 736 &pApi->MoreEvents); 737 break; 738 } 739 740 case NV_ESC_STATUS_CODE: 741 { 742 nv_state_t *pNv; 743 nv_ioctl_status_code_t *pApi = data; 744 745 NV_CTL_DEVICE_ONLY(nv); 746 747 if (dataSize != sizeof(nv_ioctl_status_code_t)) 748 { 749 rmStatus = NV_ERR_INVALID_ARGUMENT; 750 goto done; 751 } 752 753 pNv = nv_get_adapter_state(pApi->domain, pApi->bus, pApi->slot); 754 if (pNv == NULL) 755 { 756 rmStatus = NV_ERR_INVALID_ARGUMENT; 757 goto done; 758 } 759 760 rmStatus = rm_get_adapter_status(pNv, &pApi->status); 761 762 if (rmStatus != NV_OK) 763 goto done; 764 765 break; 766 } 767 768 case NV_ESC_RM_CONTROL: 769 { 770 NVOS54_PARAMETERS *pApi = data; 771 void *priv = NULL; 772 nv_file_private_t *dev_nvfp = NULL; 773 NvS32 fd; 774 NvBool bSkipDeviceRef; 775 776 NV_CTL_DEVICE_ONLY(nv); 777 778 if (dataSize != sizeof(NVOS54_PARAMETERS)) 779 { 780 rmStatus = NV_ERR_INVALID_ARGUMENT; 781 goto done; 782 } 783 784 rmStatus = RmGetDeviceFd(pApi, &fd, &bSkipDeviceRef); 785 if (rmStatus != NV_OK) 786 { 787 goto done; 788 } 789 790 if (!bSkipDeviceRef) 791 { 792 dev_nvfp = nv_get_file_private(fd, NV_FALSE, &priv); 793 if (dev_nvfp == NULL) 794 { 795 rmStatus = NV_ERR_INVALID_DEVICE; 796 goto done; 797 } 798 799 // Check to avoid cyclic dependency with NV_ESC_REGISTER_FD 800 if (!portAtomicCompareAndSwapU32(&dev_nvfp->register_or_refcount, 801 NVFP_TYPE_REFCOUNTED, 802 NVFP_TYPE_NONE)) 803 { 804 // Is 
this already refcounted... 805 if (dev_nvfp->register_or_refcount != NVFP_TYPE_REFCOUNTED) 806 { 807 nv_put_file_private(priv); 808 rmStatus = NV_ERR_IN_USE; 809 goto done; 810 } 811 } 812 813 secInfo.gpuOsInfo = priv; 814 } 815 816 Nv04ControlWithSecInfo(pApi, secInfo); 817 818 if ((pApi->status != NV_OK) && (priv != NULL)) 819 { 820 // 821 // No need to reset `register_or_refcount` as it might be set 822 // for previous successful calls. We let it clear with FD close. 823 // 824 nv_put_file_private(priv); 825 826 secInfo.gpuOsInfo = NULL; 827 } 828 829 break; 830 } 831 832 case NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO: 833 { 834 NVOS56_PARAMETERS *pApi = data; 835 void *pOldCpuAddress; 836 void *pNewCpuAddress; 837 838 NV_CTL_DEVICE_ONLY(nv); 839 840 if (dataSize != sizeof(NVOS56_PARAMETERS)) 841 { 842 rmStatus = NV_ERR_INVALID_ARGUMENT; 843 goto done; 844 } 845 846 pOldCpuAddress = NvP64_VALUE(pApi->pOldCpuAddress); 847 pNewCpuAddress = NvP64_VALUE(pApi->pNewCpuAddress); 848 849 pApi->status = rm_update_device_mapping_info(pApi->hClient, 850 pApi->hDevice, 851 pApi->hMemory, 852 pOldCpuAddress, 853 pNewCpuAddress); 854 break; 855 } 856 857 case NV_ESC_RM_LOCKLESS_DIAGNOSTIC: 858 { 859 NV_LOCKLESS_DIAGNOSTIC_PARAMS *pParams = data; 860 861 NV_CTL_DEVICE_ONLY(nv); 862 863 if (!osIsAdministrator()) 864 { 865 rmStatus = NV_ERR_INSUFFICIENT_PERMISSIONS; 866 pParams->status = rmStatus; 867 goto done; 868 } 869 870 switch (pParams->cmd) 871 { 872 // Do not use NVOC _DISPATCH here as it dereferences NULL RmClientResource* 873 case NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO: 874 rmStatus = cliresCtrlCmdNvdGetNvlogInfo_IMPL(NULL, &pParams->params.getNvlogInfo); 875 break; 876 case NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO: 877 rmStatus = cliresCtrlCmdNvdGetNvlogBufferInfo_IMPL(NULL, &pParams->params.getNvlogBufferInfo); 878 break; 879 case NV0000_CTRL_CMD_NVD_GET_NVLOG: 880 rmStatus = cliresCtrlCmdNvdGetNvlog_IMPL(NULL, &pParams->params.getNvlog); 881 break; 882 default: 883 rmStatus 
= NV_ERR_NOT_SUPPORTED; 884 break; 885 } 886 887 pParams->status = rmStatus; 888 goto done; 889 } 890 891 case NV_ESC_REGISTER_FD: 892 { 893 nv_ioctl_register_fd_t *params = data; 894 void *priv = NULL; 895 nv_file_private_t *ctl_nvfp; 896 897 if (dataSize != sizeof(nv_ioctl_register_fd_t)) 898 { 899 rmStatus = NV_ERR_INVALID_ARGUMENT; 900 goto done; 901 } 902 903 // LOCK: acquire API lock 904 rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); 905 if (rmStatus != NV_OK) 906 goto done; 907 908 // If there is already a ctl fd registered on this nvfp, fail. 909 if (nvfp->ctl_nvfp != NULL) 910 { 911 // UNLOCK: release API lock 912 rmapiLockRelease(); 913 rmStatus = NV_ERR_INVALID_STATE; 914 goto done; 915 } 916 917 // 918 // Note that this call is valid for both "actual" devices and ctrl 919 // devices. In particular, NV_ESC_ALLOC_OS_EVENT can be used with 920 // both types of devices. 921 // But, the ctl_fd passed in should always correspond to a control FD. 922 // 923 ctl_nvfp = nv_get_file_private(params->ctl_fd, 924 NV_TRUE, /* require ctl fd */ 925 &priv); 926 if (ctl_nvfp == NULL) 927 { 928 // UNLOCK: release API lock 929 rmapiLockRelease(); 930 rmStatus = NV_ERR_INVALID_ARGUMENT; 931 goto done; 932 } 933 934 // Disallow self-referential links, and disallow links to FDs that 935 // themselves have a link. 936 if ((ctl_nvfp == nvfp) || (ctl_nvfp->ctl_nvfp != NULL)) 937 { 938 nv_put_file_private(priv); 939 // UNLOCK: release API lock 940 rmapiLockRelease(); 941 rmStatus = NV_ERR_INVALID_ARGUMENT; 942 goto done; 943 } 944 945 // Check to avoid cyclic dependency with device refcounting 946 if (!portAtomicCompareAndSwapU32(&nvfp->register_or_refcount, 947 NVFP_TYPE_REGISTERED, 948 NVFP_TYPE_NONE)) 949 { 950 nv_put_file_private(priv); 951 // UNLOCK: release API lock 952 rmapiLockRelease(); 953 rmStatus = NV_ERR_IN_USE; 954 goto done; 955 } 956 957 // 958 // nvfp->ctl_nvfp is read outside the lock, so set it atomically. 
959 // Note that once set, this can never be removed until the fd 960 // associated with nvfp is closed. We hold on to 'priv' until the 961 // fd is closed, too, to ensure that the fd associated with 962 // ctl_nvfp remains valid. 963 // 964 portAtomicSetSize(&nvfp->ctl_nvfp, ctl_nvfp); 965 nvfp->ctl_nvfp_priv = priv; 966 967 // UNLOCK: release API lock 968 rmapiLockRelease(); 969 970 // NOTE: nv_put_file_private(priv) is not called here. It MUST be 971 // called during cleanup of this nvfp. 972 rmStatus = NV_OK; 973 break; 974 } 975 976 default: 977 { 978 NV_PRINTF(LEVEL_ERROR, "unknown NVRM ioctl command: 0x%x\n", cmd); 979 goto done; 980 } 981 } 982 983 rmStatus = NV_OK; 984 done: 985 986 return rmStatus; 987 } 988