1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 */ 23 24 #include "kernel/gpu/fifo/kernel_fifo.h" 25 #include "kernel/gpu/fifo/kernel_channel.h" 26 #include "kernel/gpu/fifo/kernel_channel_group.h" 27 #include "kernel/gpu/device/device.h" 28 #include "kernel/gpu/subdevice/subdevice.h" 29 #include "kernel/gpu/subdevice/subdevice_diag.h" 30 #include "kernel/gpu/mem_mgr/mem_mgr.h" 31 #include "kernel/virtualization/hypervisor/hypervisor.h" 32 #include "kernel/core/locks.h" 33 #include "lib/base_utils.h" 34 35 #include "vgpu/rpc.h" 36 #include "vgpu/vgpu_events.h" 37 38 #include "class/cl0080.h" 39 #include "class/cl2080.h" 40 #include "class/cl208f.h" 41 42 #include "ctrl/ctrl0080/ctrl0080fifo.h" 43 44 static NV_STATUS _kfifoGetCaps(OBJGPU *pGpu, NvU8 *pKfifoCaps); 45 46 /*! 
47 * @brief deviceCtrlCmdFifoGetChannelList 48 */ 49 NV_STATUS 50 deviceCtrlCmdFifoGetChannelList_IMPL 51 ( 52 Device *pDevice, 53 NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *pChannelParams 54 ) 55 { 56 OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); 57 NvU32 *pChannelHandleList = NvP64_VALUE(pChannelParams->pChannelHandleList); 58 NvU32 *pChannelList = NvP64_VALUE(pChannelParams->pChannelList); 59 NvU32 counter; 60 61 // Validate input / Size / Args / Copy args 62 if (pChannelParams->numChannels == 0) 63 { 64 NV_PRINTF(LEVEL_ERROR, 65 "Invalid Params for command NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST\n"); 66 return NV_ERR_INVALID_ARGUMENT; 67 } 68 69 for (counter = 0; counter < pChannelParams->numChannels; counter++) 70 { 71 KernelChannel *pKernelChannel; 72 NvU32 chid = NV0080_CTRL_FIFO_GET_CHANNELLIST_INVALID_CHANNEL; 73 NV_STATUS status; 74 75 // Searching through the rm client db. 76 status = CliGetKernelChannel(RES_GET_CLIENT_HANDLE(pDevice), pChannelHandleList[counter], &pKernelChannel); 77 78 if (status == NV_OK) 79 { 80 chid = pKernelChannel->ChID; 81 82 // Amodel-specific : Encode runlist ID 83 if (pGpu && (IS_MODS_AMODEL(pGpu))) 84 { 85 chid |= ((kchannelGetRunlistId(pKernelChannel) & 0xffff) << 16); 86 } 87 } 88 89 pChannelList[counter] = chid; 90 } 91 92 return NV_OK; 93 } 94 95 NV_STATUS 96 deviceCtrlCmdFifoIdleChannels_IMPL 97 ( 98 Device *pDevice, 99 NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS *pParams 100 ) 101 { 102 NvBool isGpuLockAcquired = NV_FALSE; 103 NV_STATUS status = NV_OK; 104 OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); 105 CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); 106 RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; 107 108 // Check buffer size against maximum 109 if (pParams->numChannels > NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS_MAX_CHANNELS) 110 return NV_ERR_INVALID_ARGUMENT; 111 112 // 113 // Acquire GPU lock manually in control call body instead of letting Resource 114 // Server do it to ensure that 
    // RM_LOCK_MODULES_FIFO is used.
    //
    if (!rmGpuLockIsOwner())
    {
        status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO);

        if (status != NV_OK)
            goto done;

        // Remember that we took the lock so we release it on the way out.
        isGpuLockAcquired = NV_TRUE;
    }

    //
    // Send RPC if running in Guest/CPU-RM. Do this manually instead of ROUTE_TO_PHYSICAL
    // so that we can acquire the GPU lock in CPU-RM first.
    //
    if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
    {
        NV_RM_RPC_CONTROL(pGpu,
                          pRmCtrlParams->hClient,
                          pRmCtrlParams->hObject,
                          pRmCtrlParams->cmd,
                          pRmCtrlParams->pParams,
                          pRmCtrlParams->paramsSize,
                          status);
    }
    else
    {
        // Only supported in virtualized / GSP-offload configurations.
        status = NV_ERR_NOT_SUPPORTED;
    }

done:

    if (isGpuLockAcquired)
        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);

    return status;
}

NV_STATUS
subdeviceCtrlCmdGetPhysicalChannelCount_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS *pParams
)
{
    OBJGPU     *pGpu        = GPU_RES_GET_GPU(pSubdevice);
    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
    NvU32       numChannelsInUse = 0;
    NvU32       numChannels;
    NvU32       i;
    NvU32       chGrpID;

    // physChannelCount reports the smallest per-runlist channel count seen.
    pParams->physChannelCount      = NV_U32_MAX;
    pParams->physChannelCountInUse = 0;

    // TODO: Follow up with clients before turning on per esched chidmgr
    for (i = 0; i < pKernelFifo->numChidMgrs; i++)
    {
        if (pKernelFifo->ppChidMgr[i] != NULL)
        {
            // Get the max number of HW channels on the runlist
            numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pKernelFifo->ppChidMgr[i]);

            //
            // Get the number of channels already in use.
            // NOTE(review): numChannelsInUse is never reset inside this loop,
            // so it accumulates across all chid managers; combined with the
            // NV_MAX below this effectively reports the running total —
            // confirm this is the intended semantics.
            //
            for (chGrpID = 0; chGrpID < numChannels; chGrpID++)
            {
                if (nvBitFieldTest(pKernelFifo->ppChidMgr[i]->channelGrpMgr.pHwIdInUse,
                                   pKernelFifo->ppChidMgr[i]->channelGrpMgr.hwIdInUseSz,
                                   chGrpID))
                {
                    numChannelsInUse++;
                }
            }

            pParams->physChannelCount = NV_MIN(pParams->physChannelCount,
                                               numChannels);
            pParams->physChannelCountInUse = NV_MAX(pParams->physChannelCountInUse, numChannelsInUse);
        }
    }
    return NV_OK;
}

/*!
 * @brief subdeviceCtrlCmdFifoGetInfo
 *
 * Fills in one data word per requested index in fifoInfoTbl.
 *
 * Lock Requirements:
 *      Assert that both the GPUs lock and API lock are held on entry.
 */
NV_STATUS
subdeviceCtrlCmdFifoGetInfo_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_FIFO_GET_INFO_PARAMS *pFifoInfoParams
)
{
    OBJGPU        *pGpu           = GPU_RES_GET_GPU(pSubdevice);
    KernelFifo    *pKernelFifo    = GPU_GET_KERNEL_FIFO(pGpu);
    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
    NV_STATUS      status = NV_OK;
    NvU32          runlistId;
    CHID_MGR      *pChidMgr;
    NvU32          i;
    NvU32          data;

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    // error check: reject oversized query tables
    if (pFifoInfoParams->fifoInfoTblSize > NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES)
        return NV_ERR_INVALID_PARAM_STRUCT;

    // step thru list
    for (i = 0; i < pFifoInfoParams->fifoInfoTblSize; i++)
    {
        switch (pFifoInfoParams->fifoInfoTbl[i].index)
        {
            case NV2080_CTRL_FIFO_INFO_INDEX_INSTANCE_TOTAL:
                data = memmgrGetRsvdMemorySize(pMemoryManager);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS:
                //
                // TODO: Follow up with clients using this control call before
                // turning on per esched chidmgr
                //
                data = kfifoGetMaxChannelGroupsInSystem(pGpu, pKernelFifo);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP:
                data = kfifoGetMaxChannelGroupSize_HAL(pKernelFifo);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE:
                //
                // TODO: Follow up with clients using this control call before
                // turning on per esched chidmgr
                //
                data = kfifoGetChannelGroupsInUse(pGpu, pKernelFifo);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP:
                //
                // RM-SMC AMPERE-TODO This data is incompatible with SMC, where
                // different
                // engines can have different max VEID counts
                //
                data = kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, NV_FALSE);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_BAR1_USERD_START_OFFSET:
            {
                NvU64 userdAddr;
                NvU32 userdSize;
                NvU32 gfid;

                NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid));
                // Only meaningful on the hypervisor physical function.
                if (hypervisorIsVgxHyper() && IS_GFID_PF(gfid))
                {
                    status = kfifoGetUserdBar1MapInfo_HAL(pGpu, pKernelFifo, &userdAddr, &userdSize);
                    if (status == NV_OK)
                        data = (NvU32)(userdAddr >> NV2080_CTRL_FIFO_GET_INFO_USERD_OFFSET_SHIFT);
                }
                else
                {
                    data = 0;
                    status = NV_ERR_INVALID_REQUEST;
                }
                break;
            }
            case NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE:
            {
                NvU64 timeslice = kfifoChannelGroupGetDefaultTimeslice_HAL(pKernelFifo);
                data = NvU64_LO32(timeslice);
                // The control interface only carries 32 bits; reject overflow.
                NV_ASSERT_OR_RETURN((NvU64_HI32(timeslice) == 0), NV_ERR_INVALID_PARAM_STRUCT);
            }
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_IS_PER_RUNLIST_CHANNEL_RAM_SUPPORTED:
                data = (NvU32) kfifoIsPerRunlistChramEnabled(pKernelFifo);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS_PER_ENGINE:
                // Get runlist ID for Engine type.
                NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo,
                                                                ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
                                                                gpuGetRmEngineType(pFifoInfoParams->engineType),
                                                                ENGINE_INFO_TYPE_RUNLIST,
                                                                &runlistId));
                pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, runlistId);
                data = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE_PER_ENGINE:
                // Get runlist ID for Engine type.
                NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo,
                                                                ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
                                                                gpuGetRmEngineType(pFifoInfoParams->engineType),
                                                                ENGINE_INFO_TYPE_RUNLIST, &runlistId));
                data = kfifoGetRunlistChannelGroupsInUse(pGpu, pKernelFifo, runlistId);
                break;
            default:
                // Unknown index: report failure for the whole request.
                data = 0;
                status = NV_ERR_INVALID_ARGUMENT;
                break;
        }

        // Stop on the first failing entry; earlier entries stay filled in.
        if (status != NV_OK)
            break;

        // save off data value
        pFifoInfoParams->fifoInfoTbl[i].data = data;
    }

    return status;
}


/*!
 * @brief Get bitmask of allocated channels
 */
NV_STATUS subdeviceCtrlCmdFifoGetAllocatedChannels_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_FIFO_GET_ALLOCATED_CHANNELS_PARAMS *pParams
)
{
    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(GPU_RES_GET_GPU(pSubdevice));
    NV_STATUS status;

    status = kfifoGetAllocatedChannelMask(GPU_RES_GET_GPU(pSubdevice),
                                          pKernelFifo,
                                          pParams->runlistId,
                                          pParams->bitMask,
                                          sizeof pParams->bitMask);
    switch(status)
    {
    case NV_ERR_BUFFER_TOO_SMALL:
    case NV_ERR_INVALID_ARGUMENT:
        //
        // Update the ctrl call structure to have sufficient space for 1 bit per
        // possible channels in a runlist. This is a driver bug.
        //
        NV_ASSERT_OK(status);
        return NV_ERR_NOT_SUPPORTED;
    default:
        return status;
    }
}


/*!
 * @brief subdeviceCtrlCmdFifoGetUserdLocation
 *
 * Reports the aperture (vidmem/sysmem) and caching attribute of USERD.
 *
 * Lock Requirements:
 *      Assert that API lock and GPUs lock held on entry
 */
NV_STATUS
subdeviceCtrlCmdFifoGetUserdLocation_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS *pUserdLocationParams
)
{
    RsClient   *pClient = RES_GET_CLIENT(pSubdevice);
    Device     *pDevice;
    NvU32       userdAperture;
    NvU32       userdAttribute;
    NV_STATUS   rmStatus = NV_OK;
    OBJGPU     *pGpu = GPU_RES_GET_GPU(pSubdevice);
    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    rmStatus = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice);
    if (rmStatus != NV_OK)
        return NV_ERR_INVALID_DEVICE;

    rmStatus = kfifoGetUserdLocation_HAL(pKernelFifo,
                                         &userdAperture,
                                         &userdAttribute);

    if (rmStatus != NV_OK)
        return rmStatus;

    // Support for NVLINK coherent memory is not yet available in RM

    // Map the internal aperture enum onto the control-call constants.
    if (userdAperture == ADDR_FBMEM)
    {
        pUserdLocationParams->aperture = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_VIDMEM;
    }
    else if (userdAperture == ADDR_SYSMEM)
    {
        pUserdLocationParams->aperture = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_SYSMEM;
    }
    else
    {
        NV_PRINTF(LEVEL_ERROR, "Invalid userdAperture value = 0x%08x\n",
                  userdAperture);
        return NV_ERR_INVALID_STATE;
    }

    // Same translation for the caching attribute.
    if (userdAttribute == NV_MEMORY_CACHED)
    {
        pUserdLocationParams->attribute = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_CACHED;
    }
    else if (userdAttribute == NV_MEMORY_UNCACHED)
    {
        pUserdLocationParams->attribute = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_UNCACHED;
    }
    else if (userdAttribute == NV_MEMORY_WRITECOMBINED)
    {
        pUserdLocationParams->attribute = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_WRITECOMBINED;
    }
    else
    {
        NV_PRINTF(LEVEL_ERROR, "Invalid userdAttribute value = 0x%08x\n",
                  userdAttribute);
        return NV_ERR_INVALID_STATE;
    }

    return rmStatus;
}

/*!
 * @brief subdeviceCtrlCmdFifoGetChannelMemInfo
 *
 * Lock Requirements:
 *      Assert that API lock and GPUs lock held on entry
 */
NV_STATUS
subdeviceCtrlCmdFifoGetChannelMemInfo_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS *pChannelMemParams
)
{
    OBJGPU   *pGpu = GPU_RES_GET_GPU(pSubdevice);
    Device   *pDevice;
    RsClient *pClient = RES_GET_CLIENT(pSubdevice);
    NV_STATUS rmStatus = NV_OK;
    NvU32     index;
    NvU32     runqueues;
    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
    KernelChannel *pKernelChannel;
    MEMORY_DESCRIPTOR *pMemDesc = NULL;
    NV2080_CTRL_FIFO_CHANNEL_MEM_INFO chMemInfo;

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    rmStatus = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice);
    if (rmStatus != NV_OK)
        return NV_ERR_INVALID_DEVICE;

    rmStatus = CliGetKernelChannelWithDevice(pClient->hClient,
                                             RES_GET_HANDLE(pDevice),
                                             pChannelMemParams->hChannel,
                                             &pKernelChannel);
    if (rmStatus != NV_OK)
    {
        return NV_ERR_INVALID_CHANNEL;
    }

    // Build the reply in a local copy first.
    portMemSet((void *)&chMemInfo, 0, sizeof(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO));

    // Get Inst Block Mem Info
    rmStatus = kfifoChannelGetFifoContextMemDesc_HAL(pGpu,
                                                     pKernelFifo,
                                                     pKernelChannel,
                                                     FIFO_CTX_INST_BLOCK,
                                                     &pMemDesc);
    if (rmStatus != NV_OK)
        return rmStatus;

    kfifoFillMemInfo(pKernelFifo, pMemDesc, &chMemInfo.inst);

    // Get RAMFC mem Info
    pMemDesc = NULL;
    rmStatus = kfifoChannelGetFifoContextMemDesc_HAL(pGpu,
                                                     pKernelFifo,
                                                     pKernelChannel,
                                                     FIFO_CTX_RAMFC,
                                                     &pMemDesc);

    if (rmStatus != NV_OK)
        return rmStatus;

    kfifoFillMemInfo(pKernelFifo, pMemDesc, &chMemInfo.ramfc);

    // Get Method buffer mem info
    runqueues =
    kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo);
    NV_ASSERT((runqueues <= NV2080_CTRL_FIFO_GET_CHANNEL_MEM_INFO_MAX_COUNT));
    for (index = 0; index < runqueues; index++)
    {
        pMemDesc = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pMthdBuffers[index].pMemDesc;
        if (pMemDesc != NULL)
        {
            kfifoFillMemInfo(pKernelFifo, pMemDesc, &chMemInfo.methodBuf[index]);
            chMemInfo.methodBufCount++;
        }
    }

    // copy into the kernel structure, there is no userland pointer
    // maybe later structure is copied out to userland
    portMemCopy(&pChannelMemParams->chMemInfo,
                sizeof(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO),
                &chMemInfo,
                sizeof(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO));

    return rmStatus;
}

NV_STATUS
diagapiCtrlCmdFifoEnableVirtualContext_IMPL
(
    DiagApi *pDiagApi,
    NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS *pEnableVCParams
)
{
    OBJGPU        *pGpu = GPU_RES_GET_GPU(pDiagApi);
    Device        *pDevice;
    NV_STATUS      rmStatus = NV_OK;
    KernelChannel *pKernelChannel = NULL;
    RsClient      *pClient = RES_GET_CLIENT(pDiagApi);

    rmStatus = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice);
    if (rmStatus != NV_OK)
        return NV_ERR_INVALID_DEVICE;

    // Resolve the channel handle under this client's device.
    NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
                          CliGetKernelChannelWithDevice(pClient->hClient,
                                                        RES_GET_HANDLE(pDevice),
                                                        pEnableVCParams->hChannel,
                                                        &pKernelChannel));

    rmStatus = kchannelEnableVirtualContext_HAL(pKernelChannel);
    return rmStatus;
}

/*!
 * @brief subdeviceCtrlCmdFifoUpdateChannelInfo
 *
 * This function is broken for SLI.
 * Will be fixed after instance block and userd
 * is made unicast.
548 * 549 * Lock Requirements: 550 * Assert that API lock and GPUs lock held on entry 551 */ 552 NV_STATUS 553 subdeviceCtrlCmdFifoUpdateChannelInfo_IMPL 554 ( 555 Subdevice *pSubdevice, 556 NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS *pChannelInfo 557 ) 558 { 559 CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); 560 RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; 561 OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); 562 NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); 563 KernelChannel *pKernelChannel = NULL; 564 NV_STATUS status = NV_OK; 565 NvU64 userdAddr = 0; 566 NvU32 userdAper = 0; 567 568 // Bug 724186 -- Skip this check for deferred API 569 LOCK_ASSERT_AND_RETURN(pRmCtrlParams->bDeferredApi || rmGpuLockIsOwner()); 570 571 NV_CHECK_OK_OR_RETURN(LEVEL_INFO, 572 CliGetKernelChannel(pChannelInfo->hClient, 573 pChannelInfo->hChannel, 574 &pKernelChannel)); 575 NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); 576 577 if (!pChannelInfo->hUserdMemory) 578 { 579 return NV_ERR_INVALID_ARGUMENT; 580 } 581 582 if (!pKernelChannel->bClientAllocatedUserD) 583 { 584 return NV_ERR_NOT_SUPPORTED; 585 } 586 587 if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) 588 { 589 NV_RM_RPC_CONTROL(pGpu, 590 pRmCtrlParams->hClient, 591 pRmCtrlParams->hObject, 592 pRmCtrlParams->cmd, 593 pRmCtrlParams->pParams, 594 pRmCtrlParams->paramsSize, 595 status); 596 if (status != NV_OK) 597 return status; 598 599 // Destroy the submemdescriptor of the previous USERD 600 kchannelDestroyUserdMemDesc_HAL(pGpu, pKernelChannel); 601 602 // Get the userd hMemory and create a submemdescriptor 603 // Store it in pKernelChannel 604 status = kchannelCreateUserdMemDesc_HAL(pGpu, pKernelChannel, hClient, 605 pChannelInfo->hUserdMemory, 606 pChannelInfo->userdOffset, 607 &userdAddr, &userdAper); 608 if (status != NV_OK) 609 { 610 NV_PRINTF(LEVEL_ERROR, 611 "kchannelCreateUserdMemDesc_HAL" 612 "failed for hClient 0x%x and channel 0x%x status 0x%x\n", 613 hClient, 
                      kchannelGetDebugTag(pKernelChannel), status);
        }
    }
    else
    {
        // Only supported in virtualized / GSP-offload configurations.
        status = NV_ERR_NOT_SUPPORTED;
    }

    return status;
}

NV_STATUS
diagapiCtrlCmdFifoGetChannelState_IMPL
(
    DiagApi *pDiagApi,
    NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams
)
{
    OBJGPU        *pGpu = GPU_RES_GET_GPU(pDiagApi);
    KernelChannel *pKernelChannel;

    NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
        CliGetKernelChannel(pChannelStateParams->hClient, pChannelStateParams->hChannel, &pKernelChannel));
    // Physical state comes from the HAL; kernel-side state is appended below.
    NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
        kchannelGetChannelPhysicalState(pGpu, pKernelChannel, pChannelStateParams));

    // Fill out kernel state here
    pChannelStateParams->bCpuMap     = kchannelIsCpuMapped(pGpu, pKernelChannel);
    pChannelStateParams->bRunlistSet = kchannelIsRunlistSet(pGpu, pKernelChannel);

    return NV_OK;
}

/*!
 * @brief Accumulate FIFO caps across all GPUs of the (possibly SLI) device.
 */
static NV_STATUS
_kfifoGetCaps
(
    OBJGPU *pGpu,
    NvU8   *pKfifoCaps
)
{
    NV_STATUS   rmStatus         = NV_OK;
    NvBool      bCapsInitialized = NV_FALSE;
    KernelFifo *pKernelFifo      = GPU_GET_KERNEL_FIFO(pGpu);

    VERIFY_OBJ_PTR(pKernelFifo);

    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
    {
        // Re-fetch per-GPU inside the SLI loop; pGpu changes each iteration.
        pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
        if (pKernelFifo == NULL)
        {
            rmStatus = NV_ERR_INVALID_POINTER;
            SLI_LOOP_BREAK;
        }
        kfifoGetDeviceCaps(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized);
        bCapsInitialized = NV_TRUE;
    }
    SLI_LOOP_END

    return rmStatus;
}

/*!
 * @brief deviceCtrlCmdFifoGetCaps
 *
 * Lock Requirements:
 *      Assert that API lock and GPUs lock held on entry
 */
NV_STATUS
deviceCtrlCmdFifoGetCaps_IMPL
(
    Device *pDevice,
    NV0080_CTRL_FIFO_GET_CAPS_PARAMS *pKfifoCapsParams
)
{
    OBJGPU *pGpu       = GPU_RES_GET_GPU(pDevice);
    NvU8   *pKfifoCaps = NvP64_VALUE(pKfifoCapsParams->capsTbl);

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    // sanity check array size
    if (pKfifoCapsParams->capsTblSize != NV0080_CTRL_FIFO_CAPS_TBL_SIZE)
    {
        NV_PRINTF(LEVEL_ERROR, "size mismatch: client 0x%x rm 0x%x\n",
                  pKfifoCapsParams->capsTblSize,
                  NV0080_CTRL_FIFO_CAPS_TBL_SIZE);
        return NV_ERR_INVALID_ARGUMENT;
    }

    // now accumulate caps for entire device
    return _kfifoGetCaps(pGpu, pKfifoCaps);
}

/*!
 * @brief deviceCtrlCmdFifoGetCapsV2
 *
 * V2 variant: caps table is embedded in the params (no client pointer, so no
 * size field to validate).
 *
 * Lock Requirements:
 *      Assert that API lock and GPUs lock held on entry
 */
NV_STATUS
deviceCtrlCmdFifoGetCapsV2_IMPL
(
    Device *pDevice,
    NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS *pKfifoCapsParams
)
{
    OBJGPU *pGpu       = GPU_RES_GET_GPU(pDevice);
    NvU8   *pKfifoCaps = pKfifoCapsParams->capsTbl;

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    // now accumulate caps for entire device
    return _kfifoGetCaps(pGpu, pKfifoCaps);
}

/**
 * @brief Disables or enables the given channels.
 */
NV_STATUS
subdeviceCtrlCmdFifoDisableChannels_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS *pDisableChannelParams
)
{
    NV_STATUS     status        = NV_OK;
    OBJGPU       *pGpu          = GPU_RES_GET_GPU(pSubdevice);
    CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
    RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;

    // Validate use of pRunlistPreemptEvent to allow use by Kernel clients only
    if ((pDisableChannelParams->pRunlistPreemptEvent != NULL) &&
        (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL))
    {
        return NV_ERR_INSUFFICIENT_PERMISSIONS;
    }

    // Send RPC to handle message on Host-RM
    if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
    {
        NV_RM_RPC_CONTROL(pGpu,
                          pRmCtrlParams->hClient,
                          pRmCtrlParams->hObject,
                          pRmCtrlParams->cmd,
                          pRmCtrlParams->pParams,
                          pRmCtrlParams->paramsSize,
                          status);
    }
    // Send internal control call to actually disable channels
    // NOTE(review): the comment above predates this branch's implementation;
    // the non-RPC path currently just reports NV_ERR_NOT_SUPPORTED.
    else
    {
        status = NV_ERR_NOT_SUPPORTED;
    }

    return status;
}