1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 */ 23 24 //****************************************************************************** 25 // 26 // Description: 27 // This module implements RPC send and receive ring buffers. 
//
//******************************************************************************

// FIXME XXX
#define NVOC_KERNEL_GRAPHICS_CONTEXT_H_PRIVATE_ACCESS_ALLOWED

#include "os/os.h"
#include "core/system.h"
#include "core/locks.h"
#include "gpu/gpu.h"
#include "gpu/bif/kernel_bif.h"
#include "gpu/subdevice/subdevice.h"
#include "gpu/mem_mgr/mem_desc.h"
#include "nvVer.h"
#include "nvBldVer.h"
#include "gpu/mem_mgr/virt_mem_allocator.h"
#include "platform/chipset/chipset.h"
#include "resserv/rs_client.h"
#include "resserv/rs_server.h"
#include "rmapi/alloc_size.h"
#include "rmapi/rs_utils.h"
#include "rmapi/rmapi_utils.h"
#include "rmapi/client_resource.h"
#include "gpu/gsp/kernel_gsp.h"
#include "gpu/mem_mgr/mem_mgr.h"
#include "vgpu/vgpu_version.h"
#include "vgpu/rpc.h"
#include "vgpu/vgpu_events.h"
#include "virtualization/hypervisor/hypervisor.h"
// NOTE(review): duplicate include — "os/os.h" is already included above.
#include "os/os.h"
#include "objtmr.h"
#include "lib/base_utils.h"

#include "gpu/conf_compute/conf_compute.h"

#define SDK_ALL_CLASSES_INCLUDE_FULL_HEADER
#include "g_allclasses.h"
#undef SDK_ALL_CLASSES_INCLUDE_FULL_HEADER
#include "nverror.h"


//
// The generated RPC headers expand differently depending on which of these
// macros are defined; define them only around the include so we get the
// full structure/union definitions here.
//
#define RPC_STRUCTURES
#define RPC_GENERIC_UNION
#include "g_rpc-structures.h"
#undef RPC_STRUCTURES
#undef RPC_GENERIC_UNION

#define RPC_MESSAGE_STRUCTURES
#define RPC_MESSAGE_GENERIC_UNION
#include "g_rpc-message-header.h"
#undef RPC_MESSAGE_STRUCTURES
#undef RPC_MESSAGE_GENERIC_UNION

#include "g_rpc_private.h"

#include "g_finn_rm_api.h"

#include "gpu/gsp/message_queue_priv.h"

// When NV_TRUE, every synchronous RPC issued through _issueRpcAndWait() is
// timed and appended to the rpcMeterHead list below.  Off by default.
static NvBool bProfileRPC = NV_FALSE;

// Singly-linked list node holding one profiled RPC sample (function tag,
// start/end timestamps in RPC_METER_ENTRY).
typedef struct rpc_meter_list
{
    RPC_METER_ENTRY rpcData;
    struct rpc_meter_list *pNext;
} RPC_METER_LIST;

// Head/tail pointers for the RPC profiler list (tail enables O(1) append).
typedef struct rpc_meter_head
{
    RPC_METER_LIST *pHead;
    RPC_METER_LIST *pTail;
} RPC_METER_HEAD;

// Global profiler list and entry count; only touched when bProfileRPC is set.
static RPC_METER_HEAD rpcMeterHead;
static NvU32
rpcProfilerEntryCount;

// Cached vGPU (VGX) version negotiated with the host during the RPC version
// handshake performed in RmRpcSetGuestSystemInfo().
typedef struct rpc_vgx_version
{
    NvU32 majorNum;
    NvU32 minorNum;
} RPC_VGX_VERSION;

static RPC_VGX_VERSION rpcVgxVersion;
// Latched in the PM resume codepath so the version handshake is performed
// (not skipped) exactly once on resume; see RmRpcSetGuestSystemInfo().
static NvBool bSkipRpcVersionHandshake = NV_FALSE;

//
// Rebind the OBJRPC HAL interface table to the given IP version: assign the
// version to pRpc, re-run the HAL interface setup function, and populate the
// IP-version routing info via rpc_iGrp_ipVersions_getInfo_HAL().
//
void rpcSetIpVersion(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 ipVersion)
{
    OBJHAL *pHal = GPU_GET_HAL(pGpu);
    PMODULEDESCRIPTOR pMod = objhalGetModuleDescriptor(pHal);
    IGRP_IP_VERSIONS_TABLE_INFO info = {0};

    _objrpcAssignIpVersion(pRpc, ipVersion);
    pMod->pHalSetIfaces->rpcHalIfacesSetupFn(&pRpc->_hal);
    info.pGpu = pGpu;
    info.pDynamic = (void*) pRpc;
    rpc_iGrp_ipVersions_getInfo_HAL(pRpc, &info);
    info.ifacesWrapupFn(&info);

}

// Constructor stub: OBJRPC needs no per-instance setup here.
NV_STATUS rpcConstruct_IMPL(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS rmStatus = NV_OK;
    return rmStatus;
}

// Destructor stub: nothing to tear down.
void rpcDestroy_IMPL(OBJGPU *pGpu, OBJRPC *pRpc)
{
}

// Base implementation; expected to be overridden by a HAL-specific transport.
// Reaching this means no send routine was wired up for this configuration.
NV_STATUS rpcSendMessage_IMPL(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_PRINTF(LEVEL_ERROR, "virtual function not implemented.\n");
    return NV_ERR_NOT_SUPPORTED;
}

// Base implementation; expected to be overridden by a HAL-specific transport.
NV_STATUS rpcRecvPoll_IMPL(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 expectedFunc)
{
    NV_PRINTF(LEVEL_ERROR, "virtual function not implemented.\n");
    return NV_ERR_NOT_SUPPORTED;
}

//
// Issue the RPC currently staged in the shared message buffer and block,
// polling until the reply for the same function code arrives.
//
// Returns the transport status on send/receive failure.  On a completed
// round-trip, host results below the VMIOP range are returned as-is; any
// other non-success rpc_result is collapsed to NV_ERR_GENERIC.
//
static NV_STATUS _issueRpcAndWait(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    RPC_METER_LIST *pNewEntry = NULL;

    // should not be called in broadcast mode
    NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);

    if (bProfileRPC)
    {
        // Create a new entry for our RPC profiler
        pNewEntry = portMemAllocNonPaged(sizeof(RPC_METER_LIST));
        if (pNewEntry == NULL)
        {
            NV_PRINTF(LEVEL_ERROR, "failed to allocate RPC meter memory!\n");
            NV_ASSERT(0);
            return NV_ERR_INSUFFICIENT_RESOURCES;
        }

        portMemSet(pNewEntry, 0, sizeof(RPC_METER_LIST));

        // Append to the global profiler list (tail pointer gives O(1) append).
        if (rpcMeterHead.pHead == NULL)
            rpcMeterHead.pHead = pNewEntry;
        else
            rpcMeterHead.pTail->pNext = pNewEntry;

        rpcMeterHead.pTail = pNewEntry;

        pNewEntry->rpcData.rpcDataTag = vgpu_rpc_message_header_v->function;

        rpcProfilerEntryCount++;

        osGetPerformanceCounter(&pNewEntry->rpcData.startTimeInNs);
    }

    // For HCC, cache expectedFunc value before encrypting.
    NvU32 expectedFunc = vgpu_rpc_message_header_v->function;

    status = rpcSendMessage(pGpu, pRpc);
    if (status != NV_OK)
    {
        NV_PRINTF_COND(pRpc->bQuietPrints, LEVEL_INFO, LEVEL_ERROR,
                       "rpcSendMessage failed with status 0x%08x for fn %d!\n",
                       status, vgpu_rpc_message_header_v->function);
        //
        // It has been observed that returning NV_ERR_BUSY_RETRY in a bad state (RPC
        // buffers full and not being serviced) can make things worse, i.e. turn RPC
        // failures into app hangs such that even nvidia-bug-report.sh gets stuck.
        // Avoid this for now while still returning the correct error in other cases.
        //
        return (status == NV_ERR_BUSY_RETRY) ? NV_ERR_GENERIC : status;
    }

    // Use cached expectedFunc here because vgpu_rpc_message_header_v is encrypted for HCC.
    status = rpcRecvPoll(pGpu, pRpc, expectedFunc);
    if (status != NV_OK)
    {
        if (status == NV_ERR_TIMEOUT)
        {
            NV_PRINTF_COND(pRpc->bQuietPrints, LEVEL_INFO, LEVEL_ERROR,
                           "rpcRecvPoll timedout for fn %d!\n",
                           vgpu_rpc_message_header_v->function);
        }
        else
        {
            NV_PRINTF_COND(pRpc->bQuietPrints, LEVEL_INFO, LEVEL_ERROR,
                           "rpcRecvPoll failed with status 0x%08x for fn %d!\n",
                           status, vgpu_rpc_message_header_v->function);
        }
        return status;
    }

    // Record the completion timestamp; pNewEntry is non-NULL whenever
    // bProfileRPC was set on entry (allocated above or we returned early).
    if (bProfileRPC)
        osGetPerformanceCounter(&pNewEntry->rpcData.endTimeInNs);

    // Now check if RPC really succeeded
    if (vgpu_rpc_message_header_v->rpc_result != NV_VGPU_MSG_RESULT_SUCCESS)
    {
        NV_PRINTF(LEVEL_WARNING, "RPC failed with status 0x%08x for fn %d!\n",
                  vgpu_rpc_message_header_v->rpc_result,
                  vgpu_rpc_message_header_v->function);

        if (vgpu_rpc_message_header_v->rpc_result < DRF_BASE(NV_VGPU_MSG_RESULT__VMIOP))
            return vgpu_rpc_message_header_v->rpc_result;

        return NV_ERR_GENERIC;
    }

    return NV_OK;
}

//
// Fire-and-forget variant of _issueRpcAndWait(): send the staged RPC without
// polling for a reply.  The host's rpc_result is therefore never examined.
//
static NV_STATUS _issueRpcAsync(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status;

    // should not be called in broadcast mode
    NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);

    status = rpcSendMessage(pGpu, pRpc);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "rpcSendMessage failed with status 0x%08x for fn %d!\n",
                  status, vgpu_rpc_message_header_v->function);
        NV_ASSERT(0);
        //
        // It has been observed that returning NV_ERR_BUSY_RETRY in a bad state (RPC
        // buffers full and not being serviced) can make things worse, i.e. turn RPC
        // failures into app hangs such that even nvidia-bug-report.sh gets stuck.
        // Avoid this for now while still returning the correct error in other cases.
        //
        return (status == NV_ERR_BUSY_RETRY) ?
NV_ERR_GENERIC : status;
    }

    return NV_OK;
}

//
// Issue an RPC whose payload may exceed the RPC message buffer (maxRpcSize).
// The first maxRpcSize bytes (including the caller's header) are sent as-is;
// the remainder is chunked into NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD
// messages.  When bWait is set, the reply is collected the same way, and for
// bBidirectional transfers the continuation frames are copied back into
// pBuffer as well.
//
static NV_STATUS _issueRpcLarge
(
    OBJGPU *pGpu,
    OBJRPC *pRpc,
    NvU32 bufSize,
    const void *pBuffer,
    NvBool bBidirectional,
    NvBool bWait
)
{
    NvU8 *pBuf8 = (NvU8 *)pBuffer;
    NV_STATUS nvStatus = NV_OK;
    NvU32 expectedFunc = vgpu_rpc_message_header_v->function;
    NvU32 entryLength;
    NvU32 remainingSize = bufSize;
    NvU32 recordCount = 0;

    // should not be called in broadcast mode
    NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);

    // Copy the initial buffer
    entryLength = NV_MIN(bufSize, pRpc->maxRpcSize);

    // Skip the copy when the caller staged the message in-place.
    if ((NvU8 *)vgpu_rpc_message_header_v != pBuf8)
        portMemCopy(vgpu_rpc_message_header_v, entryLength, pBuf8, entryLength);

    // Set the correct length for this queue entry.
    vgpu_rpc_message_header_v->length = entryLength;

    nvStatus = rpcSendMessage(pGpu, pRpc);
    if (nvStatus != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "rpcSendMessage failed with status 0x%08x for fn %d!\n",
                  nvStatus, expectedFunc);
        NV_ASSERT(0);
        //
        // It has been observed that returning NV_ERR_BUSY_RETRY in a bad state (RPC
        // buffers full and not being serviced) can make things worse, i.e. turn RPC
        // failures into app hangs such that even nvidia-bug-report.sh gets stuck.
        // Avoid this for now while still returning the correct error in other cases.
        //
        return (nvStatus == NV_ERR_BUSY_RETRY) ? NV_ERR_GENERIC : nvStatus;
    }
    remainingSize -= entryLength;
    pBuf8 += entryLength;

    // Copy the remaining buffers
    entryLength = pRpc->maxRpcSize - sizeof(rpc_message_header_v);
    while (remainingSize != 0)
    {
        if (entryLength > remainingSize)
            entryLength = remainingSize;

        ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu);
        if (pCC != NULL && pCC->getProperty(pCC, PDB_PROP_CONFCOMPUTE_ENCRYPT_ENABLED))
        {
            // Zero out the entire RPC message header to clear the state of previous chunk.
            portMemSet(vgpu_rpc_message_header_v, 0, sizeof(rpc_message_header_v));
        }

        portMemCopy(rpc_message, entryLength, pBuf8, entryLength);

        // Set the correct length for this queue entry.
        vgpu_rpc_message_header_v->length = entryLength + sizeof(rpc_message_header_v);
        vgpu_rpc_message_header_v->function = NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD;

        nvStatus = rpcSendMessage(pGpu, pRpc);
        if (nvStatus != NV_OK)
        {
            NV_PRINTF(LEVEL_ERROR,
                      "rpcSendMessage failed with status 0x%08x for fn %d continuation record (remainingSize=0x%x)!\n",
                      nvStatus, expectedFunc, remainingSize);
            NV_ASSERT(0);
            //
            // It has been observed that returning NV_ERR_BUSY_RETRY in a bad state (RPC
            // buffers full and not being serviced) can make things worse, i.e. turn RPC
            // failures into app hangs such that even nvidia-bug-report.sh gets stuck.
            // Avoid this for now while still returning the correct error in other cases.
            //
            return (nvStatus == NV_ERR_BUSY_RETRY) ? NV_ERR_GENERIC : nvStatus;
        }

        remainingSize -= entryLength;
        pBuf8 += entryLength;
        recordCount++;
    }

    if (!bWait)
    {
        // In case of Async RPC, we are done here.
        return nvStatus;
    }

    // Always receive at least one..
    nvStatus = rpcRecvPoll(pGpu, pRpc, expectedFunc);
    if (nvStatus != NV_OK)
    {
        if (nvStatus == NV_ERR_TIMEOUT)
        {
            NV_PRINTF(LEVEL_ERROR, "rpcRecvPoll timedout for fn %d!\n",
                      vgpu_rpc_message_header_v->function);
        }
        else
        {
            NV_PRINTF(LEVEL_ERROR, "rpcRecvPoll failed with status 0x%08x for fn %d!\n",
                      nvStatus, vgpu_rpc_message_header_v->function);
        }
        NV_ASSERT(0);
        return nvStatus;
    }

    // Rewind and copy the first reply frame back (bidirectional only),
    // clamping to both the caller's buffer and the advertised frame length.
    pBuf8 = (NvU8 *)pBuffer;
    remainingSize = bufSize;
    entryLength = NV_MIN(bufSize, vgpu_rpc_message_header_v->length);
    NV_CHECK_OR_RETURN(LEVEL_ERROR, entryLength <= pRpc->maxRpcSize, NV_ERR_INVALID_STATE);

    if (((NvU8 *)vgpu_rpc_message_header_v != pBuf8) && bBidirectional)
        portMemCopy(pBuf8, entryLength, vgpu_rpc_message_header_v, entryLength);

    remainingSize -= entryLength;
    pBuf8 += entryLength;

    // For bidirectional transfer messages, need to receive all other frames as well
    if (bBidirectional && (recordCount > 0))
    {
        while (remainingSize > 0)
        {
            nvStatus = rpcRecvPoll(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD);
            if (nvStatus != NV_OK)
            {
                if (nvStatus == NV_ERR_TIMEOUT)
                {
                    NV_PRINTF(LEVEL_ERROR,
                              "rpcRecvPoll timedout for fn %d continuation record (remainingSize=0x%x)!\n",
                              vgpu_rpc_message_header_v->function, remainingSize);
                }
                else
                {
                    NV_PRINTF(LEVEL_ERROR,
                              "rpcRecvPoll failed with status 0x%08x for fn %d continuation record! (remainingSize=0x%x)\n",
                              nvStatus, vgpu_rpc_message_header_v->function, remainingSize);
                }
                NV_ASSERT(0);
                return nvStatus;
            }

            // Validate the frame length before trusting it for the copy below.
            entryLength = vgpu_rpc_message_header_v->length;
            NV_CHECK_OR_RETURN(LEVEL_ERROR, entryLength <= pRpc->maxRpcSize, NV_ERR_INVALID_STATE);
            NV_CHECK_OR_RETURN(LEVEL_ERROR, entryLength >= sizeof(rpc_message_header_v), NV_ERR_INVALID_STATE);
            entryLength -= sizeof(rpc_message_header_v);

            if (entryLength > remainingSize)
                entryLength = remainingSize;

            portMemCopy(pBuf8, entryLength, rpc_message, entryLength);
            remainingSize -= entryLength;
            pBuf8 += entryLength;
            recordCount--;
        }
        // Restore the original function code clobbered by continuation frames.
        vgpu_rpc_message_header_v->function = expectedFunc;
        NV_ASSERT(recordCount == 0);
    }

    // Now check if RPC really succeeded
    if (vgpu_rpc_message_header_v->rpc_result != NV_VGPU_MSG_RESULT_SUCCESS)
    {
        NV_PRINTF(LEVEL_WARNING, "RPC failed with status 0x%08x for fn %d!\n",
                  vgpu_rpc_message_header_v->rpc_result,
                  vgpu_rpc_message_header_v->function);

        if (vgpu_rpc_message_header_v->rpc_result < DRF_BASE(NV_VGPU_MSG_RESULT__VMIOP))
            return vgpu_rpc_message_header_v->rpc_result;

        return NV_ERR_GENERIC;
    }

    return NV_OK;
}

// Synchronous large RPC: send all frames, then wait for (and optionally
// collect) the reply.
static NV_STATUS _issueRpcAndWaitLarge
(
    OBJGPU *pGpu,
    OBJRPC *pRpc,
    NvU32 bufSize,
    const void *pBuffer,
    NvBool bBidirectional
)
{
    return _issueRpcLarge(pGpu, pRpc, bufSize, pBuffer,
                          bBidirectional,
                          NV_TRUE); //bWait
}

// Asynchronous large RPC: send all frames and return without waiting.
static NV_STATUS _issueRpcAsyncLarge
(
    OBJGPU *pGpu,
    OBJRPC *pRpc,
    NvU32 bufSize,
    const void *pBuffer
)
{
    return _issueRpcLarge(pGpu, pRpc, bufSize, pBuffer,
                          NV_FALSE, //bBidirectional
                          NV_FALSE); //bWait
}

//
// Append a pte_desc page list at offsetToPTE in the currently staged RPC
// message and issue it synchronously.  When the complete record would exceed
// maxRpcSize, the staged message is copied into a temporary allocation large
// enough for all PTEs and sent via _issueRpcAndWaitLarge().
//
static NV_STATUS _issuePteDescRpc
(
    OBJGPU *pGpu,
    OBJRPC *pRpc,
    NvU32 offsetToPTE,
    NvU32 pageCount,
    RmPhysAddr *guestPages,
    NvBool physicallyContiguous
)
{

rpc_message_header_v *pHdr = vgpu_rpc_message_header_v;
    void *pAllocatedRecord = NULL;
    struct pte_desc *pPteDesc;
    NvU64 contigBase;
    NV_STATUS nvStatus = NV_OK;
    NvU32 recordSize;
    NvU32 i;
    DMA_PAGE_ARRAY pageArray;

    NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT);
    NV_ASSERT_OR_RETURN(pRpc != NULL, NV_ERR_INVALID_ARGUMENT);
    NV_ASSERT_OR_RETURN(guestPages != NULL, NV_ERR_INVALID_ARGUMENT);
    NV_ASSERT_OR_RETURN(pHdr != NULL, NV_ERR_INVALID_ARGUMENT);

    // Total record size = message prefix + pte_desc header + one 64-bit PTE
    // per page.
    recordSize = offsetToPTE + NV_OFFSETOF(struct pte_desc, pte_pde[0].pte) +
                 (pageCount * NV_VGPU_PTE_64_SIZE);

    if (recordSize > pRpc->maxRpcSize)
    {
        // Multiple queue entries. Create a temporary buffer for the PTEs.
        pAllocatedRecord = portMemAllocNonPaged(recordSize);
        if (pAllocatedRecord == NULL)
        {
            NV_PRINTF(LEVEL_ERROR, "no memory for allocated record\n");
            return NV_ERR_INSUFFICIENT_RESOURCES;
        }

        // Copy in the record so far.
        portMemCopy(pAllocatedRecord, pHdr->length, pHdr, pHdr->length);

        // Point to the allocated record.
        pHdr = (rpc_message_header_v *)pAllocatedRecord;
    }

    dmaPageArrayInit(&pageArray, guestPages, pageCount);

    // Fill in the pte_desc: page count plus one page-frame number per page.
    pPteDesc = (struct pte_desc *)NvP64_PLUS_OFFSET(pHdr, offsetToPTE);
    pPteDesc->idr = NV_VGPU_PTEDESC_IDR_NONE;
    pPteDesc->length = pageCount;
    contigBase = (dmaPageArrayGetPhysAddr(&pageArray, 0) >> RM_PAGE_SHIFT);

    for (i = 0; i < pageCount; i++)
    {
        // Contiguous allocations are described from the base frame; otherwise
        // each page's physical address is looked up individually.
        if (physicallyContiguous)
            pPteDesc->pte_pde[i].pte = contigBase + i;
        else
            pPteDesc->pte_pde[i].pte =
                (dmaPageArrayGetPhysAddr(&pageArray, i) >> RM_PAGE_SHIFT);
    }

    nvStatus = _issueRpcAndWaitLarge(pGpu, pRpc, recordSize, pHdr, NV_FALSE);

    // portMemFree(NULL) is safe when no temporary record was allocated.
    portMemFree(pAllocatedRecord);

    return nvStatus;
}

//
// ALLOC_MEMORY (v13_01): describe pMemDesc to the host/GSP and, for GSP
// clients, append the guest page list as a pte_desc.
// NOTE(review): when !IS_GSP_CLIENT, the staged message is not issued here
// and NV_OK is returned — confirm the non-GSP path is handled elsewhere.
//
NV_STATUS rpcAllocMemory_v13_01(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, NvU32 hClass,
                                NvU32 flags, MEMORY_DESCRIPTOR *pMemDesc)
{
    NV_STATUS status = NV_OK;

    if (pMemDesc == NULL)
    {
        NV_PRINTF(LEVEL_ERROR,
                  "NVRM_RPC: AllocMemory: pMemDesc arg was NULL\n");
        return NV_ERR_GENERIC;
    }

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, sizeof(rpc_alloc_memory_v13_01));
    if (status != NV_OK)
        return status;

    rpc_message->alloc_memory_v13_01.hClient = hClient;
    rpc_message->alloc_memory_v13_01.hDevice = hDevice;
    rpc_message->alloc_memory_v13_01.hMemory = hMemory;
    rpc_message->alloc_memory_v13_01.hClass = hClass;
    rpc_message->alloc_memory_v13_01.flags = flags;
    rpc_message->alloc_memory_v13_01.pteAdjust = pMemDesc->PteAdjust;
    rpc_message->alloc_memory_v13_01.format = memdescGetPteKind(pMemDesc);
    rpc_message->alloc_memory_v13_01.length = pMemDesc->Size;
    rpc_message->alloc_memory_v13_01.pageCount = (NvU32)pMemDesc->PageCount;

    if (IS_GSP_CLIENT(pGpu))
    {
        status = _issuePteDescRpc(pGpu, pRpc,
                                  NV_OFFSETOF(rpc_message_header_v, rpc_message_data[0].alloc_memory_v13_01.pteDesc),
                                  pMemDesc->PageCount,
                                  memdescGetPteArray(pMemDesc, AT_GPU),
                                  memdescGetContiguity(pMemDesc, AT_GPU));
    }

    return status;
}

//
// MAP_MEMORY_DMA (v03_00): synchronous mapping request; on success the
// host-chosen DMA offset is written back through pDmaOffset.
//
NV_STATUS rpcMapMemoryDma_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice, NvHandle hDma, NvHandle hMemory,
                                 NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset)
{
    NV_STATUS status;
    NVOS46_PARAMETERS_v03_00 *rpc_params = &rpc_message->map_memory_dma_v03_00.params;

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_MAP_MEMORY_DMA, sizeof(rpc_map_memory_dma_v03_00));
    if (status != NV_OK)
        return status;

    rpc_params->hClient = hClient;
    rpc_params->hDevice = hDevice;
    rpc_params->hDma = hDma;
    rpc_params->hMemory = hMemory;
    rpc_params->flags = flags;

    rpc_params->offset = offset;
    rpc_params->length = length;
    rpc_params->dmaOffset = *pDmaOffset;

    status = _issueRpcAndWait(pGpu, pRpc);

    if (status == NV_OK)
    {
        // Return the DMA offset chosen by the other side.
        *pDmaOffset = rpc_params->dmaOffset;
    }
    return status;
}

// UNMAP_MEMORY_DMA (v03_00): synchronous unmap of a prior DMA mapping.
NV_STATUS rpcUnmapMemoryDma_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice, NvHandle hDma,
                                   NvHandle hMemory, NvU32 flags, NvU64 pDmaOffset)
{
    NV_STATUS status;
    NVOS47_PARAMETERS_v03_00 *rpc_params = &rpc_message->unmap_memory_dma_v03_00.params;

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UNMAP_MEMORY_DMA, sizeof(rpc_unmap_memory_dma_v03_00));
    if (status != NV_OK)
        return status;

    rpc_params->hClient = hClient;
    rpc_params->hDevice = hDevice;
    rpc_params->hDma = hDma;
    rpc_params->hMemory = hMemory;
    rpc_params->flags = flags;
    rpc_params->dmaOffset = pDmaOffset;

    status = _issueRpcAndWait(pGpu, pRpc);
    return status;
}

/* max entries is how many 3 DWORD entries fit in what remains of the message_buffer */
#define IDLE_CHANNELS_MAX_ENTRIES_v03_00 \
    ((pRpc->maxRpcSize - \
(sizeof(rpc_message_header_v) + sizeof(rpc_idle_channels_v03_00))) / sizeof(idle_channel_list_v03_00))

//
// IDLE_CHANNELS (v03_00): ask the host to idle the given client/device/
// channel triples.  All entries must fit in a single message buffer
// (IDLE_CHANNELS_MAX_ENTRIES_v03_00).
//
NV_STATUS rpcIdleChannels_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle *phClients, NvHandle *phDevices, NvHandle *phChannels,
                                 NvU32 numEntries, NvU32 flags, NvU32 timeout)
{
    NV_STATUS status;
    NvU32 i;

    if (numEntries > IDLE_CHANNELS_MAX_ENTRIES_v03_00)
    {
        // unable to fit all the entries in the message buffer
        NV_PRINTF(LEVEL_ERROR,
                  "NVRM_RPC: IdleChannels: requested %u entries (but only room for %u)\n",
                  numEntries, (NvU32)IDLE_CHANNELS_MAX_ENTRIES_v03_00);
        return NV_ERR_GENERIC;
    }

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_IDLE_CHANNELS,
                                  sizeof(rpc_idle_channels_v03_00) + numEntries * sizeof(idle_channel_list_v03_00));
    if (status != NV_OK)
        return status;

    rpc_message->idle_channels_v03_00.flags = flags;
    rpc_message->idle_channels_v03_00.timeout = timeout;
    rpc_message->idle_channels_v03_00.nchannels = numEntries;

    for (i = 0; i < numEntries; i++)
    {
        rpc_message->idle_channels_v03_00.channel_list[i].phClient = ((NvU32) phClients[i]);
        rpc_message->idle_channels_v03_00.channel_list[i].phDevice = ((NvU32) phDevices[i]);
        rpc_message->idle_channels_v03_00.channel_list[i].phChannel = ((NvU32) phChannels[i]);
    }

    status = _issueRpcAndWait(pGpu, pRpc);
    return status;
}

//
// Perform the guest/host RPC version handshake and report guest driver
// identification (driver version string, build branch, driver title) via
// SET_GUEST_SYSTEM_INFO.  On a version mismatch the host writes its own
// version back into the message; if that version is still within the
// supported range the RPC is retried once with the host's version.  On
// success the negotiated version is cached in rpcVgxVersion and used to
// rebind the RPC HAL interfaces.
//
NV_STATUS RmRpcSetGuestSystemInfo(OBJGPU *pGpu, OBJRPC *pRpc)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
    NV_STATUS status = NV_OK;
    NvS32 message_buffer_remaining;
    NvU32 data_len;

    // First GPU being attached: reset the cached negotiated version.
    if (pGpuMgr->numGpuHandles == 0)
    {
        rpcVgxVersion.majorNum = 0;
        rpcVgxVersion.minorNum = 0;
    }

    //
    // Skip RPC version handshake if we've already done it on one GPU.
    //
    // For GSP: Multi GPU setup can have pre-Turing GPUs
    // and GSP offload is disabled for all pre-Turing GPUs.
    // Don't skip RPC version handshake for GSP_CLIENT or if VGPU-GSP plugin offload is enabled.
    // There are different GSPs/plugins for different GPUs and we need to have a handshake with all of them.
    //

    if (pGpuMgr->numGpuHandles > 1 && !IS_GSP_CLIENT(pGpu) && !(IS_VIRTUAL(pGpu) && IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu)))
    {
        if (rpcVgxVersion.majorNum != 0)
        {
            // In the PM resume codepath, perform the handshake once more
            // instead of skipping it (bSkipRpcVersionHandshake latches this).
            if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH) && !bSkipRpcVersionHandshake)
            {
                bSkipRpcVersionHandshake = NV_TRUE;
            }
            else
            {
                NV_PRINTF(LEVEL_INFO,
                          "NVRM_RPC: Skipping RPC version handshake for instance 0x%x\n",
                          gpuGetInstance(pGpu));
                goto skip_ver_handshake;
            }
        }
        else
        {
            // majorNum == 0 means a previous handshake never succeeded.
            status = NV_ERR_GENERIC;
            NV_PRINTF(LEVEL_ERROR,
                      "NVRM_RPC: RPC version handshake already failed. Bailing out for device"
                      " instance 0x%x\n", gpuGetInstance(pGpu));
            goto skip_ver_handshake;
        }
    }

    message_buffer_remaining = pRpc->maxRpcSize - (sizeof(rpc_message_header_v) +
                                                   sizeof(rpc_set_guest_system_info_v));

    if (message_buffer_remaining < 0)
    {
        // unable to fit the data in the message buffer
        NV_PRINTF(LEVEL_ERROR,
                  "NVRM_RPC: SetGuestSystemInfo: Insufficient space on message buffer\n");

        return NV_ERR_BUFFER_TOO_SMALL;
    }

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO,
                                  sizeof(rpc_set_guest_system_info_v));
    if (status != NV_OK)
        return status;

    // Each identification string must fit the fixed-size buffer in the
    // message; otherwise bail out rather than truncate.
    if(sizeof(NV_VERSION_STRING) < NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE)
    {
        data_len = NV_ROUNDUP((NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE), sizeof(NvU32));
        rpc_message->set_guest_system_info_v.guestDriverVersionBufferLength = data_len;
        portStringCopy(rpc_message->set_guest_system_info_v.guestDriverVersion,
                       sizeof(rpc_message->set_guest_system_info_v.guestDriverVersion),
                       (const char*)NV_VERSION_STRING, data_len);
    }
    else
    {
        return NV_ERR_BUFFER_TOO_SMALL;
    }

    if(sizeof(NV_BUILD_BRANCH_VERSION) < NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE)
    {
        data_len = NV_ROUNDUP((NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE), sizeof(NvU32));
        rpc_message->set_guest_system_info_v.guestVersionBufferLength = data_len;
        portStringCopy(rpc_message->set_guest_system_info_v.guestVersion,
                       sizeof(rpc_message->set_guest_system_info_v.guestVersion),
                       (const char*)NV_BUILD_BRANCH_VERSION, data_len);
    }
    else
    {
        return NV_ERR_BUFFER_TOO_SMALL;
    }

    if (sizeof(NV_DISPLAY_DRIVER_TITLE) < NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE)
    {
        data_len = NV_ROUNDUP((NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE), sizeof(NvU32));
        rpc_message->set_guest_system_info_v.guestTitleBufferLength = data_len;
        portStringCopy(rpc_message->set_guest_system_info_v.guestTitle,
                       sizeof(rpc_message->set_guest_system_info_v.guestTitle),
                       (const char*)NV_DISPLAY_DRIVER_TITLE, data_len);
    }
    else
    {
        return NV_ERR_BUFFER_TOO_SMALL;
    }

    rpc_message->set_guest_system_info_v.guestClNum = NV_BUILD_CHANGELIST_NUM;
    rpc_message->set_guest_system_info_v.vgxVersionMajorNum = VGX_MAJOR_VERSION_NUMBER;
    rpc_message->set_guest_system_info_v.vgxVersionMinorNum = VGX_MINOR_VERSION_NUMBER;

    status = _issueRpcAndWait(pGpu, pRpc);

    if ((status == NV_OK) && (vgpu_rpc_message_header_v->rpc_result_private != NV_OK))
    {
        status = vgpu_rpc_message_header_v->rpc_result_private;
        // The host rejected our version and wrote its own into the message.
        if ((rpc_message->set_guest_system_info_v.vgxVersionMajorNum != VGX_MAJOR_VERSION_NUMBER) ||
            (rpc_message->set_guest_system_info_v.vgxVersionMinorNum != VGX_MINOR_VERSION_NUMBER))
        {
            if (RPC_VERSION_FROM_VGX_VERSION(rpc_message->set_guest_system_info_v.vgxVersionMajorNum,
                                             rpc_message->set_guest_system_info_v.vgxVersionMinorNum) >=
                RPC_VERSION_FROM_VGX_VERSION(NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MAJOR,
                                             NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MINOR))
            {
                // Host version is still within our supported range: retry
                // once with the version the host asked for (already in the
                // message buffer).
                NV_PRINTF(LEVEL_WARNING,
                          "NVRM_RPC: SetGuestSystemInfo: Guest VGX version (%d.%d) is newer than "
                          "the host VGX version (%d.%d)\n"
                          "NVRM_RPC: SetGuestSystemInfo: Retrying with the VGX version requested "
                          "by the host.\n", VGX_MAJOR_VERSION_NUMBER,
                          VGX_MINOR_VERSION_NUMBER,
                          rpc_message->set_guest_system_info_v.vgxVersionMajorNum,
                          rpc_message->set_guest_system_info_v.vgxVersionMinorNum);
                status = _issueRpcAndWait(pGpu, pRpc);
            }
            else
            {
                NV_PRINTF(LEVEL_ERROR,
                          "NVRM_RPC: SetGuestSystemInfo: The host version (%d.%d) is too old.\n"
                          "NVRM_RPC: SetGuestSystemInfo: Minimum required host version is %d.%d.\n",
                          rpc_message->set_guest_system_info_v.vgxVersionMajorNum,
                          rpc_message->set_guest_system_info_v.vgxVersionMinorNum,
                          NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MAJOR,
                          NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MINOR);

                NV_RM_RPC_LOG(pGpu, "######## Guest NVIDIA Driver Information: ########", NV_VGPU_LOG_LEVEL_NOTICE);
                NV_RM_RPC_LOG(pGpu, "Driver Version: "NV_VERSION_STRING, NV_VGPU_LOG_LEVEL_NOTICE);
                NV_RM_RPC_LOG(pGpu, "Incompatible Guest/Host drivers: Host VGX version is older than the minimum version "
                              "supported by the Guest. Disabling vGPU.", NV_VGPU_LOG_LEVEL_ERROR);
            }
        }
    }

    if (status == NV_OK)
    {
        // All GPUs sharing this cache must negotiate the same version.
        if (rpcVgxVersion.majorNum != 0)
        {
            if (rpcVgxVersion.majorNum != rpc_message->set_guest_system_info_v.vgxVersionMajorNum ||
                rpcVgxVersion.minorNum != rpc_message->set_guest_system_info_v.vgxVersionMinorNum)
            {
                return NV_ERR_INVALID_STATE;
            }
        }

        rpcVgxVersion.majorNum = rpc_message->set_guest_system_info_v.vgxVersionMajorNum;
        rpcVgxVersion.minorNum = rpc_message->set_guest_system_info_v.vgxVersionMinorNum;
    }

skip_ver_handshake:
    if (status == NV_OK)
    {
        // Rebind the RPC HAL interfaces to the negotiated version, then send
        // the extended guest-system-info RPC.
        rpcSetIpVersion(pGpu, pRpc,
                        RPC_VERSION_FROM_VGX_VERSION(rpcVgxVersion.majorNum,
                                                     rpcVgxVersion.minorNum));

        NV_RM_RPC_SET_GUEST_SYSTEM_INFO_EXT(pGpu, status);

        if (status != NV_OK)
        {
            NV_PRINTF(LEVEL_ERROR, "SET_GUEST_SYSTEM_INFO_EXT : failed.\n");
        }
    }

    return status;
}

//
// UNLOADING_GUEST_DRIVER (v03_00): legacy variant with an empty payload; the
// PM-transition arguments are not part of this message version and are
// intentionally unused.
//
NV_STATUS rpcUnloadingGuestDriver_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvBool bInPMTransition, NvBool bGc6Entering, NvU32 newPMLevel)
{
    NV_STATUS status = NV_OK;

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, 0);
    if (status != NV_OK)
        return status;

    status = _issueRpcAndWait(pGpu, pRpc);

    return status;
}


//
// UNLOADING_GUEST_DRIVER (v1F_07): carries the PM transition state, GC6
// entry flag, and target PM level to the host.
//
NV_STATUS rpcUnloadingGuestDriver_v1F_07(OBJGPU *pGpu, OBJRPC *pRpc, NvBool bInPMTransition, NvBool bGc6Entering, NvU32 newPMLevel)
{
    NV_STATUS status = NV_OK;
    NvU32 headerLength = sizeof(rpc_message_header_v) + sizeof(rpc_unloading_guest_driver_v1F_07);
    if (headerLength > pRpc->maxRpcSize)
    {
        NV_PRINTF(LEVEL_ERROR,
                  "Unloading guest driver parameters size (0x%x) exceed message_buffer "
                  "size (0x%x)\n", headerLength, pRpc->maxRpcSize);

        NV_ASSERT(0);
        return NV_ERR_INSUFFICIENT_RESOURCES;
    }

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER,
sizeof(rpc_unloading_guest_driver_v1F_07));
    if (status != NV_OK)
        return status;
    rpc_message->unloading_guest_driver_v1F_07.bInPMTransition = bInPMTransition;
    rpc_message->unloading_guest_driver_v1F_07.bGc6Entering = bGc6Entering;
    rpc_message->unloading_guest_driver_v1F_07.newLevel = newPMLevel;

    status = _issueRpcAndWait(pGpu, pRpc);

    return status;
}

//
// GPU_EXEC_REG_OPS (v12_01): execute register read/write operations on the
// host, batching at most VGPU_MAX_REGOPS_PER_RPC operations per RPC message.
// On success each batch's regStatus/regValueHi/regValueLo results are copied
// back into the caller's pRegOps array.
//
// NOTE(review): if _issueRpcAndWait() fails for one batch, the loop still
// advances and attempts the remaining batches (only the per-batch private
// result triggers the "skipping" message) — confirm this is intended rather
// than breaking out on the first transport failure.
//
NV_STATUS rpcGpuExecRegOps_v12_01(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hObject,
                                  NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS *pParams,
                                  NV2080_CTRL_GPU_REG_OP *pRegOps)
{
    NV_STATUS status;
    NvU32 i, j, regOpsExecuted = 0;

    if (pParams == NULL)
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    if (pParams->regOpCount == 0)
    {
        NV_PRINTF(LEVEL_ERROR,"RegOps RPC failed: Invalid regOp count - requested 0x%x regOps\n", pParams->regOpCount);
        return NV_ERR_INVALID_ARGUMENT;
    }

    /* RPC message buffer can accomodate a maximum of VGPU_MAX_REGOPS_PER_RPC regops only.
     * This value must be adjusted(if required) in case of any change to the internal
     * RegOps RPC structures.
     */
    if (pRpc->maxRpcSize <
        (sizeof(rpc_message_header_v) +
         sizeof(rpc_gpu_exec_reg_ops_v12_01) +
         VGPU_MAX_REGOPS_PER_RPC * sizeof(NV2080_CTRL_GPU_REG_OP_v03_00))) {
        NV_PRINTF(LEVEL_ERROR,
                  "NVRM_RPC: rpcGpuExecRegOps_v12_01: Insufficient space on message buffer\n");
        return NV_ERR_BUFFER_TOO_SMALL;
    }

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_GPU_EXEC_REG_OPS,
                                  sizeof(rpc_gpu_exec_reg_ops_v12_01));
    if (status != NV_OK)
        return status;

    rpc_message->gpu_exec_reg_ops_v12_01.hClient = hClient;
    rpc_message->gpu_exec_reg_ops_v12_01.hObject = hObject;

    // copy params into the message buffer
    rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.hClientTarget = pParams->hClientTarget;
    rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.hChannelTarget = pParams->hChannelTarget;
    rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.regOpCount = pParams->regOpCount;
    rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.grRouteInfo.flags = pParams->grRouteInfo.flags;
    rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.grRouteInfo.route = pParams->grRouteInfo.route;

    while (regOpsExecuted < pParams->regOpCount){
        // Stage the next batch of up to VGPU_MAX_REGOPS_PER_RPC operations.
        for (i = 0, j = regOpsExecuted; i < VGPU_MAX_REGOPS_PER_RPC && j < pParams->regOpCount; i++, j++)
        {
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regOp = pRegOps[j].regOp;
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regType = pRegOps[j].regType;
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regStatus = pRegOps[j].regStatus;
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regQuad = pRegOps[j].regQuad;
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regGroupMask = pRegOps[j].regGroupMask;
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regSubGroupMask = pRegOps[j].regSubGroupMask;
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regOffset = pRegOps[j].regOffset;
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regValueHi = pRegOps[j].regValueHi;
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regValueLo = pRegOps[j].regValueLo;
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regAndNMaskHi = pRegOps[j].regAndNMaskHi;
            rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regAndNMaskLo = pRegOps[j].regAndNMaskLo;
        }
        // This batch's actual operation count (may be smaller for the tail).
        rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.regOpCount = i;

        status = _issueRpcAndWait(pGpu, pRpc);

        if (status == NV_OK)
        {
            status = vgpu_rpc_message_header_v->rpc_result_private;
            if (status == NV_OK)
            {
                // Copy per-operation results back to the caller.
                for (i = 0, j = regOpsExecuted; i < VGPU_MAX_REGOPS_PER_RPC && j < pParams->regOpCount; i++, j++)
                {
                    pRegOps[j].regStatus = rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regStatus;
                    pRegOps[j].regValueHi = rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regValueHi;
                    pRegOps[j].regValueLo = rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regValueLo;
                }
            }
            else
            {
                NV_PRINTF(LEVEL_ERROR,"RegOps RPC failed: skipping 0x%x regOps\n", pParams->regOpCount - regOpsExecuted);
            }
        }
        regOpsExecuted = j;
    }

    return status;
}

// Stub: no payload to send for this GET_STATIC_INFO version; returns NV_OK.
NV_STATUS rpcGetStaticInfo_v17_05(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

// Stub: no payload to send for this GET_STATIC_INFO version; returns NV_OK.
NV_STATUS rpcGetStaticInfo_v18_03(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

// Stub: no payload to send for this GET_STATIC_INFO version; returns NV_OK.
NV_STATUS rpcGetStaticInfo_v18_04(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

// Stub: no payload to send for this GET_STATIC_INFO version; returns NV_OK.
NV_STATUS rpcGetStaticInfo_v18_0E(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

NV_STATUS rpcGetStaticInfo_v18_10(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

// No-op stub: returns NV_OK without issuing an RPC for this version.
NV_STATUS rpcGetStaticInfo_v18_11(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

// No-op stub: returns NV_OK without issuing an RPC for this version.
NV_STATUS rpcGetStaticInfo_v18_13(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

// No-op stub: returns NV_OK without issuing an RPC for this version.
NV_STATUS rpcGetStaticInfo_v18_16(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

// No-op stub: returns NV_OK without issuing an RPC for this version.
NV_STATUS rpcGetStaticInfo_v19_00(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

// No-op stub: returns NV_OK without issuing an RPC for this version.
NV_STATUS rpcGetStaticInfo_v1A_00(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

// No-op stub: returns NV_OK without issuing an RPC for this version.
NV_STATUS rpcGetStaticInfo_v1A_05(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

// No-op stub: returns NV_OK without issuing an RPC for this version.
NV_STATUS rpcGetStaticInfo_v20_01(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_OK;
    return status;
}

//
// Fetch the GspStaticConfigInfo blob from GSP-RM (synchronous RPC) and copy
// it into the GPU's cached static-info structure. GSP-client only; returns
// NV_ERR_NOT_SUPPORTED otherwise.
//
NV_STATUS rpcGetGspStaticInfo_v14_00(OBJGPU *pGpu, OBJRPC *pRpc)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    if (IS_GSP_CLIENT(pGpu))
    {
        NvU32 headerLength;
        GspStaticConfigInfo *pSCI = GPU_GET_GSP_STATIC_INFO(pGpu);
        // The reply payload is interpreted in place as a GspStaticConfigInfo.
        GspStaticConfigInfo *rpcInfo = (GspStaticConfigInfo *)&rpc_message->get_gsp_static_info_v14_00.data;

        NV_ASSERT_OR_RETURN(pSCI, NV_ERR_INVALID_POINTER);

        // Guard against the payload not fitting in the RPC message buffer.
        headerLength = sizeof(rpc_message_header_v) +
                       sizeof(GspStaticConfigInfo);
        if (headerLength > pRpc->maxRpcSize)
        {
            NV_PRINTF(LEVEL_ERROR,
                      "Gsp static info parameters size (0x%x) exceed message_buffer size (0x%x)\n",
                      headerLength, pRpc->maxRpcSize);

            NV_ASSERT(0);
            return NV_ERR_INSUFFICIENT_RESOURCES;
        }

        status = rpcWriteCommonHeader(pGpu, pRpc,
                                      NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO,
                                      sizeof(GspStaticConfigInfo));
        if (status != NV_OK)
            return status;

        status = _issueRpcAndWait(pGpu, pRpc);
        NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, status);

        // Copy info
        portMemCopy(pSCI, sizeof(*pSCI), rpcInfo, sizeof(*rpcInfo));
    }

    return status;
}

//
// Tell GSP-RM to update a BAR page-directory entry (synchronous RPC).
// GSP-client only; returns NV_ERR_NOT_SUPPORTED otherwise.
//
NV_STATUS rpcUpdateBarPde_v15_00(OBJGPU *pGpu, OBJRPC *pRpc, NV_RPC_UPDATE_PDE_BAR_TYPE barType, NvU64 entryValue, NvU64 entryLevelShift)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    if (IS_GSP_CLIENT(pGpu))
    {
        UpdateBarPde_v15_00 *rpc_params = &rpc_message->update_bar_pde_v15_00.info;

        status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE,
                                      sizeof(rpc_update_bar_pde_v15_00));
        if (status != NV_OK)
        {
            return status;
        }

        rpc_params->barType = barType;
        rpc_params->entryValue = entryValue;
        rpc_params->entryLevelShift = entryLevelShift;

        status = _issueRpcAndWait(pGpu, pRpc);
    }

    return status;
}

//
// Forward NV0080_CTRL_DMA_SET_PAGE_DIRECTORY to the host (synchronous RPC).
// On Hyper-V only a vidmem-aperture page directory is allowed.
//
NV_STATUS rpcSetPageDirectory_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice,
                                     NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams)
{
    NV_STATUS status = NV_OK;
    NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v03_00 *rpc_params = &rpc_message->set_page_directory_v03_00.params;

    if (hypervisorIsType(OS_HYPERVISOR_HYPERV))
    {
        // Reject non-vidmem apertures on Hyper-V.
        if (!FLD_TEST_DRF(0080, _CTRL_DMA_SET_PAGE_DIRECTORY, _FLAGS_APERTURE, _VIDMEM, pParams->flags))
        {
            NV_ASSERT(0);
            return NV_ERR_NOT_SUPPORTED;
        }
    }

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_SET_PAGE_DIRECTORY, sizeof(rpc_set_page_directory_v03_00));
    if (status != NV_OK)
        return status;

    rpc_message->set_page_directory_v03_00.hClient = hClient;
    rpc_message->set_page_directory_v03_00.hDevice = hDevice;

    rpc_params->physAddress = pParams->physAddress;
    rpc_params->numEntries = pParams->numEntries;
    rpc_params->flags = pParams->flags;
    rpc_params->hVASpace = pParams->hVASpace;
    rpc_params->chId = pParams->chId;
    rpc_params->subDeviceId = pParams->subDeviceId;

    status = _issueRpcAndWait(pGpu, pRpc);

    return status;

}

//
// Forward NV0080_CTRL_DMA_SET_PAGE_DIRECTORY to the host (synchronous RPC).
// v1E_05 additionally carries the pasid field.
//
NV_STATUS rpcSetPageDirectory_v1E_05(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice,
                                     NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams)
{
    NV_STATUS status = NV_OK;
    NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05 *rpc_params = &rpc_message->set_page_directory_v1E_05.params;

    if (hypervisorIsType(OS_HYPERVISOR_HYPERV))
    {
        // Reject non-vidmem apertures on Hyper-V.
        if (!FLD_TEST_DRF(0080, _CTRL_DMA_SET_PAGE_DIRECTORY, _FLAGS_APERTURE, _VIDMEM, pParams->flags))
        {
            NV_ASSERT(0);
            return NV_ERR_NOT_SUPPORTED;
        }
    }

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_SET_PAGE_DIRECTORY, sizeof(rpc_set_page_directory_v1E_05));
    if (status != NV_OK)
        return status;

    rpc_message->set_page_directory_v1E_05.hClient = hClient;
    rpc_message->set_page_directory_v1E_05.hDevice = hDevice;

    rpc_params->physAddress = pParams->physAddress;
    rpc_params->numEntries = pParams->numEntries;
    rpc_params->flags = pParams->flags;
    rpc_params->hVASpace = pParams->hVASpace;
    rpc_params->chId = pParams->chId;
    rpc_params->subDeviceId = pParams->subDeviceId;
    rpc_params->pasid = pParams->pasid;

    status = _issueRpcAndWait(pGpu, pRpc);

    return status;

}

//
// Forward NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY to the host (synchronous RPC).
//
NV_STATUS rpcUnsetPageDirectory_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice,
                                       NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams)
{
    NV_STATUS status = NV_OK;
    NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v03_00 *rpc_params = &rpc_message->unset_page_directory_v03_00.params;

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UNSET_PAGE_DIRECTORY, sizeof(rpc_unset_page_directory_v03_00));
    if (status != NV_OK)
        return status;

    rpc_message->unset_page_directory_v03_00.hClient = hClient;
    rpc_message->unset_page_directory_v03_00.hDevice = hDevice;

    rpc_params->hVASpace = pParams->hVASpace;
    rpc_params->subDeviceId = pParams->subDeviceId;

    status = _issueRpcAndWait(pGpu, pRpc);

    return status;
}

//
// Forward NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY to the host (synchronous RPC).
// v1E_05 variant of the message structure.
//
NV_STATUS rpcUnsetPageDirectory_v1E_05(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice,
                                       NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams)
{
    NV_STATUS status = NV_OK;
    NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05 *rpc_params = &rpc_message->unset_page_directory_v1E_05.params;

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UNSET_PAGE_DIRECTORY, sizeof(rpc_unset_page_directory_v1E_05));
    if (status != NV_OK)
        return status;

    rpc_message->unset_page_directory_v1E_05.hClient = hClient;
    rpc_message->unset_page_directory_v1E_05.hDevice = hDevice;

    rpc_params->hVASpace = pParams->hVASpace;
    rpc_params->subDeviceId = pParams->subDeviceId;

    status = _issueRpcAndWait(pGpu, pRpc);

    return status;
}

//
// Read a 32-bit PF register on behalf of the guest (synchronous RPC).
// *value is written only when the RPC succeeds.
//
NV_STATUS rpcVgpuPfRegRead32_v15_00(OBJGPU *pGpu,
                                    OBJRPC *pRpc,
                                    NvU64 address,
                                    NvU32 *value,
                                    NvU32 grEngId)
{

    NV_STATUS status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_VGPU_PF_REG_READ32,
                                            sizeof(rpc_vgpu_pf_reg_read32_v15_00));
    if (status != NV_OK)
        return status;

    rpc_message->vgpu_pf_reg_read32_v15_00.address = address;
    rpc_message->vgpu_pf_reg_read32_v15_00.grEngId = grEngId;

    status = _issueRpcAndWait(pGpu, pRpc);

    if (status == NV_OK)
    {
        *value = rpc_message->vgpu_pf_reg_read32_v15_00.value;
    }

    return status;
}

/*
 * Tells GSP-RM about the overall system environment, such as what physical
 * memory addresses to use.
 *
 * Note that this is an asynchronous RPC.  It is stuffed into the message queue
 * before the GSP is booted.
 */
NV_STATUS rpcGspSetSystemInfo_v17_00
(
    OBJGPU *pGpu,
    OBJRPC *pRpc
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    if (IS_GSP_CLIENT(pGpu))
    {
        OBJSYS *pSys = SYS_GET_INSTANCE();
        OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
        // The payload is built in place in the message buffer.
        GspSystemInfo *rpcInfo = (GspSystemInfo *)&rpc_message->gsp_set_system_info_v17_00.data;
        const NvU32 messageLength = sizeof(rpc_message_header_v) + sizeof(*rpcInfo);

        // Guard against the payload not fitting in the RPC message buffer.
        if (messageLength > pRpc->maxRpcSize)
        {
            NV_PRINTF(LEVEL_ERROR,
                      "GSP_SET_SYSTEM_INFO parameters size (0x%x) exceed message_buffer size (0x%x)\n",
                      messageLength, pRpc->maxRpcSize);

            NV_ASSERT(0);
            return NV_ERR_INSUFFICIENT_RESOURCES;
        }

        status = rpcWriteCommonHeader(pGpu, pRpc,
                                      NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO,
                                      sizeof(GspSystemInfo));
        if (status != NV_OK)
            return status;

        // Physical addresses GSP-RM needs to locate the GPU.
        rpcInfo->gpuPhysAddr           = pGpu->busInfo.gpuPhysAddr;
        rpcInfo->gpuPhysFbAddr         = pGpu->busInfo.gpuPhysFbAddr;
        rpcInfo->gpuPhysInstAddr       = pGpu->busInfo.gpuPhysInstAddr;
        rpcInfo->nvDomainBusDeviceFunc = pGpu->busInfo.nvDomainBusDeviceFunc;
        rpcInfo->oorArch               = (NvU8)pGpu->busInfo.oorArch;

        KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
        if (pKernelBif != NULL)
        {
            NV_ASSERT_OK(kbifGetPciConfigSpacePriMirror_HAL(pGpu, pKernelBif,
                                                            &rpcInfo->pciConfigMirrorBase,
                                                            &rpcInfo->pciConfigMirrorSize));

            // Cache MNOC interface support
            rpcInfo->bMnocAvailable = pKernelBif->bMnocAvailable;
        }

        if (IS_SIMULATION(pGpu))
        {
            KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu);
            rpcInfo->simAccessBufPhysAddr = memdescGetPhysAddr(pKernelGsp->pMemDesc_simAccessBuf, AT_CPU, 0);
        }
        else
        {
            rpcInfo->simAccessBufPhysAddr = 0;
        }
        rpcInfo->pcieAtomicsOpMask = GPU_GET_KERNEL_BIF(pGpu) ?
                                     GPU_GET_KERNEL_BIF(pGpu)->osPcieAtomicsOpMask : 0U;
        rpcInfo->consoleMemSize = GPU_GET_MEMORY_MANAGER(pGpu)->Ram.ReservedConsoleDispMemSize;
        rpcInfo->maxUserVa      = osGetMaxUserVa();

        // Let the chipset layer populate its portion of the system info.
        OBJCL *pCl = SYS_GET_CL(SYS_GET_INSTANCE());
        if (pCl != NULL)
        {
            clSyncWithGsp(pCl, rpcInfo);
        }

        // Fill in the cached ACPI method data
        rpcInfo->acpiMethodData = pGpu->acpiMethodData;

        // Fill in ASPM related GPU flags
        rpcInfo->bGpuBehindBridge         = pGpu->getProperty(pGpu, PDB_PROP_GPU_BEHIND_BRIDGE);
        rpcInfo->bUpstreamL0sUnsupported  = pGpu->getProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED);
        rpcInfo->bUpstreamL1Unsupported   = pGpu->getProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED);
        rpcInfo->bUpstreamL1PorSupported  = pGpu->getProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED);
        rpcInfo->bUpstreamL1PorMobileOnly = pGpu->getProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY);
        rpcInfo->upstreamAddressValid     = pGpu->gpuClData.upstreamPort.addr.valid;

        rpcInfo->hypervisorType           = hypervisorGetHypervisorType(pHypervisor);
        rpcInfo->bIsPassthru              = pGpu->bIsPassthru;

        // Fill in VF related GPU flags
        rpcInfo->gspVFInfo.totalVFs           = pGpu->sriovState.totalVFs;
        rpcInfo->gspVFInfo.firstVFOffset      = pGpu->sriovState.firstVFOffset;
        rpcInfo->gspVFInfo.FirstVFBar0Address = pGpu->sriovState.firstVFBarAddress[0];
        rpcInfo->gspVFInfo.FirstVFBar1Address = pGpu->sriovState.firstVFBarAddress[1];
        rpcInfo->gspVFInfo.FirstVFBar2Address = pGpu->sriovState.firstVFBarAddress[2];
        rpcInfo->gspVFInfo.b64bitBar0         = pGpu->sriovState.b64bitVFBar0;
        rpcInfo->gspVFInfo.b64bitBar1         = pGpu->sriovState.b64bitVFBar1;
        rpcInfo->gspVFInfo.b64bitBar2         = pGpu->sriovState.b64bitVFBar2;

        OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
        rpcInfo->sysTimerOffsetNs = pTmr->sysTimerOffsetNs;

        // Async: queued now, consumed by GSP after boot.
        status = _issueRpcAsync(pGpu, pRpc);
    }

    return status;
}

/*
 * Transfers registry entries from CPU-RM to GSP-RM during init.
 *
 * Note that this is an asynchronous RPC.  It is stuffed into the message queue
 * before the GSP is booted.
 */
NV_STATUS rpcSetRegistry_v17_00
(
    OBJGPU *pGpu,
    OBJRPC *pRpc
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    if (IS_GSP_CLIENT(pGpu))
    {
        NvU32 regTableSize = 0;
        NvU32 totalSize;
        NvU32 remainingMessageSize;
        PACKED_REGISTRY_TABLE *pRegTable;
        rpc_message_header_v *largeRpcBuffer = NULL;

        status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, 0);
        if (status != NV_OK)
            return status;

        remainingMessageSize = pRpc->maxRpcSize - sizeof(rpc_message_header_v);

        // Compute size of registry table
        status = osPackageRegistry(pGpu, NULL, &regTableSize);
        if (status != NV_OK)
            return status;

        //
        // SET_REGISTRY is async RPC. If registry table exceeds size of
        // message queue, we won't be able to send complete data and it's
        // time to evaluate alternate implementations. Some ways to resolve
        // this issue are use bigger queue, use sync RPC or allocate dedicated
        // memory for sharing regkey table with GSP-RM.
        //
        totalSize = sizeof(rpc_message_header_v) + regTableSize;
        NV_ASSERT(totalSize < pRpc->pMessageQueueInfo->commandQueueSize);

        // Find out if we need to issue large RPC
        if (regTableSize > remainingMessageSize)
        {
            largeRpcBuffer = portMemAllocNonPaged(totalSize);
            if (largeRpcBuffer == NULL)
                return NV_ERR_NO_MEMORY;

            // Seed the large buffer with the header already written above.
            portMemCopy(largeRpcBuffer, totalSize,
                        vgpu_rpc_message_header_v, sizeof(rpc_message_header_v));

            pRegTable = (PACKED_REGISTRY_TABLE *)(&largeRpcBuffer->rpc_message_data);
        }
        else
        {
            // NOTE(review): relies on rpc_message being a macro naming the
            // payload area after the header, so &rpc_message addresses the
            // payload start — confirm against the rpc_message definition.
            pRegTable = (PACKED_REGISTRY_TABLE *)&rpc_message;
        }

        // Second pass actually packs the registry into pRegTable.
        status = osPackageRegistry(pGpu, pRegTable, &regTableSize);
        if (status != NV_OK)
            goto fail;

        if (largeRpcBuffer != NULL)
        {
            status = _issueRpcAsyncLarge(pGpu, pRpc, totalSize, largeRpcBuffer);
        }
        else
        {
            vgpu_rpc_message_header_v->length = totalSize;
            status = _issueRpcAsync(pGpu, pRpc);
        }

    fail:
        // portMemFree(NULL) is a no-op, so this covers the small-RPC path too.
        portMemFree(largeRpcBuffer);
    }

    return status;
}

//
// Ask GSP-RM to dump one NvDebug protobuf component (synchronous RPC) and
// append the returned blob to the caller's protobuf encoder.
//
NV_STATUS rpcDumpProtobufComponent_v18_12
(
    OBJGPU *pGpu,
    OBJRPC *pRpc,
    PRB_ENCODER *pPrbEnc,
    NVD_STATE *pNvDumpState,
    NVDUMP_COMPONENT component
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    if (IS_GSP_CLIENT(pGpu))
    {
        rpc_dump_protobuf_component_v18_12 *rpc_params = &rpc_message->dump_protobuf_component_v18_12;

        status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_DUMP_PROTOBUF_COMPONENT,
                                      sizeof(*rpc_params));
        if (status != NV_OK)
            return status;

        rpc_params->component    = component;
        rpc_params->nvDumpType   = pNvDumpState->nvDumpType;
        rpc_params->countOnly    = ((pPrbEnc->flags & PRB_COUNT_ONLY) != 0);
        rpc_params->bugCheckCode = pNvDumpState->bugCheckCode;
        rpc_params->internalCode = pNvDumpState->internalCode;
        // Cap the requested dump to what both the RPC buffer and encoder can hold.
        rpc_params->bufferSize   = NV_MIN(pRpc->maxRpcSize,
                                          prbEncBufLeft(pPrbEnc));

        status = _issueRpcAndWait(pGpu, pRpc);

        // Add blob to protobuf.
        if ((status == NV_OK) && rpc_params->bufferSize > 0)
            status = prbEncStubbedAddBytes(pPrbEnc, rpc_params->blob, rpc_params->bufferSize);
    }

    return status;
}

// RMFS stub: not supported in this build.
NV_STATUS rpcRmfsInit_v15_00
(
    OBJGPU *pGpu,
    OBJRPC *pRpc,
    PMEMORY_DESCRIPTOR pStatusQueueMemDesc
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    return status;
}

// RMFS stub: not supported in this build.
NV_STATUS rpcRmfsCloseQueue_v15_00
(
    OBJGPU *pGpu,
    OBJRPC *pRpc
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    return status;
}

// RMFS stub: not supported in this build.
NV_STATUS rpcRmfsCleanup_v15_00
(
    OBJGPU *pGpu,
    OBJRPC *pRpc
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    return status;
}

// RMFS stub: not supported in this build.
NV_STATUS rpcRmfsTest_v15_00
(
    OBJGPU *pGpu,
    OBJRPC *pRpc,
    NvU32 numReps,
    NvU32 flags,
    NvU32 testData1,
    NvU32 testData2
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    return status;
}

//
// Report an RPC locking violation. The static previousRetAddr ensures
// osAssertFailed() fires only once per distinct call site; the trailing
// NV_ASSERT_FAILED always records the violation.
//
#if NV_PRINTF_STRINGS_ALLOWED
void osAssertFailed(void);
#define RPC_LOCK_DEBUG_DUMP_STACK()                                 \
    do {                                                            \
        static NvU64 previousRetAddr;                               \
        NvU64 retAddr = (NvU64)NV_RETURN_ADDRESS();                 \
        if (retAddr != previousRetAddr)                             \
        {                                                           \
            previousRetAddr = retAddr;                              \
            osAssertFailed();                                       \
        }                                                           \
        /* Add an assert so it shows as test score regression */    \
        NV_ASSERT_FAILED("RPC locking violation - see kernel_log.txt"); \
    } while(0)
#else
#define RPC_LOCK_DEBUG_DUMP_STACK()
#endif

//
// GSP-client implementation of RmControl: marshals an RM control call into a
// GSP_RM_CONTROL RPC (allocating a larger temporary buffer when the params do
// not fit the message queue), issues it synchronously, and copies results back.
// Handles FINN serialization, the control cache, and lock upgrades.
//
NV_STATUS rpcRmApiControl_GSP
(
    RM_API *pRmApi,
    NvHandle hClient,
    NvHandle hObject,
    NvU32 cmd,
    void *pParamStructPtr,
    NvU32 paramsSize
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext;
    OBJRPC *pRpc = GPU_GET_RPC(pGpu);

    rpc_message_header_v *large_message_copy = NULL;
    rpc_gsp_rm_control_v03_00 *rpc_params = &rpc_message->gsp_rm_control_v03_00;

    const NvU32 fixed_param_size = sizeof(rpc_message_header_v) + sizeof(*rpc_params);
    NvU32 message_buffer_remaining = pRpc->maxRpcSize - fixed_param_size;
    NvU32 rpc_params_size;
    NvU32 total_size;

    NvU32 gpuMaskRelease = 0;
    NvU32 ctrlFlags = 0;
    NvU32 ctrlAccessRight = 0;
    NvBool bCacheable;

    CALL_CONTEXT *pCallContext;
    CALL_CONTEXT newContext;
    NvU32 resCtrlFlags = NVOS54_FLAGS_NONE;
    NvBool bPreSerialized = NV_FALSE;
    void *pOriginalParams = pParamStructPtr;

    // Upgrade to the subdevice GPU lock if the caller did not hold it.
    if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance))
    {
        NV_PRINTF(LEVEL_WARNING, "Calling RPC RmControl 0x%08x without adequate locks!\n", cmd);
        RPC_LOCK_DEBUG_DUMP_STACK();

        NV_ASSERT_OK_OR_RETURN(
            rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE,
                                  GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_RPC, &gpuMaskRelease));
    }

    rmapiutilGetControlInfo(cmd, &ctrlFlags, &ctrlAccessRight);
    bCacheable = rmapiControlIsCacheable(ctrlFlags, ctrlAccessRight, NV_TRUE);

    pCallContext = resservGetTlsCallContext();
    if (pCallContext == NULL || pCallContext->bReserialize)
    {
        // This should only happen when using the internal physical RMAPI
        NV_ASSERT_OR_RETURN(pRmApi == GPU_GET_PHYSICAL_RMAPI(pGpu), NV_ERR_INVALID_STATE);

        portMemSet(&newContext, 0, sizeof(newContext));
        pCallContext = &newContext;
    }

    if (pCallContext->pControlParams != NULL)
    {
        resCtrlFlags = pCallContext->pControlParams->flags;
    }

    if (resCtrlFlags & NVOS54_FLAGS_FINN_SERIALIZED)
    {
        // Caller already serialized the params; just forward them.
        bPreSerialized = NV_TRUE;
    }
    else
    {
        status = serverSerializeCtrlDown(pCallContext, cmd, &pParamStructPtr, &paramsSize, &resCtrlFlags);
        if (status != NV_OK)
            goto done;
    }

    // If this is a serializable API, rpc_params->params is a serialized buffer.
    // otherwise this is a flat API and paramsSize is the param struct size
    if (resCtrlFlags & NVOS54_FLAGS_FINN_SERIALIZED)
    {
        NV_ASSERT_OR_RETURN(!bCacheable, NV_ERR_INVALID_STATE);
    }

    if (bCacheable)
    {
        // Serve from the control cache when possible; skip the RPC entirely.
        status = rmapiControlCacheGet(hClient, hObject, cmd, pParamStructPtr, paramsSize);
        if (status == NV_OK)
            goto done;
    }

    // Initialize these values now that paramsSize is known
    rpc_params_size = sizeof(*rpc_params) + paramsSize;
    total_size = fixed_param_size + paramsSize;

    // Write the header assuming one record. If continuation records are used,
    // then the length in the header will be overwritten by _issueRpcAndWaitLarge
    NV_ASSERT_OK_OR_GOTO(status,
        rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, rpc_params_size),
        done);

    rpc_params->hClient    = hClient;
    rpc_params->hObject    = hObject;
    rpc_params->cmd        = cmd;
    rpc_params->paramsSize = paramsSize;
    rpc_params->flags      = RMAPI_RPC_FLAGS_NONE;

    if (ctrlFlags & RMCTRL_FLAGS_COPYOUT_ON_ERROR)
        rpc_params->flags |= RMAPI_RPC_FLAGS_COPYOUT_ON_ERROR;

    if (resCtrlFlags & NVOS54_FLAGS_FINN_SERIALIZED)
        rpc_params->flags |= RMAPI_RPC_FLAGS_SERIALIZED;

    // If we have a big payload control, we need to make a local copy...
    if (message_buffer_remaining < paramsSize)
    {
        large_message_copy = portMemAllocNonPaged(total_size);
        NV_ASSERT_OR_ELSE(large_message_copy != NULL, {status = NV_ERR_NO_MEMORY; goto done; });
        // Seed the large buffer with the header + fixed params written above,
        // then redirect rpc_params into the copy.
        portMemCopy(large_message_copy, total_size, vgpu_rpc_message_header_v, fixed_param_size);
        rpc_params = &large_message_copy->rpc_message_data->gsp_rm_control_v03_00;
        message_buffer_remaining = total_size - fixed_param_size;
    }

    if (paramsSize != 0)
    {
        if (pParamStructPtr == NULL)
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }
        else
        {
            if (portMemCopy(rpc_params->params, message_buffer_remaining, pParamStructPtr, paramsSize) == NULL)
            {
                status = NV_ERR_BUFFER_TOO_SMALL;
                goto done;
            }
        }
    }
    else if (pParamStructPtr != NULL)
    {
        // Non-NULL pointer with zero size is a caller error.
        NV_PRINTF(LEVEL_ERROR, "Bad params: ptr " NvP64_fmt " size: 0x%x\n",
                  pParamStructPtr, paramsSize);
        status = NV_ERR_INVALID_ARGUMENT;
        goto done;
    }
    else
    {
        //
        // paramsSize = 0 and pParamStructPtr == NULL
        // rpc_params->params is static, cannot be set to NULL.
        // We will allow rpc_params->paramsSize = 0 and
        // rpc_params->params != NULL from here, but in
        // _rpcGspRmControl() have the logic that
        // pc_params->paramsSize = 0 means no params.
        //
    }

    // Issue RPC
    if (large_message_copy)
    {
        status = _issueRpcAndWaitLarge(pGpu, pRpc, total_size, large_message_copy, NV_TRUE);
    }
    else
    {
        status = _issueRpcAndWait(pGpu, pRpc);
    }

    //
    // At this point we have:
    // status:             The status of the RPC transfer. If NV_OK, we got something back
    // rpc_params->status: Status returned by the actual ctrl handler on GSP
    //
    if (status == NV_OK)
    {
        // Skip copyout if we got an error from the GSP control handler
        if (rpc_params->status != NV_OK && !(rpc_params->flags & RMAPI_RPC_FLAGS_COPYOUT_ON_ERROR))
        {
            status = rpc_params->status;
            goto done;
        }

        if (resCtrlFlags & NVOS54_FLAGS_FINN_SERIALIZED)
        {
            //
            // If it was preserialized, copy it to call context for deserialization by caller
            // Otherwise deserialize it because it was serialized here
            //
            if (bPreSerialized)
            {
                portMemCopy(pCallContext->pSerializedParams, pCallContext->serializedSize, rpc_params->params, rpc_params->paramsSize);
            }
            else
            {
                status = serverDeserializeCtrlUp(pCallContext, cmd, &pParamStructPtr, &paramsSize, &resCtrlFlags);
                if (status != NV_OK)
                    goto done;
            }
        }
        else
        {
            // Flat API: copy the (possibly updated) params straight back.
            if (paramsSize != 0)
            {
                portMemCopy(pParamStructPtr, paramsSize, rpc_params->params, paramsSize);
            }
        }

        if (rpc_params->status != NV_OK)
            status = rpc_params->status;
        else if (bCacheable)
            NV_ASSERT_OK(rmapiControlCacheSet(hClient, hObject, cmd, rpc_params->params, paramsSize));
    }

    if (status != NV_OK)
    {
        // NOT_SUPPORTED / OBJECT_NOT_FOUND are routine; log them quietly.
        NvBool bQuiet = NV_FALSE;
        switch (status)
        {
            case NV_ERR_NOT_SUPPORTED:
            case NV_ERR_OBJECT_NOT_FOUND:
                bQuiet = NV_TRUE;
                break;
        }
        NV_PRINTF_COND((pRpc->bQuietPrints || bQuiet), LEVEL_INFO, LEVEL_WARNING,
            "GspRmControl failed: hClient=0x%08x; hObject=0x%08x; cmd=0x%08x; paramsSize=0x%08x; paramsStatus=0x%08x; status=0x%08x\n",
            hClient, hObject, cmd, paramsSize, rpc_params->status, status);
    }

done:
    if (gpuMaskRelease != 0)
    {
        rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE);
    }
    // Free the local copy we might have allocated above
    portMemFree(large_message_copy);

    //
    // Free data structures if we serialized/deserialized here
    // Also check for serialized flag here as we may be called directly from within another control call
    //
    if ((resCtrlFlags & NVOS54_FLAGS_FINN_SERIALIZED) && !bPreSerialized)
    {
        serverFreeSerializeStructures(pCallContext, pOriginalParams);
    }

    return status;
}

//
// GSP-client implementation of RmAlloc: marshals an object allocation into a
// GSP_RM_ALLOC RPC and issues it synchronously. Handles FINN serialization of
// the alloc params and lock upgrades.
//
NV_STATUS rpcRmApiAlloc_GSP
(
    RM_API *pRmApi,
    NvHandle hClient,
    NvHandle hParent,
    NvHandle hObject,
    NvU32 hClass,
    void *pAllocParams,
    NvU32 allocParamsSize
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext;
    OBJRPC *pRpc = GPU_GET_RPC(pGpu);

    rpc_gsp_rm_alloc_v03_00 *rpc_params = &rpc_message->gsp_rm_alloc_v03_00;
    CALL_CONTEXT callContext = {0};
    NvU32 flags = RMAPI_ALLOC_FLAGS_NONE;
    NvU32 paramsSize;
    void *pOriginalParams = pAllocParams;
    NvBool bNullAllowed;

    const NvU32 fixed_param_size = sizeof(rpc_message_header_v) + sizeof(*rpc_params);
    const NvU32 message_buffer_remaining = pRpc->maxRpcSize - fixed_param_size;

    NvU32 gpuMaskRelease = 0;

    // Upgrade to the subdevice GPU lock if the caller did not hold it.
    if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance))
    {
        NV_PRINTF(LEVEL_WARNING, "Calling RPC RmAlloc 0x%04x without adequate locks!\n", hClass);
        RPC_LOCK_DEBUG_DUMP_STACK();
        NV_ASSERT_OK_OR_RETURN(
            rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE,
                                  GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_RPC, &gpuMaskRelease));
    }

    NV_ASSERT_OK_OR_GOTO(status,
        rmapiGetClassAllocParamSize(&paramsSize, NV_PTR_TO_NvP64(pAllocParams), &bNullAllowed, hClass),
        done);

    // TODO CORERM-2934: Remove this when all client allocations take NV0000_ALLOC_PARAMETERS.
    // Manually set paramsSize for client as a temporary WAR for bug 3183091, so that NV0000_ALLOC_PARAMETERS
    // can be passed as pAllocParams while NvHandle is still listed in resource_list.h.
    if ((hClass == NV01_ROOT) || (hClass == NV01_ROOT_CLIENT))
    {
        paramsSize = sizeof(NV0000_ALLOC_PARAMETERS);
    }

    if (pAllocParams == NULL && !bNullAllowed)
    {
        NV_PRINTF(LEVEL_ERROR, "NULL allocation params not allowed for class 0x%x\n", hClass);
        status = NV_ERR_INVALID_ARGUMENT;
        goto done;
    }

    NV_ASSERT_OK_OR_GOTO(status,
        rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
                             sizeof(rpc_gsp_rm_alloc_v03_00)),
        done);

    rpc_params->hClient = hClient;
    rpc_params->hParent = hParent;
    rpc_params->hObject = hObject;
    rpc_params->hClass  = hClass;
    rpc_params->flags   = RMAPI_RPC_FLAGS_NONE;

    // Serialize RM alloc
    if (paramsSize > 0)
    {
        void *memCopyResult;

        NV_CHECK_OK_OR_GOTO(status, LEVEL_INFO, serverSerializeAllocDown(&callContext, hClass, &pAllocParams, &paramsSize, &flags), done);
        if (flags & RMAPI_ALLOC_FLAGS_SERIALIZED)
        {
            rpc_params->flags |= RMAPI_RPC_FLAGS_SERIALIZED;
        }

        memCopyResult = portMemCopy(rpc_params->params, message_buffer_remaining, pAllocParams, paramsSize);
        rpc_params->paramsSize = paramsSize;

        if (memCopyResult == NULL)
        {
            status = NV_ERR_BUFFER_TOO_SMALL;
            goto done;
        }
    }
    else
    {
        rpc_params->paramsSize = 0;
    }

    status = _issueRpcAndWait(pGpu, pRpc);

    if (status == NV_OK)
    {
        // Deserialize. pAllocParams will be populated correctly if deserialized
        NV_CHECK_OK_OR_GOTO(status, LEVEL_INFO, serverDeserializeAllocUp(&callContext, hClass, &pAllocParams, &paramsSize, &flags), done);
        if (!(flags & RMAPI_ALLOC_FLAGS_SERIALIZED) && (paramsSize > 0))
        {
            portMemCopy(pAllocParams, paramsSize, rpc_params->params, paramsSize);
        }
    }
    else
    {
        NV_PRINTF_COND(pRpc->bQuietPrints, LEVEL_INFO, LEVEL_ERROR,
            "GspRmAlloc failed: hClient=0x%08x; hParent=0x%08x; hObject=0x%08x; hClass=0x%08x; paramsSize=0x%08x; paramsStatus=0x%08x; status=0x%08x\n",
            hClient, hParent, hObject, hClass, paramsSize, rpc_params->status, status);
        // NOTE(review): this overwrites the transport status with the handler
        // status even on transport failure — confirm rpc_params->status is
        // meaningful on that path.
        status = rpc_params->status;
    }

done:
    if (gpuMaskRelease != 0)
    {
        rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE);
    }
    serverFreeSerializeStructures(&callContext, pOriginalParams);

    return status;
}

//
// GSP-client implementation of RmDupObject: forwards a DUP_OBJECT request to
// GSP-RM (synchronous RPC).
//
NV_STATUS rpcRmApiDupObject_GSP
(
    RM_API *pRmApi,
    NvHandle hClient,
    NvHandle hParent,
    NvHandle *phObject,
    NvHandle hClientSrc,
    NvHandle hObjectSrc,
    NvU32 flags
)
{
    OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext;
    OBJRPC *pRpc = GPU_GET_RPC(pGpu);
    NVOS55_PARAMETERS_v03_00 *rpc_params = &rpc_message->dup_object_v03_00.params;
    NV_STATUS status;
    NvU32 gpuMaskRelease = 0;

    // Upgrade to the subdevice GPU lock if the caller did not hold it.
    if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance))
    {
        NV_PRINTF(LEVEL_WARNING, "Calling RPC RmDupObject without adequate locks!\n");
        RPC_LOCK_DEBUG_DUMP_STACK();
        NV_ASSERT_OK_OR_RETURN(
            rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE,
                                  GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_RPC, &gpuMaskRelease));
    }

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_DUP_OBJECT, sizeof(rpc_dup_object_v03_00));
    if (status != NV_OK)
        goto done;

    rpc_params->hClient = hClient;
    rpc_params->hParent = hParent;
    rpc_params->hObject = *phObject;
    rpc_params->hClientSrc = hClientSrc;
    rpc_params->hObjectSrc = hObjectSrc;
    rpc_params->flags = flags;

    status = _issueRpcAndWait(pGpu, pRpc);
    if (status != NV_OK)
    {
        NV_PRINTF_COND(pRpc->bQuietPrints, LEVEL_INFO, LEVEL_ERROR,
            "GspRmDupObject failed: hClient=0x%08x; hParent=0x%08x; hObject=0x%08x; hClientSrc=0x%08x; hObjectSrc=0x%08x; flags=0x%08x; paramsStatus=0x%08x; status=0x%08x\n",
            hClient, hParent, *phObject, hClientSrc, hObjectSrc, flags, rpc_params->status, status);
    }
done:
    if (gpuMaskRelease != 0)
    {
        rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE);
    }
    return status;
}

//
// GSP-client implementation of RmFree: forwards an object free to GSP-RM
// (synchronous RPC).
//
NV_STATUS rpcRmApiFree_GSP
(
    RM_API *pRmApi,
    NvHandle hClient,
    NvHandle hObject
)
{
    OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext;
    OBJRPC *pRpc = GPU_GET_RPC(pGpu);
    NVOS00_PARAMETERS_v03_00 *rpc_params = &rpc_message->free_v03_00.params;
    NV_STATUS status = NV_OK;
    NvU32 gpuMaskRelease = 0;

    // Upgrade to the subdevice GPU lock if the caller did not hold it.
    if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance))
    {
        NV_PRINTF(LEVEL_WARNING, "Calling RPC RmFree without adequate locks!\n");
        RPC_LOCK_DEBUG_DUMP_STACK();
        NV_ASSERT_OK_OR_RETURN(
            rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE,
                                  GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_RPC, &gpuMaskRelease));
    }

    status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_FREE, sizeof(rpc_free_v03_00));
    if (status != NV_OK)
        goto done;

    // The free request identifies the object by root client + object handle;
    // the parent handle is not needed and is sent as NV01_NULL_OBJECT.
    rpc_params->hRoot = hClient;
    rpc_params->hObjectParent = NV01_NULL_OBJECT;
    rpc_params->hObjectOld = hObject;

    status = _issueRpcAndWait(pGpu, pRpc);
    if (status != NV_OK)
    {
        NV_PRINTF_COND(pRpc->bQuietPrints, LEVEL_INFO, LEVEL_ERROR,
            "GspRmFree failed: hClient=0x%08x; hObject=0x%08x; paramsStatus=0x%08x; status=0x%08x\n",
            hClient, hObject, rpc_params->status, status);
    }
done:
    if (gpuMaskRelease != 0)
    {
        rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE);
    }
    return status;
}