1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2012-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 */ 23 24 #include "virtualization/vgpuconfigapi.h" 25 26 #include "core/core.h" 27 #include "os/os.h" 28 #include "gpu/mem_mgr/mem_mgr.h" 29 #include "gpu/mem_sys/kern_mem_sys.h" 30 #include "gpu/mem_mgr/heap.h" 31 #include "kernel/gpu/fifo/kernel_sched_mgr.h" 32 #include "virtualization/kernel_vgpu_mgr.h" 33 #include "virtualization/hypervisor/hypervisor.h" 34 #include "rmapi/control.h" 35 #include "nv-hypervisor.h" 36 #include "ctrl/ctrla081.h" 37 #include "nvRmReg.h" 38 #include "kernel/gpu/fifo/kernel_fifo.h" 39 40 NV_STATUS 41 vgpuconfigapiConstruct_IMPL 42 ( 43 VgpuConfigApi *pVgpuConfigApi, 44 CALL_CONTEXT *pCallContext, 45 RS_RES_ALLOC_PARAMS_INTERNAL *pParams 46 ) 47 { 48 NvU32 i, pgpuIndex; 49 NV_STATUS rmStatus = NV_OK; 50 VGPU_CONFIG_EVENT_INFO_NODE *pVgpuConfigEventInfoNode = NULL; 51 OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi); 52 OBJSYS *pSys = SYS_GET_INSTANCE(); 53 KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys); 54 NvHandle hClient = pParams->hClient; 55 NvHandle hVgpuConfig = pParams->hResource; 56 57 // Forbid allocation of this class on GSP-RM and Guest-RM 58 // to avoid fuzzing this class in such cases. See bug 3529160. 
59 if (!RMCFG_FEATURE_KERNEL_RM || IS_VIRTUAL(pGpu)) 60 return NV_ERR_NOT_SUPPORTED; 61 62 for (i = 0; i < NVA081_NOTIFIERS_MAXCOUNT; i++) 63 { 64 pVgpuConfigApi->notifyActions[i] = NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; 65 } 66 67 // Add config event info to vgpuMgr DB 68 if (kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex) != NV_OK) 69 { 70 return NV_ERR_OBJECT_NOT_FOUND; 71 } 72 73 pVgpuConfigEventInfoNode = listAppendNew(&(pKernelVgpuMgr->pgpuInfo[pgpuIndex].listVgpuConfigEventsHead)); 74 if (pVgpuConfigEventInfoNode != NULL) 75 { 76 portMemSet(pVgpuConfigEventInfoNode, 0, sizeof(VGPU_CONFIG_EVENT_INFO_NODE)); 77 78 pVgpuConfigEventInfoNode->hVgpuConfig = hVgpuConfig; 79 pVgpuConfigEventInfoNode->hClient = hClient; 80 pVgpuConfigEventInfoNode->pVgpuConfigApi = pVgpuConfigApi; 81 } 82 83 return rmStatus; 84 } 85 86 void 87 vgpuconfigapiDestruct_IMPL 88 ( 89 VgpuConfigApi *pVgpuConfigApi 90 ) 91 { 92 NV_STATUS rmStatus = NV_OK; 93 OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi); 94 OBJSYS *pSys = SYS_GET_INSTANCE(); 95 KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys); 96 VGPU_CONFIG_EVENT_INFO_NODE *pVgpuConfigEventInfoNode = NULL; 97 NvHandle hClient; 98 NvHandle hVgpuConfig; 99 NvU32 pgpuIndex; 100 CALL_CONTEXT *pCallContext; 101 RS_RES_FREE_PARAMS_INTERNAL *pParams; 102 103 resGetFreeParams(staticCast(pVgpuConfigApi, RsResource), &pCallContext, &pParams); 104 hClient = pParams->hClient; 105 hVgpuConfig = pParams->hResource; 106 107 if (kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex) != NV_OK) 108 { 109 pParams->status = NV_ERR_OBJECT_NOT_FOUND; 110 return; 111 } 112 113 rmStatus = kvgpumgrGetConfigEventInfoFromDb(hClient, 114 hVgpuConfig, 115 &pVgpuConfigEventInfoNode, 116 pgpuIndex); 117 if (rmStatus != NV_OK) 118 { 119 NV_PRINTF(LEVEL_ERROR, "Failed to remove config event info from DB\n"); 120 NV_ASSERT(0); 121 pParams->status = rmStatus; 122 return; 123 } 124 else 125 { 126 
listRemove(&(pKernelVgpuMgr->pgpuInfo[pgpuIndex].listVgpuConfigEventsHead), 127 pVgpuConfigEventInfoNode); 128 } 129 130 pParams->status = rmStatus; 131 } 132 133 void 134 CliNotifyVgpuConfigEvent 135 ( 136 OBJGPU *pGpu, 137 NvU32 notifyIndex 138 ) 139 { 140 VGPU_CONFIG_EVENT_INFO_NODE *pVgpuConfigEventInfoNode = NULL; 141 PEVENTNOTIFICATION pEventNotification; 142 VgpuConfigApi *pVgpuConfigApi; 143 OBJSYS *pSys = SYS_GET_INSTANCE(); 144 KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys); 145 NvU32 pgpuIndex; 146 147 // Get pgpuIndex, for which we need to notify events 148 if (kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex) != NV_OK) 149 return; 150 151 for (pVgpuConfigEventInfoNode = listHead(&(pKernelVgpuMgr->pgpuInfo[pgpuIndex].listVgpuConfigEventsHead)); 152 pVgpuConfigEventInfoNode != NULL; 153 pVgpuConfigEventInfoNode = listNext(&(pKernelVgpuMgr->pgpuInfo[pgpuIndex].listVgpuConfigEventsHead), pVgpuConfigEventInfoNode)) 154 { 155 pVgpuConfigApi = pVgpuConfigEventInfoNode->pVgpuConfigApi; 156 if (pVgpuConfigApi->notifyActions[notifyIndex] == NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) 157 { 158 continue; 159 } 160 161 pEventNotification = inotifyGetNotificationList(staticCast(pVgpuConfigApi, INotifier)); 162 // ping all events on the list of type notifyIndex 163 while (pEventNotification) 164 { 165 if (pEventNotification->NotifyIndex == notifyIndex) 166 { 167 if (osNotifyEvent(pGpu, pEventNotification, 0, 0, 0) != NV_OK) 168 { 169 NV_PRINTF(LEVEL_ERROR, 170 "CliNotifyVgpuEvent: failed to deliver event 0x%x\n", 171 notifyIndex); 172 } 173 } 174 pEventNotification = pEventNotification->Next; 175 } 176 177 // reset if single shot notify action 178 if (pVgpuConfigApi->notifyActions[notifyIndex] == NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE) 179 { 180 pVgpuConfigApi->notifyActions[notifyIndex] = NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; 181 } 182 } 183 } 184 185 NV_STATUS 186 vgpuconfigapiCtrlCmdVgpuConfigSetInfo_IMPL 
187 ( 188 VgpuConfigApi *pVgpuConfigApi, 189 NVA081_CTRL_VGPU_CONFIG_INFO_PARAMS *pParams 190 ) 191 { 192 NV_STATUS rmStatus = NV_OK; 193 OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi); 194 OBJSYS *pSys = SYS_GET_INSTANCE(); 195 KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys); 196 KERNEL_PHYS_GPU_INFO *pPhysGpuInfo = NULL; 197 NvU32 index; 198 199 NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__); 200 if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &index)) != NV_OK) 201 { 202 NV_PRINTF(LEVEL_ERROR, "Failed to find pGpu info entry.\n"); 203 return rmStatus; 204 } 205 206 pPhysGpuInfo = &pKernelVgpuMgr->pgpuInfo[index]; 207 208 // Set vgpu config state 209 if (pPhysGpuInfo == NULL) 210 return NV_ERR_INVALID_STATE; 211 else 212 pPhysGpuInfo->vgpuConfigState = pParams->vgpuConfigState; 213 214 rmStatus = kvgpumgrPgpuAddVgpuType(pGpu, pParams->discardVgpuTypes, &pParams->vgpuInfo); 215 if (rmStatus != NV_OK) 216 { 217 pPhysGpuInfo->vgpuConfigState = NVA081_CTRL_VGPU_CONFIG_STATE_UNINITIALIZED; 218 NV_PRINTF(LEVEL_ERROR, "Failed to add vGPU type to pGPU info.\n"); 219 return rmStatus; 220 } 221 222 return rmStatus; 223 } 224 225 NV_STATUS 226 vgpuconfigapiCtrlCmdVgpuConfigGetVgpuFbUsage_IMPL 227 ( 228 VgpuConfigApi *pVgpuConfigApi, 229 NVA081_CTRL_VGPU_CONFIG_GET_VGPU_FB_USAGE_PARAMS *pParams 230 ) 231 { 232 OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi); 233 234 return kvgpumgrGetVgpuFbUsage(pGpu, pParams); 235 } 236 237 NV_STATUS 238 vgpuconfigapiCtrlCmdVgpuConfigEnumerateVgpuPerPgpu_IMPL 239 ( 240 VgpuConfigApi *pVgpuConfigApi, 241 NVA081_CTRL_VGPU_CONFIG_ENUMERATE_VGPU_PER_PGPU_PARAMS *pParams 242 ) 243 { 244 NVA081_VGPU_GUEST *pVgpuGuest; 245 KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice = NULL; 246 KERNEL_VGPU_GUEST *pVgpuGuestTmp = NULL; 247 KERNEL_PHYS_GPU_INFO *pPhysGpuInfo; 248 OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi); 249 OBJSYS *pSys = SYS_GET_INSTANCE(); 250 KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys); 251 
NV_STATUS rmStatus = NV_OK; 252 NvU32 index[] = {3, 2, 1, 0, 5, 4, 7, 6, 8, 9, 10, 11, 12, 13, 14, 15}; 253 NvU32 i, j; 254 NV2080_CTRL_VGPU_MGR_INTERNAL_ENUMERATE_VGPU_PER_PGPU_PARAMS *pVgpuPerPgpuParams; 255 NV2080_VGPU_GUEST *pVgpuGuestGsp; 256 257 NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__); 258 259 // This structure can't be allocated on stack because it will result function stack usage > 4KB 260 pVgpuPerPgpuParams = portMemAllocNonPaged(sizeof(*pVgpuPerPgpuParams)); 261 if (pVgpuPerPgpuParams == NULL) 262 return NV_ERR_NO_MEMORY; 263 portMemSet(pVgpuPerPgpuParams, 0, sizeof(*pVgpuPerPgpuParams)); 264 265 NV_ASSERT_OK_OR_GOTO(rmStatus, kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &i), failed); 266 267 NV_ASSERT_OK_OR_GOTO(rmStatus, kvgpumgrEnumerateVgpuPerPgpu(pGpu, pVgpuPerPgpuParams), failed); 268 269 pPhysGpuInfo = &pKernelVgpuMgr->pgpuInfo[i]; 270 271 if (pVgpuPerPgpuParams->numVgpu != pPhysGpuInfo->numActiveVgpu) 272 { 273 rmStatus = NV_ERR_INVALID_STATE; 274 goto failed; 275 } 276 277 i = 0; 278 for (pKernelHostVgpuDevice = listHead(&(pPhysGpuInfo->listHostVgpuDeviceHead)); 279 pKernelHostVgpuDevice != NULL; 280 pKernelHostVgpuDevice = listNext(&(pPhysGpuInfo->listHostVgpuDeviceHead), pKernelHostVgpuDevice), i++) 281 { 282 pVgpuGuestTmp = pKernelHostVgpuDevice->vgpuGuest; 283 if (pVgpuGuestTmp == NULL) 284 { 285 rmStatus = NV_ERR_INVALID_STATE; 286 goto failed; 287 } 288 289 if (i >= NV_ARRAY_ELEMENTS(pParams->vgpuGuest)) 290 { 291 rmStatus = NV_ERR_INSUFFICIENT_RESOURCES; 292 goto failed; 293 } 294 pVgpuGuest = &pParams->vgpuGuest[i]; 295 296 portMemSet(pVgpuGuest->guestVmInfo.vmName, 0, NVA081_VM_NAME_SIZE); 297 portMemSet(pVgpuGuest->vgpuDevice.mdevUuid, 0, VGPU_UUID_SIZE); 298 299 if (osIsVgpuVfioPresent() == NV_OK) 300 { 301 portMemCopy(pVgpuGuest->guestVmInfo.vmName, 302 NVA081_VM_NAME_SIZE, 303 pVgpuGuestTmp->guestVmInfo.vmName, 304 NVA081_VM_NAME_SIZE); 305 306 if (pKernelHostVgpuDevice->pRequestVgpuInfoNode == NULL) 307 { 308 rmStatus = 
NV_ERR_INVALID_POINTER; 309 goto failed; 310 } 311 312 /* 313 * SRIOV vGPUs using vfio-pci-core framework on KVM do not use MDEV framework, 314 * so don't expose MDEV UUID to NVML. 315 */ 316 if (!osIsVfioPciCorePresent() || !gpuIsSriovEnabled(pGpu)) 317 { 318 for (j = 0; j < VGPU_UUID_SIZE; j++) 319 pVgpuGuest->vgpuDevice.mdevUuid[j] = pKernelHostVgpuDevice->pRequestVgpuInfoNode->mdevUuid[index[j]]; 320 } 321 } 322 323 // Copy VM's information 324 if (!IS_GSP_CLIENT(pGpu)) 325 { 326 pVgpuGuestGsp = &pVgpuPerPgpuParams->vgpuGuest[i]; 327 } 328 else 329 { 330 pVgpuGuestGsp = NULL; 331 for (j = 0; j < pVgpuPerPgpuParams->numVgpu; j++) 332 { 333 if (pVgpuPerPgpuParams->vgpuGuest[j].vgpuDevice.gfid == pKernelHostVgpuDevice->gfid) 334 { 335 pVgpuGuestGsp = &pVgpuPerPgpuParams->vgpuGuest[j]; 336 break; 337 } 338 } 339 if (pVgpuGuestGsp == NULL) 340 { 341 rmStatus = NV_ERR_OBJECT_NOT_FOUND; 342 goto failed; 343 } 344 } 345 346 pVgpuGuest->guestVmInfo.vmPid = pVgpuGuestGsp->guestVmInfo.vmPid; 347 pVgpuGuest->guestVmInfo.guestOs = pVgpuGuestGsp->guestVmInfo.guestOs; 348 pVgpuGuest->guestVmInfo.migrationProhibited = pVgpuGuestGsp->guestVmInfo.migrationProhibited; 349 pVgpuGuest->guestVmInfo.guestNegotiatedVgpuVersion = pVgpuGuestGsp->guestVmInfo.guestNegotiatedVgpuVersion; 350 pVgpuGuest->guestVmInfo.licensed = pVgpuGuestGsp->guestVmInfo.licensed; 351 pVgpuGuest->guestVmInfo.licenseState = pVgpuGuestGsp->guestVmInfo.licenseState; 352 pVgpuGuest->guestVmInfo.guestVmInfoState = pVgpuGuestGsp->guestVmInfo.guestVmInfoState; 353 pVgpuGuest->guestVmInfo.licenseExpiryTimestamp = pVgpuGuestGsp->guestVmInfo.licenseExpiryTimestamp; 354 pVgpuGuest->guestVmInfo.licenseExpiryStatus = pVgpuGuestGsp->guestVmInfo.licenseExpiryStatus; 355 pVgpuGuest->guestVmInfo.frameRateLimit = pVgpuGuestGsp->guestVmInfo.frameRateLimit; 356 357 portStringCopy((char *) pVgpuGuest->guestVmInfo.guestDriverVersion, 358 sizeof(pVgpuGuest->guestVmInfo.guestDriverVersion), 359 (char *) 
pVgpuGuestGsp->guestVmInfo.guestDriverVersion, 360 NVA081_VGPU_STRING_BUFFER_SIZE); 361 362 portStringCopy((char *) pVgpuGuest->guestVmInfo.guestDriverBranch, 363 sizeof(pVgpuGuest->guestVmInfo.guestDriverBranch), 364 (char *) pVgpuGuestGsp->guestVmInfo.guestDriverBranch, 365 NVA081_VGPU_STRING_BUFFER_SIZE); 366 367 pVgpuGuest->guestVmInfo.vmIdType = pVgpuGuestTmp->guestVmInfo.vmIdType; 368 if (pVgpuGuestTmp->guestVmInfo.vmIdType == VM_ID_DOMAIN_ID) 369 { 370 pVgpuGuest->guestVmInfo.guestVmId.vmId = pVgpuGuestTmp->guestVmInfo.guestVmId.vmId; 371 } 372 else if (pVgpuGuestTmp->guestVmInfo.vmIdType == VM_ID_UUID) 373 { 374 portMemCopy(pVgpuGuest->guestVmInfo.guestVmId.vmUuid, 375 NVA081_VM_UUID_SIZE, 376 pVgpuGuestTmp->guestVmInfo.guestVmId.vmUuid, 377 NVA081_VM_UUID_SIZE); 378 } 379 else 380 { 381 rmStatus = NV_ERR_INVALID_STATE; 382 goto failed; 383 } 384 385 pVgpuGuest->vgpuDevice.vgpuType = pKernelHostVgpuDevice->vgpuType; 386 pVgpuGuest->vgpuDevice.vgpuDeviceInstanceId = pVgpuGuestGsp->vgpuDevice.vgpuDeviceInstanceId; 387 pVgpuGuest->vgpuDevice.encoderCapacity = pVgpuGuestGsp->vgpuDevice.encoderCapacity; 388 pVgpuGuest->vgpuDevice.fbUsed = pVgpuGuestGsp->vgpuDevice.fbUsed; 389 pVgpuGuest->vgpuDevice.swizzId = pKernelHostVgpuDevice->swizzId; 390 pVgpuGuest->vgpuDevice.eccState = pVgpuGuestGsp->vgpuDevice.eccState; 391 pVgpuGuest->vgpuDevice.bDriverLoaded = pVgpuGuestGsp->vgpuDevice.bDriverLoaded; 392 pVgpuGuest->vgpuDevice.placementId = pKernelHostVgpuDevice->placementId; 393 394 portMemCopy(pVgpuGuest->vgpuDevice.vgpuUuid, VGPU_UUID_SIZE, pKernelHostVgpuDevice->vgpuUuid, VGPU_UUID_SIZE); 395 396 pVgpuGuest->vgpuDevice.vgpuPciId = pVgpuGuestGsp->vgpuDevice.vgpuPciId; 397 } 398 399 pParams->numVgpu = pPhysGpuInfo->numActiveVgpu; 400 pParams->vgpuType = NVA081_CTRL_VGPU_CONFIG_INVALID_TYPE; // This field is unused on the client side. 401 // We should remove this field, unless it is required for compatibility reasons. 
// Common cleanup for vgpuconfigapiCtrlCmdVgpuConfigEnumerateVgpuPerPgpu_IMPL:
// free the GSP scratch buffer and propagate the status.
failed:
    portMemFree(pVgpuPerPgpuParams);
    return rmStatus;
}

/*!
 * @brief Fill NVA081 vGPU type info for the requested vgpuType on this pGPU.
 *
 * Copies the static type description from the KernelVgpuMgr type table,
 * then applies pGPU-specific adjustments (heterogeneous timeslice support,
 * PVMRL-based FRL disable, ftrace regkey, placement data).
 */
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigGetVgpuTypeInfo_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPE_INFO_PARAMS *pParams
)
{
    // This code should match hostvgpudeviceapiCtrlCmdGetVgpuTypeInfo

    VGPU_TYPE *vgpuTypeInfo;
    NV_STATUS rmStatus = NV_OK;
    OBJGPU    *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);
    NvU32     data = 0;
    OBJSYS    *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    NvU8      shortGpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
    NvU32     i;
    NvU32     pgpuIndex;
    KERNEL_PHYS_GPU_INFO *pPgpuInfo;

    NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__);

    if ((rmStatus = kvgpumgrGetVgpuTypeInfo(pParams->vgpuType, &vgpuTypeInfo)) != NV_OK)
    {
        return rmStatus;
    }

    if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex)) != NV_OK)
        return rmStatus;

    pPgpuInfo = &pKernelVgpuMgr->pgpuInfo[pgpuIndex];

    // Straight field-by-field copy of the static type description.
    pParams->vgpuTypeInfo.vgpuType = vgpuTypeInfo->vgpuTypeId;
    pParams->vgpuTypeInfo.maxInstance = vgpuTypeInfo->maxInstance;
    pParams->vgpuTypeInfo.numHeads = vgpuTypeInfo->numHeads;
    pParams->vgpuTypeInfo.maxResolutionX = vgpuTypeInfo->maxResolutionX;
    pParams->vgpuTypeInfo.maxResolutionY = vgpuTypeInfo->maxResolutionY;
    pParams->vgpuTypeInfo.maxPixels = vgpuTypeInfo->maxPixels;
    pParams->vgpuTypeInfo.frlConfig = vgpuTypeInfo->frlConfig;
    pParams->vgpuTypeInfo.cudaEnabled = vgpuTypeInfo->cudaEnabled;
    pParams->vgpuTypeInfo.eccSupported = vgpuTypeInfo->eccSupported;
    pParams->vgpuTypeInfo.gpuInstanceSize = vgpuTypeInfo->gpuInstanceSize;
    pParams->vgpuTypeInfo.vdevId = vgpuTypeInfo->vdevId;
    pParams->vgpuTypeInfo.pdevId = vgpuTypeInfo->pdevId;
    pParams->vgpuTypeInfo.profileSize = vgpuTypeInfo->profileSize;
    pParams->vgpuTypeInfo.fbLength = vgpuTypeInfo->fbLength;
    pParams->vgpuTypeInfo.gspHeapSize = vgpuTypeInfo->gspHeapSize;
    pParams->vgpuTypeInfo.fbReservation = vgpuTypeInfo->fbReservation;
    pParams->vgpuTypeInfo.mappableVideoSize = vgpuTypeInfo->mappableVideoSize;
    pParams->vgpuTypeInfo.encoderCapacity = vgpuTypeInfo->encoderCapacity;
    pParams->vgpuTypeInfo.bar1Length = vgpuTypeInfo->bar1Length;
    pParams->vgpuTypeInfo.gpuDirectSupported = vgpuTypeInfo->gpuDirectSupported;
    pParams->vgpuTypeInfo.nvlinkP2PSupported = vgpuTypeInfo->nvlinkP2PSupported;
    pParams->vgpuTypeInfo.multiVgpuExclusive = vgpuTypeInfo->multiVgpuExclusive;
    pParams->vgpuTypeInfo.frlEnable = vgpuTypeInfo->frlEnable;
    pParams->vgpuTypeInfo.multiVgpuSupported = vgpuTypeInfo->multiVgpuSupported;

    /* Represents vGPU type level support for heterogeneous timeslice profiles */
    pParams->vgpuTypeInfo.exclusiveType = !kvgpumgrIsHeterogeneousVgpuSupported();

    /*
     * Represents vGPU type level support for heterogenenous timeslice size.
     * Currently, if a pGpu supports heterogenenous timeslice size, all vGPU
     * types support it. Set to false if supported
     */
    pParams->vgpuTypeInfo.exclusiveSize = !pPgpuInfo->heterogeneousTimesliceSizesSupported;

    // Disable FRL if we are using sched_sw (PVMRL) in timesliced mode
    if (IsPASCALorBetter(pGpu))
    {
        KernelSchedMgr *pKernelSchedMgr = GPU_GET_KERNEL_SCHEDMGR(pGpu);
        if (pKernelSchedMgr &&
            kschedmgrIsPvmrlEnabled(pKernelSchedMgr))
        {
            pParams->vgpuTypeInfo.frlEnable = 0;
        }
    }

    portStringCopy((char *) pParams->vgpuTypeInfo.vgpuName, sizeof(pParams->vgpuTypeInfo.vgpuName), (char *) vgpuTypeInfo->vgpuName, VGPU_STRING_BUFFER_SIZE);
    portStringCopy((char *) pParams->vgpuTypeInfo.vgpuClass, sizeof(pParams->vgpuTypeInfo.vgpuClass), (char *) vgpuTypeInfo->vgpuClass, VGPU_STRING_BUFFER_SIZE);
    portStringCopy((char *) pParams->vgpuTypeInfo.license, sizeof(pParams->vgpuTypeInfo.license), (char *) vgpuTypeInfo->license, NV_GRID_LICENSE_INFO_MAX_LENGTH);
    // NOTE(review): source length below is the destination's size, unlike the
    // other copies -- verify src and dst buffers share the same capacity.
    portStringCopy((char *) pParams->vgpuTypeInfo.vgpuExtraParams, sizeof(pParams->vgpuTypeInfo.vgpuExtraParams), (char *) vgpuTypeInfo->vgpuExtraParams,
                   sizeof(pParams->vgpuTypeInfo.vgpuExtraParams));
    portStringCopy((char *) pParams->vgpuTypeInfo.licensedProductName, sizeof(pParams->vgpuTypeInfo.licensedProductName), (char *) vgpuTypeInfo->licensedProductName,
                   NV_GRID_LICENSE_INFO_MAX_LENGTH);

    // Expose plugin-in-ftrace-buffer only when the regkey enables it.
    if (NV_OK == osReadRegistryDword(pGpu, NV_REG_STR_RM_ENABLE_PLUGIN_IN_FTRACE_BUFFER, &data))
    {
        if (data == NV_REG_STR_RM_ENABLE_PLUGIN_IN_FTRACE_BUFFER_ENABLED)
            pParams->vgpuTypeInfo.ftraceEnable = data;
    }

    rmStatus = gpuGetShortNameString(pGpu, (void *)shortGpuNameString);
    if (rmStatus != NV_OK)
        return rmStatus;

    portMemCopy(pParams->vgpuTypeInfo.shortGpuNameString, sizeof(shortGpuNameString), shortGpuNameString, sizeof(shortGpuNameString));

    portStringCopy((char *) pParams->vgpuTypeInfo.adapterName, sizeof(pParams->vgpuTypeInfo.adapterName),
                   (char *) vgpuTypeInfo->vgpuName, VGPU_STRING_BUFFER_SIZE);
    portStringCopy((char *) pParams->vgpuTypeInfo.adapterName_Unicode, sizeof(pParams->vgpuTypeInfo.adapterName_Unicode),
                   (char *) vgpuTypeInfo->vgpuName, VGPU_STRING_BUFFER_SIZE);

    // In-place widening of the ASCII adapter name to UTF-16.
    portStringConvertAsciiToUtf16(pParams->vgpuTypeInfo.adapterName_Unicode, VGPU_STRING_BUFFER_SIZE,
                                  (char *)pParams->vgpuTypeInfo.adapterName_Unicode, VGPU_STRING_BUFFER_SIZE);

    // used only by NVML
    if (vgpuTypeInfo->gpuInstanceSize != 0)
    {
        NV_ASSERT_OK_OR_RETURN(
            kvgpumgrGetPartitionFlag(vgpuTypeInfo->vgpuTypeId, &pParams->vgpuTypeInfo.gpuInstanceProfileId));
    }
    else
    {
        pParams->vgpuTypeInfo.gpuInstanceProfileId = PARTITIONID_INVALID;
    }

    // Placement data is only meaningful on pGPUs that support
    // heterogeneous timeslice sizes.
    if (pPgpuInfo->heterogeneousTimesliceSizesSupported == NV_TRUE)
    {
        pParams->vgpuTypeInfo.placementSize = vgpuTypeInfo->placementSize;
        pParams->vgpuTypeInfo.placementCount = vgpuTypeInfo->placementCount;

        for (i = 0; i < vgpuTypeInfo->placementCount; i++)
        {
            pParams->vgpuTypeInfo.placementIds[i] = vgpuTypeInfo->supportedPlacementIds[i];
        }
    }
    else
    {
        pParams->vgpuTypeInfo.placementSize = 0;
        pParams->vgpuTypeInfo.placementCount = 0;
    }

    return rmStatus;
}

/*!
 * @brief Return the list of vGPU type IDs supported on this pGPU, plus the
 * current config state. Host-vGPU virtualization mode only.
 */
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigGetSupportedVgpuTypes_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPES_PARAMS *pParams
)
{
    NvU32 pgpuIndex, i;
    OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    KERNEL_PHYS_GPU_INFO *pPgpuInfo;
    NV_STATUS rmStatus = NV_OK;

    NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__);

    if (pGpu == NULL ||
        !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex)) != NV_OK)
        return rmStatus;

    pPgpuInfo = &pKernelVgpuMgr->pgpuInfo[pgpuIndex];

    pParams->vgpuConfigState = pPgpuInfo->vgpuConfigState;

    // Report every registered type ID for this pGPU.
    for (i = 0; i < pPgpuInfo->numVgpuTypes; i++)
    {
        pParams->vgpuTypes[i] = pPgpuInfo->vgpuTypes[i]->vgpuTypeId;
    }
    pParams->numVgpuTypes = pPgpuInfo->numVgpuTypes;

    return rmStatus;
}

/*!
 * @brief Return the vGPU types that can still be created on this pGPU given
 * the currently active instances. Host-vGPU virtualization mode only;
 * delegates the filtering to kvgpumgrGetCreatableVgpuTypes().
 */
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigGetCreatableVgpuTypes_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPES_PARAMS *pParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);
    OBJSYS *pSys = SYS_GET_INSTANCE();
    NvU32 pgpuIndex;
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    NV_STATUS rmStatus = NV_OK;

    NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__);

    if (pGpu == NULL ||
        !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex)) != NV_OK)
        return rmStatus;

    rmStatus = kvgpumgrGetCreatableVgpuTypes(pGpu, pKernelVgpuMgr, pgpuIndex,
                                             &pParams->numVgpuTypes, pParams->vgpuTypes);

    return rmStatus;
}

/*!
 * @brief Set the notify action (SINGLE / REPEAT / DISABLE) for one vGPU
 * config event on the listener registered for this client+object pair.
 *
 * SINGLE/REPEAT may only be set while the event is currently DISABLE;
 * re-arming an active event returns NV_ERR_INVALID_STATE.
 */
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams
)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);
    NvHandle hClient = RES_GET_CLIENT_HANDLE(pVgpuConfigApi);
    NvHandle hObject = RES_GET_HANDLE(pVgpuConfigApi);
    VGPU_CONFIG_EVENT_INFO_NODE *pVgpuConfigEventInfoNode = NULL;
    VgpuConfigApi *pVgpuConfigInfo;
    NV_STATUS rmStatus = NV_OK;
    NvU32 pgpuIndex;

    NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__);

    // Reject out-of-range event indices before touching notifyActions[].
    if (pSetEventParams->event >= NVA081_NOTIFIERS_MAXCOUNT)
    {
        NV_PRINTF(LEVEL_INFO,
                  "Bad event 0x%x\n", pSetEventParams->event);
        return NV_ERR_INVALID_ARGUMENT;
    }

    if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex)) != NV_OK)
        return rmStatus;

    rmStatus = kvgpumgrGetConfigEventInfoFromDb(hClient,
                                                hObject,
                                                &pVgpuConfigEventInfoNode,
                                                pgpuIndex);
    if (rmStatus != NV_OK || pVgpuConfigEventInfoNode == NULL)
    {
        NV_PRINTF(LEVEL_ERROR,
                  "Failed to find config event info for %x from DB\n",
                  hClient);
        return rmStatus;
    }
    pVgpuConfigInfo = pVgpuConfigEventInfoNode->pVgpuConfigApi;

    // An event object must have been bound to this notifier first.
    if (inotifyGetNotificationList(staticCast(pVgpuConfigInfo, INotifier)) == NULL)
    {
        NV_PRINTF(LEVEL_INFO, "Cmd 0x%x: no event list\n",
                  NVA081_CTRL_CMD_VGPU_CONFIG_EVENT_SET_NOTIFICATION);
        return NV_ERR_INVALID_STATE;
    }

    switch (pSetEventParams->action)
    {
        case NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE:
        case NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT:
        {
            // must be in disabled state to transition to an active state
            if (pVgpuConfigInfo->notifyActions[pSetEventParams->event] != NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)
            {
                rmStatus = NV_ERR_INVALID_STATE;
                break;
            }
            pVgpuConfigInfo->notifyActions[pSetEventParams->event] = pSetEventParams->action;
            break;
        }

        case NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE:
        {
            pVgpuConfigInfo->notifyActions[pSetEventParams->event] = pSetEventParams->action;
            break;
        }
        default:
        {
            rmStatus = NV_ERR_INVALID_ARGUMENT;
            break;
        }
    }

    return rmStatus;
}

/*!
 * @brief Complete a pending vGPU start request: match the request node by
 * mdev UUID, publish the plugin's return status and VM name, and wake the
 * waiter blocked on the request's wait queue.
 */
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS *pNotifyParams
)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
    KERNEL_HOST_VGPU_DEVICE
                           *pKernelHostVgpuDevice;

    NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__);

    // Find the pending request whose mdev UUID matches the notification.
    for (pRequestVgpu = listHead(&pKernelVgpuMgr->listRequestVgpuHead);
         pRequestVgpu != NULL;
         pRequestVgpu = listNext(&pKernelVgpuMgr->listRequestVgpuHead, pRequestVgpu))
    {
        if (portMemCmp(pNotifyParams->mdevUuid, pRequestVgpu->mdevUuid, VGPU_UUID_SIZE) == 0)
        {
            // Only requests with an attached waiter can be completed.
            if (pRequestVgpu->returnStatus && pRequestVgpu->waitQueue)
            {
                // Publish status/name BEFORE waking the waiter.
                *pRequestVgpu->returnStatus = pNotifyParams->returnStatus;
                portStringCopy((char *)pRequestVgpu->vmName, NVA081_VM_NAME_SIZE,
                               (const char *)pNotifyParams->vmName, NVA081_VM_NAME_SIZE);
                if (pNotifyParams->returnStatus == NV_OK)
                {
                    pKernelHostVgpuDevice = pRequestVgpu->pKernelHostVgpuDevice;

                    if (pKernelHostVgpuDevice == NULL)
                    {
                        // NOTE(review): the waiter is NOT woken on this path --
                        // confirm whether the caller times out or must retry.
                        *pRequestVgpu->returnStatus = NV_ERR_INVALID_STATE;
                        return NV_ERR_INVALID_STATE;
                    }
                    portStringCopy((char *)pKernelHostVgpuDevice->vgpuGuest->guestVmInfo.vmName,
                                   sizeof(pKernelHostVgpuDevice->vgpuGuest->guestVmInfo.vmName),
                                   (const char *)pNotifyParams->vmName,
                                   sizeof(pNotifyParams->vmName));
                }
                osVgpuVfioWake(pRequestVgpu->waitQueue);
                return NV_OK;
            }
            return NV_ERR_INVALID_STATE;
        }
    }
    return NV_ERR_OBJECT_NOT_FOUND;
}

/*!
 * @brief List the still-creatable placement IDs for a given vGPU type.
 * Only meaningful when the pGPU is in heterogeneous vGPU mode; entries equal
 * to NVA081_PLACEMENT_ID_INVALID are already consumed and are skipped.
 */
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigGetCreatablePlacements_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_GET_CREATABLE_PLACEMENTS_PARAMS *pParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    NV_STATUS rmStatus = NV_OK;
    KERNEL_PHYS_GPU_INFO *pPhysGpuInfo;
    VGPU_TYPE *pVgpuTypeInfo;
    NvU32 pgpuIndex;
    NvU32 i, j, count = 0;

    NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__);

    if (pGpu == NULL || (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE)))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex));

    pPhysGpuInfo = &(pKernelVgpuMgr->pgpuInfo[pgpuIndex]);

    // Locate the requested type; if it isn't registered on this pGPU the
    // params are left untouched and NV_OK is returned (count stays as-is).
    for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
    {
        pVgpuTypeInfo = pPhysGpuInfo->vgpuTypes[i];
        if (pParams->vgpuTypeId == pVgpuTypeInfo->vgpuTypeId)
        {
            for (j = 0; j < pVgpuTypeInfo->placementCount; j++)
            {
                if (pPhysGpuInfo->creatablePlacementIds[i][j] != NVA081_PLACEMENT_ID_INVALID)
                {
                    pParams->placementIds[count] = pPhysGpuInfo->creatablePlacementIds[i][j];
                    count++;
                }
            }
            pParams->placementSize = pVgpuTypeInfo->placementSize;
            pParams->count = count;
            break;
        }
    }

    return rmStatus;
}

/*!
 * @brief Set a per-pGPU vGPU capability flag (mini-quarter or
 * compute-media-engine). Unknown capabilities return
 * NV_ERR_INVALID_ARGUMENT.
 */
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigSetCapability_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_SET_CAPABILITY_PARAMS *pSetCapabilityParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    KERNEL_PHYS_GPU_INFO *pPhysGpuInfo;
    NvU32 i;
    NV_STATUS rmStatus = NV_OK;

    NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__);

    if (kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &i) != NV_OK)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    pPhysGpuInfo = &(pKernelVgpuMgr->pgpuInfo[i]);

    switch (pSetCapabilityParams->capability)
    {
        case NVA081_CTRL_VGPU_CAPABILITY_MINI_QUARTER_GPU:
        {
            pPhysGpuInfo->miniQuarterEnabled = pSetCapabilityParams->state;
            break;
        }
        case NVA081_CTRL_VGPU_CAPABILITY_COMPUTE_MEDIA_ENGINE_GPU:
        {
            pPhysGpuInfo->computeMediaEngineEnabled = pSetCapabilityParams->state;
            break;
        }
        default:
        {
            rmStatus = NV_ERR_INVALID_ARGUMENT;
            break;
        }
    }

    return rmStatus;
}

/*!
 * @brief Query a per-pGPU vGPU capability flag; mirror of
 * vgpuconfigapiCtrlCmdVgpuConfigSetCapability_IMPL.
 */
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigGetCapability_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS *pGetCapabilityParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    KERNEL_PHYS_GPU_INFO *pPhysGpuInfo;
    NvU32 index;
    NV_STATUS rmStatus = NV_OK;

    NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__);

    if (kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &index) != NV_OK)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    pPhysGpuInfo = &(pKernelVgpuMgr->pgpuInfo[index]);

    switch (pGetCapabilityParams->capability)
    {
        case NVA081_CTRL_VGPU_CAPABILITY_MINI_QUARTER_GPU:
        {
            pGetCapabilityParams->state = pPhysGpuInfo->miniQuarterEnabled;
            break;
        }
        case NVA081_CTRL_VGPU_CAPABILITY_COMPUTE_MEDIA_ENGINE_GPU:
        {
            pGetCapabilityParams->state = pPhysGpuInfo->computeMediaEngineEnabled;
            break;
        }
        default:
        {
            rmStatus = NV_ERR_INVALID_ARGUMENT;
            break;
        }
    }

    return rmStatus;
}

/*!
 * @brief Refresh pGPU-level vGPU data: recompute supported placement IDs and
 * (re)register the mdev device with the OS layer.
 * NV_ERR_NOT_SUPPORTED from osVgpuRegisterMdev is deliberately mapped to
 * NV_OK (non-mdev platforms).
 */
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigUpdatePgpuInfo_IMPL
(
    VgpuConfigApi *pVgpuConfigApi
)
{
    NV_STATUS rmStatus = NV_OK;
    OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);

    if (pGpu != NULL)
    {
        rmStatus = kvgpumgrSetSupportedPlacementIds(pGpu);
        if (rmStatus != NV_OK)
            return rmStatus;

        rmStatus = osVgpuRegisterMdev(pGpu->pOsGpuInfo);
        if (rmStatus == NV_ERR_NOT_SUPPORTED)
            return NV_OK;
    }
    else
        rmStatus = NV_ERR_INVALID_STATE;

    return rmStatus;
}

/*!
 * @brief Set the encoder capacity of a vGPU instance identified by UUID;
 * thin wrapper over kvgpumgrSetVgpuEncoderCapacity().
 */
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigSetVgpuInstanceEncoderCapacity_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_VGPU_INSTANCE_ENCODER_CAPACITY_PARAMS *pEncoderParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);

    return kvgpumgrSetVgpuEncoderCapacity(pGpu, pEncoderParams->vgpuUuid, pEncoderParams->encoderCapacity);
}

NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigGetMigrationCap_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_CAP_PARAMS *pMigrationCapParams
)
{
    OBJSYS        *pSys        = SYS_GET_INSTANCE();
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
    OBJGPU        *pGpu        = GPU_RES_GET_GPU(pVgpuConfigApi);
    NvU32          data        = 0;

    NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__);

    if (pHypervisor == NULL)
        return NV_ERR_INVALID_REQUEST;

    if (pGpu == NULL)
        return NV_ERR_INVALID_STATE;

    if (osIsVgpuVfioPresent() == NV_OK)
    {
        // On VFIO/KVM platforms the regkey can force-disable migration.
        if (NV_OK == osReadRegistryDword(pGpu,
                                         NV_REG_STR_RM_ENABLE_KVM_VGPU_MIGRATION,
                                         &data))
        {
            if (data == NV_REG_STR_RM_ENABLE_KVM_VGPU_MIGRATION_FALSE)
            {
                pMigrationCapParams->bMigrationCap = NV_FALSE;
            }
            else
            {
                pMigrationCapParams->bMigrationCap = kvgpumgrCheckPgpuMigrationSupport(pGpu);
            }
        }
        else // Regkey isn't set explicitly, check for default value
        {
            pMigrationCapParams->bMigrationCap = ((NV_REG_STR_RM_ENABLE_KVM_VGPU_MIGRATION_DEFAULT ==
                                                   NV_REG_STR_RM_ENABLE_KVM_VGPU_MIGRATION_TRUE) &&
                                                  kvgpumgrCheckPgpuMigrationSupport(pGpu));
        }
    }
    else
        pMigrationCapParams->bMigrationCap = kvgpumgrCheckPgpuMigrationSupport(pGpu);

    return NV_OK;
}

//
// Build the pGPU metadata string used for offline vGPU migration
// compatibility checks.  For migration-incapable GPUs the string is just the
// GPU UUID (bug 2063867) so cross-host comparisons always mismatch;
// otherwise it is a concatenation of devid/subdevid/FS/cap encodings.
//
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigGetPgpuMetadataString_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_GET_PGPU_METADATA_STRING_PARAMS *pGpuMetadataStringParams
)
{
    OBJSYS        *pSys                = SYS_GET_INSTANCE();
    OBJHYPERVISOR *pHypervisor         = SYS_GET_HYPERVISOR(pSys);
    OBJGPU        *pGpu                = GPU_RES_GET_GPU(pVgpuConfigApi);
    NvU32          written_bytes       = 0;
    NvU32          total_written_bytes = 0;
    NvBool         pgpuMigrationCap    = NV_FALSE;
    NvU8          *pGidString          = NULL;
    NvU32          flags               = 0;
    NvU32          gidStrlen;
    NV_STATUS      rmStatus            = NV_OK;

    if (pHypervisor == NULL)
        return NV_ERR_INVALID_REQUEST;

    if (pGpu == NULL)
        return NV_ERR_INVALID_STATE;

    portMemSet(pGpuMetadataStringParams->pGpuString, 0,
               NVA081_PGPU_METADATA_STRING_SIZE);

    pgpuMigrationCap = kvgpumgrCheckPgpuMigrationSupport(pGpu);

    if (pgpuMigrationCap == NV_FALSE)
    {
        // Bug 2063867
        // For migration unsupported GPUs, get the GPU's UUID
        // and save it in the string.
        // To perform offline compatibility checks for vGPU migration,
        // when the strings are compared, there is mismatch and migration
        // can be avoided for these migration unsupported GPUs.
        rmStatus = gpuGetGidInfo(pGpu, &pGidString, &gidStrlen, flags);
        if (rmStatus != NV_OK)
            return rmStatus;

        if (NVA081_PGPU_METADATA_STRING_SIZE >= gidStrlen)
        {
            portMemCopy(pGpuMetadataStringParams->pGpuString, gidStrlen, pGidString, gidStrlen);
        }
        else
        {
            // UUID would overflow the output buffer; free and bail out.
            rmStatus = NV_ERR_INSUFFICIENT_RESOURCES;
            portMemFree(pGidString);
            return rmStatus;
        }
        portMemFree(pGidString);
    }
    else
    {
        // Each encoding helper returns the byte count written, or NV_U32_MAX
        // when the remaining buffer space is insufficient.
        written_bytes = kvgpumgrGetPgpuDevIdEncoding(pGpu,
                                                     pGpuMetadataStringParams->pGpuString,
                                                     NVA081_PGPU_METADATA_STRING_SIZE);

        if (written_bytes == NV_U32_MAX)
            return NV_ERR_INSUFFICIENT_RESOURCES;
        total_written_bytes += written_bytes;

        written_bytes = kvgpumgrGetPgpuSubdevIdEncoding(pGpu,
                                                        pGpuMetadataStringParams->pGpuString + total_written_bytes,
                                                        NVA081_PGPU_METADATA_STRING_SIZE - total_written_bytes);

        if (written_bytes == NV_U32_MAX)
            return NV_ERR_INSUFFICIENT_RESOURCES;
        total_written_bytes += written_bytes;


        /*
         * Dynamic Floor Sweeping support for hopper+ allows migration on GPUs
         * with different floor sweep. Floor sweep info encoding for checking
         * migration capability is not needed hopper+.
         */
        if (!IsdHOPPERorBetter(pGpu))
        {
            written_bytes = kvgpumgrGetPgpuFSEncoding(pGpu,
                                                      pGpuMetadataStringParams->pGpuString + total_written_bytes,
                                                      NVA081_PGPU_METADATA_STRING_SIZE - total_written_bytes);

            if (written_bytes == NV_U32_MAX)
                return NV_ERR_INSUFFICIENT_RESOURCES;
            total_written_bytes += written_bytes;
        }


        written_bytes = kvgpumgrGetPgpuCapEncoding(pGpu,
                                                   pGpuMetadataStringParams->pGpuString + total_written_bytes,
                                                   NVA081_PGPU_METADATA_STRING_SIZE - total_written_bytes);

        if (written_bytes == NV_U32_MAX)
            return NV_ERR_INSUFFICIENT_RESOURCES;
        total_written_bytes += written_bytes;
    }

    return NV_OK;
}

//
// Report the host FB reservation for the given vGPU type: host-RM reserved
// FB, ECC/page-retirement reserved FB, and their total.
//
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigGetHostFbReservation_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_GET_HOST_FB_RESERVATION_PARAMS *pParams
)
{
    OBJGPU        *pGpu           = GPU_RES_GET_GPU(pVgpuConfigApi);
    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);

    NV_PRINTF(LEVEL_INFO, "%s\n", __FUNCTION__);

    if (pParams->vgpuTypeId == NVA081_CTRL_VGPU_CONFIG_INVALID_TYPE)
    {
        // Zero the outputs so callers don't read stale values on failure.
        pParams->hostReservedFb = 0;
        pParams->eccAndPrReservedFb = 0;
        pParams->totalReservedFb = 0;

        return NV_ERR_INVALID_ARGUMENT;
    }

    pParams->hostReservedFb = memmgrGetVgpuHostRmReservedFb_HAL(pGpu, pMemoryManager, pParams->vgpuTypeId);

    pParams->eccAndPrReservedFb = kvgpumgrGetEccAndPrReservedFb(pGpu);

    pParams->totalReservedFb = pParams->hostReservedFb + pParams->eccAndPrReservedFb;

    return NV_OK;
}

// Doorbell emulation is not supported on this build; always fails.
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigGetDoorbellEmulationSupport_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_GET_DOORBELL_EMULATION_SUPPORT_PARAMS *pParams
)
{

    return NV_ERR_NOT_SUPPORTED;
}

//
// Return a free MIG swizzId for vGPU creation.  On VFIO platforms the id is
// taken from the opened vGPU request matching the caller's PCI id; otherwise
// a free id matching the vGPU type's partition flag is allocated.
// Fails with NV_ERR_INVALID_OPERATION unless MIG is in use.
//
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigGetFreeSwizzId_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_GET_FREE_SWIZZID_PARAMS *pParams
)
{
    OBJGPU                 *pGpu           = GPU_RES_GET_GPU(pVgpuConfigApi);
    OBJSYS                 *pSys           = SYS_GET_INSTANCE();
    KernelVgpuMgr          *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    REQUEST_VGPU_INFO_NODE *pRequestVgpu   = NULL;
    KERNEL_PHYS_GPU_INFO   *pPhysGpuInfo;
    NvU32                   i;

    if (kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &i) != NV_OK)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    pPhysGpuInfo = &(pKernelVgpuMgr->pgpuInfo[i]);

    // swizzIds only exist while MIG is active.
    NV_CHECK_OR_RETURN(LEVEL_INFO,
                       IS_MIG_IN_USE(pGpu),
                       NV_ERR_INVALID_OPERATION);

    if (osIsVgpuVfioPresent() == NV_OK)
    {
        // Find the opened vGPU request node for this physical GPU.
        for (pRequestVgpu = listHead(&pKernelVgpuMgr->listRequestVgpuHead);
             pRequestVgpu != NULL;
             pRequestVgpu = listNext(&pKernelVgpuMgr->listRequestVgpuHead, pRequestVgpu))
        {
            if (pRequestVgpu->deviceState == NV_VGPU_DEV_OPENED &&
                pRequestVgpu->gpuPciId == pParams->gpuPciId)
                break;
        }
        if (pRequestVgpu == NULL)
        {
            return NV_ERR_OBJECT_NOT_FOUND;
        }

        pParams->swizzId = pRequestVgpu->swizzId;
    }
    else
    {
        NvU32 partitionFlag = PARTITIONID_INVALID;

        NV_ASSERT_OK_OR_RETURN(
            kvgpumgrGetPartitionFlag(pParams->vgpuTypeId, &partitionFlag));

        NV_ASSERT_OK_OR_RETURN(
            kvgpumgrGetSwizzId(pGpu, pPhysGpuInfo, partitionFlag, &pParams->swizzId));
    }

    return NV_OK;
}

//
// Report multi-vGPU support info for this physical GPU: fractional
// multi-vGPU plus heterogeneous timeslice profile/size support.
//
NV_STATUS
vgpuconfigapiCtrlCmdPgpuGetMultiVgpuSupportInfo_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_PGPU_GET_MULTI_VGPU_SUPPORT_INFO_PARAMS *pParams
)
{
    OBJGPU               *pGpu           = GPU_RES_GET_GPU(pVgpuConfigApi);
    OBJSYS               *pSys           = SYS_GET_INSTANCE();
    KernelVgpuMgr        *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    KERNEL_PHYS_GPU_INFO *pPhysGpuInfo;
    NvU32                 i;

    if (kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &i) != NV_OK)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    pPhysGpuInfo = &(pKernelVgpuMgr->pgpuInfo[i]);
    pParams->fractionalmultiVgpuSupported = pPhysGpuInfo->fractionalMultiVgpu;

    // We are not currently limiting the feature based on the pgpu.
    // Return the system level value here.
    pParams->heterogeneousTimesliceProfilesSupported = kvgpumgrIsHeterogeneousVgpuSupported();

    pParams->heterogeneousTimesliceSizesSupported = pPhysGpuInfo->heterogeneousTimesliceSizesSupported;

    return NV_OK;
}

//
// Streaming capability is reported as available only when SR-IOV is enabled
// on this GPU.
//
NV_STATUS
vgpuconfigapiCtrlCmdPgpuGetVgpuStreamingCapability_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_PGPU_GET_VGPU_STREAMING_CAPABILITY_PARAMS *pParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);
    pParams->streamingCapability = NV_FALSE;

    if (gpuIsSriovEnabled(pGpu))
    {
        pParams->streamingCapability = NV_TRUE;
    }

    return NV_OK;
}

//
// Report system-level vGPU driver capabilities (currently just whether
// heterogeneous multi-vGPU is supported).
//
NV_STATUS
vgpuconfigapiCtrlCmdGetVgpuDriversCaps_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_GET_VGPU_DRIVER_CAPS_PARAMS *pParams
)
{
    pParams->heterogeneousMultiVgpuSupported = kvgpumgrIsHeterogeneousVgpuSupported();
    return NV_OK;
}

//
// Update per-pGPU configuration; currently only the fractional multi-vGPU
// setting is written into the physical-GPU info table.
//
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigSetPgpuInfo_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_SET_PGPU_INFO_PARAMS *pParams
)
{
    OBJGPU               *pGpu           = GPU_RES_GET_GPU(pVgpuConfigApi);
    OBJSYS               *pSys           = SYS_GET_INSTANCE();
    KernelVgpuMgr        *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    KERNEL_PHYS_GPU_INFO *pPhysGpuInfo;
    NvU32                 i;

    if (kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &i) != NV_OK)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    pPhysGpuInfo = &(pKernelVgpuMgr->pgpuInfo[i]);
    pPhysGpuInfo->fractionalMultiVgpu = pParams->fractionalMultiVgpu;

    return NV_OK;
}

//
// Validate that the given swizzId is usable with the given vGPU type.
// Only meaningful while MIG is in use; delegates to the kernel vGPU manager.
//
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigValidateSwizzId_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_VALIDATE_SWIZZID_PARAMS *pParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pVgpuConfigApi);

    NV_CHECK_OR_RETURN(LEVEL_INFO,
                       IS_MIG_IN_USE(pGpu),
                       NV_ERR_INVALID_OPERATION);

    NV_ASSERT_OK_OR_RETURN(
        kvgpumgrValidateSwizzId(pGpu, pParams->vgpuTypeId, pParams->swizzId));

    return NV_OK;
}

//
// Query heterogeneous vGPU placement info.  When heterogeneous mode is off
// the call succeeds with isHeterogeneousEnabled = NV_FALSE and an invalid
// placement id; otherwise placement id and guest FB layout are computed by
// the kernel vGPU manager.
//
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigUpdateHeterogeneousInfo_IMPL
(
    VgpuConfigApi *pVgpuConfigApi,
    NVA081_CTRL_VGPU_CONFIG_UPDATE_HETEROGENEOUS_INFO_PARAMS *pParams
)
{
    OBJGPU   *pGpu     = GPU_RES_GET_GPU(pVgpuConfigApi);
    NV_STATUS rmStatus = NV_OK;

    if (pGpu == NULL)
        return NV_ERR_INVALID_STATE;

    if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE))
    {
        pParams->isHeterogeneousEnabled = NV_FALSE;
        pParams->placementId = NVA081_PLACEMENT_ID_INVALID;
        return NV_OK;
    }

    pParams->isHeterogeneousEnabled = NV_TRUE;

    rmStatus = kvgpumgrUpdateHeterogeneousInfo(pGpu, pParams->vgpuTypeId,
                                               &pParams->placementId,
                                               &pParams->guestFbLength,
                                               &pParams->guestFbOffset,
                                               &pParams->gspHeapOffset);
    if (rmStatus != NV_OK)
        return rmStatus;

    return NV_OK;
}