/*
 * SPDX-FileCopyrightText: Copyright (c) 2014-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*!
 *
 * @file  os-hypervisor.c
 * @brief OS specific Hypervisor interfaces for RM
 *
 */

#include "os/os.h"
#include "nv.h"
#include "nv-priv.h"
#include <nvRmReg.h>
#include <virtualization/hypervisor/hypervisor.h>
#include "core/thread_state.h"
#include "core/locks.h"
#include "virtualization/kernel_vgpu_mgr.h"
#include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
#include "kernel/gpu/fifo/kernel_fifo.h"
#include "osapi.h"
#include "virtualization/kernel_hostvgpudeviceapi.h"
#include <objtmr.h>
#include "gpu/bif/kernel_bif.h"
#include "gpu/bus/kern_bus.h"
#include <nv_ref.h>            // NV_PMC_BOOT_1_VGPU
#include "nvdevid.h"

#include "g_vgpu_chip_flags.h" // vGPU device names

#define NV_VFIO_PCI_BAR0_REGION_INDEX 0
#define NV_VFIO_PCI_BAR1_REGION_INDEX 1
#define NV_VFIO_PCI_BAR2_REGION_INDEX 2
#define NV_VFIO_PCI_BAR3_REGION_INDEX 3

static NV_STATUS nv_parse_config_params(const char *, const char *, const char, NvU32 *);

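/*!
 * @brief Record that vGPU on Hyper-V is supported on this system.
 */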
void hypervisorSetHypervVgpuSupported_IMPL(OBJHYPERVISOR *pHypervisor)
{
    pHypervisor->bIsHypervVgpuSupported = NV_TRUE;
}

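/*!
 * @brief Return NV_TRUE when the OS layer reports that RM is running as a
 *        vGPU (VGX) host.
 */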
NvBool hypervisorIsVgxHyper_IMPL(void)
{
    return os_is_vgx_hyper();
}

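/*!
 * @brief Inject a guest MSI on behalf of a vGPU device.
 *
 * Only supported on the non-VFIO (VGX) host path, and only when the guest
 * MSI address and data have been populated.
 */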
NV_STATUS hypervisorInjectInterrupt_IMPL
(
    OBJHYPERVISOR *pHypervisor,
    VGPU_NS_INTR *pVgpuNsIntr
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    // osIsVgpuVfioPresent() returns an NV_STATUS; NV_OK means vGPU-VFIO is in use
    if (osIsVgpuVfioPresent() == NV_OK)
        return NV_ERR_NOT_SUPPORTED;

    if (pVgpuNsIntr->guestMSIAddr && pVgpuNsIntr->guestMSIData)
    {
        status = os_inject_vgx_msi((NvU16)pVgpuNsIntr->guestDomainId,
                                   pVgpuNsIntr->guestMSIAddr,
                                   pVgpuNsIntr->guestMSIData);
    }

    return status;
}

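/*!
 * @brief Return the hypervisor type detected by RM.
 */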
HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
    return hypervisorGetHypervisorType(pHypervisor);
}

#define MAX_STR_LEN 256
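/*!
 * @brief Query information about a vGPU type on the given physical GPU.
 *
 * Only VGPU_TYPE_INSTANCES is handled: the number of remaining creatable
 * instances of @p vgpuTypeId is written into @p buffer.
 */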
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU32 vgpuTypeId,
    char *buffer,
    int type_info,
    NvU8 devfn
)
{
    THREAD_STATE_NODE threadState;
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    NV_STATUS rmStatus = NV_OK;
    VGPU_TYPE *vgpuTypeInfo;
    NvU32 pgpuIndex, i, avail_instances = 0;
    OBJGPU *pGpu = NULL;
    void *fp;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex)) ==
            NV_OK)
        {
            for (i = 0; i < MAX_VGPU_TYPES_PER_PGPU; i++)
            {
                vgpuTypeInfo = pKernelVgpuMgr->pgpuInfo[pgpuIndex].vgpuTypes[i];
                if (vgpuTypeInfo == NULL)
                    break;

                if (vgpuTypeInfo->vgpuTypeId != vgpuTypeId)
                    continue;

                switch (type_info)
                {
                    case VGPU_TYPE_INSTANCES:
                        pGpu = NV_GET_NV_PRIV_PGPU(pNv);
                        if (pGpu == NULL)
                        {
                            NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid\n",
                                      __FUNCTION__);
                            rmStatus = NV_ERR_INVALID_STATE;
                            goto exit;
                        }

                        rmStatus = kvgpumgrGetAvailableInstances(&avail_instances, pGpu,
                                                                 vgpuTypeInfo,
                                                                 pgpuIndex, devfn);
                        if (rmStatus != NV_OK)
                            goto exit;

                        os_snprintf(buffer, MAX_STR_LEN, "%d\n", avail_instances);
                        break;
                    default:
                        rmStatus = NV_ERR_INVALID_ARGUMENT;
                }
                break;
            }
        }

exit:
        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

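/*!
 * @brief List the vGPU type IDs supported on the given physical GPU, or,
 *        when @p getCreatableTypes is set, only the types that can still
 *        be created.
 */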
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU32 *numVgpuTypes,
    NvU32 *vgpuTypeIds,
    NvBool isVirtfn,
    NvU8 devfn,
    NvBool getCreatableTypes
)
{
    THREAD_STATE_NODE threadState;
    OBJSYS *pSys = SYS_GET_INSTANCE();
    OBJGPU *pGpu = NULL;
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    NV_STATUS rmStatus = NV_OK;
    NvU32 pgpuIndex, i, avail_instances = 0;
    NvU32 numSupportedVgpuTypes = 0;
    VGPU_TYPE *vgpuTypeInfo;
    void *fp;

    if (!vgpuTypeIds || !numVgpuTypes)
        return NV_ERR_INVALID_ARGUMENT;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex)) ==
            NV_OK)
        {
            if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].sriovEnabled && !isVirtfn)
            {
                *numVgpuTypes = 0;
            }
            else
            {
                numSupportedVgpuTypes = pKernelVgpuMgr->pgpuInfo[pgpuIndex].numVgpuTypes;
                *numVgpuTypes = 0;

                for (i = 0; i < numSupportedVgpuTypes; i++)
                {
                    vgpuTypeInfo = pKernelVgpuMgr->pgpuInfo[pgpuIndex].vgpuTypes[i];

                    if (!getCreatableTypes)
                    {
                        // Return all available types
                        vgpuTypeIds[*numVgpuTypes] = vgpuTypeInfo->vgpuTypeId;
                        (*numVgpuTypes)++;
                        continue;
                    }

                    pGpu = NV_GET_NV_PRIV_PGPU(pNv);
                    if (pGpu == NULL)
                    {
                        NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid\n",
                                  __FUNCTION__);
                        rmStatus = NV_ERR_INVALID_STATE;
                        goto exit;
                    }

                    rmStatus = kvgpumgrGetAvailableInstances(&avail_instances, pGpu,
                                                             vgpuTypeInfo, pgpuIndex,
                                                             devfn);
                    if (rmStatus != NV_OK)
                    {
                        NV_PRINTF(LEVEL_ERROR, "Failed to get available instances for vGPU ID: %d, status: 0x%x\n",
                                  vgpuTypeInfo->vgpuTypeId, rmStatus);
                        continue;
                    }

                    if (avail_instances == 0)
                        continue;

                    vgpuTypeIds[*numVgpuTypes] = vgpuTypeInfo->vgpuTypeId;
                    (*numVgpuTypes)++;
                }
            }
        }

exit:
        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

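/*!
 * @brief Forward a vGPU device deletion request to the kernel vGPU manager.
 */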
NV_STATUS NV_API_CALL nv_vgpu_delete(
    nvidia_stack_t *sp,
    const NvU8 *pMdevUuid,
    NvU16 vgpuId
)
{
    THREAD_STATE_NODE threadState;
    void *fp = NULL;
    NV_STATUS rmStatus = NV_OK;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrDeleteRequestVgpu(pMdevUuid, vgpuId);
        // UNLOCK: release API lock
        rmapiLockRelease();
    }
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

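/*!
 * @brief Forward SR-IOV virtual function PCI info (and related commands)
 *        to the kernel vGPU manager.
 */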
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU8 cmd,
    NvU32 domain,
    NvU8 bus,
    NvU8 slot,
    NvU8 function,
    NvBool isMdevAttached,
    void *vf_pci_info
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_OK;
    void *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrProcessVfInfo(pNv->gpu_id, cmd, domain, bus, slot, function,
                                         isMdevAttached, (vgpu_vf_pci_info *)vf_pci_info);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

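/*!
 * @brief Forward a vGPU device creation request to the kernel vGPU manager.
 */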
NV_STATUS NV_API_CALL nv_vgpu_create_request(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU32 vgpuTypeId,
    NvU16 *vgpuId,
    NvU32 gpuPciBdf
)
{
    THREAD_STATE_NODE threadState;
    void *fp = NULL;
    NV_STATUS rmStatus = NV_OK;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrCreateRequestVgpu(pNv->gpu_id, pMdevUuid,
                                             vgpuTypeId, vgpuId, gpuPciBdf);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

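/*!
 * @brief Report whether the given VF BAR region is 64-bit.
 */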
static NV_STATUS is_bar_64bit(
    OBJGPU *pGpu,
    NvU32 regionIndex,
    NvBool *isBar64bit
)
{
    NV_STATUS rmStatus = NV_OK;

    if (pGpu == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid\n", __FUNCTION__);
        rmStatus = NV_ERR_INVALID_STATE;
        goto exit;
    }

    switch (regionIndex)
    {
        case NV_VFIO_PCI_BAR0_REGION_INDEX:
            *isBar64bit = pGpu->sriovState.b64bitVFBar0;
            break;

        case NV_VFIO_PCI_BAR1_REGION_INDEX:
            *isBar64bit = pGpu->sriovState.b64bitVFBar1;
            break;

        case NV_VFIO_PCI_BAR2_REGION_INDEX:
            *isBar64bit = pGpu->sriovState.b64bitVFBar2;
            break;

        default:
            NV_PRINTF(LEVEL_ERROR, "BAR%d region doesn't exist!\n", regionIndex);
            rmStatus = NV_ERR_INVALID_ARGUMENT;
            goto exit;
    }

    NV_PRINTF(LEVEL_INFO, "BAR%d region is_64bit: %d\n", regionIndex, *isBar64bit);

exit:
    return rmStatus;
}

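/*!
 * @brief Compute the reported size of one emulated vGPU BAR region.
 *
 * BAR1 is sized from the vGPU type (subject to resizable-BAR1 limits and
 * the 'override_bar1_size' config parameter). With 64-bit addressing (the
 * default), BAR2 reports size 0 and its size is reported through BAR3.
 */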
static NV_STATUS
_nv_vgpu_get_bar_size(OBJGPU *pGpu, KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice,
                      NvU32 regionIndex, NvU64 *size, NvU8 *configParams)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    NV_STATUS status;
    KernelBus *pKernelBus;
    NvU32 value = 0;

    pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
    *size = kbusGetPciBarSize(pKernelBus, regionIndex);

    if (regionIndex == NV_VFIO_PCI_BAR1_REGION_INDEX)
    {
        VGPU_TYPE *vgpuTypeInfo;
        NvU32 pgpuIndex = 0;
        NvBool bOverrideBar1Size = NV_FALSE;

        // Read BAR1 length from vgpuTypeInfo
        NV_ASSERT_OK_OR_RETURN(kvgpumgrGetVgpuTypeInfo(pKernelHostVgpuDevice->vgpuType,
                                                       &vgpuTypeInfo));

        *size = vgpuTypeInfo->bar1Length << 20;
        NV_ASSERT_OK_OR_RETURN(kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex));

        /*
         * Check for the 'override_bar1_size' param in the vgpuExtraParams list
         * first; if the param is missing there, then check the vgpu_params list.
         */
        status = nv_parse_config_params((const char *)vgpuTypeInfo->vgpuExtraParams,
                                        "override_bar1_size", ';', &value);
        if (status == NV_OK && value)
        {
            bOverrideBar1Size = NV_TRUE;
        }
        else if (status == NV_ERR_OBJECT_NOT_FOUND)
        {
            status = nv_parse_config_params((const char *)configParams,
                                            "override_bar1_size", ',', &value);
            if (status == NV_OK && value)
                bOverrideBar1Size = NV_TRUE;
        }

        if (gpuIsVfResizableBAR1Supported(pGpu))
        {
            if ((*size > pGpu->sriovState.vfBarSize[1]) ||
                (!portStringCompare("Compute", (const char *)vgpuTypeInfo->vgpuClass, 7)))
            {
                *size = pGpu->sriovState.vfBarSize[1];
            }
        }

        if (bOverrideBar1Size)
        {
            NvU64 bar1SizeInBytes, guestBar1;
            NvU64 gpuBar1LowerLimit = 256 * 1024 * 1024; // BAR1 lower limit for the override_bar1_size parameter

            bar1SizeInBytes = kbusGetPciBarSize(pKernelBus, NV_VFIO_PCI_BAR1_REGION_INDEX);
            if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].sriovEnabled)
            {
                *size = pGpu->sriovState.vfBarSize[1];
            }
            else if (bar1SizeInBytes > gpuBar1LowerLimit)
            {
                guestBar1 = bar1SizeInBytes / vgpuTypeInfo->maxInstance;
                *size = nvPrevPow2_U64(guestBar1);
            }
        }
    }
    else if (regionIndex == NV_VFIO_PCI_BAR2_REGION_INDEX ||
             regionIndex == NV_VFIO_PCI_BAR3_REGION_INDEX)
    {
        status = nv_parse_config_params((const char *)configParams,
                                        "address64", ',', &value);

        if ((status != NV_OK) || (value != 0))
        {
            if (regionIndex == NV_VFIO_PCI_BAR2_REGION_INDEX)
                *size = 0;
            else if (regionIndex == NV_VFIO_PCI_BAR3_REGION_INDEX)
                *size = kbusGetPciBarSize(pKernelBus, NV_VFIO_PCI_BAR2_REGION_INDEX);
        }
    }

    return NV_OK;
}

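/*!
 * @brief Collect BAR sizes, the sparse mmap layout and the BAR0 64-bit
 *        capability for the vGPU device identified by @p pMdevUuid.
 */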
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info
(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU64 *barSizes,
    NvU64 *sparseOffsets,
    NvU64 *sparseSizes,
    NvU32 *sparseCount,
    NvBool *isBar064bit,
    NvU8 *configParams
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_OK;
    OBJGPU *pGpu = NULL;
    void *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
                        rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR), exit);

    pGpu = NV_GET_NV_PRIV_PGPU(pNv);
    if (pGpu == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid\n", __FUNCTION__);
        rmStatus = NV_ERR_INVALID_STATE;
        goto release_lock;
    }

    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
                        nv_vgpu_rm_get_bar_info(pGpu, pMdevUuid, barSizes,
                                                sparseOffsets, sparseSizes,
                                                sparseCount, isBar064bit,
                                                configParams),
                        release_lock);

release_lock:
    // UNLOCK: release API lock
    rmapiLockRelease();

exit:
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

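/*!
 * @brief Return the base address and size of the HBM region assigned to a
 *        vGPU device; both are 0 when no HBM region is assigned.
 */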
NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU64 *hbmAddr,
    NvU64 *size
)
{
    NV_STATUS rmStatus = NV_OK;
    THREAD_STATE_NODE threadState;
    OBJGPU *pGpu = NULL;
    KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
    void *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    if ((size == NULL) || (hbmAddr == NULL))
    {
        rmStatus = NV_ERR_INVALID_ARGUMENT;
        goto exit;
    }

    // LOCK: acquire API lock
    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
                        rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR), exit);

    pGpu = NV_GET_NV_PRIV_PGPU(pNv);
    if (pGpu == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid\n", __FUNCTION__);
        rmStatus = NV_ERR_INVALID_STATE;
        goto release_lock;
    }

    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
                        kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id,
                                                              pMdevUuid,
                                                              &pKernelHostVgpuDevice), release_lock);

    if (pKernelHostVgpuDevice->numValidHbmRegions > 1)
    {
        NV_PRINTF(LEVEL_NOTICE, "non-contiguous HBM region is not supported\n");
        NV_ASSERT(0);
    }

    if (pKernelHostVgpuDevice->hbmRegionList != NULL)
    {
        *hbmAddr = pKernelHostVgpuDevice->hbmRegionList[0].hbmBaseAddr;
        *size = pKernelHostVgpuDevice->hbmRegionList[0].size;
    }
    else
    {
        *hbmAddr = 0;
        *size = 0;
    }

release_lock:
    // UNLOCK: release API lock
    rmapiLockRelease();

exit:
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

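/*!
 * @brief Find @p key in a delimiter-separated list of "name=value" pairs
 *        and return its numeric value, or NV_ERR_OBJECT_NOT_FOUND if the
 *        key is absent.
 */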
static NV_STATUS nv_parse_config_params(
    const char *config_params,
    const char *key,
    const char delim,
    NvU32 *config_value
)
{
    char *ptr, *configParams = rm_remove_spaces(config_params);
    char *token, *value, *name;
    NvU32 data;
    NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND;

    ptr = configParams;
    while ((token = rm_string_token(&ptr, delim)) != NULL)
    {
        if (!(name = rm_string_token(&token, '=')) || !os_string_length(name))
            continue;

        if (!(value = rm_string_token(&token, '=')) || !os_string_length(value))
            continue;

        data = os_strtoul(value, NULL, 0);

        if (os_string_compare(name, key) == 0)
        {
            rmStatus = NV_OK;
            *config_value = data;
        }
    }

    // Free the memory allocated by rm_remove_spaces()
    os_free_mem(configParams);

    return rmStatus;
}

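/*!
 * @brief Build the sparse mmap region list for a vGPU device.
 *
 * For SR-IOV vGPUs (gfid != 0) the regions come from the BIF HAL; otherwise
 * the usermode region and, optionally, the timer region are exposed.
 */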
static NV_STATUS _nv_vgpu_get_sparse_mmap(
    OBJGPU *pGpu,
    KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice,
    NvU64 *offsets,
    NvU64 *sizes,
    NvU32 *numAreas,
    NvU8 *configParams
)
{
    NV_STATUS rmStatus = NV_OK, status;
    OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
    KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
    NvU32 value = 0;

    *numAreas = 0;
    if (pKernelHostVgpuDevice->gfid != 0)
    {
        rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice,
                                                  os_page_size, numAreas, NULL, NULL);
        if (rmStatus == NV_OK)
        {
            if (*numAreas > NVA081_MAX_SPARSE_REGION_COUNT)
            {
                NV_PRINTF(LEVEL_ERROR, "Not enough space for sparse mmap region info\n");
                return NV_ERR_INSUFFICIENT_RESOURCES;
            }

            rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
                                                      numAreas, offsets, sizes);
            if (rmStatus != NV_OK)
                return rmStatus;
        }
    }
    else
    {
        status = nv_parse_config_params((const char *)configParams,
                                        "direct_gpu_timer_access", ',', &value);
        if ((status == NV_OK) && (value != 0))
        {
            NvU64 offset = 0;
            NvU32 size = 0;

            rmStatus = tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr, &offset, &size);
            if (rmStatus == NV_OK)
            {
                offsets[*numAreas] = offset;
                sizes[*numAreas] = size;
                (*numAreas)++;
            }
        }

        value = 0;
        {
            NvU64 offset = 0;
            NvU32 size = 0;

            status = kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo, &offset, &size);
            if (status == NV_OK)
            {
                offsets[*numAreas] = offset;
                sizes[*numAreas] = size;
                (*numAreas)++;
            }
        }
    }

    return rmStatus;
}

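/*!
 * @brief Worker for nv_vgpu_get_bar_info(); expects the caller to hold the
 *        API lock.
 */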
NV_STATUS nv_vgpu_rm_get_bar_info
(
    OBJGPU *pGpu,
    const NvU8 *pMdevUuid,
    NvU64 *barSizes,
    NvU64 *sparseOffsets,
    NvU64 *sparseSizes,
    NvU32 *sparseCount,
    NvBool *isBar064bit,
    NvU8 *configParams
)
{
    KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
    NV_STATUS rmStatus;
    NvU32 i = 0;

    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
                        is_bar_64bit(pGpu, NV_VFIO_PCI_BAR0_REGION_INDEX, isBar064bit),
                        exit);

    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
                        kvgpumgrGetHostVgpuDeviceFromMdevUuid(pGpu->gpuId,
                                                              pMdevUuid,
                                                              &pKernelHostVgpuDevice),
                        exit);

    for (i = 0; i < NVA081_MAX_BAR_REGION_COUNT; i++)
    {
        /*
         * For SRIOV, only VF BAR1 is queried via RM; other BARs are directly
         * queried via the VF config space in vgpu-vfio.
         */
        if (gpuIsSriovEnabled(pGpu) && (i != NV_VFIO_PCI_BAR1_REGION_INDEX))
        {
            barSizes[i] = 0;
            continue;
        }

        rmStatus = _nv_vgpu_get_bar_size(pGpu, pKernelHostVgpuDevice, i,
                                         &barSizes[i], configParams);
        if (rmStatus != NV_OK)
        {
            NV_PRINTF(LEVEL_ERROR, "Failed to query BAR size for index %u, status 0x%x\n",
                      i, rmStatus);
            goto exit;
        }
    }

    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
                        _nv_vgpu_get_sparse_mmap(pGpu, pKernelHostVgpuDevice,
                                                 sparseOffsets, sparseSizes,
                                                 sparseCount, configParams),
                        exit);

exit:
    return rmStatus;
}

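/*!
 * @brief Raise the GPU unbind system event so that clients (such as the
 *        nvidia-vgpu-mgr daemon) can react to the GPU being unbound.
 */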
NV_STATUS NV_API_CALL nv_gpu_unbind_event
(
    nvidia_stack_t *sp,
    NvU32 gpuId,
    NvBool *isEventNotified
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_OK;
    void *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        /*
         * Send gpu_id in the "status" field of the event so that the
         * nvidia-vgpu-mgr daemon knows which GPU is being unbound.
         */
        CliAddSystemEvent(NV0000_NOTIFIERS_GPU_UNBIND_EVENT, gpuId, isEventNotified);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

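/*!
 * @brief Raise the GPU bind system event.
 */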
NV_STATUS NV_API_CALL nv_gpu_bind_event(
    nvidia_stack_t *sp
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_OK;
    void *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        CliAddSystemEvent(NV0000_NOTIFIERS_GPU_BIND_EVENT, 0, NULL);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

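/*!
 * @brief Return NV_OK if the vGPU-VFIO module is present.
 */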
NV_STATUS osIsVgpuVfioPresent(void)
{
    vgpu_vfio_info vgpu_info;

    return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_PRESENT);
}

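/*!
 * @brief Return NV_OK if the vfio-pci-core framework is present.
 */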
NV_STATUS osIsVfioPciCorePresent(void)
{
    vgpu_vfio_info vgpu_info;

    return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VFIO_PCI_CORE_PRESENT);
}

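/*!
 * @brief Wake the VFIO layer waiting on vGPU removal for the given GPU,
 *        passing its decoded PCI address and the removal status.
 */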
void osWakeRemoveVgpu(NvU32 gpuId, NvU32 returnStatus)
{
    vgpu_vfio_info vgpu_info;

    vgpu_info.return_status = returnStatus;
    vgpu_info.domain = gpuDecodeDomain(gpuId);
    vgpu_info.bus = gpuDecodeBus(gpuId);
    vgpu_info.device = gpuDecodeDevice(gpuId);

    os_call_vgpu_vfio((void *)&vgpu_info, CMD_VFIO_WAKE_REMOVE_GPU);
}

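/*!
 * @brief Query GRID CSP support from the OS layer.
 */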
NvU32 osGetGridCspSupport(void)
{
    return os_get_grid_csp_support();
}

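/*!
 * @brief Apply the registry defaults used when RM runs as a vGPU (VGX) host.
 */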
void initVGXSpecificRegistry(OBJGPU *pGpu)
{
    NvU32 data32;

    osWriteRegistryDword(pGpu, NV_REG_STR_RM_POWER_FEATURES, 0x55455555);
    osWriteRegistryDword(pGpu, NV_REG_STR_RM_INFOROM_DISABLE_BBX,
                         NV_REG_STR_RM_INFOROM_DISABLE_BBX_YES);
    osWriteRegistryDword(pGpu, NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR,
                         NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_ENABLE);
    if (osReadRegistryDword(pGpu, NV_REG_STR_RM_DUMP_NVLOG, &data32) != NV_OK)
    {
        osWriteRegistryDword(pGpu, NV_REG_STR_RM_DUMP_NVLOG,
                             NV_REG_STR_RM_DUMP_NVLOG_ENABLE);
    }
    osWriteRegistryDword(pGpu, NV_REG_STR_RM_RC_WATCHDOG,
                         NV_REG_STR_RM_RC_WATCHDOG_DISABLE);
    osWriteRegistryDword(pGpu, NV_REG_STR_CL_FORCE_P2P,
                         DRF_DEF(_REG_STR, _CL_FORCE_P2P, _READ, _DISABLE) |
                         DRF_DEF(_REG_STR, _CL_FORCE_P2P, _WRITE, _DISABLE));
}

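/*!
 * @brief Check whether the device is supported when running as a vGPU guest.
 *        Non-vGPU and paravirtualized configurations return NV_OK; SR-IOV
 *        guests additionally require GRID support, a firmware-capable chip,
 *        and a recognized vGPU USM device ID.
 */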
NV_STATUS rm_is_vgpu_supported_device(
    OS_GPU_INFO *pOsGpuInfo,
    NvU32 pmc_boot_1,
    NvU32 pmc_boot_42
)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
    NvBool is_sriov_enabled = FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _VF, pmc_boot_1);
    NvU32 i;

    // If not running in vGPU mode (guest VM), return NV_OK
    if (!(pHypervisor && pHypervisor->bIsHVMGuest &&
          (FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _PV, pmc_boot_1) ||
           is_sriov_enabled)))
    {
        return NV_OK;
    }

    if (!is_sriov_enabled)
    {
        return NV_OK;
    }

    if (os_is_grid_supported() && !gpumgrIsVgxRmFirmwareCapableChip(pmc_boot_42))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    if (os_is_grid_supported())
    {
        for (i = 0; i < NV_ARRAY_ELEMENTS(sVgpuUsmTypes); i++)
        {
            if (pOsGpuInfo->pci_info.device_id == sVgpuUsmTypes[i].ulDevID &&
                pOsGpuInfo->subsystem_vendor == sVgpuUsmTypes[i].ulSubSystemVendorID &&
                pOsGpuInfo->subsystem_id == sVgpuUsmTypes[i].ulSubID)
            {
                return NV_OK;
            }
        }
    }

    return NV_ERR_NOT_SUPPORTED;
}