/*
 * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*!
 * @file  os-hypervisor.c
 * @brief OS-specific hypervisor interfaces for RM
 */

#include "os/os.h"
#include "nv.h"
#include "nv-priv.h"
#include <nvRmReg.h>
#include <virtualization/hypervisor/hypervisor.h>
#include "core/thread_state.h"
#include "core/locks.h"
#include "virtualization/kernel_vgpu_mgr.h"
#include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
#include "kernel/gpu/fifo/kernel_fifo.h"
#include "osapi.h"
#include "virtualization/kernel_hostvgpudeviceapi.h"
#include <objtmr.h>
#include "gpu/bif/kernel_bif.h"
#include "gpu/bus/kern_bus.h"
#include <nv_ref.h>               // NV_PMC_BOOT_1_VGPU
#include "nvdevid.h"

#include "g_vgpu_chip_flags.h"    // vGPU device names

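/*
 * BAR region indices. These are assumed to mirror the fixed VFIO uAPI
 * indices (VFIO_PCI_BAR0_REGION_INDEX through VFIO_PCI_BAR3_REGION_INDEX
 * in <linux/vfio.h>), so callers can pass VFIO region indices through
 * unchanged.
 */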
#define NV_VFIO_PCI_BAR0_REGION_INDEX 0
#define NV_VFIO_PCI_BAR1_REGION_INDEX 1
#define NV_VFIO_PCI_BAR2_REGION_INDEX 2
#define NV_VFIO_PCI_BAR3_REGION_INDEX 3

static NV_STATUS nv_parse_config_params(const char *, const char *, const char, NvU32 *);

void hypervisorSetHypervVgpuSupported_IMPL(OBJHYPERVISOR *pHypervisor)
{
    pHypervisor->bIsHypervVgpuSupported = NV_TRUE;
}

NvBool hypervisorIsVgxHyper_IMPL(void)
{
    return os_is_vgx_hyper();
}

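/*
 * Inject an interrupt into a vGPU guest: use the VFIO wait-queue reference
 * when one has been registered, otherwise fall back to direct MSI injection
 * with the guest's MSI address/data pair.
 */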
NV_STATUS hypervisorInjectInterrupt_IMPL
(
    OBJHYPERVISOR *pHypervisor,
    VGPU_NS_INTR  *pVgpuNsIntr
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    if (pVgpuNsIntr->pVgpuVfioRef)
    {
        status = osVgpuInjectInterrupt(pVgpuNsIntr->pVgpuVfioRef);
    }
    else if (pVgpuNsIntr->guestMSIAddr && pVgpuNsIntr->guestMSIData)
    {
        status = os_inject_vgx_msi((NvU16)pVgpuNsIntr->guestDomainId,
                                   pVgpuNsIntr->guestMSIAddr,
                                   pVgpuNsIntr->guestMSIData);
    }

    return status;
}

HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
    return hypervisorGetHypervisorType(pHypervisor);
}

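/*
 * Compute how many more instances of the given vGPU type can be created on
 * this physical GPU. On SR-IOV GPUs (without a DriverVM), availability is
 * per-VF: at most one instance, and only if that VF is free and, when MIG is
 * in use, a matching unassigned GPU instance exists. Otherwise it is the
 * type's maxInstance minus the number of vGPUs already created.
 */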
static NV_STATUS get_available_instances(
    NvU32 *avail_instances,
    nv_state_t *pNv,
    VGPU_TYPE *vgpuTypeInfo,
    NvU32 pgpuIndex,
    NvU8 devfn
)
{
    NV_STATUS rmStatus = NV_OK;
    OBJGPU *pGpu = NULL;
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);

    *avail_instances = 0;

    pGpu = NV_GET_NV_PRIV_PGPU(pNv);
    if (pGpu == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "%s: GPU handle is not valid\n", __FUNCTION__);
        rmStatus = NV_ERR_INVALID_STATE;
        goto exit;
    }

    /* TODO: Needs a proper fix for the DriverVM config */
    if (gpuIsSriovEnabled(pGpu) &&
        !(pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED)))
    {
        NvU8 fnId = devfn - pGpu->sriovState.firstVFOffset;

        if (fnId > 63)
        {
            NV_ASSERT(0);
            rmStatus = NV_ERR_INVALID_ARGUMENT;
            goto exit;
        }

        if (IS_MIG_ENABLED(pGpu))
        {
            if (IS_MIG_IN_USE(pGpu))
            {
                NvU64 swizzIdInUseMask = 0;
                NvU32 partitionFlag = PARTITIONID_INVALID;
                KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
                NvU32 id;

                swizzIdInUseMask = kmigmgrGetSwizzIdInUseMask(pGpu, pKernelMIGManager);

                rmStatus = kvgpumgrGetPartitionFlag(vgpuTypeInfo->vgpuTypeId,
                                                    &partitionFlag);
                if (rmStatus != NV_OK)
                {
                    // Query is for a non-MIG vGPU type; none are available with MIG enabled
                    NV_PRINTF(LEVEL_ERROR, "%s: query for a non-MIG vGPU type\n",
                              __FUNCTION__);
                    rmStatus = NV_OK;
                    goto exit;
                }

                // Determine valid swizzIds not assigned to any vGPU device.
                FOR_EACH_INDEX_IN_MASK(64, id, swizzIdInUseMask)
                {
                    KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance;
                    NvU64 mask = 0;

                    rmStatus = kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager,
                                                         id, &pKernelMIGGpuInstance);
                    if (rmStatus != NV_OK)
                    {
                        // Didn't find the requested GPU instance
                        NV_PRINTF(LEVEL_ERROR,
                                  "No valid GPU instance found with swizzId %d\n", id);
                        goto exit;
                    }

                    mask = NVBIT64(id);

                    if (pKernelMIGGpuInstance->partitionFlag == partitionFlag)
                    {
                        // Check that this swizzId is not already assigned and the VF is available
                        if (!(mask & pKernelVgpuMgr->pgpuInfo[pgpuIndex].assignedSwizzIdMask) &&
                            !(pKernelVgpuMgr->pgpuInfo[pgpuIndex].createdVfMask & NVBIT64(fnId)))
                        {
                            *avail_instances = 1;
                            break;
                        }
                    }
                }
                FOR_EACH_INDEX_IN_MASK_END;
            }
        }
        else
        {
            if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].numCreatedVgpu < vgpuTypeInfo->maxInstance)
            {
                if (vgpuTypeInfo->gpuInstanceSize)
                {
                    // Query is for a MIG vGPU type; none are available with MIG disabled
                    NV_PRINTF(LEVEL_ERROR, "%s: query for a MIG vGPU type\n",
                              __FUNCTION__);
                    rmStatus = NV_OK;
                    goto exit;
                }

                if (!(pKernelVgpuMgr->pgpuInfo[pgpuIndex].createdVfMask & NVBIT64(fnId)))
                {
                    if (kvgpumgrCheckVgpuTypeCreatable(pGpu, &pKernelVgpuMgr->pgpuInfo[pgpuIndex], vgpuTypeInfo) == NV_OK)
                        *avail_instances = 1;
                }
            }
        }
    }
    else
    {
        if (kvgpumgrCheckVgpuTypeCreatable(pGpu, &pKernelVgpuMgr->pgpuInfo[pgpuIndex], vgpuTypeInfo) == NV_OK)
            *avail_instances = vgpuTypeInfo->maxInstance - pKernelVgpuMgr->pgpuInfo[pgpuIndex].numCreatedVgpu;
    }

exit:
    return rmStatus;
}

#define MAX_STR_LEN 256
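/*
 * Look up a vGPU type on this GPU and format one of its attributes (name,
 * description, or the number of currently creatable instances) into
 * 'buffer' for the vGPU VFIO layer.
 */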
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU32 vgpuTypeId,
    char *buffer,
    int type_info,
    NvU8 devfn
)
{
    THREAD_STATE_NODE threadState;
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    NV_STATUS rmStatus = NV_OK;
    VGPU_TYPE *vgpuTypeInfo;
    NvU32 pgpuIndex, i, avail_instances = 0;
    void *fp;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex)) ==
            NV_OK)
        {
            for (i = 0; i < MAX_VGPU_TYPES_PER_PGPU; i++)
            {
                vgpuTypeInfo = pKernelVgpuMgr->pgpuInfo[pgpuIndex].vgpuTypes[i];
                if (vgpuTypeInfo == NULL)
                    break;

                if (vgpuTypeInfo->vgpuTypeId != vgpuTypeId)
                    continue;

                switch (type_info)
                {
                    case VGPU_TYPE_NAME:
                        os_snprintf(buffer, VGPU_STRING_BUFFER_SIZE, "%s\n",
                                    vgpuTypeInfo->vgpuName);
                        break;
                    case VGPU_TYPE_DESCRIPTION:
                        os_snprintf(buffer, MAX_STR_LEN,
                                    "num_heads=%d, frl_config=%d, "
                                    "framebuffer=%lluM, max_resolution=%dx%d, max_instance=%d\n",
                                    vgpuTypeInfo->numHeads, vgpuTypeInfo->frlConfig,
                                    vgpuTypeInfo->profileSize >> 20,
                                    vgpuTypeInfo->maxResolutionX,
                                    vgpuTypeInfo->maxResolutionY,
                                    vgpuTypeInfo->maxInstance);
                        break;
                    case VGPU_TYPE_INSTANCES:
                        rmStatus = get_available_instances(&avail_instances, pNv,
                                                           vgpuTypeInfo,
                                                           pgpuIndex, devfn);
                        if (rmStatus != NV_OK)
                            goto exit;

                        os_snprintf(buffer, MAX_STR_LEN, "%d\n", avail_instances);
                        break;
                    default:
                        rmStatus = NV_ERR_INVALID_ARGUMENT;
                }
                break;
            }
        }

exit:
        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

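/*
 * Fill 'vgpuTypeIds' with the vGPU type IDs supported on this GPU. When
 * 'getCreatableTypes' is set, only types with at least one currently
 * available instance are returned.
 */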
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU32 *numVgpuTypes,
    NvU32 *vgpuTypeIds,
    NvBool isVirtfn,
    NvU8 devfn,
    NvBool getCreatableTypes
)
{
    THREAD_STATE_NODE threadState;
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    NV_STATUS rmStatus = NV_OK;
    NvU32 pgpuIndex, i, avail_instances = 0;
    NvU32 numSupportedVgpuTypes = 0;
    VGPU_TYPE *vgpuTypeInfo;
    void *fp;

    if (!vgpuTypeIds || !numVgpuTypes)
        return NV_ERR_INVALID_ARGUMENT;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex)) ==
            NV_OK)
        {
            if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].sriovEnabled && !isVirtfn)
            {
                *numVgpuTypes = 0;
            }
            else
            {
                numSupportedVgpuTypes = pKernelVgpuMgr->pgpuInfo[pgpuIndex].numVgpuTypes;
                *numVgpuTypes = 0;

                for (i = 0; i < numSupportedVgpuTypes; i++)
                {
                    vgpuTypeInfo = pKernelVgpuMgr->pgpuInfo[pgpuIndex].vgpuTypes[i];

                    if (!getCreatableTypes)
                    {
                        // Return all supported types
                        vgpuTypeIds[*numVgpuTypes] = vgpuTypeInfo->vgpuTypeId;
                        (*numVgpuTypes)++;
                        continue;
                    }

                    rmStatus = get_available_instances(&avail_instances, pNv,
                                                       vgpuTypeInfo, pgpuIndex,
                                                       devfn);
                    if (rmStatus != NV_OK)
                    {
                        NV_PRINTF(LEVEL_ERROR, "Failed to get available instances for vGPU ID: %d, status: 0x%x\n",
                                  vgpuTypeInfo->vgpuTypeId, rmStatus);
                        continue;
                    }

                    if (avail_instances == 0)
                        continue;

                    vgpuTypeIds[*numVgpuTypes] = vgpuTypeInfo->vgpuTypeId;
                    (*numVgpuTypes)++;
                }
            }
        }

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

NV_STATUS NV_API_CALL nv_vgpu_delete(
    nvidia_stack_t *sp,
    const NvU8 *pMdevUuid,
    NvU16 vgpuId
)
{
    THREAD_STATE_NODE threadState;
    void         *fp = NULL;
    NV_STATUS     rmStatus = NV_OK;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrDeleteRequestVgpu(pMdevUuid, vgpuId);
        // UNLOCK: release API lock
        rmapiLockRelease();
    }
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU8 cmd,
    NvU32 domain,
    NvU8 bus,
    NvU8 slot,
    NvU8 function,
    NvBool isMdevAttached,
    void *vf_pci_info
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS     rmStatus = NV_OK;
    void         *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrProcessVfInfo(pNv->gpu_id, cmd, domain, bus, slot,
                                         function, isMdevAttached,
                                         (vgpu_vf_pci_info *)vf_pci_info);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

NV_STATUS NV_API_CALL nv_vgpu_create_request(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU32 vgpuTypeId,
    NvU16 *vgpuId,
    NvU32 gpuPciBdf
)
{
    THREAD_STATE_NODE threadState;
    void          *fp          = NULL;
    NV_STATUS     rmStatus     = NV_OK;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrCreateRequestVgpu(pNv->gpu_id, pMdevUuid,
                                             vgpuTypeId, vgpuId, gpuPciBdf);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

static NV_STATUS is_bar_64bit(
    OBJGPU *pGpu,
    NvU32 regionIndex,
    NvBool *isBar64bit
)
{
    NV_STATUS   rmStatus = NV_OK;

    if (pGpu == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "%s: GPU handle is not valid\n", __FUNCTION__);
        rmStatus = NV_ERR_INVALID_STATE;
        goto exit;
    }

    switch (regionIndex)
    {
        case NV_VFIO_PCI_BAR0_REGION_INDEX:
            *isBar64bit = pGpu->sriovState.b64bitVFBar0;
            break;

        case NV_VFIO_PCI_BAR1_REGION_INDEX:
            *isBar64bit = pGpu->sriovState.b64bitVFBar1;
            break;

        case NV_VFIO_PCI_BAR2_REGION_INDEX:
            *isBar64bit = pGpu->sriovState.b64bitVFBar2;
            break;

        default:
            NV_PRINTF(LEVEL_ERROR, "BAR%d region doesn't exist!\n", regionIndex);
            rmStatus = NV_ERR_INVALID_ARGUMENT;
            goto exit;
    }

    NV_PRINTF(LEVEL_INFO, "BAR%d region is_64bit: %d\n", regionIndex, *isBar64bit);

exit:
    return rmStatus;
}

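/*
 * Report the virtual BAR properties exposed to a vGPU device: the 64-bit
 * capability of the requested BAR and/or its size. BAR1 is derived from the
 * vGPU type's bar1Length (in megabytes) and may be clamped for resizable
 * BAR1 VFs or overridden via the 'override_bar1_size' config parameter;
 * BAR2/BAR3 sizing depends on the 'address64' config parameter.
 */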
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU64 *size,
    NvU32 regionIndex,
    void *pVgpuVfioRef,
    NvBool *isBar64bit
)
{
    REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
    THREAD_STATE_NODE threadState;
    NV_STATUS    rmStatus = NV_OK, status;
    OBJGPU      *pGpu = NULL;
    KernelBus   *pKernelBus;
    KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
    void        *fp = NULL;
    NvU32        value = 0;
    OBJSYS      *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    /*
     * This function can query the BAR 64-bit state and/or the BAR size.
     * If neither is queried, return an error.
     */
    if ((size == NULL) && (isBar64bit == NULL))
    {
        rmStatus = NV_ERR_INVALID_ARGUMENT;
        goto exit;
    }

    // LOCK: acquire API lock
    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR), exit);

    pGpu = NV_GET_NV_PRIV_PGPU(pNv);
    if (pGpu == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "%s: GPU handle is not valid\n", __FUNCTION__);
        rmStatus = NV_ERR_INVALID_STATE;
        goto release_lock;
    }

    /* Get the 64-bit state of the requested BAR */
    if (isBar64bit != NULL)
    {
        NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
            is_bar_64bit(pGpu, regionIndex, isBar64bit), release_lock);

        /* Query is only for the BAR 64-bit state */
        if (size == NULL)
            goto release_lock;
    }

    pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
    *size = kbusGetPciBarSize(pKernelBus, regionIndex);

    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
        kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id,
                                              pMdevUuid,
                                              &pKernelHostVgpuDevice), release_lock);

    pRequestVgpu = pKernelHostVgpuDevice->pRequestVgpuInfoNode;
    if (pRequestVgpu == NULL)
    {
        rmStatus = NV_ERR_INVALID_POINTER;
        goto release_lock;
    }

    pKernelHostVgpuDevice->pVgpuVfioRef = pVgpuVfioRef;

    if (regionIndex == NV_VFIO_PCI_BAR1_REGION_INDEX)
    {
        VGPU_TYPE *vgpuTypeInfo;
        NvU32     pgpuIndex = 0;
        NvBool    bOverrideBar1Size = NV_FALSE;

        // Read the BAR1 length from vgpuTypeInfo
        NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
            kvgpumgrGetVgpuTypeInfo(pKernelHostVgpuDevice->vgpuType, &vgpuTypeInfo), release_lock);

        *size = vgpuTypeInfo->bar1Length << 20;

        NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
            kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex), release_lock);

        /*
         * Check for the 'override_bar1_size' param in the vgpuExtraParams list
         * first; if the param is missing there, check the vgpu_params list.
         */
        status = nv_parse_config_params((const char*)vgpuTypeInfo->vgpuExtraParams,
                                        "override_bar1_size", ';', &value);

        if (status == NV_OK && value)
        {
            bOverrideBar1Size = NV_TRUE;
        }
        else if (status == NV_ERR_OBJECT_NOT_FOUND)
        {
            status = nv_parse_config_params(pRequestVgpu->configParams,
                                            "override_bar1_size", ',', &value);
            if (status == NV_OK && value)
                bOverrideBar1Size = NV_TRUE;
        }

        if (gpuIsVfResizableBAR1Supported(pGpu))
        {
            if ((*size > pGpu->sriovState.vfBarSize[1]) ||
                (!portStringCompare("Compute", (const char *)vgpuTypeInfo->vgpuClass, 7)))
            {
                *size = pGpu->sriovState.vfBarSize[1];
            }
        }

        if (bOverrideBar1Size)
        {
            NvU64 bar1SizeInBytes, guestBar1;
            NvU64 gpuBar1LowerLimit = 256 * 1024 * 1024; // BAR1 lower limit for the override_bar1_size parameter

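            /*
             * Illustrative example (not from the original source): on a
             * non-SRIOV GPU with a 16 GB physical BAR1 and maxInstance = 3,
             * guestBar1 is ~5.33 GB, so the guest is given the previous
             * power of two, 4 GB. The override is skipped when the physical
             * BAR1 is at or below the 256 MB lower limit.
             */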
            bar1SizeInBytes = kbusGetPciBarSize(pKernelBus, NV_VFIO_PCI_BAR1_REGION_INDEX);
            if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].sriovEnabled)
            {
                *size = pGpu->sriovState.vfBarSize[1];
            }
            else if (bar1SizeInBytes > gpuBar1LowerLimit)
            {
                guestBar1 = bar1SizeInBytes / vgpuTypeInfo->maxInstance;
                *size = nvPrevPow2_U64(guestBar1);
            }
        }
    }
    else if (regionIndex == NV_VFIO_PCI_BAR2_REGION_INDEX ||
             regionIndex == NV_VFIO_PCI_BAR3_REGION_INDEX)
    {
        status = nv_parse_config_params(pRequestVgpu->configParams,
                                        "address64", ',', &value);

        if ((status != NV_OK) || (value != 0))
        {
            if (regionIndex == NV_VFIO_PCI_BAR2_REGION_INDEX)
                *size = 0;
            else if (regionIndex == NV_VFIO_PCI_BAR3_REGION_INDEX)
                *size = kbusGetPciBarSize(pKernelBus, NV_VFIO_PCI_BAR2_REGION_INDEX);
        }
    }

release_lock:
    // UNLOCK: release API lock
    rmapiLockRelease();

exit:
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

666 
667 NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(
668     nvidia_stack_t *sp,
669     nv_state_t *pNv,
670     const NvU8 *pMdevUuid,
671     NvU64 *hbmAddr,
672     NvU64 *size
673 )
674 {
675     NV_STATUS    rmStatus = NV_OK;
676     THREAD_STATE_NODE threadState;
677     OBJGPU      *pGpu = NULL;
678     KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
679     void         *fp = NULL;
680 
681     NV_ENTER_RM_RUNTIME(sp,fp);
682     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
683 
684     if ((size == NULL) || (hbmAddr == NULL))
685     {
686         rmStatus = NV_ERR_INVALID_ARGUMENT;
687         goto exit;
688     }
689 
690     // LOCK: acquire API lock
691     NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR), exit);
692 
693     pGpu = NV_GET_NV_PRIV_PGPU(pNv);
694     if (pGpu == NULL)
695     {
696         NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n", __FUNCTION__);
697         rmStatus = NV_ERR_INVALID_STATE;
698         goto release_lock;
699     }
700 
701     NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
702                         kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id,
703                                                               pMdevUuid,
704                                                               &pKernelHostVgpuDevice), release_lock);
705     if (pKernelHostVgpuDevice->numValidHbmRegions > 1)
706     {
707         NV_PRINTF(LEVEL_NOTICE, "non contiguous HBM region is not supported\n");
708         NV_ASSERT(0);
709     }
710 
711     if (pKernelHostVgpuDevice->hbmRegionList != NULL)
712     {
713         *hbmAddr = pKernelHostVgpuDevice->hbmRegionList[0].hbmBaseAddr;
714         *size = pKernelHostVgpuDevice->hbmRegionList[0].size;
715     }
716     else
717     {
718         *hbmAddr = 0;
719         *size = 0;
720     }
721 
722 release_lock:
723     // UNLOCK: release API lock
724     rmapiLockRelease();
725 exit:
726 
727     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
728     NV_EXIT_RM_RUNTIME(sp,fp);
729     return rmStatus;
730 }
731 
NV_STATUS osVgpuVfioWake(
    void *waitQueue
)
{
    vgpu_vfio_info vgpu_info;

    vgpu_info.waitQueue = waitQueue;

    return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_WAKE_WAIT_QUEUE);
}

NV_STATUS NV_API_CALL nv_vgpu_start(
    nvidia_stack_t *sp,
    const NvU8 *pMdevUuid,
    void *waitQueue,
    NvS32 *returnStatus,
    NvU8 *vmName,
    NvU32 qemuPid
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_OK;
    void *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrStart(pMdevUuid, waitQueue, returnStatus,
                                 vmName, qemuPid);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

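/*
 * Scan a delimiter-separated list of "key=value" pairs for 'key' and return
 * its numeric value (parsed with base auto-detection, so hex "0x..." works).
 * For example, with hypothetical parameters, given
 * config_params = "frl_enable=0;override_bar1_size=1" and delim = ';',
 * looking up "override_bar1_size" yields 1 and NV_OK, while a missing key
 * yields NV_ERR_OBJECT_NOT_FOUND.
 */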
static NV_STATUS nv_parse_config_params(
    const char *config_params,
    const char *key,
    const char delim,
    NvU32 *config_value
)
{
    char *ptr, *configParams = rm_remove_spaces(config_params);
    char *token, *value, *name;
    NvU32 data;
    NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND;

    ptr = configParams;
    while ((token = rm_string_token(&ptr, delim)) != NULL)
    {
        if (!(name = rm_string_token(&token, '=')) || !os_string_length(name))
            continue;

        if (!(value = rm_string_token(&token, '=')) || !os_string_length(value))
            continue;

        data = os_strtoul(value, NULL, 0);

        if (os_string_compare(name, key) == 0)
        {
            rmStatus = NV_OK;
            *config_value = data;
        }
    }

    // Free the memory allocated by rm_remove_spaces()
    os_free_mem(configParams);

    return rmStatus;
}

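/*
 * Build the list of (offset, size) region pairs of a vGPU device that a
 * guest may map directly. For SR-IOV VFs (gfid != 0) the regions come from
 * the BIF HAL; on legacy vGPU, the NV_PTIMER window (when
 * 'direct_gpu_timer_access' is configured) and the usermode region are
 * exposed. The arrays are allocated here and ownership passes to the caller.
 */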
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU64 **offsets,
    NvU64 **sizes,
    NvU32 *numAreas
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_ERR_INVALID_STATE, status;
    OBJGPU *pGpu = NULL;
    OBJTMR *pTmr = NULL;
    KernelFifo *pKernelFifo = NULL;
    void *fp = NULL;
    REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
    KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
    NvU32 bar0TmrMapSize = 0, bar0FifoMapSize = 0, value = 0;
    NvU64 bar0TmrMapOffset = 0, bar0FifoMapOffset = 0;
    NvU64 *vfRegionSizes = NULL;
    NvU64 *vfRegionOffsets = NULL;
    KernelBif *pKernelBif = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        pGpu = NV_GET_NV_PRIV_PGPU(pNv);

        if (pGpu == NULL)
        {
            rmStatus = NV_ERR_INVALID_STATE;
            goto cleanup;
        }
        pTmr = GPU_GET_TIMER(pGpu);
        pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
        pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
        *numAreas = 0;
        rmStatus = kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id, pMdevUuid,
                                                         &pKernelHostVgpuDevice);
        if (rmStatus == NV_OK)
        {
            if (pKernelHostVgpuDevice->gfid != 0)
            {
                // The first HAL call counts the regions; the second fills them in.
                rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
                                                          numAreas, NULL, NULL);
                if (rmStatus == NV_OK)
                {
                    rmStatus = os_alloc_mem((void **)&vfRegionOffsets, sizeof(NvU64) * (*numAreas));
                    if (rmStatus != NV_OK)
                        goto cleanup;

                    rmStatus = os_alloc_mem((void **)&vfRegionSizes, sizeof(NvU64) * (*numAreas));
                    if (rmStatus != NV_OK)
                    {
                        os_free_mem(vfRegionOffsets);
                        goto cleanup;
                    }

                    rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
                                                              numAreas, vfRegionOffsets, vfRegionSizes);
                    if (rmStatus == NV_OK)
                    {
                        *offsets = vfRegionOffsets;
                        *sizes   = vfRegionSizes;
                    }
                    else
                    {
                        os_free_mem(vfRegionOffsets);
                        os_free_mem(vfRegionSizes);
                    }
                }
            }
            else
            {
                pRequestVgpu = pKernelHostVgpuDevice->pRequestVgpuInfoNode;
                if (pRequestVgpu == NULL)
                {
                    rmStatus = NV_ERR_INVALID_POINTER;
                    goto cleanup;
                }

                status = nv_parse_config_params(pRequestVgpu->configParams, "direct_gpu_timer_access", ',', &value);
                if ((status == NV_OK) && (value != 0))
                {
                    rmStatus = tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr,
                                                          &bar0TmrMapOffset,
                                                          &bar0TmrMapSize);
                    if (rmStatus == NV_OK)
                        (*numAreas)++;
                    else
                        NV_PRINTF(LEVEL_ERROR,
                                  "%s: failed to get the NV_PTIMER region\n",
                                  __FUNCTION__);
                }

                status = kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo,
                                                     &bar0FifoMapOffset,
                                                     &bar0FifoMapSize);
                if (status == NV_OK)
                    (*numAreas)++;

                if (*numAreas != 0)
                {
                    NvU32 i = 0;
                    NvU64 *tmpOffset, *tmpSize;
                    rmStatus = os_alloc_mem((void **)offsets, sizeof(NvU64) * (*numAreas));
                    if (rmStatus != NV_OK)
                        goto cleanup;

                    rmStatus = os_alloc_mem((void **)sizes, sizeof(NvU64) * (*numAreas));
                    if (rmStatus != NV_OK)
                    {
                        os_free_mem(*offsets);
                        goto cleanup;
                    }

                    tmpOffset = *offsets;
                    tmpSize   = *sizes;

                    if (bar0TmrMapSize != 0)
                    {
                        tmpOffset[i] = bar0TmrMapOffset;
                        tmpSize[i] = bar0TmrMapSize;
                        i++;
                    }

                    if (bar0FifoMapSize != 0)
                    {
                        tmpOffset[i] = bar0FifoMapOffset;
                        tmpSize[i] = bar0FifoMapSize;
                    }
                }
            }
        }

cleanup:
        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

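/*
 * Update a pending vGPU request identified by its mdev UUID with new config
 * parameters and device state. Note: this also frees the offset/size arrays
 * previously handed out by nv_vgpu_get_sparse_mmap().
 */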
NV_STATUS NV_API_CALL nv_vgpu_update_request(
    nvidia_stack_t *sp,
    const NvU8 *pMdevUuid,
    VGPU_DEVICE_STATE deviceState,
    NvU64 *offsets,
    NvU64 *sizes,
    const char *configParams
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND;
    void *fp = NULL;
    REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    if (offsets != NULL)
        os_free_mem(offsets);

    if (sizes != NULL)
        os_free_mem(sizes);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        for (pRequestVgpu = listHead(&pKernelVgpuMgr->listRequestVgpuHead);
             pRequestVgpu != NULL;
             pRequestVgpu = listNext(&pKernelVgpuMgr->listRequestVgpuHead, pRequestVgpu))
        {
            if (portMemCmp(pRequestVgpu->mdevUuid, pMdevUuid, VGPU_UUID_SIZE) == 0)
            {
                if (configParams != NULL)
                    portStringCopy(pRequestVgpu->configParams,
                                   sizeof(pRequestVgpu->configParams),
                                   configParams, (portStringLength(configParams) + 1));

                pRequestVgpu->deviceState = deviceState;
                rmStatus = NV_OK;
            }
        }

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

1030 
1031 NV_STATUS NV_API_CALL nv_gpu_bind_event(
1032     nvidia_stack_t *sp
1033 )
1034 {
1035     THREAD_STATE_NODE threadState;
1036     NV_STATUS rmStatus = NV_OK;
1037     void *fp = NULL;
1038 
1039     NV_ENTER_RM_RUNTIME(sp,fp);
1040     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1041 
1042     // LOCK: acquire API lock
1043     if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
1044     {
1045         CliAddSystemEvent(NV0000_NOTIFIERS_GPU_BIND_EVENT, 0);
1046 
1047         // UNLOCK: release API lock
1048         rmapiLockRelease();
1049     }
1050 
1051     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1052     NV_EXIT_RM_RUNTIME(sp,fp);
1053 
1054     return rmStatus;
1055 }
1056 
NV_STATUS osVgpuInjectInterrupt(void *vgpuVfioRef)
{
    vgpu_vfio_info vgpu_info;

    vgpu_info.vgpuVfioRef = vgpuVfioRef;

    return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_INJECT_INTERRUPT);
}

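/*
 * Register the GPU (or, on SR-IOV without a DriverVM, each NVIDIA-attached
 * VF) with the vGPU VFIO module, passing the supported vGPU type IDs and
 * names.
 */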
NV_STATUS osVgpuRegisterMdev
(
    OS_GPU_INFO *pOsGpuInfo
)
{
    NV_STATUS status = NV_OK;
    vgpu_vfio_info vgpu_info = {0};
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    KERNEL_PHYS_GPU_INFO *pPhysGpuInfo;
    NvU32 pgpuIndex, i;
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);

    status = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pOsGpuInfo->gpu_id, &pgpuIndex);
    if (status != NV_OK)
        return status;

    pPhysGpuInfo = &(pKernelVgpuMgr->pgpuInfo[pgpuIndex]);

    vgpu_info.numVgpuTypes = pPhysGpuInfo->numVgpuTypes;

    status = os_alloc_mem((void **)&vgpu_info.vgpuTypeIds,
                          ((vgpu_info.numVgpuTypes) * sizeof(NvU32)));
    if (status != NV_OK)
        goto free_mem;

    status = os_alloc_mem((void **)&vgpu_info.vgpuNames,
                          ((vgpu_info.numVgpuTypes) * sizeof(char *)));
    if (status != NV_OK)
        goto free_mem;

    // Zero the name array so a partial allocation failure below frees only valid entries
    portMemSet(vgpu_info.vgpuNames, 0, (vgpu_info.numVgpuTypes * sizeof(char *)));

    vgpu_info.nv = pOsGpuInfo;
    for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
    {
        status = os_alloc_mem((void **)&vgpu_info.vgpuNames[i], (VGPU_STRING_BUFFER_SIZE * sizeof(char)));
        if (status != NV_OK)
            goto free_mem;

        vgpu_info.vgpuTypeIds[i] = pPhysGpuInfo->vgpuTypes[i]->vgpuTypeId;
        os_snprintf((char *) vgpu_info.vgpuNames[i], VGPU_STRING_BUFFER_SIZE, "%s\n", pPhysGpuInfo->vgpuTypes[i]->vgpuName);
    }

    if ((!pPhysGpuInfo->sriovEnabled) ||
        (pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED)))
    {
        vgpu_info.is_virtfn = NV_FALSE;
        status = os_call_vgpu_vfio((void *)&vgpu_info, CMD_VGPU_VFIO_REGISTER_MDEV);
    }
    else
    {
        for (i = 0; i < MAX_VF_COUNT_PER_GPU; i++)
        {
            if (pPhysGpuInfo->vfPciInfo[i].isNvidiaAttached)
            {
                vgpu_info.is_virtfn = NV_TRUE;
                vgpu_info.domain    = pPhysGpuInfo->vfPciInfo[i].domain;
                vgpu_info.bus       = pPhysGpuInfo->vfPciInfo[i].bus;
                vgpu_info.slot      = pPhysGpuInfo->vfPciInfo[i].slot;
                vgpu_info.function  = pPhysGpuInfo->vfPciInfo[i].function;

                status = os_call_vgpu_vfio((void *)&vgpu_info, CMD_VGPU_VFIO_REGISTER_MDEV);
                if (status == NV_OK)
                {
                    pPhysGpuInfo->vfPciInfo[i].isMdevAttached = NV_TRUE;
                }
            }
        }
    }

free_mem:
    if (vgpu_info.vgpuTypeIds)
        os_free_mem(vgpu_info.vgpuTypeIds);

    if (vgpu_info.vgpuNames)
    {
        for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
        {
            if (vgpu_info.vgpuNames[i])
            {
                os_free_mem(vgpu_info.vgpuNames[i]);
            }
        }
        os_free_mem(vgpu_info.vgpuNames);
    }

    return status;
}

NV_STATUS osIsVgpuVfioPresent(void)
{
    vgpu_vfio_info vgpu_info;

    return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_PRESENT);
}

NV_STATUS osIsVfioPciCorePresent(void)
{
    vgpu_vfio_info vgpu_info;

    return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VFIO_PCI_CORE_PRESENT);
}

NvU32 osGetGridCspSupport(void)
{
    return os_get_grid_csp_support();
}

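/*
 * Seed vGPU-host-specific registry defaults: power features, InfoROM BBX
 * writes disabled, nonstall interrupts handled in the lockless ISR, NvLog
 * dump enabled when unset, the RC watchdog disabled, and forced P2P off.
 */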
void initVGXSpecificRegistry(OBJGPU *pGpu)
{
    NvU32 data32;

    osWriteRegistryDword(pGpu, NV_REG_STR_RM_POWER_FEATURES, 0x55455555);
    osWriteRegistryDword(pGpu, NV_REG_STR_RM_INFOROM_DISABLE_BBX,
                               NV_REG_STR_RM_INFOROM_DISABLE_BBX_YES);
    osWriteRegistryDword(pGpu, NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR,
                               NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_ENABLE);
    if (osReadRegistryDword(pGpu, NV_REG_STR_RM_DUMP_NVLOG, &data32) != NV_OK)
    {
        osWriteRegistryDword(pGpu, NV_REG_STR_RM_DUMP_NVLOG,
                             NV_REG_STR_RM_DUMP_NVLOG_ENABLE);
    }
    osWriteRegistryDword(pGpu, NV_REG_STR_RM_RC_WATCHDOG,
                               NV_REG_STR_RM_RC_WATCHDOG_DISABLE);
    osWriteRegistryDword(pGpu, NV_REG_STR_CL_FORCE_P2P,
                         DRF_DEF(_REG_STR, _CL_FORCE_P2P, _READ, _DISABLE) |
                         DRF_DEF(_REG_STR, _CL_FORCE_P2P, _WRITE, _DISABLE));
}
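/*
 * Decide whether RM should treat this device as supported inside a VM.
 * Bare-metal configurations and paravirtualized (non-SRIOV) vGPU guests
 * return NV_OK. SR-IOV VFs are accepted only on GRID-supported platforms
 * with a vGPU-firmware-capable chip and a device/subsystem ID that matches
 * a known vGPU (USM) type.
 */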
NV_STATUS rm_is_vgpu_supported_device(
    OS_GPU_INFO *pOsGpuInfo,
    NvU32       pmc_boot_1,
    NvU32       pmc_boot_42
)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
    NvBool is_sriov_enabled = FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _VF, pmc_boot_1);
    NvU32 i;

    // If not running in vGPU mode (guest VM), return NV_OK
    if (!(pHypervisor && pHypervisor->bIsHVMGuest &&
          (FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _PV, pmc_boot_1) ||
           is_sriov_enabled)))
    {
        return NV_OK;
    }

    // Non-SRIOV (PV) vGPU guests need no further checks
    if (!is_sriov_enabled)
    {
        return NV_OK;
    }

    if (os_is_grid_supported() && !gpumgrIsVgxRmFirmwareCapableChip(pmc_boot_42))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    if (os_is_grid_supported())
    {
        for (i = 0; i < NV_ARRAY_ELEMENTS(sVgpuUsmTypes); i++)
        {
            if (pOsGpuInfo->pci_info.device_id == sVgpuUsmTypes[i].ulDevID &&
                    pOsGpuInfo->subsystem_vendor == sVgpuUsmTypes[i].ulSubSystemVendorID &&
                    pOsGpuInfo->subsystem_id == sVgpuUsmTypes[i].ulSubID)
            {
                return NV_OK;
            }
        }
    }

    return NV_ERR_NOT_SUPPORTED;
}
