/*
 * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*!
 *
 * @file os-hypervisor.c
 * @brief OS-specific hypervisor interfaces for RM
 *
 */

#include "os/os.h"
#include "nv.h"
#include "nv-priv.h"
#include <nvRmReg.h>
#include <virtualization/hypervisor/hypervisor.h>
#include "core/thread_state.h"
#include "core/locks.h"
#include "virtualization/kernel_vgpu_mgr.h"
#include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
#include "kernel/gpu/fifo/kernel_fifo.h"
#include "osapi.h"
#include "virtualization/kernel_hostvgpudeviceapi.h"
#include <objtmr.h>
#include "gpu/bif/kernel_bif.h"
#include "gpu/bus/kern_bus.h"
#include <nv_ref.h>               // NV_PMC_BOOT_1_VGPU
#include "nvdevid.h"

#define NV_VFIO_PCI_BAR0_REGION_INDEX 0
#define NV_VFIO_PCI_BAR1_REGION_INDEX 1
#define NV_VFIO_PCI_BAR2_REGION_INDEX 2
#define NV_VFIO_PCI_BAR3_REGION_INDEX 3

static NV_STATUS nv_parse_config_params(const char *, const char *, const char, NvU32 *);

void hypervisorSetHypervVgpuSupported_IMPL(OBJHYPERVISOR *pHypervisor)
{
    pHypervisor->bIsHypervVgpuSupported = NV_TRUE;
}

NvBool hypervisorIsVgxHyper_IMPL(void)
{
    return os_is_vgx_hyper();
}

NvBool hypervisorIsAC_IMPL(void)
{
    return NV_FALSE;
}

void hypervisorSetACSupported_IMPL(OBJHYPERVISOR *pHypervisor)
{
    pHypervisor->bIsACSupported = NV_TRUE;
}

NV_STATUS hypervisorInjectInterrupt_IMPL
(
    OBJHYPERVISOR *pHypervisor,
    VGPU_NS_INTR   *pVgpuNsIntr
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;

    if (pVgpuNsIntr->pVgpuVfioRef)
    {
        status = osVgpuInjectInterrupt(pVgpuNsIntr->pVgpuVfioRef);
    }
    else if (pVgpuNsIntr->guestMSIAddr && pVgpuNsIntr->guestMSIData)
    {
        status = os_inject_vgx_msi((NvU16)pVgpuNsIntr->guestDomainId,
                                   pVgpuNsIntr->guestMSIAddr,
                                   pVgpuNsIntr->guestMSIData);
    }

    return status;
}

HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
    return hypervisorGetHypervisorType(pHypervisor);
}
static NV_STATUS get_available_instances(
    NvU32 *avail_instances,
    nv_state_t *pNv,
    VGPU_TYPE *vgpuTypeInfo,
    NvU32 pgpuIndex,
    NvU8 devfn
)
{
    NV_STATUS rmStatus = NV_OK;
    OBJGPU *pGpu = NULL;
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);

    *avail_instances = 0;

    pGpu = NV_GET_NV_PRIV_PGPU(pNv);
    if (pGpu == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "%s: GPU handle is not valid\n", __FUNCTION__);
        rmStatus = NV_ERR_INVALID_STATE;
        goto exit;
    }

    /* TODO: Needs a proper fix for the DriverVM config */
    if (gpuIsSriovEnabled(pGpu) &&
        !(pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED)))
    {
        NvU8 fnId = devfn - pGpu->sriovState.firstVFOffset;

        if (fnId > 63)
        {
            NV_ASSERT(0);
            rmStatus = NV_ERR_INVALID_ARGUMENT;
            goto exit;
        }

        if (IS_MIG_ENABLED(pGpu))
        {
            if (IS_MIG_IN_USE(pGpu))
            {
                NvU64 swizzIdInUseMask = 0;
                NvU32 partitionFlag = PARTITIONID_INVALID;
                KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
                NvU32 id;

                swizzIdInUseMask = kmigmgrGetSwizzIdInUseMask(pGpu, pKernelMIGManager);

                rmStatus = kvgpumgrGetPartitionFlag(vgpuTypeInfo->vgpuTypeId,
                                                    &partitionFlag);
                if (rmStatus != NV_OK)
                {
                    // The query is for a non-MIG vGPU type; report zero
                    // instances while MIG is in use.
                    NV_PRINTF(LEVEL_ERROR, "%s: query for a non-MIG vGPU type\n",
                              __FUNCTION__);
                    rmStatus = NV_OK;
                    goto exit;
                }

                // Determine valid swizzids not assigned to any vGPU device.
                FOR_EACH_INDEX_IN_MASK(64, id, swizzIdInUseMask)
                {
                    KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance;
                    NvU64 mask = 0;

                    rmStatus = kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager,
                                                         id, &pKernelMIGGpuInstance);
                    if (rmStatus != NV_OK)
                    {
                        // Didn't find the requested GPU instance
                        NV_PRINTF(LEVEL_ERROR,
                                  "No valid GPU instance with SwizzId - %d found\n", id);
                        goto exit;
                    }

                    mask = NVBIT64(id);

                    if (pKernelMIGGpuInstance->partitionFlag == partitionFlag)
                    {
                        // Validate that the same ID is not already set and a VF is available
                        if (!(mask & pKernelVgpuMgr->pgpuInfo[pgpuIndex].assignedSwizzIdMask) &&
                            !(pKernelVgpuMgr->pgpuInfo[pgpuIndex].createdVfMask & NVBIT64(fnId)))
                        {
                            *avail_instances = 1;
                            break;
                        }
                    }
                }
                FOR_EACH_INDEX_IN_MASK_END;
            }
        }
        else
        {
            if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].numCreatedVgpu < vgpuTypeInfo->maxInstance)
            {
                if (vgpuTypeInfo->gpuInstanceSize)
                {
                    // The query is for a MIG vGPU type, but MIG is disabled;
                    // report zero instances.
                    NV_PRINTF(LEVEL_ERROR, "%s: query for a MIG vGPU type\n",
                              __FUNCTION__);
                    rmStatus = NV_OK;
                    goto exit;
                }

                if (!(pKernelVgpuMgr->pgpuInfo[pgpuIndex].createdVfMask & NVBIT64(fnId)))
                {
                    if (kvgpumgrCheckVgpuTypeCreatable(&pKernelVgpuMgr->pgpuInfo[pgpuIndex], vgpuTypeInfo) == NV_OK)
                        *avail_instances = 1;
                }
            }
        }
    }
    else
    {
        if (kvgpumgrCheckVgpuTypeCreatable(&pKernelVgpuMgr->pgpuInfo[pgpuIndex], vgpuTypeInfo) == NV_OK)
            *avail_instances = vgpuTypeInfo->maxInstance - pKernelVgpuMgr->pgpuInfo[pgpuIndex].numCreatedVgpu;
    }

exit:
    return rmStatus;
}

#define MAX_STR_LEN 256
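
/*!
 * @brief Write a formatted attribute string for a vGPU type into @p buffer.
 *
 * @p type_info selects the attribute: name, description, or the number of
 * instances still creatable (see get_available_instances()).
 *
 * A minimal caller sketch (hypothetical values; the buffer must be sized
 * for the largest attribute):
 *
 *   char buf[MAX_STR_LEN];
 *   rmStatus = nv_vgpu_get_type_info(sp, nv, vgpuTypeId, buf,
 *                                    VGPU_TYPE_DESCRIPTION, devfn);
 */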
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU32 vgpuTypeId,
    char *buffer,
    int type_info,
    NvU8 devfn
)
{
    THREAD_STATE_NODE threadState;
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    NV_STATUS rmStatus = NV_OK;
    VGPU_TYPE *vgpuTypeInfo;
    NvU32 pgpuIndex, i, avail_instances = 0;
    void *fp;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex)) ==
            NV_OK)
        {
            for (i = 0; i < MAX_VGPU_TYPES_PER_PGPU; i++)
            {
                vgpuTypeInfo = pKernelVgpuMgr->pgpuInfo[pgpuIndex].vgpuTypes[i];
                if (vgpuTypeInfo == NULL)
                    break;

                if (vgpuTypeInfo->vgpuTypeId != vgpuTypeId)
                    continue;

                switch (type_info)
                {
                    case VGPU_TYPE_NAME:
                        os_snprintf(buffer, VGPU_STRING_BUFFER_SIZE, "%s\n",
                                    vgpuTypeInfo->vgpuName);
                        break;
                    case VGPU_TYPE_DESCRIPTION:
                        os_snprintf(buffer, MAX_STR_LEN,
                                    "num_heads=%d, frl_config=%d, "
                                    "framebuffer=%lluM, max_resolution=%dx%d, max_instance=%d\n",
                                    vgpuTypeInfo->numHeads, vgpuTypeInfo->frlConfig,
                                    vgpuTypeInfo->profileSize >> 20,
                                    vgpuTypeInfo->maxResolutionX,
                                    vgpuTypeInfo->maxResolutionY,
                                    vgpuTypeInfo->maxInstance);
                        break;
                    case VGPU_TYPE_INSTANCES:
                        rmStatus = get_available_instances(&avail_instances, pNv,
                                                           vgpuTypeInfo,
                                                           pgpuIndex, devfn);
                        if (rmStatus != NV_OK)
                            goto exit;

                        os_snprintf(buffer, MAX_STR_LEN, "%d\n", avail_instances);
                        break;
                    default:
                        rmStatus = NV_ERR_INVALID_ARGUMENT;
                }
                break;
            }
        }

exit:
        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

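/*!
 * @brief Enumerate the vGPU type IDs exposed by a physical GPU.
 *
 * With @p getCreatableTypes set, only types that still have at least one
 * available instance are returned; otherwise all supported types are
 * returned. On SR-IOV GPUs the query must target a virtual function, or
 * zero types are reported.
 *
 * A minimal caller sketch (hypothetical; assumes vgpuTypeIds has room for
 * MAX_VGPU_TYPES_PER_PGPU entries):
 *
 *   NvU32 ids[MAX_VGPU_TYPES_PER_PGPU];
 *   NvU32 count = 0;
 *   rmStatus = nv_vgpu_get_type_ids(sp, nv, &count, ids, isVirtfn,
 *                                   devfn, NV_TRUE);
 */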
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU32 *numVgpuTypes,
    NvU32 *vgpuTypeIds,
    NvBool isVirtfn,
    NvU8 devfn,
    NvBool getCreatableTypes
)
{
    THREAD_STATE_NODE threadState;
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    NV_STATUS rmStatus = NV_OK;
    NvU32 pgpuIndex, i, avail_instances = 0;
    NvU32 numSupportedVgpuTypes = 0;
    VGPU_TYPE *vgpuTypeInfo;
    void *fp;

    if (!vgpuTypeIds || !numVgpuTypes)
        return NV_ERR_INVALID_ARGUMENT;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        if ((rmStatus = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex)) ==
            NV_OK)
        {
            if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].sriovEnabled && !isVirtfn)
            {
                *numVgpuTypes = 0;
            }
            else
            {
                numSupportedVgpuTypes = pKernelVgpuMgr->pgpuInfo[pgpuIndex].numVgpuTypes;
                *numVgpuTypes = 0;

                for (i = 0; i < numSupportedVgpuTypes; i++)
                {
                    vgpuTypeInfo = pKernelVgpuMgr->pgpuInfo[pgpuIndex].vgpuTypes[i];

                    if (!getCreatableTypes)
                    {
                        // Return all available types
                        vgpuTypeIds[*numVgpuTypes] = vgpuTypeInfo->vgpuTypeId;
                        (*numVgpuTypes)++;
                        continue;
                    }

                    rmStatus = get_available_instances(&avail_instances, pNv,
                                                       vgpuTypeInfo, pgpuIndex,
                                                       devfn);
                    if (rmStatus != NV_OK)
                    {
                        NV_PRINTF(LEVEL_ERROR, "Failed to get available instances for vGPU ID: %d, status: 0x%x\n",
                                  vgpuTypeInfo->vgpuTypeId, rmStatus);
                        continue;
                    }

                    if (avail_instances == 0)
                        continue;

                    vgpuTypeIds[*numVgpuTypes] = vgpuTypeInfo->vgpuTypeId;
                    (*numVgpuTypes)++;
                }
            }
        }

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

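/*!
 * @brief Tear down the RM bookkeeping for a vGPU device.
 *
 * Takes the API lock and forwards to kvgpumgrDeleteRequestVgpu(), which
 * looks the device up by its mdev UUID and vGPU ID.
 */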
NV_STATUS NV_API_CALL nv_vgpu_delete(
    nvidia_stack_t *sp,
    const NvU8 *pMdevUuid,
    NvU16 vgpuId
)
{
    THREAD_STATE_NODE threadState;
    void         *fp = NULL;
    NV_STATUS    rmStatus = NV_OK;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrDeleteRequestVgpu(pMdevUuid, vgpuId);
        // UNLOCK: release API lock
        rmapiLockRelease();
    }
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

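/*!
 * @brief Record or update PCI info for one of this GPU's virtual functions.
 *
 * @p cmd selects the operation performed by kvgpumgrProcessVfInfo(); the
 * domain/bus/slot/function tuple identifies the VF.
 */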
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    NvU8 cmd,
    NvU32 domain,
    NvU8 bus,
    NvU8 slot,
    NvU8 function,
    NvBool isMdevAttached,
    void *vf_pci_info
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS     rmStatus = NV_OK;
    void         *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrProcessVfInfo(pNv->gpu_id, cmd, domain, bus, slot, function, isMdevAttached, (vgpu_vf_pci_info *) vf_pci_info);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

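/*!
 * @brief Queue a request to create a vGPU device on this GPU.
 *
 * Associates the mdev UUID with @p vgpuTypeId; the assigned vGPU ID is
 * returned through @p vgpuId.
 */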
NV_STATUS NV_API_CALL nv_vgpu_create_request(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU32 vgpuTypeId,
    NvU16 *vgpuId,
    NvU32 gpuPciBdf
)
{
    THREAD_STATE_NODE threadState;
    void          *fp          = NULL;
    NV_STATUS     rmStatus     = NV_OK;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrCreateRequestVgpu(pNv->gpu_id, pMdevUuid,
                                             vgpuTypeId, vgpuId, gpuPciBdf);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

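/*!
 * @brief Report whether a VF BAR is 64-bit, from cached SR-IOV state.
 *
 * Only region indices 0..2 are accepted; pGpu->sriovState tracks the
 * 64-bit flag for each VF BAR.
 */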
static NV_STATUS is_bar_64bit(
    OBJGPU *pGpu,
    NvU32 regionIndex,
    NvBool *isBar64bit
)
{
    NV_STATUS   rmStatus = NV_OK;

    if (pGpu == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "%s: GPU handle is not valid\n", __FUNCTION__);
        rmStatus = NV_ERR_INVALID_STATE;
        goto exit;
    }

    switch (regionIndex)
    {
        case NV_VFIO_PCI_BAR0_REGION_INDEX:
            *isBar64bit = pGpu->sriovState.b64bitVFBar0;
            break;

        case NV_VFIO_PCI_BAR1_REGION_INDEX:
            *isBar64bit = pGpu->sriovState.b64bitVFBar1;
            break;

        case NV_VFIO_PCI_BAR2_REGION_INDEX:
            *isBar64bit = pGpu->sriovState.b64bitVFBar2;
            break;

        default:
            NV_PRINTF(LEVEL_ERROR, "BAR%d region doesn't exist!\n", regionIndex);
            rmStatus = NV_ERR_INVALID_ARGUMENT;
            goto exit;
    }

    NV_PRINTF(LEVEL_INFO, "BAR%d region is_64bit: %d\n", regionIndex, *isBar64bit);

exit:
    return rmStatus;
}

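/*!
 * @brief Query the size and/or 64-bit state of a vGPU BAR region.
 *
 * BAR1 defaults to the vGPU type's bar1Length (in MB). If the
 * 'override_bar1_size' config param is set, SR-IOV GPUs report the VF BAR1
 * size instead, and non-SR-IOV GPUs report the physical BAR1 split across
 * maxInstance vGPUs, rounded down to a power of two. BAR2/BAR3 sizing
 * depends on the 'address64' config param.
 *
 * A hypothetical vgpu_params entry enabling the override (entries are
 * comma-separated key=value pairs):
 *
 *   "override_bar1_size=1"
 */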
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU64 *size,
    NvU32 regionIndex,
    void *pVgpuVfioRef,
    NvBool *isBar64bit
)
{
    REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
    THREAD_STATE_NODE threadState;
    NV_STATUS    rmStatus = NV_OK, status;
    OBJGPU      *pGpu = NULL;
    KernelBus   *pKernelBus;
    KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
    void         *fp = NULL;
    NvU32        value = 0;
    OBJSYS      *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    /*
     * This function can be used to query the BAR's 64-bit state and/or its
     * size. If neither is requested, return an error.
     */
    if ((size == NULL) && (isBar64bit == NULL))
    {
        rmStatus = NV_ERR_INVALID_ARGUMENT;
        goto exit;
    }

    // LOCK: acquire API lock
    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR), exit);

    pGpu = NV_GET_NV_PRIV_PGPU(pNv);
    if (pGpu == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "%s: GPU handle is not valid\n", __FUNCTION__);
        rmStatus = NV_ERR_INVALID_STATE;
        goto release_lock;
    }

    /* Get the 64-bit state of the requested BAR */
    if (isBar64bit != NULL)
    {
        NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
            is_bar_64bit(pGpu, regionIndex, isBar64bit), release_lock);

        /* The query was only for the BAR's 64-bit state */
        if (size == NULL)
            goto release_lock;
    }

    pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
    *size = kbusGetPciBarSize(pKernelBus, regionIndex);

    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
        kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id,
                                              pMdevUuid,
                                              &pKernelHostVgpuDevice), release_lock);

    pRequestVgpu = pKernelHostVgpuDevice->pRequestVgpuInfoNode;
    if (pRequestVgpu == NULL)
    {
        rmStatus = NV_ERR_INVALID_POINTER;
        goto release_lock;
    }

    pKernelHostVgpuDevice->pVgpuVfioRef = pVgpuVfioRef;

    if (regionIndex == NV_VFIO_PCI_BAR1_REGION_INDEX)
    {
        VGPU_TYPE *vgpuTypeInfo;
        NvU32     pgpuIndex = 0;
        NvBool    bOverrideBar1Size = NV_FALSE;

        // Read the BAR1 length (in MB) from vgpuTypeInfo
        NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
            kvgpumgrGetVgpuTypeInfo(pKernelHostVgpuDevice->vgpuType, &vgpuTypeInfo), release_lock);

        *size = vgpuTypeInfo->bar1Length << 20;

        NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
            kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex), release_lock);

        /*
         * Check for the 'override_bar1_size' param in the vgpuExtraParams
         * list first; if it is missing there, check the vgpu_params list.
         */
        status = nv_parse_config_params((const char*)vgpuTypeInfo->vgpuExtraParams,
                                        "override_bar1_size", ';', &value);

        if (status == NV_OK && value) {
            bOverrideBar1Size = NV_TRUE;
        } else if (status == NV_ERR_OBJECT_NOT_FOUND) {
            status = nv_parse_config_params(pRequestVgpu->configParams,
                                            "override_bar1_size", ',', &value);
            if (status == NV_OK && value)
                bOverrideBar1Size = NV_TRUE;
        }
        if (bOverrideBar1Size) {
            NvU64 bar1SizeInBytes, guestBar1;
            NvU64 gpuBar1LowerLimit = 256 * 1024 * 1024; // BAR1 lower limit for the override_bar1_size parameter

            bar1SizeInBytes = kbusGetPciBarSize(pKernelBus, NV_VFIO_PCI_BAR1_REGION_INDEX);
            if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].sriovEnabled)
            {
                *size = pGpu->sriovState.vfBarSize[1];
            }
            else if (bar1SizeInBytes > gpuBar1LowerLimit)
            {
                guestBar1 = bar1SizeInBytes / vgpuTypeInfo->maxInstance;
                *size = nvPrevPow2_U64(guestBar1);
            }
        }
    }
    else if (regionIndex == NV_VFIO_PCI_BAR2_REGION_INDEX ||
             regionIndex == NV_VFIO_PCI_BAR3_REGION_INDEX)
    {
        status = nv_parse_config_params(pRequestVgpu->configParams,
                                        "address64", ',', &value);

        if ((status != NV_OK) || (value != 0))
        {
            if (regionIndex == NV_VFIO_PCI_BAR2_REGION_INDEX)
                *size = 0;
            else if (regionIndex == NV_VFIO_PCI_BAR3_REGION_INDEX)
                *size = kbusGetPciBarSize(pKernelBus, NV_VFIO_PCI_BAR2_REGION_INDEX);
        }
    }

release_lock:
    // UNLOCK: release API lock
    rmapiLockRelease();

exit:
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

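/*!
 * @brief Return the base address and size of the vGPU's HBM region.
 *
 * Only a single contiguous HBM region is supported; when no region list is
 * present, both outputs are zeroed.
 */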
NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU64 *hbmAddr,
    NvU64 *size
)
{
    NV_STATUS    rmStatus = NV_OK;
    THREAD_STATE_NODE threadState;
    OBJGPU      *pGpu = NULL;
    KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
    void         *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    if ((size == NULL) || (hbmAddr == NULL))
    {
        rmStatus = NV_ERR_INVALID_ARGUMENT;
        goto exit;
    }

    // LOCK: acquire API lock
    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR), exit);

    pGpu = NV_GET_NV_PRIV_PGPU(pNv);
    if (pGpu == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "%s: GPU handle is not valid\n", __FUNCTION__);
        rmStatus = NV_ERR_INVALID_STATE;
        goto release_lock;
    }

    NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
                        kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id,
                                                              pMdevUuid,
                                                              &pKernelHostVgpuDevice), release_lock);
    if (pKernelHostVgpuDevice->numValidHbmRegions > 1)
    {
        NV_PRINTF(LEVEL_NOTICE, "non-contiguous HBM regions are not supported\n");
        NV_ASSERT(0);
    }

    if (pKernelHostVgpuDevice->hbmRegionList != NULL)
    {
        *hbmAddr = pKernelHostVgpuDevice->hbmRegionList[0].hbmBaseAddr;
        *size = pKernelHostVgpuDevice->hbmRegionList[0].size;
    }
    else
    {
        *hbmAddr = 0;
        *size = 0;
    }

release_lock:
    // UNLOCK: release API lock
    rmapiLockRelease();

exit:
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS osVgpuVfioWake(
    void *waitQueue
)
{
    vgpu_vfio_info vgpu_info;

    vgpu_info.waitQueue = waitQueue;

    return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_WAKE_WAIT_QUEUE);
}

NV_STATUS NV_API_CALL nv_vgpu_start(
    nvidia_stack_t *sp,
    const NvU8 *pMdevUuid,
    void *waitQueue,
    NvS32 *returnStatus,
    NvU8 *vmName,
    NvU32 qemuPid
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_OK;
    void *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        rmStatus = kvgpumgrStart(pMdevUuid, waitQueue, returnStatus,
                                 vmName, qemuPid);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }
    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

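/*!
 * @brief Look up a numeric 'key=value' entry in a delimited param string.
 *
 * Entries are separated by @p delim (',' for vgpu_params, ';' for
 * vgpuExtraParams) and values are parsed with os_strtoul() in base 0, so
 * decimal and 0x-prefixed hex both work. Returns NV_ERR_OBJECT_NOT_FOUND
 * if the key is absent.
 *
 * A hypothetical example ('frl_enable' is an illustrative key only):
 *
 *   NvU32 value;
 *   // Sets value to 1 and returns NV_OK.
 *   nv_parse_config_params("frl_enable=0,override_bar1_size=1",
 *                          "override_bar1_size", ',', &value);
 */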
static NV_STATUS nv_parse_config_params(
    const char *config_params,
    const char *key,
    const char delim,
    NvU32 *config_value
)
{
    char *ptr, *configParams = rm_remove_spaces(config_params);
    char *token, *value, *name;
    NvU32 data;
    NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND;

    ptr = configParams;
    while ((token = rm_string_token(&ptr, delim)) != NULL)
    {
        if (!(name = rm_string_token(&token, '=')) || !os_string_length(name))
            continue;

        if (!(value = rm_string_token(&token, '=')) || !os_string_length(value))
            continue;

        data = os_strtoul(value, NULL, 0);

        if (os_string_compare(name, key) == 0)
        {
            rmStatus = NV_OK;
            *config_value = data;
        }
    }

    // Free the memory allocated by rm_remove_spaces()
    os_free_mem(configParams);

    return rmStatus;
}

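/*!
 * @brief Build the list of BAR0 ranges that may be directly mapped for a
 *        vGPU device.
 *
 * For SR-IOV vGPUs (gfid != 0) the regions come from the BIF HAL, which is
 * called once to size the arrays and once to fill them. For non-SR-IOV
 * vGPUs the list is built from the NV_PTIMER range (only when the
 * 'direct_gpu_timer_access' config param is set) and the usermode region.
 * The arrays allocated here are handed to the caller and later freed by
 * nv_vgpu_update_request().
 */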
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(
    nvidia_stack_t *sp,
    nv_state_t *pNv,
    const NvU8 *pMdevUuid,
    NvU64 **offsets,
    NvU64 **sizes,
    NvU32 *numAreas
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_ERR_INVALID_STATE, status;
    OBJGPU *pGpu = NULL;
    OBJTMR *pTmr = NULL;
    KernelFifo *pKernelFifo = NULL;
    void *fp = NULL;
    REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
    KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
    NvU32 bar0TmrMapSize = 0, bar0FifoMapSize = 0, value = 0;
    NvU64 bar0TmrMapOffset = 0, bar0FifoMapOffset = 0;
    NvU64 *vfRegionSizes = NULL;
    NvU64 *vfRegionOffsets = NULL;
    KernelBif *pKernelBif = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        pGpu = NV_GET_NV_PRIV_PGPU(pNv);

        if (pGpu == NULL)
        {
            rmStatus = NV_ERR_INVALID_STATE;
            goto cleanup;
        }
        pTmr = GPU_GET_TIMER(pGpu);
        pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
        pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
        *numAreas = 0;
        rmStatus = kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id, pMdevUuid,
                                                         &pKernelHostVgpuDevice);
        if (rmStatus == NV_OK)
        {
            if (pKernelHostVgpuDevice->gfid != 0)
            {
                rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
                                                          numAreas, NULL, NULL);
                if (rmStatus == NV_OK)
                {
                    os_alloc_mem((void **)&vfRegionOffsets, sizeof(NvU64) * (*numAreas));
                    os_alloc_mem((void **)&vfRegionSizes, sizeof(NvU64) * (*numAreas));
                    if (vfRegionOffsets && vfRegionSizes)
                    {
                        rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
                                                                  numAreas, vfRegionOffsets, vfRegionSizes);
                        if (rmStatus == NV_OK)
                        {
                            *offsets = vfRegionOffsets;
                            *sizes   = vfRegionSizes;
                        }
                    }
                    else
                    {
                        if (vfRegionOffsets != NULL)
                            os_free_mem(vfRegionOffsets);

                        if (vfRegionSizes != NULL)
                            os_free_mem(vfRegionSizes);

                        rmStatus = NV_ERR_INSUFFICIENT_RESOURCES;
                    }
                }
            }
            else
            {
                pRequestVgpu = pKernelHostVgpuDevice->pRequestVgpuInfoNode;
                if (pRequestVgpu == NULL)
                {
                    rmStatus = NV_ERR_INVALID_POINTER;
                    goto cleanup;
                }

                status = nv_parse_config_params(pRequestVgpu->configParams, "direct_gpu_timer_access", ',', &value);
                if ((status == NV_OK) && (value != 0))
                {
                    rmStatus = tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr,
                                                          &bar0TmrMapOffset,
                                                          &bar0TmrMapSize);
                    if (rmStatus == NV_OK)
                        (*numAreas)++;
                    else
                        NV_PRINTF(LEVEL_ERROR,
                                  "%s: failed to get NV_PTIMER region\n",
                                  __FUNCTION__);
                }

                status = kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo,
                                                     &bar0FifoMapOffset,
                                                     &bar0FifoMapSize);
                if (status == NV_OK)
                    (*numAreas)++;

                if (*numAreas != 0)
                {
                    NvU32 i = 0;
                    NvU64 *tmpOffset, *tmpSize;

                    rmStatus = os_alloc_mem((void **)offsets, sizeof(NvU64) * (*numAreas));
                    if (rmStatus != NV_OK)
                        goto cleanup;

                    rmStatus = os_alloc_mem((void **)sizes, sizeof(NvU64) * (*numAreas));
                    if (rmStatus != NV_OK)
                    {
                        os_free_mem(*offsets);
                        goto cleanup;
                    }

                    tmpOffset = *offsets;
                    tmpSize   = *sizes;

                    if (bar0TmrMapSize != 0)
                    {
                        tmpOffset[i] = bar0TmrMapOffset;
                        tmpSize[i] = bar0TmrMapSize;
                        i++;
                    }

                    if (bar0FifoMapSize != 0)
                    {
                        tmpOffset[i] = bar0FifoMapOffset;
                        tmpSize[i] = bar0FifoMapSize;
                    }
                }
            }
        }

cleanup:
        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

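/*!
 * @brief Update the device state and config params of a vGPU request.
 *
 * Also frees the sparse-mmap arrays previously returned by
 * nv_vgpu_get_sparse_mmap(). Returns NV_ERR_OBJECT_NOT_FOUND when no
 * request matches the mdev UUID.
 */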
NV_STATUS NV_API_CALL nv_vgpu_update_request(
    nvidia_stack_t *sp,
    const NvU8 *pMdevUuid,
    VGPU_DEVICE_STATE deviceState,
    NvU64 *offsets,
    NvU64 *sizes,
    const char *configParams
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND;
    void *fp = NULL;
    REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    if (offsets != NULL)
        os_free_mem(offsets);

    if (sizes != NULL)
        os_free_mem(sizes);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        for (pRequestVgpu = listHead(&pKernelVgpuMgr->listRequestVgpuHead);
             pRequestVgpu != NULL;
             pRequestVgpu = listNext(&pKernelVgpuMgr->listRequestVgpuHead, pRequestVgpu))
        {
            if (portMemCmp(pRequestVgpu->mdevUuid, pMdevUuid, VGPU_UUID_SIZE) == 0)
            {
                if (configParams != NULL)
                    portStringCopy(pRequestVgpu->configParams,
                                   sizeof(pRequestVgpu->configParams),
                                   configParams, (portStringLength(configParams) + 1));

                pRequestVgpu->deviceState = deviceState;
                rmStatus = NV_OK;
            }
        }

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

NV_STATUS NV_API_CALL nv_gpu_bind_event(
    nvidia_stack_t *sp
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_OK;
    void *fp = NULL;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    // LOCK: acquire API lock
    if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
    {
        CliAddSystemEvent(NV0000_NOTIFIERS_GPU_BIND_EVENT, 0);

        // UNLOCK: release API lock
        rmapiLockRelease();
    }

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);

    return rmStatus;
}

NV_STATUS osVgpuInjectInterrupt(void *vgpuVfioRef)
{
    vgpu_vfio_info vgpu_info;

    vgpu_info.vgpuVfioRef = vgpuVfioRef;

    return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_INJECT_INTERRUPT);
}

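/*!
 * @brief Register this GPU's vGPU types with the mdev/vfio layer.
 *
 * For SR-IOV GPUs (without DriverVM), registration is performed once per
 * NVIDIA-attached virtual function; otherwise the physical device itself
 * is registered.
 */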
NV_STATUS osVgpuRegisterMdev
(
    OS_GPU_INFO *pOsGpuInfo
)
{
    NV_STATUS status = NV_OK;
    vgpu_vfio_info vgpu_info = {0};
    OBJSYS *pSys = SYS_GET_INSTANCE();
    KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
    KERNEL_PHYS_GPU_INFO *pPhysGpuInfo;
    NvU32 pgpuIndex, i;
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);

    status = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pOsGpuInfo->gpu_id, &pgpuIndex);
    if (status != NV_OK)
        return status;

    pPhysGpuInfo = &(pKernelVgpuMgr->pgpuInfo[pgpuIndex]);

    vgpu_info.numVgpuTypes = pPhysGpuInfo->numVgpuTypes;

    status = os_alloc_mem((void **)&vgpu_info.vgpuTypeIds,
                          ((vgpu_info.numVgpuTypes) * sizeof(NvU32)));
    if (status != NV_OK)
        goto free_mem;

    status = os_alloc_mem((void **)&vgpu_info.vgpuNames,
                          ((vgpu_info.numVgpuTypes) * sizeof(char *)));
    if (status != NV_OK)
        goto free_mem;

    vgpu_info.nv = pOsGpuInfo;
    for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
    {
        status = os_alloc_mem((void **)&vgpu_info.vgpuNames[i], (VGPU_STRING_BUFFER_SIZE * sizeof(char)));
        if (status != NV_OK)
            goto free_mem;

        vgpu_info.vgpuTypeIds[i] = pPhysGpuInfo->vgpuTypes[i]->vgpuTypeId;
        os_snprintf((char *) vgpu_info.vgpuNames[i], VGPU_STRING_BUFFER_SIZE, "%s\n", pPhysGpuInfo->vgpuTypes[i]->vgpuName);
    }

    if ((!pPhysGpuInfo->sriovEnabled) ||
        (pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED)))
    {
        vgpu_info.is_virtfn = NV_FALSE;
        status = os_call_vgpu_vfio((void *)&vgpu_info, CMD_VGPU_VFIO_REGISTER_MDEV);
    }
    else
    {
        for (i = 0; i < MAX_VF_COUNT_PER_GPU; i++)
        {
            if (pPhysGpuInfo->vfPciInfo[i].isNvidiaAttached)
            {
                vgpu_info.is_virtfn =   NV_TRUE;
                vgpu_info.domain    =   pPhysGpuInfo->vfPciInfo[i].domain;
                vgpu_info.bus       =   pPhysGpuInfo->vfPciInfo[i].bus;
                vgpu_info.slot      =   pPhysGpuInfo->vfPciInfo[i].slot;
                vgpu_info.function  =   pPhysGpuInfo->vfPciInfo[i].function;

                status = os_call_vgpu_vfio((void *)&vgpu_info, CMD_VGPU_VFIO_REGISTER_MDEV);
                if (status == NV_OK)
                {
                    pPhysGpuInfo->vfPciInfo[i].isMdevAttached = NV_TRUE;
                }
            }
        }
    }

free_mem:
    if (vgpu_info.vgpuTypeIds)
        os_free_mem(vgpu_info.vgpuTypeIds);

    if (vgpu_info.vgpuNames)
    {
        for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
        {
            if (vgpu_info.vgpuNames[i])
            {
                os_free_mem(vgpu_info.vgpuNames[i]);
            }
        }
        os_free_mem(vgpu_info.vgpuNames);
    }

    return status;
}

NV_STATUS osIsVgpuVfioPresent(void)
{
    vgpu_vfio_info vgpu_info;

    return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_PRESENT);
}

NV_STATUS osIsVfioPciCorePresent(void)
{
    vgpu_vfio_info vgpu_info;

    return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VFIO_PCI_CORE_PRESENT);
}

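/*!
 * @brief Apply the registry defaults used when RM runs as a vGPU host.
 *
 * Sets the power-feature mask; disables InfoROM BBX, the RC watchdog, and
 * forced P2P; enables nonstall interrupt processing in the lockless ISR;
 * and enables NvLog dump unless the key is already set.
 */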
void initVGXSpecificRegistry(OBJGPU *pGpu)
{
    NvU32 data32;
    osWriteRegistryDword(pGpu, NV_REG_STR_RM_POWER_FEATURES, 0x55455555);
    osWriteRegistryDword(pGpu, NV_REG_STR_RM_INFOROM_DISABLE_BBX,
                               NV_REG_STR_RM_INFOROM_DISABLE_BBX_YES);
    osWriteRegistryDword(pGpu, NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR,
                               NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_ENABLE);
    if (osReadRegistryDword(pGpu, NV_REG_STR_RM_DUMP_NVLOG, &data32) != NV_OK)
    {
        osWriteRegistryDword(pGpu, NV_REG_STR_RM_DUMP_NVLOG,
                             NV_REG_STR_RM_DUMP_NVLOG_ENABLE);
    }
    osWriteRegistryDword(pGpu, NV_REG_STR_RM_RC_WATCHDOG,
                               NV_REG_STR_RM_RC_WATCHDOG_DISABLE);
    osWriteRegistryDword(pGpu, NV_REG_STR_CL_FORCE_P2P,
                         DRF_DEF(_REG_STR, _CL_FORCE_P2P, _READ, _DISABLE) |
                         DRF_DEF(_REG_STR, _CL_FORCE_P2P, _WRITE, _DISABLE));
}

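/*!
 * @brief Check whether this device may be used as a vGPU guest device.
 *
 * The NV_PMC_BOOT_1 VGPU field distinguishes paravirtualized (PV) and
 * SR-IOV (VF) vGPU modes; as written, every device passes the check and
 * NV_OK is returned either way.
 */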
NV_STATUS rm_is_vgpu_supported_device(
    OS_GPU_INFO *pOsGpuInfo,
    NvU32       pmc_boot_1
)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
    NvBool is_sriov_enabled = FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _VF, pmc_boot_1);

    // If not running in vGPU mode (guest VM), return NV_OK
    if (!(pHypervisor && pHypervisor->bIsHVMGuest &&
          (FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _PV, pmc_boot_1) ||
           is_sriov_enabled)))
    {
        return NV_OK;
    }

    return NV_OK;
}