1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 
25 
26 #include <nv_ref.h>
27 #include <nv.h>
28 #include <nv-priv.h>
29 #include <os/os.h>
30 #include <osapi.h>
31 #include <class/cl0000.h>
32 #include <rmosxfac.h> // Declares RmInitRm().
33 #include "gpu/gpu.h"
34 #include <osfuncs.h>
35 #include <platform/chipset/chipset.h>
36 
37 #include <objtmr.h>
38 #include <gpu/subdevice/subdevice.h>
39 #include <mem_mgr/mem.h>
40 #include "kernel/gpu/mem_mgr/mem_mgr.h"
41 
42 #include <gpu/mem_sys/kern_mem_sys.h>
43 
44 #include <diagnostics/journal.h>
45 #include <nvrm_registry.h>
46 
47 #include <nvUnixVersion.h>
48 #include <gpu_mgr/gpu_mgr.h>
49 #include <core/thread_state.h>
50 #include <platform/acpi_common.h>
51 #include <core/locks.h>
52 
53 #include <mem_mgr/p2p.h>
54 
55 #include "rmapi/exports.h"
56 #include "rmapi/rmapi_utils.h"
57 #include "rmapi/rs_utils.h"
58 #include "rmapi/resource_fwd_decls.h"
59 #include <nv-kernel-rmapi-ops.h>
60 #include <rmobjexportimport.h>
61 #include "nv-reg.h"
62 #include "core/hal_mgr.h"
63 #include "gpu/device/device.h"
64 
65 #include "resserv/rs_server.h"
66 #include "resserv/rs_client.h"
67 #include "resserv/rs_resource.h"
68 #include "gpu/gpu_uuid.h"
69 
70 #include "platform/chipset/pci_pbi.h"
71 
72 #include "ctrl/ctrl0000/ctrl0000system.h"
73 #include "ctrl/ctrl0073/ctrl0073dp.h"
74 #include "ctrl/ctrl0073/ctrl0073system.h"
75 #include "ctrl/ctrl0073/ctrl0073specific.h"
76 #include "ctrl/ctrl2080/ctrl2080bios.h"
77 #include "ctrl/ctrl2080/ctrl2080fb.h"
78 #include "ctrl/ctrl2080/ctrl2080perf.h"
79 #include "ctrl/ctrl2080/ctrl2080gpu.h"
80 #include "ctrl/ctrl402c.h"
81 
82 #include "g_nv_name_released.h"   // released chip entries from nvChipAutoFlags.h
83 
84 #include <virtualization/hypervisor/hypervisor.h>
85 
86 #include "gpu/bus/kern_bus.h"
87 
88 //
89 // Helper function which can be called before doing any RM control
90 // This function:
91 //
92 // a. Performs threadStateInit().
93 // b. Acquires API lock.
94 // c. Checks if RMAPI client handle is valid (i.e. RM is initialized) and
95 //    returns early if RMAPI client handle is invalid.
96 // d. Increments the dynamic power refcount. If GPU is in RTD3 suspended
97 //    state, then it will wake-up the GPU.
98 // e. Returns the RMAPI interface handle.
99 //
100 // This function should be called only when caller doesn't have acquired API
101 // lock. Caller needs to use RmUnixRmApiEpilogue() after RM control, if
102 // RmUnixRmApiPrologue() is successful.
103 //
104 RM_API *RmUnixRmApiPrologue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode, NvU32 module)
105 {
106     threadStateInit(pThreadNode, THREAD_STATE_FLAGS_NONE);
107 
108     if ((rmapiLockAcquire(API_LOCK_FLAGS_NONE, module)) == NV_OK)
109     {
110         if ((pNv->rmapi.hClient != 0) &&
111             (os_ref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE) == NV_OK))
112         {
113             return rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
114         }
115 
116         rmapiLockRelease();
117     }
118 
119     threadStateFree(pThreadNode, THREAD_STATE_FLAGS_NONE);
120 
121     return NULL;
122 }
123 
124 //
125 // Helper function which can be called after doing RM control, if
126 // caller has used RmUnixRmApiPrologue() helper function. This function:
127 //
128 // a. Decrements the dynamic power refcount.
129 // b. Release API lock.
130 // c. Performs threadStateFree().
131 //
132 void RmUnixRmApiEpilogue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode)
133 {
134     os_unref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE);
135     rmapiLockRelease();
136     threadStateFree(pThreadNode, THREAD_STATE_FLAGS_NONE);
137 }
138 
139 NvBool RmGpuHasIOSpaceEnabled(nv_state_t * nv)
140 {
141     NvU16 val;
142     NvBool has_io;
143     os_pci_read_word(nv->handle, NV_CONFIG_PCI_NV_1, &val);
144     has_io = FLD_TEST_DRF(_CONFIG, _PCI_NV_1, _IO_SPACE, _ENABLED, val);
145     return has_io;
146 }
147 
148 // This is a stub function for unix
149 void osHandleDeferredRecovery(
150     OBJGPU *pGpu
151 )
152 {
153 
154 }
155 
156 // This is a stub function for unix
157 NvBool osIsSwPreInitOnly
158 (
159     OS_GPU_INFO *pOsGpuInfo
160 )
161 {
162     return NV_FALSE;
163 }
164 
165 const NvU8 * RmGetGpuUuidRaw(
166     nv_state_t *pNv
167 )
168 {
169     NV_STATUS rmStatus;
170     OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
171     NvU32 gidFlags;
172     NvBool isApiLockTaken = NV_FALSE;
173 
174     if (pNv->nv_uuid_cache.valid)
175         goto done;
176 
177     //
178     // PBI is not present in simulation and the loop inside
179     // pciPbiReadUuid takes up considerable amount of time in
180     // simulation environment during RM load.
181     //
182     if (pGpu && IS_SIMULATION(pGpu))
183     {
184         rmStatus = NV_ERR_NOT_SUPPORTED;
185     }
186     else
187     {
188         rmStatus = pciPbiReadUuid(pNv->handle, pNv->nv_uuid_cache.uuid);
189     }
190 
191     if (rmStatus == NV_OK)
192     {
193         rmStatus = gpumgrSetUuid(pNv->gpu_id, pNv->nv_uuid_cache.uuid);
194         if (rmStatus != NV_OK)
195         {
196             return NULL;
197         }
198 
199         pNv->nv_uuid_cache.valid = NV_TRUE;
200         goto done;
201     }
202     else if (rmStatus == NV_ERR_NOT_SUPPORTED)
203     {
204         nv_printf(NV_DBG_INFO,
205                   "NVRM: PBI is not supported for GPU " NV_PCI_DEV_FMT "\n",
206                   NV_PCI_DEV_FMT_ARGS(pNv));
207     }
208 
209     gidFlags = DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1)
210              | DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_FORMAT,_BINARY);
211 
212     if (!rmapiLockIsOwner())
213     {
214         rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU);
215         if (rmStatus != NV_OK)
216         {
217             return NULL;
218         }
219 
220         isApiLockTaken = NV_TRUE;
221     }
222 
223     if (pGpu == NULL)
224     {
225         if (isApiLockTaken == NV_TRUE)
226         {
227             rmapiLockRelease();
228         }
229 
230         return NULL;
231     }
232 
233     rmStatus = gpuGetGidInfo(pGpu, NULL, NULL, gidFlags);
234     if (isApiLockTaken == NV_TRUE)
235     {
236         rmapiLockRelease();
237     }
238 
239     if (rmStatus != NV_OK)
240         return NULL;
241 
242     if (!pGpu->gpuUuid.isInitialized)
243         return NULL;
244 
245     // copy the uuid from the OBJGPU uuid cache
246     os_mem_copy(pNv->nv_uuid_cache.uuid, pGpu->gpuUuid.uuid, GPU_UUID_LEN);
247     pNv->nv_uuid_cache.valid = NV_TRUE;
248 
249 done:
250     return pNv->nv_uuid_cache.uuid;
251 }
252 
253 static NV_STATUS RmGpuUuidRawToString(
254     const NvU8     *pGidData,
255     char           *pBuf,
256     NvU32           bufLen
257 )
258 {
259     NvU8     *pGidString;
260     NvU32     GidStrlen;
261     NV_STATUS rmStatus;
262     NvU32     gidFlags;
263 
264     gidFlags = DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _ASCII) |
265                DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1);
266 
267     rmStatus = transformGidToUserFriendlyString(pGidData, RM_SHA1_GID_SIZE,
268                                                 &pGidString, &GidStrlen,
269                                                 gidFlags);
270     if (rmStatus != NV_OK)
271         return rmStatus;
272 
273     if (bufLen >= GidStrlen)
274         portMemCopy(pBuf, bufLen, pGidString, GidStrlen);
275     else
276         rmStatus = NV_ERR_BUFFER_TOO_SMALL;
277 
278     portMemFree((void *)pGidString);
279 
280     return rmStatus;
281 }
282 
283 // This function should be called with the API and GPU locks already acquired.
284 NV_STATUS
285 RmLogGpuCrash(OBJGPU *pGpu)
286 {
287     NV_STATUS status = NV_OK;
288     NvBool bGpuIsLost, bGpuIsConnected;
289 
290     if (pGpu == NULL)
291     {
292         return NV_ERR_INVALID_ARGUMENT;
293     }
294 
295     //
296     // Re-evaluate whether or not the GPU is accessible. This could be called
297     // from a recovery context where the OS has re-enabled MMIO for the device.
298     // This happens during EEH processing on IBM Power + Linux, and marking
299     // the device as connected again will allow rcdbAddRmGpuDump() to collect
300     // more GPU state.
301     //
302     bGpuIsLost = pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST);
303     bGpuIsConnected = pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED);
304     if (!bGpuIsConnected || bGpuIsLost)
305     {
306         nv_state_t *nv = NV_GET_NV_STATE(pGpu);
307         nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
308         NvU32 pmcBoot0 = NV_PRIV_REG_RD32(nv->regs->map_u, NV_PMC_BOOT_0);
309         if (pmcBoot0 == nvp->pmc_boot_0)
310         {
311             pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, NV_TRUE);
312             pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, NV_FALSE);
313         }
314     }
315 
316     //
317     // Log the engine data to the Journal object, to be pulled out later. This
318     // will return NV_WARN_MORE_PROCESSING_REQUIRED if the dump needed to be
319     // deferred to a passive IRQL. We still log the crash dump as being created
320     // in that case since it (should) be created shortly thereafter, and
321     // there's currently not a good way to print the below notification
322     // publicly from the core RM when the DPC completes.
323     //
324     status = rcdbAddRmGpuDump(pGpu);
325     if (status != NV_OK && status != NV_WARN_MORE_PROCESSING_REQUIRED)
326     {
327         NV_PRINTF(LEVEL_ERROR,
328                   "%s: failed to save GPU crash data\n", __FUNCTION__);
329     }
330     else
331     {
332         status = NV_OK;
333         nv_printf(NV_DBG_ERRORS,
334             "NVRM: A GPU crash dump has been created. If possible, please run\n"
335             "NVRM: nvidia-bug-report.sh as root to collect this data before\n"
336             "NVRM: the NVIDIA kernel module is unloaded.\n");
337     }
338 
339     // Restore the disconnected properties, if they were reset
340     pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, bGpuIsConnected);
341     pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, bGpuIsLost);
342 
343     // Restore persistence mode to the way it was prior to the crash
344     osModifyGpuSwStatePersistence(pGpu->pOsGpuInfo,
345         pGpu->getProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE));
346 
347     return status;
348 }
349 
350 static void free_os_event_under_lock(nv_event_t *event)
351 {
352     event->active = NV_FALSE;
353 
354     // If refcount > 0, event will be freed by osDereferenceObjectCount
355     // when the last associated RM event is freed.
356     if (event->refcount == 0)
357         portMemFree(event);
358 }
359 
360 static void free_os_events(
361     nv_file_private_t *nvfp,
362     NvHandle client
363 )
364 {
365     nv_state_t *nv = nv_get_ctl_state();
366     nv_event_t **pprev;
367 
368     portSyncSpinlockAcquire(nv->event_spinlock);
369 
370     pprev = &nv->event_list;
371     while (*pprev != NULL)
372     {
373         nv_event_t *cur = *pprev;
374         //
375         // XXX We must be called from either rm_client_free_os_events() or
376         // RmFreeUnusedClients() for this to work.
377         //
378         if ((cur->hParent == client) || (cur->nvfp == nvfp))
379         {
380             *pprev = cur->next;
381             free_os_event_under_lock(cur);
382         }
383         else
384         {
385             pprev = &cur->next;
386         }
387     }
388 
389     portSyncSpinlockRelease(nv->event_spinlock);
390 }
391 
392 void rm_client_free_os_events(
393     NvHandle client
394 )
395 {
396     free_os_events(NULL, client);
397 }
398 
399 void RmFreeUnusedClients(
400     nv_state_t         *nv,
401     nv_file_private_t  *nvfp
402 )
403 {
404     NvU32 *pClientList;
405     NvU32 numClients, i;
406     NV_STATUS status;
407     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
408 
409     //
410     // The 'nvfp' pointer uniquely identifies an open instance in kernel space
411     // and the kernel interface layer guarantees that we are not called before
412     // the associated nvfp descriptor is closed. We can thus safely free
413     // abandoned clients with matching 'nvfp' pointers.
414     //
415     status = rmapiGetClientHandlesFromOSInfo(nvfp, &pClientList, &numClients);
416     if (status != NV_OK)
417     {
418         numClients = 0;
419     }
420 
421     for (i = 0; i < numClients; ++i)
422     {
423         NV_PRINTF(LEVEL_INFO, "freeing abandoned client 0x%x\n",
424                   pClientList[i]);
425 
426     }
427 
428     if (numClients != 0)
429     {
430         pRmApi->FreeClientList(pRmApi, pClientList, numClients);
431 
432         portMemFree(pClientList);
433     }
434 
435     // Clean up any remaining events using this nvfp.
436     free_os_events(nvfp, 0);
437 }
438 
439 static void RmUnbindLock(
440     nv_state_t *nv
441 )
442 {
443     OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
444 
445     if ((pGpu == NULL) || (gpuGetUserClientCount(pGpu) == 0))
446     {
447         nv->flags |= NV_FLAG_UNBIND_LOCK;
448     }
449 }
450 
451 static NV_STATUS allocate_os_event(
452     NvHandle            hParent,
453     nv_file_private_t  *nvfp,
454     NvU32               fd
455 )
456 {
457     nv_state_t *nv = nv_get_ctl_state();
458     NvU32 status = NV_OK;
459     nv_event_t *event;
460 
461     nv_event_t *new_event = portMemAllocNonPaged(sizeof(nv_event_t));
462     if (new_event == NULL)
463     {
464         status = NV_ERR_NO_MEMORY;
465         goto done;
466     }
467 
468     portSyncSpinlockAcquire(nv->event_spinlock);
469     for (event = nv->event_list; event; event = event->next)
470     {
471         // Only one event may be associated with a given fd.
472         if (event->hParent == hParent && event->fd == fd)
473         {
474             status = NV_ERR_INVALID_ARGUMENT;
475             portSyncSpinlockRelease(nv->event_spinlock);
476             goto done;
477         }
478     }
479 
480     new_event->next = nv->event_list;
481     nv->event_list = new_event;
482     portSyncSpinlockRelease(nv->event_spinlock);
483 
484 done:
485     if (status == NV_OK)
486     {
487         new_event->hParent  = hParent;
488         new_event->nvfp     = nvfp;
489         new_event->fd       = fd;
490         new_event->active   = NV_TRUE;
491         new_event->refcount = 0;
492 
493         NV_PRINTF(LEVEL_INFO, "allocated OS event:\n");
494         NV_PRINTF(LEVEL_INFO, "   hParent: 0x%x\n", hParent);
495         NV_PRINTF(LEVEL_INFO, "   fd: %d\n", fd);
496     }
497     else
498     {
499         portMemFree(new_event);
500     }
501 
502     return status;
503 }
504 
505 NV_STATUS RmAllocOsEvent(
506     NvHandle            hParent,
507     nv_file_private_t  *nvfp,
508     NvU32               fd
509 )
510 {
511     if (NV_OK != allocate_os_event(hParent, nvfp, fd))
512     {
513         NV_PRINTF(LEVEL_ERROR, "failed to allocate OS event\n");
514         return NV_ERR_INSUFFICIENT_RESOURCES;
515     }
516     return NV_OK;
517 }
518 
519 static NV_STATUS free_os_event(
520     NvHandle    hParent,
521     NvU32       fd
522 )
523 {
524     nv_state_t *nv = nv_get_ctl_state();
525     nv_event_t *event, *tmp;
526     NV_STATUS result;
527 
528     portSyncSpinlockAcquire(nv->event_spinlock);
529     tmp = event = nv->event_list;
530     while (event)
531     {
532         if ((event->fd == fd) && (event->hParent == hParent))
533         {
534             if (event == nv->event_list)
535                 nv->event_list = event->next;
536             else
537                 tmp->next = event->next;
538             break;
539         }
540         tmp = event;
541         event = event->next;
542     }
543 
544     if (event != NULL)
545     {
546         free_os_event_under_lock(event);
547         result = NV_OK;
548     }
549     else
550         result = NV_ERR_INVALID_EVENT;
551     portSyncSpinlockRelease(nv->event_spinlock);
552 
553     if (result == NV_OK)
554     {
555         NV_PRINTF(LEVEL_INFO, "freed OS event:\n");
556         NV_PRINTF(LEVEL_INFO, "   hParent: 0x%x\n", hParent);
557         NV_PRINTF(LEVEL_INFO, "   fd: %d\n", fd);
558     }
559     else
560     {
561         NV_PRINTF(LEVEL_ERROR, "failed to find OS event:\n");
562         NV_PRINTF(LEVEL_ERROR, "   hParent: 0x%x\n", hParent);
563         NV_PRINTF(LEVEL_ERROR, "   fd: %d\n", fd);
564     }
565 
566     return result;
567 }
568 
569 NV_STATUS RmFreeOsEvent(
570     NvHandle    hParent,
571     NvU32       fd
572 )
573 {
574     if (NV_OK != free_os_event(hParent, fd))
575     {
576         return NV_ERR_INVALID_EVENT;
577     }
578     return NV_OK;
579 }
580 
581 static void RmExecuteWorkItem(
582     void *pWorkItem
583 )
584 {
585     nv_work_item_t *pWi = (nv_work_item_t *)pWorkItem;
586     NvU32 gpuMask;
587     NvU32 releaseLocks = 0;
588 
589     if (!(pWi->flags & NV_WORK_ITEM_FLAGS_REQUIRES_GPU) &&
590         ((pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) ||
591          (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW) ||
592          (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW) ||
593          (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY)))
594     {
595         // Requesting one of the GPU locks without providing a GPU instance
596         NV_ASSERT(0);
597         goto done;
598     }
599 
600     // Get locks requested by workitem
601     if (NV_OK != workItemLocksAcquire(pWi->gpuInstance, pWi->flags,
602                                       &releaseLocks, &gpuMask))
603     {
604         goto done;
605     }
606 
607     // Some work items may not require a valid GPU instance
608     if (pWi->flags & NV_WORK_ITEM_FLAGS_REQUIRES_GPU)
609     {
610         pWi->func.pGpuFunction(pWi->gpuInstance, pWi->pData);
611     }
612     else
613     {
614         pWi->func.pSystemFunction(pWi->pData);
615     }
616 
617     // Release any locks taken
618     workItemLocksRelease(releaseLocks, gpuMask);
619 
620 done:
621     if ((pWi->pData != NULL) &&
622         !(pWi->flags & NV_WORK_ITEM_FLAGS_DONT_FREE_DATA))
623     {
624         portMemFree(pWi->pData);
625     }
626 
627     portMemFree((void *)pWi);
628 }
629 
630 static NV_STATUS RmGetEventData(
631     nv_file_private_t *nvfp,
632     NvP64 pEvent,
633     NvU32 *MoreEvents,
634     NvBool bUserModeArgs
635 )
636 {
637     NV_STATUS         RmStatus;
638     NvUnixEvent      *pKernelEvent = NULL;
639     nv_event_t        nv_event;
640     RMAPI_PARAM_COPY  paramCopy;
641 
642     RmStatus = nv_get_event(nvfp, &nv_event, MoreEvents);
643     if (RmStatus != NV_OK)
644         return NV_ERR_OPERATING_SYSTEM;
645 
646     // setup for access to client's parameters
647     RMAPI_PARAM_COPY_INIT(paramCopy, pKernelEvent, pEvent, 1, sizeof(NvUnixEvent));
648     RmStatus = rmapiParamsAcquire(&paramCopy, bUserModeArgs);
649     if (RmStatus != NV_OK)
650         return NV_ERR_OPERATING_SYSTEM;
651 
652     pKernelEvent->hObject     = nv_event.hObject;
653     pKernelEvent->NotifyIndex = nv_event.index;
654     pKernelEvent->info32      = nv_event.info32;
655     pKernelEvent->info16      = nv_event.info16;
656 
657     // release client buffer access, with copyout as needed
658     if (rmapiParamsRelease(&paramCopy) != NV_OK)
659         return NV_ERR_OPERATING_SYSTEM;
660 
661     return NV_OK;
662 }
663 
664 static NV_STATUS RmAccessRegistry(
665     NvHandle   hClient,
666     NvHandle   hObject,
667     NvU32      AccessType,
668     NvP64      clientDevNodeAddress,
669     NvU32      DevNodeLength,
670     NvP64      clientParmStrAddress,
671     NvU32      ParmStrLength,
672     NvP64      clientBinaryDataAddress,
673     NvU32      *pBinaryDataLength,
674     NvU32      *Data,
675     NvU32      *Entry
676 )
677 {
678     NvU32      gpuMask = 0, gpuInstance = 0;
679     OBJGPU    *pGpu;
680     NvBool     isDevice = NV_FALSE;
681     NV_STATUS  RmStatus = NV_ERR_OPERATING_SYSTEM;
682     RsClient  *pClient;
683     Device    *pDevice;
684     Subdevice *pSubdevice;
685 
686     RMAPI_PARAM_COPY devNodeParamCopy;
687     NvU8      *tmpDevNode = NULL;
688     NvU32      copyOutDevNodeLength = 0;
689 
690     RMAPI_PARAM_COPY parmStrParamCopy;
691     char      *tmpParmStr = NULL;
692     NvU32      copyOutParmStrLength = 0;
693 
694     RMAPI_PARAM_COPY binaryDataParamCopy;
695     NvU8      *tmpBinaryData = NULL;
696     NvU32      BinaryDataLength = 0;
697     NvU32      copyOutBinaryDataLength = 0;
698 
699     if (NV_OK != serverAcquireClient(&g_resServ, hClient, LOCK_ACCESS_WRITE, &pClient))
700         return NV_ERR_INVALID_CLIENT;
701 
702     if (hClient == hObject)
703     {
704         pGpu = NULL;
705     }
706     else
707     {
708         RmStatus = deviceGetByHandle(pClient, hObject, &pDevice);
709         if (RmStatus != NV_OK)
710         {
711             RmStatus = subdeviceGetByHandle(pClient, hObject, &pSubdevice);
712             if (RmStatus != NV_OK)
713                 goto done;
714 
715             RmStatus = rmGpuGroupLockAcquire(pSubdevice->subDeviceInst,
716                     GPU_LOCK_GRP_SUBDEVICE,
717                     GPUS_LOCK_FLAGS_NONE,
718                     RM_LOCK_MODULES_GPU,
719                     &gpuMask);
720             if (RmStatus != NV_OK)
721                 return RmStatus;
722 
723             GPU_RES_SET_THREAD_BC_STATE(pSubdevice);
724             pGpu = GPU_RES_GET_GPU(pSubdevice);
725         }
726         else
727         {
728             RmStatus = rmGpuGroupLockAcquire(pDevice->deviceInst,
729                     GPU_LOCK_GRP_DEVICE,
730                     GPUS_LOCK_FLAGS_NONE,
731                     RM_LOCK_MODULES_GPU,
732                     &gpuMask);
733             if (RmStatus != NV_OK)
734                 return RmStatus;
735 
736             GPU_RES_SET_THREAD_BC_STATE(pDevice);
737             pGpu = GPU_RES_GET_GPU(pDevice);
738             isDevice = NV_TRUE;
739         }
740     }
741 
742     if (pBinaryDataLength)
743     {
744         BinaryDataLength = *pBinaryDataLength;
745     }
746 
747     // a passed-in devNode
748     if (DevNodeLength)
749     {
750         // the passed-in DevNodeLength does not account for '\0'
751         DevNodeLength++;
752 
753         if (DevNodeLength > NVOS38_MAX_REGISTRY_STRING_LENGTH)
754         {
755             RmStatus = NV_ERR_INVALID_STRING_LENGTH;
756             goto done;
757         }
758 
759         // get access to client's DevNode
760         RMAPI_PARAM_COPY_INIT(devNodeParamCopy, tmpDevNode, clientDevNodeAddress, DevNodeLength, 1);
761         devNodeParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER;
762         RmStatus = rmapiParamsAcquire(&devNodeParamCopy, NV_TRUE);
763         if (RmStatus != NV_OK)
764         {
765             RmStatus = NV_ERR_OPERATING_SYSTEM;
766             goto done;
767         }
768     }
769 
770     // a passed-in parmStr
771     if (ParmStrLength)
772     {
773         // the passed-in ParmStrLength does not account for '\0'
774         ParmStrLength++;
775 
776         if ((ParmStrLength == 0) || (ParmStrLength > NVOS38_MAX_REGISTRY_STRING_LENGTH))
777         {
778             RmStatus = NV_ERR_INVALID_STRING_LENGTH;
779             goto done;
780         }
781         // get access to client's parmStr
782         RMAPI_PARAM_COPY_INIT(parmStrParamCopy, tmpParmStr, clientParmStrAddress, ParmStrLength, 1);
783         RmStatus = rmapiParamsAcquire(&parmStrParamCopy, NV_TRUE);
784         if (RmStatus != NV_OK)
785         {
786             RmStatus = NV_ERR_OPERATING_SYSTEM;
787             goto done;
788         }
789         if (tmpParmStr[ParmStrLength - 1] != '\0')
790         {
791             RmStatus = NV_ERR_INVALID_ARGUMENT;
792             goto done;
793         }
794     }
795 
796     if ((AccessType == NVOS38_ACCESS_TYPE_READ_BINARY) ||
797         (AccessType == NVOS38_ACCESS_TYPE_WRITE_BINARY))
798     {
799         if ((BinaryDataLength > NVOS38_MAX_REGISTRY_BINARY_LENGTH) ||
800             (BinaryDataLength == 0))
801         {
802             RmStatus = NV_ERR_INVALID_STRING_LENGTH;
803             goto done;
804         }
805 
806         // get access to client's binaryData
807         RMAPI_PARAM_COPY_INIT(binaryDataParamCopy, tmpBinaryData, clientBinaryDataAddress, BinaryDataLength, 1);
808         if (AccessType == NVOS38_ACCESS_TYPE_READ_BINARY)
809             binaryDataParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN;
810         RmStatus = rmapiParamsAcquire(&binaryDataParamCopy, NV_TRUE);
811         if (RmStatus != NV_OK)
812         {
813             RmStatus = NV_ERR_OPERATING_SYSTEM;
814             goto done;
815         }
816     }
817 
818     switch (AccessType)
819     {
820         case NVOS38_ACCESS_TYPE_READ_DWORD:
821             RmStatus = osReadRegistryDword(pGpu,
822                                 tmpParmStr, Data);
823             break;
824 
825         case NVOS38_ACCESS_TYPE_WRITE_DWORD:
826             if (isDevice && osIsAdministrator())
827             {
828                 while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
829                 {
830                     RmStatus = osWriteRegistryDword(pGpu,
831                                     tmpParmStr, *Data);
832 
833                     if (RmStatus != NV_OK)
834                         goto done;
835                 }
836                 break;
837             }
838 
839             RmStatus = osWriteRegistryDword(pGpu,
840                                     tmpParmStr, *Data);
841             break;
842 
843         case NVOS38_ACCESS_TYPE_READ_BINARY:
844             RmStatus = osReadRegistryBinary(pGpu,
845                                 tmpParmStr, tmpBinaryData, &BinaryDataLength);
846 
847             if (RmStatus != NV_OK)
848             {
849                 goto done;
850             }
851 
852             if (BinaryDataLength)
853                 copyOutBinaryDataLength = BinaryDataLength;
854 
855             break;
856 
857         case NVOS38_ACCESS_TYPE_WRITE_BINARY:
858             if (isDevice && osIsAdministrator())
859             {
860                 while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
861                 {
862                     RmStatus = osWriteRegistryBinary(pGpu,
863                                 tmpParmStr, tmpBinaryData,
864                                 BinaryDataLength);
865 
866                     if (RmStatus != NV_OK)
867                         goto done;
868                 }
869                 break;
870             }
871 
872             RmStatus = osWriteRegistryBinary(pGpu,
873                                 tmpParmStr, tmpBinaryData,
874                                 BinaryDataLength);
875             break;
876 
877         default:
878             RmStatus = NV_ERR_INVALID_ACCESS_TYPE;
879     }
880 
881  done:
882     if (gpuMask != 0)
883         rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE);
884 
885     if (tmpDevNode != NULL)
886     {
887         // skip copyout on error
888         if ((RmStatus != NV_OK) || (copyOutDevNodeLength == 0))
889             devNodeParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT;
890         devNodeParamCopy.paramsSize = copyOutDevNodeLength;
891         if (NV_OK != rmapiParamsRelease(&devNodeParamCopy))
892             if (RmStatus == NV_OK)
893                 RmStatus = NV_ERR_OPERATING_SYSTEM;
894     }
895     if (tmpParmStr != NULL)
896     {
897         // skip copyout on error
898         if ((RmStatus != NV_OK) || (copyOutParmStrLength == 0))
899             parmStrParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT;
900         parmStrParamCopy.paramsSize = copyOutParmStrLength;
901         if (NV_OK != rmapiParamsRelease(&parmStrParamCopy))
902             if (RmStatus == NV_OK)
903                 RmStatus = NV_ERR_OPERATING_SYSTEM;
904     }
905     if (tmpBinaryData != NULL)
906     {
907         // skip copyout on error
908         if ((RmStatus != NV_OK) || (copyOutBinaryDataLength == 0))
909             binaryDataParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT;
910         binaryDataParamCopy.paramsSize = copyOutBinaryDataLength;
911         if (NV_OK != rmapiParamsRelease(&binaryDataParamCopy))
912             if (RmStatus == NV_OK)
913                 RmStatus = NV_ERR_OPERATING_SYSTEM;
914         *pBinaryDataLength = copyOutBinaryDataLength;
915     }
916 
917     serverReleaseClient(&g_resServ, LOCK_ACCESS_WRITE, pClient);
918     return RmStatus;
919 }
920 
921 static NV_STATUS RmUpdateDeviceMappingInfo(
922     NvHandle    hClient,
923     NvHandle    hDevice,
924     NvHandle    hMappable,
925     void       *pOldCpuAddress,
926     void       *pNewCpuAddress
927 )
928 {
929     NV_STATUS status;
930     RsClient *pClient;
931     RsResourceRef *pMappableRef;
932     RsCpuMapping *pCpuMapping;
933     Device *pDevice;
934     Subdevice *pSubdevice;
935     NvU32 gpuMask = 0;
936 
937     status = serverAcquireClient(&g_resServ, hClient, LOCK_ACCESS_WRITE, &pClient);
938     if (status != NV_OK)
939         return status;
940 
941     status = deviceGetByHandle(pClient, hDevice, &pDevice);
942     if (status != NV_OK)
943     {
944         status = subdeviceGetByHandle(pClient, hDevice, &pSubdevice);
945         if (status != NV_OK)
946             goto done;
947 
948         status = rmGpuGroupLockAcquire(pSubdevice->subDeviceInst,
949                                        GPU_LOCK_GRP_SUBDEVICE,
950                                        GPUS_LOCK_FLAGS_NONE,
951                                        RM_LOCK_MODULES_GPU,
952                                        &gpuMask);
953         if (status != NV_OK)
954             goto done;
955 
956         GPU_RES_SET_THREAD_BC_STATE(pSubdevice);
957     }
958     else
959     {
960         status = rmGpuGroupLockAcquire(pDevice->deviceInst,
961                                        GPU_LOCK_GRP_DEVICE,
962                                        GPUS_LOCK_FLAGS_NONE,
963                                        RM_LOCK_MODULES_GPU,
964                                        &gpuMask);
965         if (status != NV_OK)
966             goto done;
967 
968         GPU_RES_SET_THREAD_BC_STATE(pDevice);
969     }
970 
971     status = clientGetResourceRef(pClient, hMappable, &pMappableRef);
972     if (status != NV_OK)
973         goto done;
974 
975     if ((objDynamicCastById(pMappableRef->pResource, classId(Memory)) == NULL) &&
976         (objDynamicCastById(pMappableRef->pResource, classId(KernelChannel)) == NULL))
977     {
978         status = NV_ERR_INVALID_OBJECT_HANDLE;
979         goto done;
980     }
981 
982     status = refFindCpuMappingWithFilter(pMappableRef,
983                                          NV_PTR_TO_NvP64(pOldCpuAddress),
984                                          serverutilMappingFilterCurrentUserProc,
985                                          &pCpuMapping);
986     if (status != NV_OK)
987         goto done;
988 
989     pCpuMapping->pLinearAddress = NV_PTR_TO_NvP64(pNewCpuAddress);
990 
991 done:
992 
993     if (gpuMask != 0)
994         rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE);
995 
996     serverReleaseClient(&g_resServ, LOCK_ACCESS_WRITE, pClient);
997     return status;
998 }
999 
1000 static NV_STATUS RmPerformVersionCheck(
1001     void *pData,
1002     NvU32 dataSize
1003 )
1004 {
1005     nv_ioctl_rm_api_version_t *pParams;
1006     char clientCh, rmCh;
1007     const char *rmStr = NV_VERSION_STRING;
1008     NvBool relaxed = NV_FALSE;
1009     NvU32 i;
1010 
1011     if (dataSize != sizeof(nv_ioctl_rm_api_version_t))
1012         return NV_ERR_INVALID_ARGUMENT;
1013 
1014     pParams = pData;
1015 
1016     //
1017     // write the reply value, so that the client knows we recognized
1018     // the request
1019     //
1020     pParams->reply = NV_RM_API_VERSION_REPLY_RECOGNIZED;
1021 
1022     //
1023     // the client requested to override the version check; just return
1024     // success.
1025     //
1026     if (pParams->cmd == NV_RM_API_VERSION_CMD_OVERRIDE)
1027     {
1028         return NV_OK;
1029     }
1030 
1031     //
1032     // the client requested relaxed version checking; we will only
1033     // compare the strings until the first decimal point.
1034     //
1035     if (pParams->cmd == NV_RM_API_VERSION_CMD_RELAXED)
1036     {
1037         relaxed = NV_TRUE;
1038     }
1039 
1040     //
1041     // rmStr (i.e., NV_VERSION_STRING) must be null-terminated and fit within
1042     // NV_RM_API_VERSION_STRING_LENGTH, so that:
1043     //
1044     // (1) If the versions don't match, we can return rmStr in
1045     //     pParams->versionString.
1046     // (2) The below loop is guaranteed to not overrun rmStr.
1047     //
1048     if ((os_string_length(rmStr) + 1) > NV_RM_API_VERSION_STRING_LENGTH)
1049     {
1050         return NV_ERR_BUFFER_TOO_SMALL;
1051     }
1052 
1053     for (i = 0; i < NV_RM_API_VERSION_STRING_LENGTH; i++)
1054     {
1055         clientCh = pParams->versionString[i];
1056         rmCh = rmStr[i];
1057 
1058         //
1059         // fail if the current character is not the same
1060         //
1061         if (clientCh != rmCh)
1062         {
1063             break;
1064         }
1065 
1066         //
1067         // if relaxed matching was requested, succeed when we find the
1068         // first decimal point
1069         //
1070         if ((relaxed) && (clientCh == '.'))
1071         {
1072             return NV_OK;
1073         }
1074 
1075         //
1076         // we found the end of the strings: succeed
1077         //
1078         if (clientCh == '\0')
1079         {
1080             return NV_OK;
1081         }
1082     }
1083 
1084     //
1085     // the version strings did not match: print an error message and
1086     // copy the RM's version string into pParams->versionString, so
1087     // that the client can report the mismatch; explicitly NULL
1088     // terminate the client's string, since we cannot trust it
1089     //
1090     pParams->versionString[NV_RM_API_VERSION_STRING_LENGTH - 1] = '\0';
1091 
1092     nv_printf(NV_DBG_ERRORS,
1093               "NVRM: API mismatch: the client has the version %s, but\n"
1094               "NVRM: this kernel module has the version %s.  Please\n"
1095               "NVRM: make sure that this kernel module and all NVIDIA driver\n"
1096               "NVRM: components have the same version.\n",
1097               pParams->versionString, NV_VERSION_STRING);
1098 
1099     os_string_copy(pParams->versionString, rmStr);
1100 
1101     return NV_ERR_GENERIC;
1102 }
1103 
1104 //
1105 // Check if the NVPCF _DSM functions are implemented under
1106 // NVPCF scope or GPU device scope.
1107 // As part of RM initialisation this function checks the
1108 // support of NVPCF _DSM function implementation under
1109 // NVPCF scope, in case that fails, clear the cached DSM
1110 // support status and retry the NVPCF _DSM function under
1111 // GPU scope.
1112 //
1113 static void RmCheckNvpcfDsmScope(
1114     OBJGPU *pGpu
1115 )
1116 {
1117     NvU32 supportedFuncs;
1118     NvU16 dsmDataSize = sizeof(supportedFuncs);
1119     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
1120     ACPI_DSM_FUNCTION acpiDsmFunction = ACPI_DSM_FUNCTION_NVPCF_2X;
1121     NvU32 acpiDsmSubFunction = NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_SUPPORTED;
1122 
1123     nv->nvpcf_dsm_in_gpu_scope = NV_FALSE;
1124 
1125     if ((osCallACPI_DSM(pGpu, acpiDsmFunction, acpiDsmSubFunction,
1126                         &supportedFuncs, &dsmDataSize) != NV_OK) ||
1127         (FLD_TEST_DRF(PCF0100, _CTRL_CONFIG_DSM,
1128                       _FUNC_GET_SUPPORTED_IS_SUPPORTED, _NO, supportedFuncs)) ||
1129         (dsmDataSize != sizeof(supportedFuncs)))
1130     {
1131         nv->nvpcf_dsm_in_gpu_scope = NV_TRUE;
1132 
1133         // clear cached DSM function status
1134         uncacheDsmFuncStatus(pGpu, acpiDsmFunction, acpiDsmSubFunction);
1135     }
1136 }
1137 
1138 NV_STATUS RmPowerSourceChangeEvent(
1139     nv_state_t *pNv,
1140     NvU32       event_val
1141 )
1142 {
1143     NV2080_CTRL_PERF_SET_POWERSTATE_PARAMS params = {0};
1144     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
1145 
1146     params.powerStateInfo.powerState = event_val ? NV2080_CTRL_PERF_POWER_SOURCE_BATTERY :
1147                                                    NV2080_CTRL_PERF_POWER_SOURCE_AC;
1148 
1149     return pRmApi->Control(pRmApi, pNv->rmapi.hClient,
1150                            pNv->rmapi.hSubDevice,
1151                            NV2080_CTRL_CMD_PERF_SET_POWERSTATE,
1152                            &params, sizeof(params));
1153 }
1154 
1155 /*!
1156  * @brief Deal with D-notifier events to apply a performance
1157  * level based on the requested auxiliary power-state.
1158  * Read confluence page "D-Notifiers on Linux" for more details.
1159  *
1160  * @param[in]   pGpu         OBJGPU pointer.
1161  * @param[in]   event_type   NvU32 Event type.
1162  */
1163 static void RmHandleDNotifierEvent(
1164     nv_state_t *pNv,
1165     NvU32       event_type
1166 )
1167 {
1168     NV2080_CTRL_PERF_SET_AUX_POWER_STATE_PARAMS params = { 0 };
1169     RM_API            *pRmApi;
1170     THREAD_STATE_NODE  threadState;
1171     NV_STATUS          rmStatus = NV_OK;
1172 
1173     switch (event_type)
1174     {
1175         case ACPI_NOTIFY_POWER_LEVEL_D1:
1176             params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P0;
1177             break;
1178         case ACPI_NOTIFY_POWER_LEVEL_D2:
1179             params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P1;
1180             break;
1181         case ACPI_NOTIFY_POWER_LEVEL_D3:
1182             params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P2;
1183             break;
1184         case ACPI_NOTIFY_POWER_LEVEL_D4:
1185             params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P3;
1186             break;
1187         case ACPI_NOTIFY_POWER_LEVEL_D5:
1188             params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P4;
1189             break;
1190         default:
1191             return;
1192     }
1193 
1194     pRmApi = RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_ACPI);
1195     if (pRmApi == NULL)
1196     {
1197         return;
1198     }
1199 
1200     rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient,
1201                                pNv->rmapi.hSubDevice,
1202                                NV2080_CTRL_CMD_PERF_SET_AUX_POWER_STATE,
1203                                &params, sizeof(params));
1204 
1205     RmUnixRmApiEpilogue(pNv, &threadState);
1206 
1207     if (rmStatus != NV_OK)
1208     {
1209         NV_PRINTF(LEVEL_ERROR,
1210                   "%s: Failed to handle ACPI D-Notifier event, status=0x%x\n",
1211                   __FUNCTION__, rmStatus);
1212     }
1213 }
1214 
1215 static NV_STATUS
1216 RmDmabufVerifyMemHandle(
1217     OBJGPU  *pGpu,
1218     NvHandle hSrcClient,
1219     NvHandle hMemory,
1220     NvU64    offset,
1221     NvU64    size,
1222     void    *pGpuInstanceInfo
1223 )
1224 {
1225     NV_STATUS status;
1226     RsClient *pClient = NULL;
1227     RsResourceRef *pSrcMemoryRef = NULL;
1228     Memory *pSrcMemory = NULL;
1229     MEMORY_DESCRIPTOR *pMemDesc = NULL;
1230 
1231     NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hSrcClient, &pClient));
1232 
1233     status = clientGetResourceRef(pClient, hMemory, &pSrcMemoryRef);
1234     if (status != NV_OK)
1235     {
1236         return status;
1237     }
1238 
1239     pSrcMemory = dynamicCast(pSrcMemoryRef->pResource, Memory);
1240     if (pSrcMemory == NULL)
1241     {
1242         return NV_ERR_INVALID_OBJECT;
1243     }
1244 
1245     pMemDesc = pSrcMemory->pMemDesc;
1246 
1247     if (pGpuInstanceInfo != NULL)
1248     {
1249         KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance;
1250         pKernelMIGGpuInstance = (KERNEL_MIG_GPU_INSTANCE *) pGpuInstanceInfo;
1251 
1252         if ((pKernelMIGGpuInstance->pMemoryPartitionHeap != pSrcMemory->pHeap))
1253         {
1254             return NV_ERR_INVALID_OBJECT_PARENT;
1255         }
1256     }
1257 
1258     // Check if hMemory belongs to the same pGpu
1259     if ((pMemDesc->pGpu != pGpu) &&
1260         (pSrcMemory->pGpu != pGpu))
1261     {
1262         return NV_ERR_INVALID_OBJECT_PARENT;
1263     }
1264 
1265     // Offset and size must be aligned to OS page-size
1266     if (!NV_IS_ALIGNED64(offset, os_page_size) ||
1267         !NV_IS_ALIGNED64(size, os_page_size))
1268     {
1269         return NV_ERR_INVALID_ARGUMENT;
1270     }
1271 
1272     // Only supported for vidmem handles
1273     if (memdescGetAddressSpace(pMemDesc) != ADDR_FBMEM)
1274     {
1275         return NV_ERR_INVALID_ARGUMENT;
1276     }
1277 
1278     if ((size == 0) ||
1279         (size > memdescGetSize(pMemDesc)) ||
1280         (offset > (memdescGetSize(pMemDesc) - size)))
1281     {
1282         return NV_ERR_INVALID_ARGUMENT;
1283     }
1284 
1285     return NV_OK;
1286 }
1287 
1288 static NV_STATUS
1289 RmDmabufGetClientAndDevice(
1290     OBJGPU   *pGpu,
1291     NvHandle  hClient,
1292     NvHandle *phClient,
1293     NvHandle *phDevice,
1294     NvHandle *phSubdevice,
1295     void    **ppGpuInstanceInfo
1296 )
1297 {
1298     MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
1299 
1300     if (IS_MIG_ENABLED(pGpu))
1301     {
1302         NV_STATUS status;
1303         MIG_INSTANCE_REF ref;
1304         KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
1305 
1306         status = kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager,
1307                                                  hClient, &ref);
1308         if (status != NV_OK)
1309         {
1310             return status;
1311         }
1312 
1313         status = kmigmgrIncRefCount(ref.pKernelMIGGpuInstance->pShare);
1314         if (status != NV_OK)
1315         {
1316             return status;
1317         }
1318 
1319         *phClient    = ref.pKernelMIGGpuInstance->instanceHandles.hClient;
1320         *phDevice    = ref.pKernelMIGGpuInstance->instanceHandles.hDevice;
1321         *phSubdevice = ref.pKernelMIGGpuInstance->instanceHandles.hSubdevice;
1322         *ppGpuInstanceInfo = (void *) ref.pKernelMIGGpuInstance;
1323 
1324         return NV_OK;
1325     }
1326 
1327     *phClient    = pMemoryManager->hClient;
1328     *phDevice    = pMemoryManager->hDevice;
1329     *phSubdevice = pMemoryManager->hSubdevice;
1330     *ppGpuInstanceInfo = NULL;
1331 
1332     return NV_OK;
1333 }
1334 
1335 static void
1336 RmDmabufPutClientAndDevice(
1337     OBJGPU   *pGpu,
1338     NvHandle  hClient,
1339     NvHandle  hDevice,
1340     NvHandle  hSubdevice,
1341     void     *pGpuInstanceInfo
1342 )
1343 {
1344     KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance;
1345 
1346     if (pGpuInstanceInfo == NULL)
1347     {
1348         return;
1349     }
1350 
1351     pKernelMIGGpuInstance = (KERNEL_MIG_GPU_INSTANCE *) pGpuInstanceInfo;
1352 
1353     NV_ASSERT_OK(kmigmgrDecRefCount(pKernelMIGGpuInstance->pShare));
1354 }
1355 
1356 /*
1357  * ---------------------------------------------------------------------------
1358  *
1359  * The routines below are part of the interface between the kernel interface
1360  * layer and the kernel-agnostic portions of the resource manager.
1361  *
1362  * ---------------------------------------------------------------------------
1363  */
1364 
1365 NvBool NV_API_CALL rm_init_private_state(
1366     nvidia_stack_t *sp,
1367     nv_state_t *pNv
1368 )
1369 {
1370     NvBool retval;
1371     void *fp;
1372 
1373     NV_ENTER_RM_RUNTIME(sp,fp);
1374 
1375     retval = RmInitPrivateState(pNv);
1376 
1377     NV_EXIT_RM_RUNTIME(sp,fp);
1378 
1379     return retval;
1380 }
1381 
1382 void NV_API_CALL rm_free_private_state(
1383     nvidia_stack_t *sp,
1384     nv_state_t *pNv
1385 )
1386 {
1387     void *fp;
1388 
1389     NV_ENTER_RM_RUNTIME(sp,fp);
1390 
1391     RmFreePrivateState(pNv);
1392 
1393     NV_EXIT_RM_RUNTIME(sp,fp);
1394 }
1395 
1396 NvBool NV_API_CALL rm_init_adapter(
1397     nvidia_stack_t *sp,
1398     nv_state_t *pNv
1399 )
1400 {
1401     THREAD_STATE_NODE threadState;
1402     NvBool     retval = NV_FALSE;
1403     void      *fp;
1404     NvBool     bEnabled;
1405 
1406     NV_ENTER_RM_RUNTIME(sp,fp);
1407     threadStateInit(&threadState, THREAD_STATE_FLAGS_DEVICE_INIT);
1408 
1409     // LOCK: acquire API lock
1410     if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT) == NV_OK)
1411     {
1412         if (!((gpumgrQueryGpuDrainState(pNv->gpu_id, &bEnabled, NULL) == NV_OK)
1413               && bEnabled))
1414         {
1415             if (pNv->flags & NV_FLAG_PERSISTENT_SW_STATE)
1416             {
1417                 retval = RmPartiallyInitAdapter(pNv);
1418             }
1419             else
1420             {
1421                 retval = RmInitAdapter(pNv);
1422             }
1423         }
1424 
1425         // UNLOCK: release API lock
1426         rmapiLockRelease();
1427     }
1428 
1429     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1430     NV_EXIT_RM_RUNTIME(sp,fp);
1431 
1432     return retval;
1433 }
1434 
1435 void NV_API_CALL rm_disable_adapter(
1436     nvidia_stack_t *sp,
1437     nv_state_t *pNv
1438 )
1439 {
1440     THREAD_STATE_NODE threadState;
1441     void      *fp;
1442 
1443     NV_ENTER_RM_RUNTIME(sp,fp);
1444     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1445 
1446     NV_ASSERT_OK(os_flush_work_queue(pNv->queue));
1447 
1448     // LOCK: acquire API lock
1449     if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK)
1450     {
1451         if (pNv->flags & NV_FLAG_PERSISTENT_SW_STATE)
1452         {
1453             RmPartiallyDisableAdapter(pNv);
1454         }
1455         else
1456         {
1457             RmDisableAdapter(pNv);
1458         }
1459 
1460         // UNLOCK: release API lock
1461         rmapiLockRelease();
1462     }
1463 
1464     NV_ASSERT_OK(os_flush_work_queue(pNv->queue));
1465     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1466     NV_EXIT_RM_RUNTIME(sp,fp);
1467 }
1468 
1469 void NV_API_CALL rm_shutdown_adapter(
1470     nvidia_stack_t *sp,
1471     nv_state_t *pNv
1472 )
1473 {
1474     THREAD_STATE_NODE threadState;
1475     void      *fp;
1476 
1477     NV_ENTER_RM_RUNTIME(sp,fp);
1478     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1479 
1480     // LOCK: acquire API lock
1481     if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK)
1482     {
1483         RmShutdownAdapter(pNv);
1484 
1485         // UNLOCK: release API lock
1486         rmapiLockRelease();
1487     }
1488 
1489     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1490     NV_EXIT_RM_RUNTIME(sp,fp);
1491 }
1492 
1493 NV_STATUS NV_API_CALL rm_exclude_adapter(
1494     nvidia_stack_t *sp,
1495     nv_state_t *pNv
1496 )
1497 {
1498     NV_STATUS rmStatus;
1499     THREAD_STATE_NODE threadState;
1500     void      *fp;
1501 
1502     NV_ENTER_RM_RUNTIME(sp,fp);
1503     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1504 
1505     rmStatus = RmExcludeAdapter(pNv);
1506 
1507     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1508     NV_EXIT_RM_RUNTIME(sp,fp);
1509 
1510     return rmStatus;
1511 }
1512 
1513 NV_STATUS NV_API_CALL rm_acquire_api_lock(
1514     nvidia_stack_t *sp
1515 )
1516 {
1517     THREAD_STATE_NODE threadState;
1518     NV_STATUS rmStatus;
1519     void *fp;
1520 
1521     NV_ENTER_RM_RUNTIME(sp,fp);
1522     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1523 
1524     // LOCK: acquire API lock
1525     rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
1526 
1527     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1528     NV_EXIT_RM_RUNTIME(sp,fp);
1529 
1530     return rmStatus;
1531 }
1532 
1533 NV_STATUS NV_API_CALL rm_release_api_lock(
1534     nvidia_stack_t *sp
1535 )
1536 {
1537     THREAD_STATE_NODE threadState;
1538     void *fp;
1539 
1540     NV_ENTER_RM_RUNTIME(sp,fp);
1541     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1542 
1543     // UNLOCK: release API lock
1544     rmapiLockRelease();
1545 
1546     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1547     NV_EXIT_RM_RUNTIME(sp,fp);
1548 
1549     return NV_OK;
1550 }
1551 
1552 NV_STATUS NV_API_CALL rm_acquire_gpu_lock(
1553     nvidia_stack_t *sp,
1554     nv_state_t     *nv
1555 )
1556 {
1557     THREAD_STATE_NODE threadState;
1558     NV_STATUS rmStatus;
1559     void *fp;
1560 
1561     NV_ENTER_RM_RUNTIME(sp,fp);
1562     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1563 
1564     // LOCK: acquire GPU lock
1565     rmStatus = rmDeviceGpuLocksAcquire(NV_GET_NV_PRIV_PGPU(nv),
1566                                        GPUS_LOCK_FLAGS_NONE,
1567                                        RM_LOCK_MODULES_OSAPI);
1568 
1569     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1570     NV_EXIT_RM_RUNTIME(sp,fp);
1571 
1572     return rmStatus;
1573 }
1574 
1575 NV_STATUS NV_API_CALL rm_release_gpu_lock(
1576     nvidia_stack_t *sp,
1577     nv_state_t     *nv
1578 )
1579 {
1580     THREAD_STATE_NODE threadState;
1581     void *fp;
1582 
1583     NV_ENTER_RM_RUNTIME(sp,fp);
1584     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1585 
1586     // UNLOCK: release GPU lock
1587     rmDeviceGpuLocksRelease(NV_GET_NV_PRIV_PGPU(nv), GPUS_LOCK_FLAGS_NONE, NULL);
1588 
1589     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1590     NV_EXIT_RM_RUNTIME(sp,fp);
1591 
1592     return NV_OK;
1593 }
1594 
1595 NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock(
1596     nvidia_stack_t *sp
1597 )
1598 {
1599     THREAD_STATE_NODE threadState;
1600     NV_STATUS rmStatus;
1601     void *fp;
1602 
1603     NV_ENTER_RM_RUNTIME(sp,fp);
1604     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1605 
1606     // LOCK: acquire all GPUs lock
1607     rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
1608 
1609     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1610     NV_EXIT_RM_RUNTIME(sp,fp);
1611 
1612     return rmStatus;
1613 }
1614 
1615 NV_STATUS NV_API_CALL rm_release_all_gpus_lock(
1616     nvidia_stack_t *sp
1617 )
1618 {
1619     THREAD_STATE_NODE threadState;
1620     void *fp;
1621 
1622     NV_ENTER_RM_RUNTIME(sp,fp);
1623     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1624 
1625     // UNLOCK: release all GPUs lock
1626     rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
1627 
1628     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1629     NV_EXIT_RM_RUNTIME(sp,fp);
1630 
1631     return NV_OK;
1632 }
1633 
1634 /*!
1635  * @brief Handle ACPI_NOTIFY_GPS_STATUS_CHANGE event.
1636  *
1637  * This function is called for GPS when SBIOS trigger
1638  * gps STATUS_CHANGE event, which calls rm control call
1639  * NV0000_CTRL_CMD_SYSTEM_GPS_CONTROL to init the GPS
1640  * data from SBIOS.
1641  */
1642 static void RmHandleGPSStatusChange
1643 (
1644     nv_state_t *pNv
1645 )
1646 {
1647 }
1648 
1649 /*!
1650  * @brief Function to handle device specific ACPI events.
1651  *
1652  * @param[in]   sp           nvidia_stack_t pointer.
1653  * @param[in]   nv           nv_state_t pointer.
1654  * @param[in]   event_type   NvU32 Event type.
1655  */
1656 void NV_API_CALL rm_acpi_notify(
1657     nvidia_stack_t *sp,
1658     nv_state_t *nv,
1659     NvU32 event_type
1660 )
1661 {
1662     void *fp;
1663     NV_ENTER_RM_RUNTIME(sp,fp);
1664 
1665     switch (event_type)
1666     {
1667         case ACPI_VIDEO_NOTIFY_PROBE:
1668         {
1669             THREAD_STATE_NODE threadState;
1670 
1671             threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1672             RmHandleDisplayChange(sp, nv);
1673             threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1674             break;
1675         }
1676 
1677         case ACPI_NOTIFY_GPS_STATUS_CHANGE:
1678             RmHandleGPSStatusChange(nv);
1679             break;
1680 
1681         case ACPI_NOTIFY_POWER_LEVEL_D1:    /* fallthrough */
1682         case ACPI_NOTIFY_POWER_LEVEL_D2:    /* fallthrough */
1683         case ACPI_NOTIFY_POWER_LEVEL_D3:    /* fallthrough */
1684         case ACPI_NOTIFY_POWER_LEVEL_D4:    /* fallthrough */
1685         case ACPI_NOTIFY_POWER_LEVEL_D5:
1686             RmHandleDNotifierEvent(nv, event_type);
1687             break;
1688 
1689         default:
1690             NV_PRINTF(LEVEL_INFO, "No support for 0x%x event\n", event_type);
1691             NV_ASSERT(0);
1692             break;
1693     }
1694 
1695     NV_EXIT_RM_RUNTIME(sp,fp);
1696 }
1697 
1698 static void nv_align_mmap_offset_length(
1699     nv_usermap_access_params_t *nvuap)
1700 {
1701     NvU64 page_size = os_page_size;
1702     NvU64 end = nvuap->size + (nvuap->addr & (page_size - 1));
1703 
1704     nvuap->mmap_start = NV_ALIGN_DOWN(nvuap->addr, page_size);
1705     nvuap->mmap_size = NV_ALIGN_UP(end, page_size);
1706     nvuap->offset = NV_ALIGN_DOWN(nvuap->offset, page_size);
1707 }
1708 
1709 static inline NV_STATUS RmGetArrayMinMax(
1710     NvU64 *array,
1711     NvU64 count,
1712     NvU64 *min,
1713     NvU64 *max
1714 )
1715 {
1716     NvU64 i;
1717 
1718     if (array == NULL)
1719     {
1720         return NV_ERR_INVALID_ARGUMENT;
1721     }
1722 
1723     *min = array[0];
1724     *max = array[0];
1725 
1726     if (count == 1)
1727         return NV_OK;
1728 
1729     for (i = 1; i < count; i++)
1730     {
1731         if (array[i] > *max)
1732             *max = array[i];
1733 
1734         if (array[i] < *min)
1735             *min = array[i];
1736     }
1737 
1738     return NV_OK;
1739 }
1740 
1741 static NV_STATUS RmSetUserMapAccessRange(
1742     nv_usermap_access_params_t *nvuap
1743 )
1744 {
1745     NV_STATUS status = NV_OK;
1746 
1747     if (nvuap->contig)
1748     {
1749          nvuap->access_start = nvuap->mmap_start;
1750          nvuap->access_size = nvuap->mmap_size;
1751     }
1752     else
1753     {
1754         NvU64 highest_address_mapped;
1755         NvU64 lowest_address_mapped;
1756 
1757         status = RmGetArrayMinMax(nvuap->page_array, nvuap->num_pages,
1758                                   &lowest_address_mapped,
1759                                   &highest_address_mapped);
1760         if (status != NV_OK)
1761         {
1762             return status;
1763         }
1764 
1765         nvuap->access_start = lowest_address_mapped;
1766         nvuap->access_size = (highest_address_mapped + os_page_size) - lowest_address_mapped;
1767     }
1768 
1769     return status;
1770 }
1771 
1772 static NV_STATUS RmGetAllocPrivate(NvU32, NvU32, NvU64, NvU64, NvU32 *, void **,
1773                                    NvU64 *);
1774 static NV_STATUS RmValidateMmapRequest(nv_state_t *, NvU64, NvU64, NvU32 *);
1775 
1776 static NV_STATUS RmGetMmapPteArray(
1777     KernelMemorySystem         *pKernelMemorySystem,
1778     NvHandle                    hClient,
1779     NvHandle                    hMemory,
1780     nv_usermap_access_params_t *nvuap
1781 )
1782 {
1783     NV_STATUS status = NV_OK;
1784     NvU64 pages = 0;
1785     Memory *pMemory = NULL;
1786     PMEMORY_DESCRIPTOR pMemDesc  = NULL;
1787     RsResourceRef *pResourceRef;
1788     NvU64 i;
1789     NvU64 *pteArray;
1790     NvU64 index;
1791 
1792     if (nvuap == NULL)
1793     {
1794         return NV_ERR_INVALID_ARGUMENT;
1795     }
1796 
1797     //
1798     // If we're mapping a memory handle, we can get the data from the
1799     // descriptor (required if the allocation being mapped is discontiguous).
1800     //
1801     if (serverutilGetResourceRefWithType(hClient, hMemory, classId(Memory),
1802                                         &pResourceRef) == NV_OK)
1803     {
1804         pMemory = dynamicCast(pResourceRef->pResource, Memory);
1805         pMemDesc = pMemory->pMemDesc;
1806         nvuap->contig = memdescGetContiguity(pMemDesc, AT_CPU);
1807     }
1808 
1809     //
1810     // In the discontiguous case, the page array needs to be large enough to hold
1811     // the 4K-page-sized addresses that we will then deflate to OS page addresses.
1812     // For the contiguous case, we can create the OS-page-sized addresses
1813     // directly in the array.
1814     //
1815     if (nvuap->contig)
1816     {
1817         pages = nvuap->mmap_size / os_page_size;
1818     }
1819     else
1820     {
1821         pages = nvuap->mmap_size / NV_RM_PAGE_SIZE;
1822     }
1823 
1824     NV_ASSERT_OR_RETURN(pages != 0, NV_ERR_INVALID_ARGUMENT);
1825 
1826     status = os_alloc_mem((void **)&nvuap->page_array, (pages * sizeof(NvU64)));
1827     if (status != NV_OK)
1828     {
1829         return status;
1830     }
1831 
1832     if (!nvuap->contig)
1833     {
1834         pteArray = memdescGetPteArray(pMemDesc, AT_CPU);
1835         index = nvuap->offset / NV_RM_PAGE_SIZE;
1836 
1837         //
1838         // We're guaranteed to have a MEMORY_DESCRIPTOR in the discontiguous
1839         // case. Copy over the addresses now.
1840         //
1841         portMemCopy((void *)nvuap->page_array,
1842                     pages * sizeof(NvU64), (void *)&pteArray[index],
1843                     pages * sizeof(NvU64));
1844 
1845         if (NV_RM_PAGE_SIZE < os_page_size)
1846         {
1847             RmDeflateRmToOsPageArray(nvuap->page_array, pages);
1848             pages = NV_RM_PAGES_TO_OS_PAGES(pages);
1849         }
1850 
1851         //
1852         // Convert the GPU physical addresses to system physical addresses,
1853         // if applicable.
1854         //
1855         for (i = 0; i < pages; i++)
1856         {
1857             nvuap->page_array[i] += pKernelMemorySystem->coherentCpuFbBase;
1858         }
1859     }
1860     else
1861     {
1862         // Offset is accounted in mmap_start.
1863         for (nvuap->page_array[0] = nvuap->mmap_start, i = 1;
1864              i < pages; i++)
1865         {
1866             nvuap->page_array[i] = nvuap->page_array[i-1] + os_page_size;
1867         }
1868     }
1869 
1870     nvuap->num_pages = pages;
1871 
1872     return status;
1873 }
1874 
1875 /* Must be called with the API lock and the GPU locks */
1876 static NV_STATUS RmCreateMmapContextLocked(
1877     NvHandle    hClient,
1878     NvHandle    hDevice,
1879     NvHandle    hMemory,
1880     NvP64       address,
1881     NvU64       size,
1882     NvU64       offset,
1883     NvU32       cachingType,
1884     NvU32       fd
1885 )
1886 {
1887     NV_STATUS status = NV_OK;
1888     void *pAllocPriv = NULL;
1889     OBJGPU *pGpu = NULL;
1890     KernelMemorySystem *pKernelMemorySystem = NULL;
1891     NvBool bCoherentAtsCpuOffset = NV_FALSE;
1892     nv_state_t *pNv = NULL;
1893     NvU64 addr = (NvU64)address;
1894     NvU32 prot = 0;
1895     NvU64 pageIndex = 0;
1896     nv_usermap_access_params_t *nvuap = NULL;
1897     NvBool bClientMap = (hClient == hDevice);
1898 
1899     if (!bClientMap)
1900     {
1901         if (CliSetGpuContext(hClient, hDevice, &pGpu, NULL) != NV_OK)
1902         {
1903             NvU32 tmp;
1904             if (CliSetSubDeviceContext(hClient, hDevice, &tmp, &pGpu) != NV_OK)
1905             {
1906                 //
1907                 // If this mapping isn't for a GPU then we don't need to
1908                 // create a context for it.
1909                 //
1910                 return status;
1911             }
1912         }
1913     }
1914 
1915     status = os_alloc_mem((void**)&nvuap, sizeof(nv_usermap_access_params_t));
1916     if (status != NV_OK)
1917     {
1918         return status;
1919     }
1920 
1921     portMemSet(nvuap, 0, sizeof(nv_usermap_access_params_t));
1922     nvuap->addr = addr;
1923     nvuap->size = size;
1924     nvuap->offset = offset;
1925     nvuap->caching = cachingType;
1926 
1927     //
1928     // Assume the allocation is contiguous until RmGetMmapPteArray
1929     // determines otherwise.
1930     //
1931     nvuap->contig = NV_TRUE;
1932     nv_align_mmap_offset_length(nvuap);
1933 
1934     if (pGpu != NULL)
1935     {
1936         pNv = NV_GET_NV_STATE(pGpu);
1937         pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
1938         bCoherentAtsCpuOffset = IS_COHERENT_CPU_ATS_OFFSET(pKernelMemorySystem, addr, size);
1939     }
1940 
1941     //
1942     // If no device is given, or the address isn't in the given device's BARs,
1943     // validate this as a system memory mapping and associate it with the
1944     // control device.
1945     //
1946     if ((pNv == NULL) ||
1947         (!IS_REG_OFFSET(pNv, addr, size) &&
1948          !IS_FB_OFFSET(pNv, addr, size) &&
1949          !bCoherentAtsCpuOffset &&
1950          !IS_IMEM_OFFSET(pNv, addr, size)))
1951     {
1952         pNv = nv_get_ctl_state();
1953 
1954         //
1955         // Validate the mapping request by looking up the underlying sysmem
1956         // allocation.
1957         //
1958         status = RmGetAllocPrivate(hClient, hMemory, addr, size, &prot, &pAllocPriv,
1959                                    &pageIndex);
1960 
1961         if (status != NV_OK)
1962         {
1963             goto done;
1964         }
1965     }
1966     else
1967     {
1968         //
1969         // Validate the mapping request for ATS and get pteArray
1970         //
1971         if (bCoherentAtsCpuOffset)
1972         {
1973             status = RmGetMmapPteArray(pKernelMemorySystem, hClient, hMemory, nvuap);
1974             if (status != NV_OK)
1975             {
1976                 goto done;
1977             }
1978         }
1979 
1980         if (RmSetUserMapAccessRange(nvuap) != NV_OK)
1981         {
1982             goto done;
1983         }
1984 
1985         status = nv_get_usermap_access_params(pNv, nvuap);
1986         if (status != NV_OK)
1987         {
1988             goto done;
1989         }
1990 
1991         // Validate the mapping request for BAR's.
1992         status = RmValidateMmapRequest(pNv, nvuap->access_start,
1993                                        nvuap->access_size, &prot);
1994         if (status != NV_OK)
1995         {
1996             goto done;
1997         }
1998     }
1999 
2000     status = nv_add_mapping_context_to_file(pNv, nvuap, prot, pAllocPriv,
2001                                             pageIndex, fd);
2002 
2003 done:
2004     os_free_mem(nvuap);
2005     return status;
2006 }
2007 
2008 // TODO: Bug 1802250: [uvm8] Use an alt stack in all functions in unix/src/osapi.c
2009 NV_STATUS rm_create_mmap_context(
2010     NvHandle    hClient,
2011     NvHandle    hDevice,
2012     NvHandle    hMemory,
2013     NvP64       address,
2014     NvU64       size,
2015     NvU64       offset,
2016     NvU32       cachingType,
2017     NvU32       fd
2018 )
2019 {
2020     NV_STATUS rmStatus = NV_OK;
2021     // LOCK: acquire API lock
2022     if ((rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) == NV_OK)
2023     {
2024         RmClient *pClient;
2025 
2026         if (NV_OK != serverutilAcquireClient(hClient, LOCK_ACCESS_READ, &pClient))
2027             return NV_ERR_INVALID_CLIENT;
2028 
2029         if (pClient->ProcID != osGetCurrentProcess())
2030         {
2031             rmStatus = NV_ERR_INVALID_CLIENT;
2032         }
2033         // LOCK: acquire GPUs lock
2034         else if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) == NV_OK)
2035         {
2036             rmStatus = RmCreateMmapContextLocked(hClient, hDevice, hMemory,
2037                                                  address, size, offset, cachingType, fd);
2038             // UNLOCK: release GPUs lock
2039             rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
2040         }
2041 
2042         serverutilReleaseClient(LOCK_ACCESS_READ, pClient);
2043 
2044         // UNLOCK: release API lock
2045         rmapiLockRelease();
2046     }
2047 
2048     return rmStatus;
2049 }
2050 
2051 static NV_STATUS RmGetAllocPrivate(
2052     NvU32       hClient,
2053     NvU32       hMemory,
2054     NvU64       offset,
2055     NvU64       length,
2056     NvU32      *pProtection,
2057     void      **ppPrivate,
2058     NvU64      *pPageIndex
2059 )
2060 {
2061     RmClient *pClient;
2062     NV_STATUS rmStatus;
2063     PMEMORY_DESCRIPTOR pMemDesc;
2064     NvU32 pageOffset;
2065     NvU64 pageCount;
2066     NvU64 endingOffset;
2067     RsResourceRef *pResourceRef;
2068     RmResource *pRmResource;
2069     void *pMemData;
2070     NvBool bPeerIoMem;
2071     NvBool bReadOnlyMem;
2072     *pProtection = NV_PROTECT_READ_WRITE;
2073     *ppPrivate = NULL;
2074 
2075     pageOffset = (offset & ~os_page_mask);
2076     offset &= os_page_mask;
2077 
2078     NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
2079 
2080     if (NV_OK != serverutilAcquireClient(hClient, LOCK_ACCESS_READ, &pClient))
2081         return NV_ERR_INVALID_CLIENT;
2082 
2083     rmStatus = clientGetResourceRef(staticCast(pClient, RsClient), hMemory, &pResourceRef);
2084     if (rmStatus != NV_OK)
2085         goto done;
2086 
2087     pRmResource = dynamicCast(pResourceRef->pResource, RmResource);
2088     if (!pRmResource)
2089     {
2090         rmStatus = NV_ERR_INVALID_OBJECT;
2091         goto done;
2092     }
2093 
2094     rmStatus = rmresGetMemoryMappingDescriptor(pRmResource, &pMemDesc);
2095     if (rmStatus != NV_OK)
2096         goto done;
2097 
2098     bReadOnlyMem = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY);
2099     bPeerIoMem = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM);
2100 
2101     if (!(pMemDesc->Allocated || bPeerIoMem))
2102     {
2103         rmStatus = NV_ERR_OBJECT_NOT_FOUND;
2104         goto done;
2105     }
2106 
2107     switch (memdescGetAddressSpace(pMemDesc))
2108     {
2109         case ADDR_SYSMEM:
2110             break;
2111         default:
2112             rmStatus = NV_ERR_OBJECT_NOT_FOUND;
2113             goto done;
2114     }
2115 
2116     pMemData = memdescGetMemData(pMemDesc);
2117     if (pMemData == NULL)
2118     {
2119         rmStatus = NV_ERR_OBJECT_NOT_FOUND;
2120         goto done;
2121     }
2122 
2123     rmStatus = os_match_mmap_offset(pMemData, offset, pPageIndex);
2124     if (rmStatus != NV_OK)
2125         goto done;
2126 
2127     if (!portSafeAddU64(pageOffset, length, &endingOffset))
2128     {
2129         rmStatus = NV_ERR_INVALID_ARGUMENT;
2130         goto done;
2131     }
2132 
2133     pageCount = (endingOffset / os_page_size);
2134 
2135     if (!portSafeAddU64(*pPageIndex + ((endingOffset % os_page_size) ? 1 : 0),
2136                         pageCount, &pageCount))
2137     {
2138         rmStatus = NV_ERR_INVALID_ARGUMENT;
2139         goto done;
2140     }
2141 
2142     if (pageCount > NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount))
2143     {
2144         rmStatus = NV_ERR_INVALID_ARGUMENT;
2145         goto done;
2146     }
2147 
2148     if (bReadOnlyMem)
2149        *pProtection = NV_PROTECT_READABLE;
2150 
2151     *ppPrivate = pMemData;
2152 
2153 done:
2154     serverutilReleaseClient(LOCK_ACCESS_READ, pClient);
2155 
2156     return rmStatus;
2157 }
2158 
2159 static NV_STATUS RmValidateMmapRequest(
2160     nv_state_t *pNv,
2161     NvU64       offset,
2162     NvU64       length,
2163     NvU32      *pProtection
2164 )
2165 {
2166     NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS params = { 0 };
2167     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
2168     NV_STATUS status;
2169 
2170     if (osIsAdministrator())
2171     {
2172         *pProtection = NV_PROTECT_READ_WRITE;
2173         return NV_OK;
2174     }
2175 
2176     params.addressStart = offset;
2177     params.addressLength = length;
2178 
2179     status = pRmApi->Control(pRmApi, pNv->rmapi.hClient,
2180                              pNv->rmapi.hSubDevice,
2181                              NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST,
2182                              &params, sizeof(params));
2183 
2184     if (status == NV_OK)
2185     {
2186         *pProtection = params.protection;
2187     }
2188 
2189     return status;
2190 }
2191 
2192 NV_STATUS rm_get_adapter_status(
2193     nv_state_t *pNv,
2194     NvU32      *pStatus
2195 )
2196 {
2197     NV_STATUS rmStatus = NV_ERR_OPERATING_SYSTEM;
2198 
2199     // LOCK: acquire API lock
2200     if (rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI) == NV_OK)
2201     {
2202         rmStatus = RmGetAdapterStatus(pNv, pStatus);
2203 
2204         // UNLOCK: release API lock
2205         rmapiLockRelease();
2206     }
2207 
2208     return rmStatus;
2209 }
2210 
2211 NvBool NV_API_CALL rm_init_rm(
2212     nvidia_stack_t *sp
2213 )
2214 {
2215     NvBool retval;
2216     void *fp;
2217 
2218     NV_ENTER_RM_RUNTIME(sp,fp);
2219 
2220     retval = RmInitRm();
2221 
2222     NV_EXIT_RM_RUNTIME(sp,fp);
2223 
2224     return retval;
2225 }
2226 
2227 void NV_API_CALL rm_shutdown_rm(
2228     nvidia_stack_t *sp
2229 )
2230 {
2231     void *fp;
2232 
2233     NV_ENTER_RM_RUNTIME(sp,fp);
2234 
2235     RmShutdownRm();
2236 
2237     NV_EXIT_RM_RUNTIME(sp,fp);
2238 }
2239 
2240 NvBool NV_API_CALL rm_init_event_locks(
2241     nvidia_stack_t *sp,
2242     nv_state_t *pNv
2243 )
2244 {
2245     void *fp;
2246     NvBool ret;
2247 
2248     NV_ENTER_RM_RUNTIME(sp,fp);
2249 
2250     pNv->event_spinlock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged());
2251     ret = (pNv->event_spinlock != NULL);
2252 
2253     NV_EXIT_RM_RUNTIME(sp,fp);
2254     return ret;
2255 }
2256 
2257 void NV_API_CALL rm_destroy_event_locks(
2258     nvidia_stack_t *sp,
2259     nv_state_t *pNv
2260 )
2261 {
2262     void *fp;
2263 
2264     NV_ENTER_RM_RUNTIME(sp,fp);
2265 
2266     if (pNv && pNv->event_spinlock)
2267         portSyncSpinlockDestroy(pNv->event_spinlock);
2268 
2269     NV_EXIT_RM_RUNTIME(sp,fp);
2270 }
2271 
2272 void NV_API_CALL rm_get_vbios_version(
2273     nvidia_stack_t *sp,
2274     nv_state_t *pNv,
2275     char *vbiosString
2276 )
2277 {
2278     void   *fp;
2279     NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS *params;
2280     RM_API *pRmApi;
2281     THREAD_STATE_NODE threadState;
2282     const size_t vbiosStringLen = 15; // "xx.xx.xx.xx.xx"
2283 
2284     os_snprintf(vbiosString, vbiosStringLen, "??.??.??.??.??");
2285 
2286     NV_ENTER_RM_RUNTIME(sp,fp);
2287 
2288     params = portMemAllocNonPaged(sizeof(*params));
2289     if (params == NULL)
2290     {
2291         NV_EXIT_RM_RUNTIME(sp,fp);
2292         return;
2293     }
2294 
2295     portMemSet(params, 0, sizeof(*params));
2296 
2297     params->biosInfoList[0].index = NV2080_CTRL_BIOS_INFO_INDEX_REVISION;
2298     params->biosInfoList[1].index = NV2080_CTRL_BIOS_INFO_INDEX_OEM_REVISION;
2299     params->biosInfoListSize = 2;
2300 
2301     pRmApi = RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_VBIOS);
2302     if (pRmApi != NULL)
2303     {
2304         NV_STATUS rmStatus;
2305 
2306         rmStatus = pRmApi->Control(pRmApi,
2307                                    pNv->rmapi.hClient,
2308                                    pNv->rmapi.hSubDevice,
2309                                    NV2080_CTRL_CMD_BIOS_GET_INFO_V2,
2310                                    params,
2311                                    sizeof(*params));
2312 
2313         if (rmStatus == NV_OK)
2314         {
2315             const NvU32 biosRevision = params->biosInfoList[0].data;
2316             const NvU32 biosOEMRevision = params->biosInfoList[1].data;
2317 
2318             os_snprintf(vbiosString, vbiosStringLen,
2319                         "%02x.%02x.%02x.%02x.%02x",
2320                         (biosRevision & 0xff000000) >> 24,
2321                         (biosRevision & 0x00ff0000) >> 16,
2322                         (biosRevision & 0x0000ff00) >>  8,
2323                         (biosRevision & 0x000000ff) >>  0,
2324                         biosOEMRevision);
2325         }
2326 
2327         RmUnixRmApiEpilogue(pNv, &threadState);
2328     }
2329 
2330     portMemFree(params);
2331 
2332     NV_EXIT_RM_RUNTIME(sp,fp);
2333 }
2334 
2335 NV_STATUS NV_API_CALL rm_stop_user_channels(
2336     nvidia_stack_t *sp,
2337     nv_state_t *pNv
2338 )
2339 {
2340     NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS params = { 0 };
2341     RM_API            *pRmApi;
2342     THREAD_STATE_NODE  threadState;
2343     NV_STATUS          rmStatus = NV_ERR_INVALID_STATE;
2344     void              *fp;
2345 
2346     NV_ENTER_RM_RUNTIME(sp,fp);
2347 
2348     pRmApi = RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_FIFO);
2349     if (pRmApi != NULL)
2350     {
2351         params.bDisable = NV_TRUE;
2352         rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient,
2353                                    pNv->rmapi.hSubDevice,
2354                                    NV2080_CTRL_CMD_FIFO_DISABLE_USERMODE_CHANNELS,
2355                                    &params, sizeof(params));
2356 
2357         RmUnixRmApiEpilogue(pNv, &threadState);
2358     }
2359 
2360     NV_EXIT_RM_RUNTIME(sp,fp);
2361 
2362     return rmStatus;
2363 }
2364 
2365 NV_STATUS NV_API_CALL rm_restart_user_channels(
2366     nvidia_stack_t *sp,
2367     nv_state_t *pNv
2368 )
2369 {
2370     NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS params = { 0 };
2371     RM_API            *pRmApi;
2372     THREAD_STATE_NODE  threadState;
2373     NV_STATUS          rmStatus = NV_ERR_INVALID_STATE;
2374     void              *fp;
2375 
2376     NV_ENTER_RM_RUNTIME(sp,fp);
2377 
2378     pRmApi = RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_FIFO);
2379     if (pRmApi != NULL)
2380     {
2381         params.bDisable = NV_FALSE;
2382         rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hSubDevice,
2383                                    NV2080_CTRL_CMD_FIFO_DISABLE_USERMODE_CHANNELS,
2384                                    &params, sizeof(params));
2385 
2386         RmUnixRmApiEpilogue(pNv, &threadState);
2387     }
2388 
2389     NV_EXIT_RM_RUNTIME(sp,fp);
2390 
2391     return rmStatus;
2392 }
2393 
2394 //
2395 // Use this call to check if the chipset is io coherent
2396 //
2397 NvBool NV_API_CALL rm_is_chipset_io_coherent(
2398     nvidia_stack_t  *sp)
2399 {
2400     void *fp;
2401     OBJSYS *pSys;
2402     OBJCL *pCl;
2403     NvBool bIoCoherent = NV_FALSE;
2404 
2405     NV_ENTER_RM_RUNTIME(sp,fp);
2406 
2407     pSys = SYS_GET_INSTANCE();
2408     pCl = SYS_GET_CL(pSys);
2409     if (pCl == NULL)
2410     {
2411         NV_PRINTF(LEVEL_ERROR,
2412                   "%s: no CL object found, setting io coherent by default\n",
2413                   __FUNCTION__);
2414         goto done;
2415     }
2416 
2417     bIoCoherent = pCl->getProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT);
2418 
2419 done:
2420     NV_EXIT_RM_RUNTIME(sp,fp);
2421 
2422     return bIoCoherent;
2423 }
2424 
2425 NV_STATUS NV_API_CALL rm_ioctl(
2426     nvidia_stack_t     *sp,
2427     nv_state_t         *pNv,
2428     nv_file_private_t  *nvfp,
2429     NvU32               Command,
2430     void               *pData,
2431     NvU32               dataSize
2432 )
2433 {
2434     NV_STATUS rmStatus;
2435     THREAD_STATE_NODE threadState;
2436     void *fp;
2437 
2438     NV_ENTER_RM_RUNTIME(sp,fp);
2439     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
2440 
2441     rmStatus = RmIoctl(pNv, nvfp, Command, pData, dataSize);
2442 
2443     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
2444     NV_EXIT_RM_RUNTIME(sp,fp);
2445 
2446     return rmStatus;
2447 }
2448 
2449 void NV_API_CALL rm_cleanup_file_private(
2450     nvidia_stack_t     *sp,
2451     nv_state_t         *pNv,
2452     nv_file_private_t  *nvfp
2453 )
2454 {
2455     THREAD_STATE_NODE threadState;
2456     void      *fp;
2457     RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL);
2458     RM_API_CONTEXT rmApiContext = {0};
2459     NvU32 i;
2460 
2461     NV_ENTER_RM_RUNTIME(sp,fp);
2462     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
2463     threadStateSetTimeoutOverride(&threadState, 10 * 1000);
2464 
2465     if (rmapiPrologue(pRmApi, &rmApiContext) != NV_OK)
2466         return;
2467 
2468     // LOCK: acquire API lock
2469     if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK)
2470     {
2471         // Unref any object which was exported on this file.
2472         if (nvfp->handles != NULL)
2473         {
2474             for (i = 0; i < nvfp->maxHandles; i++)
2475             {
2476                 if (nvfp->handles[i] == 0)
2477                 {
2478                     continue;
2479                 }
2480 
2481                 RmFreeObjExportHandle(nvfp->handles[i]);
2482                 nvfp->handles[i] = 0;
2483             }
2484 
2485             os_free_mem(nvfp->handles);
2486             nvfp->handles = NULL;
2487             nvfp->maxHandles = 0;
2488         }
2489 
2490         // Free any RM clients associated with this file.
2491         RmFreeUnusedClients(pNv, nvfp);
2492 
2493         // UNLOCK: release API lock
2494         rmapiLockRelease();
2495     }
2496 
2497     rmapiEpilogue(pRmApi, &rmApiContext);
2498     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
2499 
2500     if (nvfp->ctl_nvfp != NULL)
2501     {
2502         nv_put_file_private(nvfp->ctl_nvfp_priv);
2503         nvfp->ctl_nvfp = NULL;
2504         nvfp->ctl_nvfp_priv = NULL;
2505     }
2506 
2507     NV_EXIT_RM_RUNTIME(sp,fp);
2508 }
2509 
2510 void NV_API_CALL rm_unbind_lock(
2511     nvidia_stack_t *sp,
2512     nv_state_t *pNv
2513 )
2514 {
2515     THREAD_STATE_NODE threadState;
2516     void      *fp;
2517 
2518     NV_ENTER_RM_RUNTIME(sp,fp);
2519     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
2520 
2521     // LOCK: acquire API lock
2522     if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK)
2523     {
2524         RmUnbindLock(pNv);
2525 
2526         // UNLOCK: release API lock
2527         rmapiLockRelease();
2528     }
2529 
2530     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
2531     NV_EXIT_RM_RUNTIME(sp,fp);
2532 }
2533 
2534 NV_STATUS rm_alloc_os_event(
2535     NvHandle            hClient,
2536     nv_file_private_t  *nvfp,
2537     NvU32               fd
2538 )
2539 {
2540     NV_STATUS RmStatus;
2541 
2542     // LOCK: acquire API lock
2543     if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
2544     {
2545         RmStatus = RmAllocOsEvent(hClient, nvfp, fd);
2546 
2547         // UNLOCK: release API lock
2548         rmapiLockRelease();
2549     }
2550 
2551     return RmStatus;
2552 }
2553 
2554 NV_STATUS rm_free_os_event(
2555     NvHandle    hClient,
2556     NvU32       fd
2557 )
2558 {
2559     NV_STATUS RmStatus;
2560 
2561     // LOCK: acquire API lock
2562     if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
2563     {
2564         RmStatus = RmFreeOsEvent(hClient, fd);
2565 
2566         // UNLOCK: release API lock
2567         rmapiLockRelease();
2568     }
2569 
2570     return RmStatus;
2571 }
2572 
2573 NV_STATUS rm_get_event_data(
2574     nv_file_private_t  *nvfp,
2575     NvP64               pEvent,
2576     NvU32              *MoreEvents
2577 )
2578 {
2579     NV_STATUS RmStatus;
2580 
2581     // LOCK: acquire API lock
2582     if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
2583     {
2584         RmStatus = RmGetEventData(nvfp, pEvent, MoreEvents, NV_TRUE);
2585 
2586         // UNLOCK: release API lock
2587         rmapiLockRelease();
2588     }
2589 
2590     return RmStatus;
2591 }
2592 
2593 NV_STATUS NV_API_CALL rm_read_registry_dword(
2594     nvidia_stack_t *sp,
2595     nv_state_t *nv,
2596     const char *regParmStr,
2597     NvU32      *Data
2598 )
2599 {
2600     OBJGPU    *pGpu = NULL;
2601     NV_STATUS  RmStatus;
2602     void      *fp;
2603     NvBool     isApiLockTaken = NV_FALSE;
2604 
2605     NV_ENTER_RM_RUNTIME(sp,fp);
2606 
2607     //
2608     // We can be called from different contexts:
2609     //
2610     // 1) early initialization without device state.
2611     // 2) from outside the RM API (without the lock held)
2612     //
2613     // In context 1)the API lock is not needed and
2614     // in context 2), it needs to be acquired.
2615     //
2616     if (nv != NULL)
2617     {
2618         // LOCK: acquire API lock
2619         if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) != NV_OK)
2620         {
2621             NV_EXIT_RM_RUNTIME(sp,fp);
2622             return RmStatus;
2623         }
2624 
2625         isApiLockTaken = NV_TRUE;
2626     }
2627 
2628     pGpu = NV_GET_NV_PRIV_PGPU(nv);
2629 
2630     // Skipping the NULL check as osReadRegistryDword takes care of it.
2631     RmStatus = osReadRegistryDword(pGpu, regParmStr, Data);
2632 
2633     if (isApiLockTaken == NV_TRUE)
2634     {
2635         // UNLOCK: release API lock
2636         rmapiLockRelease();
2637     }
2638 
2639     NV_EXIT_RM_RUNTIME(sp,fp);
2640 
2641     return RmStatus;
2642 }
2643 
2644 NV_STATUS NV_API_CALL rm_write_registry_dword(
2645     nvidia_stack_t *sp,
2646     nv_state_t *nv,
2647     const char *regParmStr,
2648     NvU32       Data
2649 )
2650 {
2651     NV_STATUS  RmStatus;
2652     void      *fp;
2653     NvBool     isApiLockTaken = NV_FALSE;
2654 
2655     NV_ENTER_RM_RUNTIME(sp,fp);
2656 
2657     if (nv != NULL)
2658     {
2659         // LOCK: acquire API lock
2660         if ((RmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK)
2661         {
2662             NV_EXIT_RM_RUNTIME(sp,fp);
2663             return RmStatus;
2664         }
2665 
2666         isApiLockTaken = NV_TRUE;
2667     }
2668 
2669     RmStatus = RmWriteRegistryDword(nv, regParmStr, Data);
2670 
2671     if (isApiLockTaken == NV_TRUE)
2672     {
2673         // UNLOCK: release API lock
2674         rmapiLockRelease();
2675     }
2676 
2677     NV_EXIT_RM_RUNTIME(sp,fp);
2678 
2679     return RmStatus;
2680 }
2681 
2682 NV_STATUS NV_API_CALL rm_write_registry_binary(
2683     nvidia_stack_t *sp,
2684     nv_state_t *nv,
2685     const char *regParmStr,
2686     NvU8       *Data,
2687     NvU32       cbLen
2688 )
2689 {
2690     NV_STATUS  RmStatus;
2691     void      *fp;
2692     NvBool     isApiLockTaken = NV_FALSE;
2693 
2694     NV_ENTER_RM_RUNTIME(sp,fp);
2695 
2696     if (nv != NULL)
2697     {
2698         // LOCK: acquire API lock
2699         if ((RmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK)
2700         {
2701             NV_EXIT_RM_RUNTIME(sp,fp);
2702             return RmStatus;
2703         }
2704 
2705         isApiLockTaken = NV_TRUE;
2706     }
2707 
2708     RmStatus = RmWriteRegistryBinary(nv, regParmStr, Data, cbLen);
2709 
2710     if (isApiLockTaken == NV_TRUE)
2711     {
2712         // UNLOCK: release API lock
2713         rmapiLockRelease();
2714     }
2715 
2716     NV_EXIT_RM_RUNTIME(sp,fp);
2717 
2718     return RmStatus;
2719 }
2720 
2721 NV_STATUS NV_API_CALL rm_write_registry_string(
2722     nvidia_stack_t *sp,
2723     nv_state_t *nv,
2724     const char *regParmStr,
2725     const char *string,
2726     NvU32       stringLength
2727 )
2728 {
2729     NV_STATUS  rmStatus;
2730     void      *fp;
2731     NvBool     isApiLockTaken = NV_FALSE;
2732 
2733     NV_ENTER_RM_RUNTIME(sp,fp);
2734 
2735     if (nv != NULL)
2736     {
2737         // LOCK: acquire API lock
2738         if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK)
2739         {
2740             NV_EXIT_RM_RUNTIME(sp,fp);
2741             return rmStatus;
2742         }
2743 
2744         isApiLockTaken = NV_TRUE;
2745     }
2746 
2747     rmStatus = RmWriteRegistryString(nv, regParmStr, string, (stringLength + 1));
2748 
2749     if (isApiLockTaken == NV_TRUE)
2750     {
2751         // UNLOCK: release API lock
2752         rmapiLockRelease();
2753     }
2754 
2755     NV_EXIT_RM_RUNTIME(sp,fp);
2756 
2757     return rmStatus;
2758 }
2759 
2760 static NvBool NV_API_CALL rm_is_space(const char ch)
2761 {
2762     //
2763     // return true if it is a:
2764     // ' '  : (space - decimal 32.)
2765     // '\t' : (TAB - decimal 9)
2766     // 'LF' : (Line feed, new line - decimal 10)
2767     // 'VT' : (Vertical TAB - decimal 11)
2768     // 'FF' : (Form feed, new page - decimal 12)
2769     // '\r' : (carriage return - decimal 13)
2770     //
2771     return ((ch == ' ') || ((ch >= '\t') && (ch <= '\r')));
2772 }
2773 
2774 char* NV_API_CALL rm_remove_spaces(const char *in)
2775 {
2776     unsigned int len = os_string_length(in) + 1;
2777     const char *in_ptr;
2778     char *out, *out_ptr;
2779 
2780     if (os_alloc_mem((void **)&out, len) != NV_OK)
2781         return NULL;
2782 
2783     in_ptr = in;
2784     out_ptr = out;
2785 
2786     while (*in_ptr != '\0')
2787     {
2788         if (!rm_is_space(*in_ptr))
2789             *out_ptr++ = *in_ptr;
2790         in_ptr++;
2791     }
2792     *out_ptr = '\0';
2793 
2794     return out;
2795 }
2796 
2797 char* NV_API_CALL rm_string_token(char **strp, const char delim)
2798 {
2799     char *s, *token;
2800 
2801     if ((strp == NULL) || (*strp == NULL))
2802         return NULL;
2803 
2804     s = token = *strp;
2805     *strp = NULL;
2806 
2807     for (; *s != '\0'; s++) {
2808         if (*s == delim) {
2809             *s = '\0';
2810             *strp = ++s;
2811             break;
2812         }
2813     }
2814 
2815     return token;
2816 }
2817 
2818 // Parse string passed in NVRM as module parameter.
2819 void NV_API_CALL rm_parse_option_string(nvidia_stack_t *sp, const char *nvRegistryDwords)
2820 {
2821     unsigned int i;
2822     nv_parm_t *entry;
2823     char *option_string = NULL;
2824     char *ptr, *token;
2825     char *name, *value;
2826     NvU32 data;
2827 
2828     if (nvRegistryDwords != NULL)
2829     {
2830         if ((option_string = rm_remove_spaces(nvRegistryDwords)) == NULL)
2831         {
2832             return;
2833         }
2834 
2835         ptr = option_string;
2836 
2837         while ((token = rm_string_token(&ptr, ';')) != NULL)
2838         {
2839             if (!(name = rm_string_token(&token, '=')) || !os_string_length(name))
2840             {
2841                 continue;
2842             }
2843 
2844             if (!(value = rm_string_token(&token, '=')) || !os_string_length(value))
2845             {
2846                 continue;
2847             }
2848 
2849             if (rm_string_token(&token, '=') != NULL)
2850             {
2851                 continue;
2852             }
2853 
2854             data = os_strtoul(value, NULL, 0);
2855 
2856             for (i = 0; (entry = &nv_parms[i])->name != NULL; i++)
2857             {
2858                 if (os_string_compare(entry->name, name) == 0)
2859                     break;
2860             }
2861 
2862             if (!entry->name)
2863                 rm_write_registry_dword(sp, NULL, name, data);
2864             else
2865                 *entry->data = data;
2866         }
2867 
2868         // Free the memory allocated by rm_remove_spaces()
2869         os_free_mem(option_string);
2870     }
2871 }
2872 
2873 NV_STATUS NV_API_CALL rm_run_rc_callback(
2874     nvidia_stack_t *sp,
2875     nv_state_t *nv
2876 )
2877 {
2878     OBJGPU *pGpu;
2879     void   *fp;
2880 
2881     /* make sure our timer's not still running when it shouldn't be */
2882     if (nv == NULL)
2883         return NV_ERR_GENERIC;
2884 
2885     pGpu = NV_GET_NV_PRIV_PGPU(nv);
2886     if (pGpu == NULL)
2887         return NV_ERR_GENERIC;
2888 
2889     if (nv->rc_timer_enabled == 0)
2890         return NV_ERR_GENERIC;
2891 
2892     if (!FULL_GPU_SANITY_CHECK(pGpu))
2893     {
2894         return NV_ERR_GENERIC;
2895     }
2896 
2897     NV_ENTER_RM_RUNTIME(sp,fp);
2898 
2899     osRun1HzCallbacksNow(pGpu);
2900 
2901     NV_EXIT_RM_RUNTIME(sp,fp);
2902 
2903     return NV_OK;
2904 }
2905 
2906 static NV_STATUS RmRunNanoTimerCallback(
2907     OBJGPU *pGpu,
2908     void *pTmrEvent
2909 )
2910 {
2911     POBJTMR             pTmr = GPU_GET_TIMER(pGpu);
2912     THREAD_STATE_NODE   threadState;
2913     NV_STATUS         status = NV_OK;
2914     // LOCK: try to acquire GPUs lock
2915     if ((status = rmGpuLocksAcquire(GPU_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR)) != NV_OK)
2916     {
2917         TMR_EVENT *pEvent = (TMR_EVENT *)pTmrEvent;
2918 
2919         //
2920         // We failed to acquire the lock - depending on what's holding it,
2921         // the lock could be held for a while, so try again soon, but not too
2922         // soon to prevent the owner from making forward progress indefinitely.
2923         //
2924         return osStartNanoTimer(pGpu->pOsGpuInfo, pEvent->pOSTmrCBdata,
2925                                 osGetTickResolution());
2926     }
2927 
2928     threadStateInitISRAndDeferredIntHandler(&threadState, pGpu,
2929         THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
2930 
2931     // Call timer event service
2932     status = tmrEventServiceOSTimerCallback_HAL(pGpu, pTmr, (PTMR_EVENT)pTmrEvent);
2933 
2934     // Out of conflicting thread
2935     threadStateFreeISRAndDeferredIntHandler(&threadState,
2936         pGpu, THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
2937 
2938     // UNLOCK: release GPUs lock
2939     rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, pGpu);
2940 
2941     return status;
2942 }
2943 
2944 NV_STATUS NV_API_CALL rm_run_nano_timer_callback
2945 (
2946     nvidia_stack_t *sp,
2947     nv_state_t *nv,
2948     void *pTmrEvent
2949 )
2950 {
2951     NV_STATUS status;
2952     OBJGPU *pGpu = NULL;
2953     void   *fp;
2954 
2955     if (nv == NULL)
2956         return NV_ERR_GENERIC;
2957 
2958     pGpu = NV_GET_NV_PRIV_PGPU(nv);
2959     if (pGpu == NULL)
2960         return NV_ERR_GENERIC;
2961 
2962     if (!FULL_GPU_SANITY_CHECK(pGpu))
2963     {
2964         return NV_ERR_GENERIC;
2965     }
2966 
2967     NV_ENTER_RM_RUNTIME(sp,fp);
2968 
2969     status = RmRunNanoTimerCallback(pGpu, pTmrEvent);
2970 
2971     NV_EXIT_RM_RUNTIME(sp,fp);
2972 
2973     return status;
2974 }
2975 
2976 void NV_API_CALL rm_execute_work_item(
2977     nvidia_stack_t *sp,
2978     void *pNvWorkItem
2979 )
2980 {
2981     void *fp;
2982     THREAD_STATE_NODE threadState;
2983 
2984     NV_ENTER_RM_RUNTIME(sp, fp);
2985     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
2986 
2987     RmExecuteWorkItem(pNvWorkItem);
2988 
2989     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
2990     NV_EXIT_RM_RUNTIME(sp, fp);
2991 }
2992 
2993 const char* NV_API_CALL rm_get_device_name(
2994      NvU16       device,
2995      NvU16       subsystem_vendor,
2996      NvU16       subsystem_device
2997 )
2998 {
2999     unsigned int i;
3000     const char *tmpName = NULL;
3001 
3002     for (i = 0; i < NV_ARRAY_ELEMENTS(sChipsReleased); i++)
3003     {
3004         // if the device ID doesn't match, go to the next entry
3005         if (device != sChipsReleased[i].devID)
3006         {
3007             continue;
3008         }
3009 
3010         // if the entry has 0 for the subsystem IDs, then the device
3011         // ID match is sufficient, but continue scanning through
3012         // sChipsReleased[] in case there is a subsystem ID match later
3013         // in the table
3014         if (sChipsReleased[i].subSystemVendorID == 0 &&
3015             sChipsReleased[i].subSystemID == 0)
3016         {
3017             tmpName = sChipsReleased[i].name;
3018             continue;
3019         }
3020 
3021         if (subsystem_vendor == sChipsReleased[i].subSystemVendorID &&
3022             subsystem_device == sChipsReleased[i].subSystemID)
3023         {
3024             tmpName = sChipsReleased[i].name;
3025             break;
3026         }
3027     }
3028 
3029     return (tmpName != NULL) ? tmpName : "Unknown";
3030 }
3031 
3032 NV_STATUS rm_access_registry(
3033     NvHandle   hClient,
3034     NvHandle   hObject,
3035     NvU32      AccessType,
3036     NvP64      clientDevNodeAddress,
3037     NvU32      DevNodeLength,
3038     NvP64      clientParmStrAddress,
3039     NvU32      ParmStrLength,
3040     NvP64      clientBinaryDataAddress,
3041     NvU32      *pBinaryDataLength,
3042     NvU32      *Data,
3043     NvU32      *Entry
3044 )
3045 {
3046     NV_STATUS RmStatus;
3047     NvBool bReadOnly = (AccessType == NVOS38_ACCESS_TYPE_READ_DWORD) ||
3048                        (AccessType == NVOS38_ACCESS_TYPE_READ_BINARY);
3049 
3050     // LOCK: acquire API lock
3051     if ((RmStatus = rmapiLockAcquire(bReadOnly ? RMAPI_LOCK_FLAGS_READ : RMAPI_LOCK_FLAGS_NONE,
3052                                      RM_LOCK_MODULES_OSAPI)) == NV_OK)
3053     {
3054         RmStatus = RmAccessRegistry(hClient,
3055                                     hObject,
3056                                     AccessType,
3057                                     clientDevNodeAddress,
3058                                     DevNodeLength,
3059                                     clientParmStrAddress,
3060                                     ParmStrLength,
3061                                     clientBinaryDataAddress,
3062                                     pBinaryDataLength,
3063                                     Data,
3064                                     Entry);
3065 
3066         // UNLOCK: release API lock
3067         rmapiLockRelease();
3068     }
3069 
3070     return RmStatus;
3071 }
3072 
3073 NV_STATUS rm_update_device_mapping_info(
3074     NvHandle    hClient,
3075     NvHandle    hDevice,
3076     NvHandle    hMemory,
3077     void       *pOldCpuAddress,
3078     void       *pNewCpuAddress
3079 )
3080 {
3081     NV_STATUS RmStatus;
3082 
3083     // LOCK: acquire API lock
3084     if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK)
3085     {
3086         RmStatus = RmUpdateDeviceMappingInfo(hClient,
3087                                              hDevice,
3088                                              hMemory,
3089                                              pOldCpuAddress,
3090                                              pNewCpuAddress);
3091 
3092         // UNLOCK: release API lock
3093         rmapiLockRelease();
3094     }
3095 
3096     return RmStatus;
3097 }
3098 
3099 static void rm_is_device_rm_firmware_capable(
3100     nv_state_t *pNv,
3101     NvU32       pmcBoot42,
3102     NvBool     *pbIsFirmwareCapable,
3103     NvBool     *pbEnableByDefault
3104 )
3105 {
3106     NvBool bIsFirmwareCapable = NV_FALSE;
3107     NvBool bEnableByDefault = NV_FALSE;
3108 
3109     if (NV_IS_SOC_DISPLAY_DEVICE(pNv))
3110     {
3111         bIsFirmwareCapable = NV_TRUE;
3112     }
3113     else
3114     {
3115         bIsFirmwareCapable = gpumgrIsDeviceRmFirmwareCapable(pNv->pci_info.device_id,
3116                                                              pmcBoot42,
3117                                                              &bEnableByDefault);
3118     }
3119 
3120     if (pbIsFirmwareCapable != NULL)
3121     {
3122         *pbIsFirmwareCapable = bIsFirmwareCapable;
3123     }
3124     if (pbEnableByDefault != NULL)
3125     {
3126         *pbEnableByDefault = bEnableByDefault;
3127     }
3128 }
3129 
3130 static NvBool NV_API_CALL rm_is_legacy_device(
3131     NvU16       device_id,
3132     NvU16       subsystem_vendor,
3133     NvU16       subsystem_device,
3134     NvBool      print_warning
3135 )
3136 {
3137     return NV_FALSE;
3138 }
3139 
3140 static NvBool NV_API_CALL rm_is_legacy_arch(
3141     NvU32 pmc_boot_0,
3142     NvU32 pmc_boot_42
3143 )
3144 {
3145     NvBool      legacy = NV_FALSE;
3146 
3147     return legacy;
3148 }
3149 
3150 NV_STATUS NV_API_CALL rm_is_supported_device(
3151     nvidia_stack_t *sp,
3152     nv_state_t *pNv
3153 )
3154 {
3155     THREAD_STATE_NODE threadState;
3156     NV_STATUS   rmStatus;
3157     OBJSYS     *pSys;
3158     POBJHALMGR  pHalMgr;
3159     GPUHWREG   *reg_mapping;
3160     NvU32       myHalPublicID;
3161     void       *fp;
3162     NvU32       pmc_boot_0;
3163     NvU32       pmc_boot_42;
3164 
3165     NV_ENTER_RM_RUNTIME(sp,fp);
3166     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
3167 
3168     pSys = SYS_GET_INSTANCE();
3169     pHalMgr = SYS_GET_HALMGR(pSys);
3170 
3171     reg_mapping = osMapKernelSpace(pNv->regs->cpu_address,
3172                                    os_page_size,
3173                                    NV_MEMORY_UNCACHED,
3174                                    NV_PROTECT_READABLE);
3175 
3176     if (reg_mapping == NULL)
3177     {
3178         nv_printf(NV_DBG_ERRORS, "NVRM: failed to map registers!\n");
3179         rmStatus = NV_ERR_OPERATING_SYSTEM;
3180         goto threadfree;
3181     }
3182     NvU32 pmc_boot_1 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_1);
3183     pmc_boot_0 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_0);
3184     pmc_boot_42 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_42);
3185 
3186     osUnmapKernelSpace(reg_mapping, os_page_size);
3187 
3188     if ((pmc_boot_0 == 0xFFFFFFFF) && (pmc_boot_42 == 0xFFFFFFFF))
3189     {
3190         nv_printf(NV_DBG_ERRORS,
3191             "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x\n"
3192             "NVRM: (PCI ID: %04x:%04x) installed in this system has\n"
3193             "NVRM: fallen off the bus and is not responding to commands.\n",
3194             pNv->pci_info.domain, pNv->pci_info.bus, pNv->pci_info.slot,
3195             pNv->pci_info.function, pNv->pci_info.vendor_id,
3196             pNv->pci_info.device_id);
3197         rmStatus = NV_ERR_GPU_IS_LOST;
3198         goto threadfree;
3199     }
3200 
3201     /*
3202      * For legacy architectures, rm_is_legacy_arch() prints "legacy" message.
3203      * We do not want to print "unsupported" message for legacy architectures
3204      * to avoid confusion. Also, the probe should not continue for legacy
3205      * architectures. Hence, we set rmStatus to NV_ERR_NOT_SUPPORTED and
3206      * goto threadfree.
3207      */
3208     if (rm_is_legacy_arch(pmc_boot_0, pmc_boot_42))
3209     {
3210         rmStatus = NV_ERR_NOT_SUPPORTED;
3211         goto threadfree;
3212     }
3213 
3214     rmStatus = halmgrGetHalForGpu(pHalMgr, pmc_boot_0, pmc_boot_42, &myHalPublicID);
3215 
3216     if (rmStatus != NV_OK)
3217     {
3218         NvBool bIsFirmwareCapable;
3219 
3220         rm_is_device_rm_firmware_capable(pNv,
3221                                          pmc_boot_42,
3222                                          &bIsFirmwareCapable,
3223                                          NULL);
3224         if (!bIsFirmwareCapable)
3225         {
3226             nv_printf(NV_DBG_ERRORS,
3227                "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x (PCI ID: %04x:%04x)\n"
3228                "NVRM: installed in this system is not supported by open\n"
3229                "NVRM: nvidia.ko because it does not include the required GPU\n"
3230                "NVRM: System Processor (GSP).\n"
3231                "NVRM: Please see the 'Open Linux Kernel Modules' and 'GSP\n"
3232                "NVRM: Firmware' sections in the driver README, available on\n"
3233                "NVRM: the Linux graphics driver download page at\n"
3234                "NVRM: www.nvidia.com.\n",
3235                pNv->pci_info.domain, pNv->pci_info.bus, pNv->pci_info.slot,
3236                pNv->pci_info.function, pNv->pci_info.vendor_id,
3237                pNv->pci_info.device_id, NV_VERSION_STRING);
3238             goto threadfree;
3239         }
3240         goto print_unsupported;
3241     }
3242 
3243     rmStatus = rm_is_vgpu_supported_device(pNv, pmc_boot_1);
3244 
3245     if (rmStatus != NV_OK)
3246         goto print_unsupported;
3247     goto threadfree;
3248 
3249 print_unsupported:
3250     nv_printf(NV_DBG_ERRORS,
3251        "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x (PCI ID: %04x:%04x)\n"
3252        "NVRM: installed in this system is not supported by the\n"
3253        "NVRM: NVIDIA %s driver release.\n"
3254        "NVRM: Please see 'Appendix A - Supported NVIDIA GPU Products'\n"
3255        "NVRM: in this release's README, available on the operating system\n"
3256        "NVRM: specific graphics driver download page at www.nvidia.com.\n",
3257        pNv->pci_info.domain, pNv->pci_info.bus, pNv->pci_info.slot,
3258        pNv->pci_info.function, pNv->pci_info.vendor_id,
3259        pNv->pci_info.device_id, NV_VERSION_STRING);
3260 
3261 threadfree:
3262     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
3263     NV_EXIT_RM_RUNTIME(sp,fp);
3264 
3265     return rmStatus;
3266 }
3267 
3268 NvBool NV_API_CALL rm_is_supported_pci_device(
3269     NvU8   pci_class,
3270     NvU8   pci_subclass,
3271     NvU16  vendor,
3272     NvU16  device,
3273     NvU16  subsystem_vendor,
3274     NvU16  subsystem_device,
3275     NvBool print_legacy_warning
3276 )
3277 {
3278     const NvU16 nv_pci_vendor_id            = 0x10DE;
3279     const NvU16 nv_pci_id_riva_tnt          = 0x0020;
3280     const NvU8  nv_pci_class_display        = 0x03;
3281     const NvU8  nv_pci_subclass_display_vga = 0x00;
3282     const NvU8  nv_pci_subclass_display_3d  = 0x02;
3283 
3284     if (pci_class != nv_pci_class_display)
3285     {
3286         return NV_FALSE;
3287     }
3288 
3289     if ((pci_subclass != nv_pci_subclass_display_vga) &&
3290         (pci_subclass != nv_pci_subclass_display_3d))
3291     {
3292         return NV_FALSE;
3293     }
3294 
3295     if (vendor != nv_pci_vendor_id)
3296     {
3297         return NV_FALSE;
3298     }
3299 
3300     if (device < nv_pci_id_riva_tnt)
3301     {
3302         return NV_FALSE;
3303     }
3304 
3305     if (rm_is_legacy_device(
3306             device,
3307             subsystem_vendor,
3308             subsystem_device,
3309             print_legacy_warning))
3310     {
3311         return NV_FALSE;
3312     }
3313 
3314     return NV_TRUE;
3315 }
3316 
3317 /*
3318  * Performs the I2C transfers which are related with DP AUX channel
3319  */
3320 static NV_STATUS RmDpAuxI2CTransfer
3321 (
3322     nv_state_t  *pNv,
3323     NvU32       displayId,
3324     NvU8        addr,
3325     NvU32       len,
3326     NvU8       *pData,
3327     NvBool      bWrite
3328 )
3329 {
3330     NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS *pParams;
3331     RM_API    *pRmApi   = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
3332     NV_STATUS  status;
3333 
3334     if (len > NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE)
3335     {
3336         NV_PRINTF(LEVEL_ERROR,
3337                   "%s: requested I2C transfer length %u is greater than maximum supported length %u\n",
3338                   __FUNCTION__, len, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE);
3339         return NV_ERR_NOT_SUPPORTED;
3340     }
3341 
3342     pParams = portMemAllocNonPaged(sizeof(*pParams));
3343     if (pParams == NULL)
3344     {
3345         return NV_ERR_NO_MEMORY;
3346     }
3347 
3348     portMemSet(pParams, 0, sizeof(*pParams));
3349 
3350     pParams->subDeviceInstance = 0;
3351     pParams->displayId         = displayId;
3352     pParams->addr              = addr;
3353     pParams->size              = len;
3354     pParams->bWrite            = bWrite;
3355 
3356     if (bWrite)
3357     {
3358         portMemCopy(pParams->data, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE,
3359                     pData, len);
3360     }
3361 
3362     status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp,
3363                              NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL,
3364                              pParams, sizeof(*pParams));
3365 
3366     if ((status == NV_OK) && !bWrite)
3367     {
3368         portMemCopy(pData, len, pParams->data, pParams->size);
3369     }
3370 
3371     portMemFree(pParams);
3372 
3373     return status;
3374 }
3375 
3376 /*
3377  * Performs the I2C transfers which are not related with DP AUX channel
3378  */
3379 static NV_STATUS RmNonDPAuxI2CTransfer
3380 (
3381     nv_state_t *pNv,
3382     NvU8        portId,
3383     nv_i2c_cmd_t type,
3384     NvU8        addr,
3385     NvU8        command,
3386     NvU32       len,
3387     NvU8       *pData
3388 )
3389 {
3390     NV402C_CTRL_I2C_TRANSACTION_PARAMS *params;
3391     RM_API    *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
3392     NV_STATUS  rmStatus = NV_OK;
3393 
3394     params = portMemAllocNonPaged(sizeof(*params));
3395     if (params == NULL)
3396     {
3397         return NV_ERR_NO_MEMORY;
3398     }
3399 
3400     portMemSet(params, 0, sizeof(*params));
3401 
3402     params->portId = portId;
3403     // precondition our address (our stack requires this)
3404     params->deviceAddress = addr << 1;
3405 
3406     switch (type)
3407     {
3408         case NV_I2C_CMD_WRITE:
3409             params->transData.i2cBlockData.bWrite = NV_TRUE;
3410             /* fall through*/
3411 
3412         case NV_I2C_CMD_READ:
3413             params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW;
3414             params->transData.i2cBlockData.messageLength = len;
3415             params->transData.i2cBlockData.pMessage = pData;
3416             break;
3417 
3418         case NV_I2C_CMD_SMBUS_WRITE:
3419             if (len == 2)
3420             {
3421                 params->transData.smbusWordData.bWrite = NV_TRUE;
3422             }
3423             else
3424             {
3425                 params->transData.smbusByteData.bWrite = NV_TRUE;
3426             }
3427             /* fall through*/
3428 
3429         case NV_I2C_CMD_SMBUS_READ:
3430             if (len == 2)
3431             {
3432                 params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW;
3433                 params->transData.smbusWordData.message = pData[0] | ((NvU16)pData[1] << 8);
3434                 params->transData.smbusWordData.registerAddress = command;
3435             }
3436             else
3437             {
3438                 params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW;
3439                 params->transData.smbusByteData.message = pData[0];
3440                 params->transData.smbusByteData.registerAddress = command;
3441             }
3442             break;
3443 
3444         case NV_I2C_CMD_SMBUS_BLOCK_WRITE:
3445             params->transData.smbusBlockData.bWrite = NV_TRUE;
3446             /* fall through*/
3447 
3448         case NV_I2C_CMD_SMBUS_BLOCK_READ:
3449             params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW;
3450             params->transData.smbusBlockData.registerAddress = command;
3451             params->transData.smbusBlockData.messageLength = len;
3452             params->transData.smbusBlockData.pMessage = pData;
3453             break;
3454 
3455         case NV_I2C_CMD_SMBUS_QUICK_WRITE:
3456             params->transData.smbusQuickData.bWrite = NV_TRUE;
3457             /* fall through*/
3458 
3459         case NV_I2C_CMD_SMBUS_QUICK_READ:
3460             params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW;
3461             break;
3462 
3463         case NV_I2C_CMD_BLOCK_WRITE:
3464             params->transData.i2cBufferData.bWrite = NV_TRUE;
3465             /* fall through */
3466 
3467         case NV_I2C_CMD_BLOCK_READ:
3468             params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW;
3469             params->transData.i2cBufferData.registerAddress = command;
3470             params->transData.i2cBufferData.messageLength = len;
3471             params->transData.i2cBufferData.pMessage = pData;
3472             break;
3473 
3474         default:
3475             portMemFree(params);
3476             return NV_ERR_INVALID_ARGUMENT;
3477     }
3478 
3479     rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient,
3480                                pNv->rmapi.hI2C,
3481                                NV402C_CTRL_CMD_I2C_TRANSACTION,
3482                                params, sizeof(*params));
3483 
3484     //
3485     // For NV_I2C_CMD_SMBUS_READ, copy the read data to original
3486     // data buffer.
3487     //
3488     if (rmStatus == NV_OK && type == NV_I2C_CMD_SMBUS_READ)
3489     {
3490         if (len == 2)
3491         {
3492             pData[0] = (params->transData.smbusWordData.message & 0xff);
3493             pData[1] = params->transData.smbusWordData.message >> 8;
3494         }
3495         else
3496         {
3497             pData[0] = params->transData.smbusByteData.message;
3498         }
3499     }
3500 
3501     portMemFree(params);
3502 
3503     return rmStatus;
3504 }
3505 
3506 NV_STATUS NV_API_CALL rm_i2c_transfer(
3507     nvidia_stack_t *sp,
3508     nv_state_t *pNv,
3509     void       *pI2cAdapter,
3510     nv_i2c_cmd_t type,
3511     NvU8        addr,
3512     NvU8        command,
3513     NvU32       len,
3514     NvU8       *pData
3515 )
3516 {
3517     THREAD_STATE_NODE threadState;
3518     nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv);
3519     NV_STATUS  rmStatus = NV_OK;
3520     OBJGPU    *pGpu  = NULL;
3521     NvBool     unlockApi = NV_FALSE;
3522     NvBool     unlockGpu = NV_FALSE;
3523     NvU32      x;
3524     void      *fp;
3525     NvU32      numDispId = 0;
3526 
3527     NV_ENTER_RM_RUNTIME(sp,fp);
3528     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
3529 
3530     if (pNvp->flags & NV_INIT_FLAG_PUBLIC_I2C)
3531     {
3532         // LOCK: acquire API lock
3533         if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK)
3534             goto finish;
3535 
3536         unlockApi = NV_TRUE;
3537 
3538         // LOCK: acquire GPUs lock
3539         if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK)
3540             goto finish;
3541 
3542         unlockGpu = NV_TRUE;
3543     }
3544 
3545     pGpu = NV_GET_NV_PRIV_PGPU(pNv);
3546 
3547     if (!pGpu)
3548     {
3549         rmStatus = NV_ERR_GENERIC;
3550         goto finish;
3551     }
3552 
3553     for (x = 0; x < MAX_I2C_ADAPTERS; x++)
3554     {
3555         if (pNvp->i2c_adapters[x].pOsAdapter == pI2cAdapter)
3556         {
3557             break;
3558         }
3559     }
3560 
3561     if (x == MAX_I2C_ADAPTERS)
3562     {
3563         rmStatus = NV_ERR_GENERIC;
3564         goto finish;
3565     }
3566 
3567     for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++)
3568     {
3569         NvU32 displayId = pNvp->i2c_adapters[x].displayId[numDispId];
3570 
3571         if (displayId == INVALID_DISP_ID)
3572         {
3573             continue;
3574         }
3575 
3576         // Handle i2c-over-DpAux adapters separately from regular i2c adapters
3577         if (displayId == 0)
3578         {
3579             rmStatus = RmNonDPAuxI2CTransfer(pNv, pNvp->i2c_adapters[x].port,
3580                                              type, addr, command, len, pData);
3581         }
3582         else
3583         {
3584             if ((type != NV_I2C_CMD_READ) && (type != NV_I2C_CMD_WRITE))
3585             {
3586                 rmStatus = NV_ERR_NOT_SUPPORTED;
3587                 goto semafinish;
3588             }
3589 
3590             rmStatus = RmDpAuxI2CTransfer(pNv, displayId, addr, len, pData,
3591                                           type == NV_I2C_CMD_WRITE);
3592         }
3593 semafinish:
3594         if (rmStatus == NV_OK)
3595         {
3596             break;
3597         }
3598     }
3599 
3600 finish:
3601     if (unlockGpu)
3602     {
3603         // UNLOCK: release GPU lock
3604         rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
3605     }
3606 
3607     if (unlockApi)
3608     {
3609         // UNLOCK: release API lock
3610         rmapiLockRelease();
3611     }
3612 
3613     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
3614     NV_EXIT_RM_RUNTIME(sp,fp);
3615 
3616     return rmStatus;
3617 }
3618 
3619 static void rm_i2c_add_adapter(
3620     nv_state_t *pNv,
3621     NvU32       port,
3622     NvU32       displayId
3623 )
3624 {
3625     NvU32 y, free;
3626     nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv);
3627     NvU32 numDispId = 0;
3628 
3629     for (y = 0, free = MAX_I2C_ADAPTERS; y < MAX_I2C_ADAPTERS; y++)
3630     {
3631         if (pNvp->i2c_adapters[y].pOsAdapter == NULL)
3632         {
3633             // Only find the first free entry, and ignore the rest
3634             if (free == MAX_I2C_ADAPTERS)
3635             {
3636                 free = y;
3637             }
3638         }
3639         else if (pNvp->i2c_adapters[y].port == port)
3640         {
3641             break;
3642         }
3643     }
3644 
3645     if (y < MAX_I2C_ADAPTERS)
3646     {
3647         for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++)
3648         {
3649             if (pNvp->i2c_adapters[y].displayId[numDispId] == INVALID_DISP_ID)
3650             {
3651                 pNvp->i2c_adapters[y].displayId[numDispId] = displayId;
3652                 break;
3653             }
3654             else
3655             {
3656                 NV_PRINTF(LEVEL_INFO,
3657                           "%s: adapter  already exists (port=0x%x, displayId=0x%x)\n",
3658                           __FUNCTION__, port,
3659                           pNvp->i2c_adapters[y].displayId[numDispId]);
3660             }
3661         }
3662 
3663         if (numDispId == MAX_DISP_ID_PER_ADAPTER)
3664         {
3665             NV_PRINTF(LEVEL_ERROR,
3666                       "%s: no more free display Id entries in adapter\n",
3667                       __FUNCTION__);
3668         }
3669 
3670         return;
3671     }
3672 
3673     if (free == MAX_I2C_ADAPTERS)
3674     {
3675         NV_PRINTF(LEVEL_ERROR, "%s: no more free adapter entries exist\n",
3676                   __FUNCTION__);
3677         return;
3678     }
3679 
3680     pNvp->i2c_adapters[free].pOsAdapter = nv_i2c_add_adapter(pNv, port);
3681     pNvp->i2c_adapters[free].port       = port;
3682     // When port is added, numDispId will be 0.
3683     pNvp->i2c_adapters[free].displayId[numDispId] = displayId;
3684 }
3685 
3686 void RmI2cAddGpuPorts(nv_state_t * pNv)
3687 {
3688     NvU32      x = 0;
3689     nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv);
3690     RM_API    *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
3691     NvU32      displayMask;
3692     NV_STATUS  status;
3693     NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS systemGetSupportedParams = { 0 };
3694 
3695     // Make displayId as Invalid.
3696     for (x = 0; x < MAX_I2C_ADAPTERS; x++)
3697     {
3698         NvU32 numDispId;
3699 
3700         for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++)
3701         {
3702             pNvp->i2c_adapters[x].displayId[numDispId] = INVALID_DISP_ID;
3703         }
3704     }
3705 
3706     // First, set up the regular i2c adapters - one per i2c port
3707     if (pNv->rmapi.hI2C != 0)
3708     {
3709         NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS i2cPortInfoParams = { 0 };
3710 
3711         status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hI2C,
3712                                  NV402C_CTRL_CMD_I2C_GET_PORT_INFO,
3713                                  &i2cPortInfoParams, sizeof(i2cPortInfoParams));
3714 
3715         if (status == NV_OK)
3716         {
3717             for (x = 0; x < NV_ARRAY_ELEMENTS(i2cPortInfoParams.info); x++)
3718             {
3719                 //
3720                 // Check if this port is implemented and RM I2C framework has
3721                 // validated this port. Only limited amount of ports can
3722                 // be added to the OS framework.
3723                 //
3724                 if (FLD_TEST_DRF(402C_CTRL, _I2C_GET_PORT_INFO, _IMPLEMENTED,
3725                                  _YES, i2cPortInfoParams.info[x]) &&
3726                     FLD_TEST_DRF(402C_CTRL, _I2C_GET_PORT_INFO, _VALID,
3727                                  _YES, i2cPortInfoParams.info[x]))
3728                 {
3729                     rm_i2c_add_adapter(pNv, x, 0);
3730                 }
3731             }
3732         }
3733     }
3734 
3735     //
3736     // Now set up the i2c-over-DpAux adapters - one per DP OD
3737     //
3738     // 1. Perform NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS RM control which
3739     //    will return the mask for all the display ID's.
3740     // 2. Loop for all the display ID's and do
3741     //    NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO RM control call. For each
3742     //    output resource, check for the following requirements:
3743     //    a. It must be DisplayPort.
3744     //    b. It must be internal to the GPU (ie, not on the board)
3745     //    c. It must be directly connected to the physical connector (ie, no DP
3746     //       1.2 multistream ODs).
3747     // 3. Perform NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID RM control for
3748     //    getting the I2C port data.
3749     //
3750     // With these restrictions, we should only end up with at most one OD
3751     // per DP connector.
3752     //
3753 
3754     if (pNv->rmapi.hDisp == 0)
3755     {
3756         return;
3757     }
3758 
3759     systemGetSupportedParams.subDeviceInstance = 0;
3760     status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp,
3761                              NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED,
3762                              &systemGetSupportedParams, sizeof(systemGetSupportedParams));
3763 
3764     if (status != NV_OK)
3765     {
3766         return;
3767     }
3768 
3769     for (displayMask = systemGetSupportedParams.displayMask;
3770          displayMask != 0;
3771          displayMask &= ~LOWESTBIT(displayMask))
3772     {
3773         NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS orInfoParams = { 0 };
3774         NvU32 displayId = LOWESTBIT(displayMask);
3775 
3776         orInfoParams.subDeviceInstance = 0;
3777         orInfoParams.displayId         = displayId;
3778 
3779         status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp,
3780                                 NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
3781                                 &orInfoParams, sizeof(orInfoParams));
3782 
3783         if ((status == NV_OK) &&
3784             (orInfoParams.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
3785             ((orInfoParams.protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A) ||
3786              (orInfoParams.protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B)) &&
3787             (orInfoParams.location == NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP) &&
3788             (!orInfoParams.bIsDispDynamic))
3789         {
3790             NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS i2cPortIdParams = { 0 };
3791 
3792             i2cPortIdParams.subDeviceInstance = 0;
3793             i2cPortIdParams.displayId = displayId;
3794 
3795             status = pRmApi->Control(pRmApi,
3796                                      pNv->rmapi.hClient,
3797                                      pNv->rmapi.hDisp,
3798                                      NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID,
3799                                      &i2cPortIdParams,
3800                                      sizeof(i2cPortIdParams));
3801 
3802             if ((status == NV_OK) &&
3803                 (i2cPortIdParams.ddcPortId != NV0073_CTRL_SPECIFIC_I2C_PORT_NONE))
3804             {
3805                 rm_i2c_add_adapter(pNv, i2cPortIdParams.ddcPortId - 1, displayId);
3806             }
3807         }
3808     }
3809 }
3810 
3811 void NV_API_CALL rm_i2c_remove_adapters(
3812     nvidia_stack_t *sp,
3813     nv_state_t *pNv
3814 )
3815 {
3816     //
3817     // Cycle through all adapter entries, and first remove the adapter
3818     // from the list from the kernel, then remove the i2c adapter
3819     // list once that is completed. This should only be used from exit
3820     // module time. Otherwise it could fail to remove some of the
3821     // kernel adapters and subsequent transfer requests would result
3822     // in crashes.
3823     //
3824     NvU32 x = 0;
3825     nv_priv_t  *pNvp = NV_GET_NV_PRIV(pNv);
3826     NvU32 numDispId;
3827 
3828     for (x = 0; x < MAX_I2C_ADAPTERS; x++)
3829     {
3830         if (pNvp->i2c_adapters[x].pOsAdapter != NULL)
3831         {
3832             nv_i2c_del_adapter(pNv, pNvp->i2c_adapters[x].pOsAdapter);
3833 
3834             pNvp->i2c_adapters[x].pOsAdapter = NULL;
3835             pNvp->i2c_adapters[x].port       = 0;
3836             for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++)
3837             {
3838                 pNvp->i2c_adapters[x].displayId[numDispId] = INVALID_DISP_ID;
3839             }
3840         }
3841     }
3842 }
3843 
3844 NvBool NV_API_CALL rm_i2c_is_smbus_capable(
3845     nvidia_stack_t *sp,
3846     nv_state_t *pNv,
3847     void       *pI2cAdapter
3848 )
3849 {
3850     THREAD_STATE_NODE threadState;
3851     nv_priv_t  *pNvp = NV_GET_NV_PRIV(pNv);
3852     NV_STATUS   rmStatus = NV_OK;
3853     OBJGPU     *pGpu = NULL;
3854     NvBool      unlock = NV_FALSE;
3855     NvU32       x;
3856     NvBool      ret = NV_FALSE;
3857     void       *fp;
3858     NvU32       numDispId = 0;
3859 
3860     NV_ENTER_RM_RUNTIME(sp,fp);
3861     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
3862 
3863     if (pNvp->flags & NV_INIT_FLAG_PUBLIC_I2C)
3864     {
3865         // LOCK: acquire API lock
3866         if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK)
3867             goto semafinish;
3868 
3869         unlock = NV_TRUE;
3870     }
3871 
3872     pGpu = NV_GET_NV_PRIV_PGPU(pNv);
3873 
3874     if (!pGpu)
3875     {
3876         goto semafinish;
3877     }
3878 
3879     for (x = 0; x < MAX_I2C_ADAPTERS; x++)
3880     {
3881         if (pNvp->i2c_adapters[x].pOsAdapter == pI2cAdapter)
3882         {
3883             break;
3884         }
3885     }
3886 
3887     if (x == MAX_I2C_ADAPTERS)
3888     {
3889         goto semafinish;
3890     }
3891 
3892     // we do not support smbus functions on i2c-over-DPAUX
3893     for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++)
3894     {
3895         if (pNvp->i2c_adapters[x].displayId[numDispId] == 0x0)
3896         {
3897             ret = NV_TRUE;
3898         }
3899     }
3900 
3901 semafinish:
3902     if (unlock)
3903     {
3904         // UNLOCK: release API lock
3905         rmapiLockRelease();
3906     }
3907 
3908     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
3909     NV_EXIT_RM_RUNTIME(sp,fp);
3910 
3911     return ret;
3912 }
3913 
3914 NV_STATUS NV_API_CALL rm_perform_version_check(
3915     nvidia_stack_t *sp,
3916     void       *pData,
3917     NvU32       dataSize
3918 )
3919 {
3920     THREAD_STATE_NODE threadState;
3921     NV_STATUS rmStatus;
3922     void *fp;
3923 
3924     NV_ENTER_RM_RUNTIME(sp,fp);
3925     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
3926 
3927     rmStatus = RmPerformVersionCheck(pData, dataSize);
3928 
3929     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
3930     NV_EXIT_RM_RUNTIME(sp,fp);
3931 
3932     return rmStatus;
3933 }
3934 
3935 //
3936 // Handles the Power Source Change event(AC/DC) for Notebooks.
3937 // Notebooks from Maxwell have only one Gpu, so this functions grabs first Gpu
3938 // from GpuMgr and call subdevice RmControl.
3939 //
3940 void NV_API_CALL rm_power_source_change_event(
3941     nvidia_stack_t *sp,
3942     NvU32 event_val
3943 )
3944 {
3945     THREAD_STATE_NODE threadState;
3946     void       *fp;
3947     nv_state_t *nv;
3948     OBJGPU *pGpu       = gpumgrGetGpu(0);
3949     NV_STATUS rmStatus = NV_OK;
3950 
3951     NV_ENTER_RM_RUNTIME(sp,fp);
3952     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
3953 
3954     // LOCK: acquire API lock
3955     if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) == NV_OK)
3956     {
3957         if (pGpu != NULL)
3958         {
3959             nv = NV_GET_NV_STATE(pGpu);
3960             if ((rmStatus = os_ref_dynamic_power(nv, NV_DYNAMIC_PM_FINE)) ==
3961                                                                          NV_OK)
3962             {
3963                 // LOCK: acquire GPU lock
3964                 if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) ==
3965                                                                          NV_OK)
3966                 {
3967                     rmStatus = RmPowerSourceChangeEvent(nv, event_val);
3968 
3969                     // UNLOCK: release GPU lock
3970                     rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
3971                 }
3972                 os_unref_dynamic_power(nv, NV_DYNAMIC_PM_FINE);
3973             }
3974             // UNLOCK: release API lock
3975             rmapiLockRelease();
3976         }
3977     }
3978 
3979     if (rmStatus != NV_OK)
3980     {
3981         NV_PRINTF(LEVEL_ERROR,
3982                   "%s: Failed to handle Power Source change event, status=0x%x\n",
3983                   __FUNCTION__, rmStatus);
3984     }
3985 
3986     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
3987     NV_EXIT_RM_RUNTIME(sp,fp);
3988 }
3989 
3990 NV_STATUS NV_API_CALL rm_p2p_dma_map_pages(
3991     nvidia_stack_t  *sp,
3992     nv_dma_device_t *peer,
3993     NvU8            *pGpuUuid,
3994     NvU32            pageSize,
3995     NvU32            pageCount,
3996     NvU64           *pDmaAddresses,
3997     void           **ppPriv
3998 )
3999 {
4000     THREAD_STATE_NODE threadState;
4001     NV_STATUS rmStatus;
4002     void *fp;
4003 
4004     if (ppPriv == NULL)
4005     {
4006         return NV_ERR_INVALID_ARGUMENT;
4007     }
4008 
4009     *ppPriv = NULL;
4010 
4011     NV_ENTER_RM_RUNTIME(sp,fp);
4012     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4013 
4014     if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
4015     {
4016         OBJGPU *pGpu = gpumgrGetGpuFromUuid(pGpuUuid,
4017             DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1) |
4018             DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY));
4019         if (pGpu == NULL)
4020         {
4021             rmStatus = NV_ERR_INVALID_ARGUMENT;
4022         }
4023         else
4024         {
4025             NvU32 i;
4026 
4027             if (pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING))
4028             {
4029                 NV_ASSERT(pageSize == os_page_size);
4030 
4031                 rmStatus = nv_dma_map_alloc(peer, pageCount, pDmaAddresses,
4032                                             NV_FALSE, ppPriv);
4033             }
4034             else
4035             {
4036                 nv_state_t *nv = NV_GET_NV_STATE(pGpu);
4037                 for (i = 0; i < pageCount; i++)
4038                 {
4039                     // Peer mappings through this API are always via BAR1
4040                     rmStatus = nv_dma_map_peer(peer, nv->dma_dev, 0x1,
4041                                                pageSize / os_page_size,
4042                                                &pDmaAddresses[i]);
4043                     if ((rmStatus != NV_OK) && (i > 0))
4044                     {
4045                         NvU32 j;
4046                         for (j = i - 1; j < pageCount; j--)
4047                         {
4048                             nv_dma_unmap_peer(peer, pageSize / os_page_size,
4049                                               pDmaAddresses[j]);
4050                         }
4051                     }
4052                 }
4053             }
4054         }
4055 
4056         rmapiLockRelease();
4057     }
4058 
4059     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4060     NV_EXIT_RM_RUNTIME(sp,fp);
4061 
4062     return rmStatus;
4063 }
4064 
4065 NV_STATUS NV_API_CALL rm_p2p_get_gpu_info(
4066     nvidia_stack_t *sp,
4067     NvU64           gpuVirtualAddress,
4068     NvU64           length,
4069     NvU8          **ppGpuUuid,
4070     void          **ppGpuInfo
4071 )
4072 {
4073     THREAD_STATE_NODE threadState;
4074     NV_STATUS rmStatus;
4075     void *fp;
4076 
4077     NV_ENTER_RM_RUNTIME(sp,fp);
4078     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4079 
4080     // LOCK: acquire API lock
4081     rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P);
4082     if (rmStatus == NV_OK)
4083     {
4084         OBJGPU *pGpu;
4085         rmStatus = RmP2PGetGpuByAddress(gpuVirtualAddress,
4086                                         length,
4087                                         &pGpu);
4088         if (rmStatus == NV_OK)
4089         {
4090             nv_state_t *nv = NV_GET_NV_STATE(pGpu);
4091             const NvU8 *pGid;
4092 
4093             pGid = RmGetGpuUuidRaw(nv);
4094             if (pGid == NULL)
4095             {
4096                 rmStatus = NV_ERR_GPU_UUID_NOT_FOUND;
4097             }
4098             else
4099             {
4100                 rmStatus = os_alloc_mem((void **)ppGpuUuid, GPU_UUID_LEN);
4101                 if (rmStatus == NV_OK)
4102                 {
4103                     os_mem_copy(*ppGpuUuid, pGid, GPU_UUID_LEN);
4104                 }
4105             }
4106 
4107             *ppGpuInfo = (void *) pGpu;
4108         }
4109 
4110         // UNLOCK: release API lock
4111         rmapiLockRelease();
4112     }
4113 
4114     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4115     NV_EXIT_RM_RUNTIME(sp,fp);
4116 
4117     return rmStatus;
4118 }
4119 
4120 NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent(
4121     nvidia_stack_t *sp,
4122     NvU64           gpuVirtualAddress,
4123     NvU64           length,
4124     void          **p2pObject,
4125     NvU64          *pPhysicalAddresses,
4126     NvU32          *pEntries,
4127     void           *pPlatformData,
4128     void           *pGpuInfo
4129 )
4130 {
4131     THREAD_STATE_NODE threadState;
4132     NV_STATUS rmStatus;
4133     void *fp;
4134 
4135     NV_ENTER_RM_RUNTIME(sp,fp);
4136     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4137 
4138     // LOCK: acquire API lock
4139     if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
4140     {
4141         rmStatus = RmP2PGetPagesPersistent(gpuVirtualAddress,
4142                                            length,
4143                                            p2pObject,
4144                                            pPhysicalAddresses,
4145                                            pEntries,
4146                                            pPlatformData,
4147                                            pGpuInfo);
4148         // UNLOCK: release API lock
4149         rmapiLockRelease();
4150     }
4151 
4152     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4153     NV_EXIT_RM_RUNTIME(sp,fp);
4154 
4155     return rmStatus;
4156 }
4157 
4158 NV_STATUS NV_API_CALL rm_p2p_get_pages(
4159     nvidia_stack_t *sp,
4160     NvU64       p2pToken,
4161     NvU32       vaSpaceToken,
4162     NvU64       gpuVirtualAddress,
4163     NvU64       length,
4164     NvU64      *pPhysicalAddresses,
4165     NvU32      *pWreqMbH,
4166     NvU32      *pRreqMbH,
4167     NvU32      *pEntries,
4168     NvU8      **ppGpuUuid,
4169     void       *pPlatformData
4170 )
4171 {
4172     THREAD_STATE_NODE threadState;
4173     NV_STATUS rmStatus;
4174     void *fp;
4175 
4176     NV_ENTER_RM_RUNTIME(sp,fp);
4177     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4178 
4179     // LOCK: acquire API lock
4180     if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
4181     {
4182         OBJGPU *pGpu;
4183         rmStatus = RmP2PGetPagesWithoutCallbackRegistration(p2pToken,
4184                                                             vaSpaceToken,
4185                                                             gpuVirtualAddress,
4186                                                             length,
4187                                                             pPhysicalAddresses,
4188                                                             pWreqMbH,
4189                                                             pRreqMbH,
4190                                                             pEntries,
4191                                                             &pGpu,
4192                                                             pPlatformData);
4193         if (rmStatus == NV_OK)
4194         {
4195             nv_state_t *nv = NV_GET_NV_STATE(pGpu);
4196             const NvU8 *pGid;
4197 
4198             pGid = RmGetGpuUuidRaw(nv);
4199             if (pGid == NULL)
4200             {
4201                 NV_ASSERT_OK(RmP2PPutPages(p2pToken, vaSpaceToken,
4202                                                gpuVirtualAddress,
4203                                                pPlatformData));
4204                 rmStatus = NV_ERR_GENERIC;
4205             }
4206             else
4207             {
4208                 rmStatus = os_alloc_mem((void **)ppGpuUuid, GPU_UUID_LEN);
4209                 if (rmStatus == NV_OK)
4210                     os_mem_copy(*ppGpuUuid, pGid, GPU_UUID_LEN);
4211             }
4212         }
4213 
4214         // UNLOCK: release API lock
4215         rmapiLockRelease();
4216     }
4217 
4218     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4219     NV_EXIT_RM_RUNTIME(sp,fp);
4220 
4221     return rmStatus;
4222 }
4223 
4224 NV_STATUS NV_API_CALL rm_p2p_register_callback(
4225     nvidia_stack_t *sp,
4226     NvU64       p2pToken,
4227     NvU64       gpuVirtualAddress,
4228     NvU64       length,
4229     void       *pPlatformData,
4230     void      (*pFreeCallback)(void *pData),
4231     void       *pData
4232 )
4233 {
4234     THREAD_STATE_NODE threadState;
4235     NV_STATUS rmStatus;
4236     void *fp;
4237 
4238     NV_ENTER_RM_RUNTIME(sp,fp);
4239     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4240 
4241     // LOCK: acquire API lock
4242     if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
4243     {
4244         rmStatus = RmP2PRegisterCallback(p2pToken, gpuVirtualAddress, length,
4245                                          pPlatformData, pFreeCallback, pData);
4246 
4247         // UNLOCK: release API lock
4248         rmapiLockRelease();
4249     }
4250 
4251     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4252     NV_EXIT_RM_RUNTIME(sp,fp);
4253 
4254     return rmStatus;
4255 }
4256 
4257 NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(
4258     nvidia_stack_t *sp,
4259     void           *p2pObject,
4260     void           *pKey
4261 )
4262 {
4263     THREAD_STATE_NODE threadState;
4264     NV_STATUS rmStatus;
4265     void *fp;
4266 
4267     NV_ENTER_RM_RUNTIME(sp,fp);
4268     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4269 
4270     // LOCK: acquire API lock
4271     if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
4272     {
4273         rmStatus = RmP2PPutPagesPersistent(p2pObject, pKey);
4274 
4275         // UNLOCK: release API lock
4276         rmapiLockRelease();
4277     }
4278 
4279     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4280     NV_EXIT_RM_RUNTIME(sp,fp);
4281 
4282     return rmStatus;
4283 }
4284 
4285 NV_STATUS NV_API_CALL rm_p2p_put_pages(
4286     nvidia_stack_t *sp,
4287     NvU64       p2pToken,
4288     NvU32       vaSpaceToken,
4289     NvU64       gpuVirtualAddress,
4290     void       *pKey
4291 )
4292 {
4293     THREAD_STATE_NODE threadState;
4294     NV_STATUS rmStatus;
4295     void *fp;
4296 
4297     NV_ENTER_RM_RUNTIME(sp,fp);
4298     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4299 
4300     // LOCK: acquire API lock
4301     if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
4302     {
4303         rmStatus = RmP2PPutPages(p2pToken,
4304                                  vaSpaceToken,
4305                                  gpuVirtualAddress,
4306                                  pKey);
4307 
4308         // UNLOCK: release API lock
4309         rmapiLockRelease();
4310     }
4311 
4312     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4313     NV_EXIT_RM_RUNTIME(sp,fp);
4314 
4315     return rmStatus;
4316 }
4317 
4318 char* NV_API_CALL rm_get_gpu_uuid(
4319     nvidia_stack_t *sp,
4320     nv_state_t *nv
4321 )
4322 {
4323     NV_STATUS rmStatus;
4324     const NvU8 *pGid;
4325     char *pGidString;
4326 
4327     THREAD_STATE_NODE threadState;
4328     void *fp;
4329 
4330     NV_ENTER_RM_RUNTIME(sp,fp);
4331     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4332 
4333     // Allocate space for the ASCII string
4334     rmStatus = os_alloc_mem((void **)&pGidString, GPU_UUID_ASCII_LEN);
4335     if (rmStatus != NV_OK)
4336     {
4337         pGidString = NULL;
4338         goto done;
4339     }
4340 
4341     // Get the raw UUID; note the pGid is cached, so we do not need to free it
4342     pGid = RmGetGpuUuidRaw(nv);
4343 
4344     if (pGid != NULL)
4345     {
4346         // Convert the raw UUID to ASCII
4347         rmStatus = RmGpuUuidRawToString(pGid, pGidString, GPU_UUID_ASCII_LEN);
4348         if (rmStatus != NV_OK)
4349         {
4350             os_free_mem(pGidString);
4351             pGidString = NULL;
4352         }
4353     }
4354     else
4355     {
4356         const char *pTmpString = "GPU-???????\?-???\?-???\?-???\?-????????????";
4357 
4358         portStringCopy(pGidString, GPU_UUID_ASCII_LEN, pTmpString,
4359                        portStringLength(pTmpString) + 1);
4360     }
4361 
4362 done:
4363     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4364     NV_EXIT_RM_RUNTIME(sp,fp);
4365 
4366     return pGidString;
4367 }
4368 
4369 //
4370 // This function will return the UUID in the binary format
4371 //
4372 const NvU8 * NV_API_CALL rm_get_gpu_uuid_raw(
4373     nvidia_stack_t *sp,
4374     nv_state_t *nv)
4375 {
4376     THREAD_STATE_NODE threadState;
4377     void *fp;
4378     const NvU8 *pGid;
4379 
4380     NV_ENTER_RM_RUNTIME(sp,fp);
4381     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4382 
4383     pGid = RmGetGpuUuidRaw(nv);
4384 
4385     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4386     NV_EXIT_RM_RUNTIME(sp,fp);
4387 
4388     return pGid;
4389 }
4390 
4391 static void rm_set_firmware_logs(
4392     nvidia_stack_t *sp,
4393     nv_state_t *nv
4394 )
4395 {
4396     NV_STATUS status;
4397     NvU32 data;
4398     void *fp;
4399 
4400     NV_ENTER_RM_RUNTIME(sp,fp);
4401 
4402     status = RmReadRegistryDword(nv, NV_REG_ENABLE_GPU_FIRMWARE_LOGS, &data);
4403     if (status == NV_OK)
4404     {
4405         if ((data == NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE)
4406 #if defined(DEBUG) || defined(DEVELOP)
4407             || (data == NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG)
4408 #endif
4409            )
4410         {
4411             nv->enable_firmware_logs = NV_TRUE;
4412         }
4413     }
4414 
4415     NV_EXIT_RM_RUNTIME(sp,fp);
4416 }
4417 
4418 void NV_API_CALL rm_set_rm_firmware_requested(
4419     nvidia_stack_t *sp,
4420     nv_state_t *nv
4421 )
4422 {
4423     nv->request_firmware = NV_TRUE;
4424     nv->allow_fallback_to_monolithic_rm = NV_FALSE;
4425 
4426     // Check if we want firmware logs
4427     if (nv->request_firmware)
4428         rm_set_firmware_logs(sp, nv);
4429 }
4430 
4431 //
4432 // This function will be called by nv_procfs_read_gpu_info().
4433 // nv_procfs_read_gpu_info() will not print the 'GPU Firmware:' field at
4434 // all if the 'version' string is empty.
4435 //
4436 // If GSP is enabled (firmware was requested), this function needs to return
4437 // the firmware version or "NA" in case of any errors.
4438 //
4439 // If GSP is not enabled (firmware was not requested), this function needs to
4440 // return the empty string, regardless of error cases.
4441 //
4442 void NV_API_CALL rm_get_firmware_version(
4443     nvidia_stack_t *sp,
4444     nv_state_t *nv,
4445     char *version,
4446     NvLength version_length
4447 )
4448 {
4449     NV2080_CTRL_GSP_GET_FEATURES_PARAMS params = { 0 };
4450     RM_API            *pRmApi;
4451     THREAD_STATE_NODE  threadState;
4452     NV_STATUS          rmStatus = NV_OK;
4453     void              *fp;
4454 
4455     NV_ENTER_RM_RUNTIME(sp,fp);
4456 
4457     pRmApi = RmUnixRmApiPrologue(nv, &threadState, RM_LOCK_MODULES_GPU);
4458     if (pRmApi != NULL)
4459     {
4460         rmStatus = pRmApi->Control(pRmApi,
4461                                    nv->rmapi.hClient,
4462                                    nv->rmapi.hSubDevice,
4463                                    NV2080_CTRL_CMD_GSP_GET_FEATURES,
4464                                    &params,
4465                                    sizeof(params));
4466 
4467         RmUnixRmApiEpilogue(nv, &threadState);
4468     }
4469     else
4470     {
4471         rmStatus = NV_ERR_INVALID_STATE;
4472     }
4473 
4474     if (rmStatus != NV_OK)
4475     {
4476         if (RMCFG_FEATURE_GSP_CLIENT_RM && nv->request_firmware)
4477         {
4478             const char *pTmpString = "N/A";
4479             portStringCopy(version, version_length, pTmpString, portStringLength(pTmpString) + 1);
4480         }
4481         NV_PRINTF(LEVEL_INFO,
4482                   "%s: Failed to query gpu build versions, status=0x%x\n",
4483                   __FUNCTION__,
4484                   rmStatus);
4485         goto finish;
4486     }
4487     portMemCopy(version, version_length, params.firmwareVersion, sizeof(params.firmwareVersion));
4488 
4489 finish:
4490     NV_EXIT_RM_RUNTIME(sp,fp);
4491 }
4492 
4493 //
4494 // disable GPU SW state persistence
4495 //
4496 
4497 void NV_API_CALL rm_disable_gpu_state_persistence(nvidia_stack_t *sp, nv_state_t *nv)
4498 {
4499     THREAD_STATE_NODE threadState;
4500     OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
4501     void *fp;
4502 
4503     NV_ENTER_RM_RUNTIME(sp,fp);
4504     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4505 
4506     pGpu->setProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE, NV_FALSE);
4507     osModifyGpuSwStatePersistence(pGpu->pOsGpuInfo, NV_FALSE);
4508 
4509     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4510     NV_EXIT_RM_RUNTIME(sp,fp);
4511 }
4512 
4513 NV_STATUS NV_API_CALL rm_log_gpu_crash(
4514     nv_stack_t *sp,
4515     nv_state_t *nv
4516 )
4517 {
4518     THREAD_STATE_NODE threadState;
4519     NV_STATUS status;
4520     void *fp;
4521 
4522     NV_ENTER_RM_RUNTIME(sp,fp);
4523     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4524 
4525     if ((status = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DIAG)) == NV_OK)
4526     {
4527         OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
4528 
4529         if ((pGpu != NULL) &&
4530            ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DIAG)) == NV_OK))
4531         {
4532             status = RmLogGpuCrash(pGpu);
4533 
4534             rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
4535         }
4536         rmapiLockRelease();
4537     }
4538 
4539     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4540     NV_EXIT_RM_RUNTIME(sp,fp);
4541 
4542     return status;
4543 }
4544 
4545 void  NV_API_CALL  rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd)
4546 {
4547     nvidia_kernel_rmapi_ops_t *ops = ops_cmd;
4548     void *fp;
4549 
4550     NV_ENTER_RM_RUNTIME(sp,fp);
4551 
4552     switch (ops->op)
4553     {
4554         case NV01_FREE:
4555             Nv01FreeKernel(&ops->params.free);
4556             break;
4557 
4558         case NV01_ALLOC_MEMORY:
4559             Nv01AllocMemoryKernel(&ops->params.allocMemory64);
4560             break;
4561 
4562         case NV04_ALLOC:
4563             Nv04AllocKernel(&ops->params.alloc);
4564             break;
4565 
4566         case NV04_VID_HEAP_CONTROL:
4567             Nv04VidHeapControlKernel(ops->params.pVidHeapControl);
4568             break;
4569 
4570         case NV04_MAP_MEMORY:
4571             Nv04MapMemoryKernel(&ops->params.mapMemory);
4572             break;
4573 
4574         case NV04_UNMAP_MEMORY:
4575             Nv04UnmapMemoryKernel(&ops->params.unmapMemory);
4576             break;
4577 
4578         case NV04_ALLOC_CONTEXT_DMA:
4579             Nv04AllocContextDmaKernel(&ops->params.allocContextDma2);
4580             break;
4581 
4582         case NV04_MAP_MEMORY_DMA:
4583             Nv04MapMemoryDmaKernel(&ops->params.mapMemoryDma);
4584             break;
4585 
4586         case NV04_UNMAP_MEMORY_DMA:
4587             Nv04UnmapMemoryDmaKernel(&ops->params.unmapMemoryDma);
4588             break;
4589 
4590         case NV04_BIND_CONTEXT_DMA:
4591             Nv04BindContextDmaKernel(&ops->params.bindContextDma);
4592             break;
4593 
4594         case NV04_CONTROL:
4595             Nv04ControlKernel(&ops->params.control);
4596             break;
4597 
4598         case NV04_DUP_OBJECT:
4599             Nv04DupObjectKernel(&ops->params.dupObject);
4600             break;
4601 
4602         case NV04_SHARE:
4603             Nv04ShareKernel(&ops->params.share);
4604             break;
4605 
4606         case NV04_ADD_VBLANK_CALLBACK:
4607             Nv04AddVblankCallbackKernel(&ops->params.addVblankCallback);
4608             break;
4609     }
4610 
4611     NV_EXIT_RM_RUNTIME(sp,fp);
4612 }
4613 
4614 //
4615 // ACPI method (NVIF/_DSM/WMMX/MXM*/etc.) initialization
4616 //
4617 void RmInitAcpiMethods(OBJOS *pOS, OBJSYS *pSys, OBJGPU *pGpu)
4618 {
4619     NvU32 handlesPresent;
4620 
4621     if (pSys->getProperty(pSys, PDB_PROP_SYS_NVIF_INIT_DONE))
4622         return;
4623 
4624     nv_acpi_methods_init(&handlesPresent);
4625 
4626     // Check if NVPCF _DSM functions are implemented under NVPCF or GPU device scope.
4627     RmCheckNvpcfDsmScope(pGpu);
4628     acpiDsmInit(pGpu);
4629 
4630 }
4631 
4632 //
4633 // ACPI method (NVIF/_DSM/WMMX/MXM*/etc.) teardown
4634 //
4635 void RmUnInitAcpiMethods(OBJSYS *pSys)
4636 {
4637     pSys->setProperty(pSys, PDB_PROP_SYS_NVIF_INIT_DONE, NV_FALSE);
4638 
4639     nv_acpi_methods_uninit();
4640 }
4641 
4642 //
4643 // Converts an array of OS page address to an array of RM page addresses.This
4644 // assumes that:
4645 //   (1) The pteArray is at least pageCount entries large,
4646 //   (2) The pageCount is given in RM pages, and
4647 //   (3) The OS page entries start at index 0.
4648 //
4649 void RmInflateOsToRmPageArray(RmPhysAddr *pteArray, NvU64 pageCount)
4650 {
4651     NvUPtr osPageIdx, osPageOffset;
4652     NvU64 i;
4653 
4654     //
4655     // We can do the translation in place by moving backwards, since there
4656     // will always be more RM pages than OS pages
4657     //
4658     for (i = pageCount - 1; i != NV_U64_MAX; i--)
4659     {
4660         osPageIdx = i >> NV_RM_TO_OS_PAGE_SHIFT;
4661         osPageOffset = (i & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) *
4662                 NV_RM_PAGE_SIZE;
4663         pteArray[i] = pteArray[osPageIdx] + osPageOffset;
4664     }
4665 }
4666 
4667 void RmDeflateRmToOsPageArray(RmPhysAddr *pteArray, NvU64 pageCount)
4668 {
4669     NvU64 i;
4670 
4671     for (i = 0; i < NV_RM_PAGES_TO_OS_PAGES(pageCount); i++)
4672     {
4673         pteArray[i] = pteArray[(i << NV_RM_TO_OS_PAGE_SHIFT)];
4674     }
4675 
4676     // Zero out the rest of the addresses, which are now invalid
4677     portMemSet(pteArray + i, 0, sizeof(*pteArray) * (pageCount - i));
4678 }
4679 
4680 NvBool NV_API_CALL
4681 rm_get_device_remove_flag
4682 (
4683     nvidia_stack_t * sp,
4684     NvU32 gpu_id
4685 )
4686 {
4687     THREAD_STATE_NODE threadState;
4688     void *fp;
4689     NvBool bRemove;
4690 
4691     NV_ENTER_RM_RUNTIME(sp,fp);
4692     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4693 
4694     if (gpumgrQueryGpuDrainState(gpu_id, NULL, &bRemove) != NV_OK)
4695     {
4696         bRemove = NV_FALSE;
4697     }
4698 
4699     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4700     NV_EXIT_RM_RUNTIME(sp,fp);
4701     return bRemove;
4702 }
4703 
4704 NvBool NV_API_CALL
4705 rm_gpu_need_4k_page_isolation
4706 (
4707     nv_state_t *nv
4708 )
4709 {
4710     nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
4711 
4712     return nvp->b_4k_page_isolation_required;
4713 }
4714 
4715 NV_STATUS NV_API_CALL rm_get_gpu_numa_info(
4716     nvidia_stack_t *sp,
4717     nv_state_t *nv,
4718     NvS32 *pNid,
4719     NvU64 *pNumaMemAddr,
4720     NvU64 *pNumaMemSize,
4721     NvU64 *pOfflineAddresses,
4722     NvU32 *pOfflineAddressesCount
4723 )
4724 {
4725     NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS *pParams;
4726     RM_API             *pRmApi;
4727     THREAD_STATE_NODE   threadState;
4728     void               *fp;
4729     NV_STATUS           status = NV_OK;
4730 
4731     if ((pNid == NULL) || (pNumaMemAddr == NULL) || (pNumaMemSize == NULL))
4732     {
4733         return NV_ERR_INVALID_ARGUMENT;
4734     }
4735 
4736     if ((pOfflineAddressesCount != NULL) &&
4737         ((pOfflineAddresses == NULL) ||
4738          (*pOfflineAddressesCount > NV_ARRAY_ELEMENTS(pParams->numaOfflineAddresses))))
4739     {
4740         return NV_ERR_INVALID_ARGUMENT;
4741     }
4742 
4743     NV_ENTER_RM_RUNTIME(sp,fp);
4744 
4745     pParams = portMemAllocNonPaged(sizeof(*pParams));
4746     if (pParams == NULL)
4747     {
4748         NV_EXIT_RM_RUNTIME(sp,fp);
4749         return NV_ERR_NO_MEMORY;
4750     }
4751 
4752     portMemSet(pParams, 0, sizeof(*pParams));
4753 
4754     if (pOfflineAddressesCount != NULL)
4755     {
4756         pParams->numaOfflineAddressesCount = *pOfflineAddressesCount;
4757     }
4758 
4759     pRmApi = RmUnixRmApiPrologue(nv, &threadState, RM_LOCK_MODULES_MEM);
4760     if (pRmApi == NULL)
4761     {
4762         status = NV_ERR_INVALID_STATE;
4763         goto finish;
4764     }
4765 
4766     status = pRmApi->Control(pRmApi, nv->rmapi.hClient, nv->rmapi.hSubDevice,
4767                              NV2080_CTRL_CMD_FB_GET_NUMA_INFO,
4768                              pParams, sizeof(*pParams));
4769 
4770     RmUnixRmApiEpilogue(nv, &threadState);
4771 
4772     if (status == NV_OK)
4773     {
4774         NvU32 i;
4775 
4776         *pNid = pParams->numaNodeId;
4777         *pNumaMemAddr = pParams->numaMemAddr;
4778         *pNumaMemSize = pParams->numaMemSize;
4779         *pOfflineAddressesCount = pParams->numaOfflineAddressesCount;
4780 
4781         for (i = 0; i < pParams->numaOfflineAddressesCount; i++)
4782         {
4783             pOfflineAddresses[i] = pParams->numaOfflineAddresses[i];
4784         }
4785     }
4786 
4787 finish:
4788     portMemFree(pParams);
4789 
4790     NV_EXIT_RM_RUNTIME(sp,fp);
4791 
4792     return status;
4793 }
4794 
4795 NV_STATUS NV_API_CALL rm_gpu_numa_online(
4796     nvidia_stack_t *sp,
4797     nv_state_t *nv
4798 )
4799 {
4800     NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS params = { 0 };
4801     RM_API            *pRmApi;
4802     THREAD_STATE_NODE  threadState;
4803     void              *fp;
4804     NV_STATUS          status = NV_OK;
4805 
4806     NV_ENTER_RM_RUNTIME(sp,fp);
4807 
4808     pRmApi = RmUnixRmApiPrologue(nv, &threadState, RM_LOCK_MODULES_MEM);
4809     if (pRmApi == NULL)
4810     {
4811         status = NV_ERR_INVALID_STATE;
4812         goto finish;
4813     }
4814 
4815     params.bOnline = NV_TRUE;
4816 
4817     status = pRmApi->Control(pRmApi, nv->rmapi.hClient, nv->rmapi.hSubDevice,
4818                              NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS,
4819                              &params, sizeof(params));
4820 
4821     RmUnixRmApiEpilogue(nv, &threadState);
4822 
4823 finish:
4824     NV_EXIT_RM_RUNTIME(sp,fp);
4825 
4826     return status;
4827 }
4828 
4829 
4830 NV_STATUS NV_API_CALL rm_gpu_numa_offline(
4831     nvidia_stack_t *sp,
4832     nv_state_t *nv
4833 )
4834 {
4835     NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS params = { 0 };
4836     RM_API            *pRmApi;
4837     THREAD_STATE_NODE  threadState;
4838     void              *fp;
4839     NV_STATUS          status = NV_OK;
4840 
4841     NV_ENTER_RM_RUNTIME(sp,fp);
4842 
4843     pRmApi = RmUnixRmApiPrologue(nv, &threadState, RM_LOCK_MODULES_MEM);
4844     if (pRmApi == NULL)
4845     {
4846         status = NV_ERR_INVALID_STATE;
4847         goto finish;
4848     }
4849 
4850     params.bOnline = NV_FALSE;
4851 
4852     status = pRmApi->Control(pRmApi, nv->rmapi.hClient,
4853                              nv->rmapi.hSubDevice,
4854                              NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS,
4855                              &params, sizeof(params));
4856 
4857     RmUnixRmApiEpilogue(nv, &threadState);
4858 
4859 finish:
4860     NV_EXIT_RM_RUNTIME(sp,fp);
4861 
4862     return status;
4863 }
4864 
4865 //
4866 // A device is considered "sequestered" if it has drain state enabled for it.
4867 // The kernel interface layer can use this to check the drain state of a device
4868 // in paths outside of initialization, e.g., when clients attempt to reference
4869 // count the device.
4870 //
4871 NvBool NV_API_CALL rm_is_device_sequestered(
4872     nvidia_stack_t *sp,
4873     nv_state_t *pNv
4874 )
4875 {
4876     THREAD_STATE_NODE threadState;
4877     void *fp;
4878     NvBool bDrain = NV_FALSE;
4879 
4880     NV_ENTER_RM_RUNTIME(sp,fp);
4881     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4882 
4883     if (rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU) == NV_OK)
4884     {
4885         //
4886         // If gpumgrQueryGpuDrainState succeeds, bDrain will be set as needed.
4887         // If gpumgrQueryGpuDrainState fails, bDrain will stay false; we assume
4888         // that if core RM can't tell us the drain state, it must not be
4889         // attached and the "sequestered" question is not relevant.
4890         //
4891         (void) gpumgrQueryGpuDrainState(pNv->gpu_id, &bDrain, NULL);
4892 
4893         rmapiLockRelease();
4894     }
4895 
4896     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4897     NV_EXIT_RM_RUNTIME(sp,fp);
4898     return bDrain;
4899 }
4900 
4901 void NV_API_CALL rm_check_for_gpu_surprise_removal(
4902     nvidia_stack_t *sp,
4903     nv_state_t *nv
4904 )
4905 {
4906     THREAD_STATE_NODE threadState;
4907     void *fp;
4908     NV_STATUS rmStatus;
4909 
4910     NV_ENTER_RM_RUNTIME(sp,fp);
4911     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4912 
4913     // LOCK: acquire API lock.
4914     if ((rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK)
4915     {
4916         OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
4917 
4918         if ((rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_GPU)) == NV_OK)
4919         {
4920             osHandleGpuLost(pGpu);
4921             rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
4922         }
4923 
4924         // UNLOCK: release api lock
4925         rmapiLockRelease();
4926     }
4927 
4928     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4929     NV_EXIT_RM_RUNTIME(sp,fp);
4930 }
4931 
4932 NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(
4933     nvidia_stack_t *sp,
4934     nv_state_t *pNv,
4935     NvBool bIncr
4936 )
4937 {
4938     THREAD_STATE_NODE threadState;
4939     void *fp;
4940     OBJGPU *pGpu;
4941     NV_STATUS rmStatus = NV_OK;
4942 
4943     NV_ENTER_RM_RUNTIME(sp,fp);
4944     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
4945 
4946     pGpu = NV_GET_NV_PRIV_PGPU(pNv);
4947 
4948     if (pGpu != NULL)
4949     {
4950         rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE,
4951                                            RM_LOCK_MODULES_GPU);
4952         if (rmStatus == NV_OK)
4953         {
4954             rmStatus = gpuSetExternalKernelClientCount(pGpu, bIncr);
4955             rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
4956         }
4957     }
4958 
4959     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
4960     NV_EXIT_RM_RUNTIME(sp,fp);
4961 
4962     return rmStatus;
4963 }
4964 
4965 NvBool rm_get_uefi_console_status(
4966     nv_state_t *nv
4967 )
4968 {
4969     NvU16 fbWidth, fbHeight, fbDepth, fbPitch;
4970     NvU64 fbSize;
4971     NvU64 fbBaseAddress = 0;
4972     NvBool bConsoleDevice = NV_FALSE;
4973 
4974     // os_get_screen_info() will return dimensions and an address for
4975     // any fbdev driver (e.g., efifb, vesafb, etc).  To find if this is a
4976     // UEFI console check the fbBaseAddress: if it was set up by the EFI GOP
4977     // driver, it will point into BAR1 (FB); if it was set up by the VBIOS,
4978     // it will point to BAR2 + 16MB.
4979     os_get_screen_info(&fbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch,
4980                        nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address,
4981                        nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000);
4982 
4983     fbSize = fbHeight * fbPitch;
4984 
4985     bConsoleDevice = (fbSize != 0);
4986 
4987     return bConsoleDevice;
4988 }
4989 
4990 NvU64 rm_get_uefi_console_size(
4991     nv_state_t *nv,
4992     NvU64      *pFbBaseAddress
4993 )
4994 {
4995     NvU16 fbWidth, fbHeight, fbDepth, fbPitch;
4996     NvU64 fbSize;
4997 
4998     fbSize = fbWidth = fbHeight = fbDepth = fbPitch = 0;
4999 
5000     // os_get_screen_info() will return dimensions and an address for
5001     // any fbdev driver (e.g., efifb, vesafb, etc).  To find if this is a
5002     // UEFI console check the fbBaseAddress: if it was set up by the EFI GOP
5003     // driver, it will point into BAR1 (FB); if it was set up by the VBIOS,
5004     // it will point to BAR2 + 16MB.
5005     os_get_screen_info(pFbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch,
5006                        nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address,
5007                        nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000);
5008 
5009     fbSize = fbHeight * fbPitch;
5010 
5011     return fbSize;
5012 }
5013 
5014 /*
5015  * IOMMU needs to be present on the server to support SR-IOV vGPU, unless
5016  * we have SR-IOV enabled for remote GPU.
5017  */
5018 
5019 NvBool NV_API_CALL rm_is_iommu_needed_for_sriov(
5020     nvidia_stack_t *sp,
5021     nv_state_t * nv
5022 )
5023 {
5024     OBJGPU *pGpu;
5025     NvU32 data;
5026     NvBool ret = NV_TRUE;
5027     void       *fp;
5028 
5029     NV_ENTER_RM_RUNTIME(sp,fp);
5030 
5031     pGpu = NV_GET_NV_PRIV_PGPU(nv);
5032     if (osReadRegistryDword(pGpu, NV_REG_STR_RM_REMOTE_GPU, &data) == NV_OK)
5033     {
5034         if (data == NV_REG_STR_RM_REMOTE_GPU_ENABLE)
5035             ret = NV_FALSE;
5036     }
5037 
5038     NV_EXIT_RM_RUNTIME(sp,fp);
5039 
5040     return ret;
5041 }
5042 
5043 NvBool NV_API_CALL rm_disable_iomap_wc(void)
5044 {
5045     OBJSYS *pSys = SYS_GET_INSTANCE();
5046     return pSys->pCl->getProperty(pSys, PDB_PROP_CL_DISABLE_IOMAP_WC) == NV_TRUE;
5047 }
5048 
5049 //
5050 // Verifies the handle, offset and size and dups hMemory.
5051 // Must be called with API lock and GPU lock held.
5052 //
5053 NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle(
5054     nvidia_stack_t  *sp,
5055     nv_state_t      *nv,
5056     NvHandle         hSrcClient,
5057     NvHandle         hDstClient,
5058     NvHandle         hDevice,
5059     NvHandle         hSubdevice,
5060     void            *pGpuInstanceInfo,
5061     NvHandle         hMemory,
5062     NvU64            offset,
5063     NvU64            size,
5064     NvHandle        *phMemoryDuped
5065 )
5066 {
5067     THREAD_STATE_NODE threadState;
5068     NV_STATUS rmStatus;
5069     OBJGPU *pGpu;
5070     void *fp;
5071 
5072     NV_ENTER_RM_RUNTIME(sp,fp);
5073     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
5074 
5075     pGpu = NV_GET_NV_PRIV_PGPU(nv);
5076 
5077     NV_ASSERT(rmapiLockIsOwner());
5078 
5079     NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
5080 
5081     rmStatus = RmDmabufVerifyMemHandle(pGpu, hSrcClient, hMemory,
5082                                        offset, size, pGpuInstanceInfo);
5083     if (rmStatus == NV_OK)
5084     {
5085         RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
5086         NvHandle hMemoryDuped = 0;
5087 
5088         rmStatus = pRmApi->DupObject(pRmApi,
5089                                      hDstClient,
5090                                      hDevice,
5091                                      &hMemoryDuped,
5092                                      hSrcClient,
5093                                      hMemory,
5094                                      0);
5095         if (rmStatus == NV_OK)
5096         {
5097             *phMemoryDuped = hMemoryDuped;
5098         }
5099         else if (rmStatus == NV_ERR_INVALID_OBJECT_PARENT)
5100         {
5101             hMemoryDuped = 0;
5102 
5103             // If duping under Device fails, try duping under Subdevice
5104             rmStatus = pRmApi->DupObject(pRmApi,
5105                                          hDstClient,
5106                                          hSubdevice,
5107                                          &hMemoryDuped,
5108                                          hSrcClient,
5109                                          hMemory,
5110                                          0);
5111             if (rmStatus == NV_OK)
5112             {
5113                 *phMemoryDuped = hMemoryDuped;
5114             }
5115         }
5116     }
5117 
5118     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
5119     NV_EXIT_RM_RUNTIME(sp,fp);
5120 
5121     return rmStatus;
5122 }
5123 
5124 //
5125 // Frees dup'd hMemory.
5126 // Must be called with API lock and GPU lock held.
5127 //
5128 void NV_API_CALL rm_dma_buf_undup_mem_handle(
5129     nvidia_stack_t  *sp,
5130     nv_state_t      *nv,
5131     NvHandle         hClient,
5132     NvHandle         hMemory
5133 )
5134 {
5135     THREAD_STATE_NODE threadState;
5136     RM_API *pRmApi;
5137     OBJGPU *pGpu;
5138     void *fp;
5139 
5140     NV_ENTER_RM_RUNTIME(sp,fp);
5141     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
5142 
5143     pGpu = NV_GET_NV_PRIV_PGPU(nv);
5144 
5145     NV_ASSERT(rmapiLockIsOwner());
5146 
5147     NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
5148 
5149     pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
5150 
5151     pRmApi->Free(pRmApi, hClient, hMemory);
5152 
5153     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
5154     NV_EXIT_RM_RUNTIME(sp,fp);
5155 }
5156 
5157 //
5158 // Maps a handle to BAR1.
5159 // Must be called with API lock and GPU lock held.
5160 //
5161 NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle(
5162     nvidia_stack_t  *sp,
5163     nv_state_t      *nv,
5164     NvHandle         hClient,
5165     NvHandle         hMemory,
5166     NvU64            offset,
5167     NvU64            size,
5168     NvU64           *pBar1Va
5169 )
5170 {
5171     THREAD_STATE_NODE threadState;
5172     NV_STATUS rmStatus;
5173     OBJGPU *pGpu;
5174     KernelBus *pKernelBus;
5175     void *fp;
5176 
5177     NV_ENTER_RM_RUNTIME(sp,fp);
5178     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
5179 
5180     pGpu = NV_GET_NV_PRIV_PGPU(nv);
5181     pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
5182 
5183     NV_ASSERT(rmapiLockIsOwner());
5184 
5185     NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
5186 
5187     rmStatus = kbusMapFbApertureByHandle(pGpu, pKernelBus, hClient,
5188                                          hMemory, offset, size, pBar1Va);
5189 
5190     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
5191     NV_EXIT_RM_RUNTIME(sp,fp);
5192 
5193     return rmStatus;
5194 }
5195 
5196 //
5197 // Unmaps a handle from BAR1.
5198 // Must be called with API lock and GPU lock held.
5199 //
5200 NV_STATUS NV_API_CALL rm_dma_buf_unmap_mem_handle(
5201     nvidia_stack_t  *sp,
5202     nv_state_t      *nv,
5203     NvHandle         hClient,
5204     NvHandle         hMemory,
5205     NvU64            size,
5206     NvU64            bar1Va
5207 )
5208 {
5209     THREAD_STATE_NODE threadState;
5210     NV_STATUS rmStatus;
5211     OBJGPU *pGpu;
5212     KernelBus *pKernelBus;
5213     void *fp;
5214 
5215     NV_ENTER_RM_RUNTIME(sp,fp);
5216     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
5217 
5218     pGpu = NV_GET_NV_PRIV_PGPU(nv);
5219     pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
5220 
5221     NV_ASSERT(rmapiLockIsOwner());
5222 
5223     NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
5224 
5225     rmStatus = kbusUnmapFbApertureByHandle(pGpu, pKernelBus, hClient,
5226                                            hMemory, bar1Va);
5227 
5228     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
5229     NV_EXIT_RM_RUNTIME(sp,fp);
5230 
5231     return rmStatus;
5232 }
5233 
5234 NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(
5235     nvidia_stack_t *sp,
5236     nv_state_t     *nv,
5237     NvHandle        hClient,
5238     NvHandle       *phClient,
5239     NvHandle       *phDevice,
5240     NvHandle       *phSubdevice,
5241     void          **ppGpuInstanceInfo
5242 )
5243 {
5244     THREAD_STATE_NODE threadState;
5245     NV_STATUS rmStatus;
5246     void *fp;
5247 
5248     NV_ENTER_RM_RUNTIME(sp,fp);
5249     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
5250 
5251     // LOCK: acquire API lock
5252     rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
5253     if (rmStatus == NV_OK)
5254     {
5255         OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
5256 
5257         rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
5258         if (rmStatus == NV_OK)
5259         {
5260             rmStatus = RmDmabufGetClientAndDevice(pGpu, hClient, phClient, phDevice,
5261                                                   phSubdevice, ppGpuInstanceInfo);
5262 
5263             rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
5264         }
5265 
5266         // UNLOCK: release API lock
5267         rmapiLockRelease();
5268     }
5269 
5270     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
5271     NV_EXIT_RM_RUNTIME(sp,fp);
5272 
5273     return rmStatus;
5274 }
5275 
5276 void NV_API_CALL rm_dma_buf_put_client_and_device(
5277     nvidia_stack_t *sp,
5278     nv_state_t     *nv,
5279     NvHandle        hClient,
5280     NvHandle        hDevice,
5281     NvHandle        hSubdevice,
5282     void           *pGpuInstanceInfo
5283 )
5284 {
5285     THREAD_STATE_NODE threadState;
5286     NV_STATUS rmStatus;
5287     void *fp;
5288 
5289     NV_ENTER_RM_RUNTIME(sp,fp);
5290     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
5291 
5292     // LOCK: acquire API lock
5293     rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
5294     if (rmStatus == NV_OK)
5295     {
5296         OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
5297 
5298         rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
5299         if (rmStatus == NV_OK)
5300         {
5301             RmDmabufPutClientAndDevice(pGpu, hClient, hDevice, hSubdevice,
5302                                        pGpuInstanceInfo);
5303 
5304             rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
5305         }
5306 
5307         // UNLOCK: release API lock
5308         rmapiLockRelease();
5309     }
5310     NV_ASSERT_OK(rmStatus);
5311 
5312     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
5313     NV_EXIT_RM_RUNTIME(sp,fp);
5314 }
5315 
5316 //
5317 // Fetches GSP ucode data for usage during RM Init
5318 // NOTE: Used only on VMWware
5319 //
5320 
5321 NvBool NV_API_CALL rm_is_altstack_in_use(void)
5322 {
5323 #if defined(__use_altstack__)
5324     return NV_TRUE;
5325 #else
5326     return NV_FALSE;
5327 #endif
5328 }
5329