1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <nv.h>                     // NV device driver interface
25 #include <nv-priv.h>
26 #include <nv-caps.h>
27 #include <os/os.h>
28 #include <nvos.h>
29 #include <osapi.h>
30 #include <ctrl/ctrl0000/ctrl0000gpu.h>
31 #include <ctrl/ctrl0000/ctrl0000unix.h>
32 
33 #include <nverror.h>
34 #include <gpu/device/device.h>
35 
36 #include "gpu/gpu.h"
37 #include <osfuncs.h>
38 #include <platform/chipset/chipset.h>
39 
40 #include "nverror.h"
41 #include "kernel/gpu/bif/kernel_bif.h"
42 
43 #include "gpu/mem_sys/kern_mem_sys.h"
44 
45 #include "mem_mgr/io_vaspace.h"
46 #include <diagnostics/journal.h>
47 #include "gpu/mem_mgr/mem_desc.h"
48 #include "gpu/mem_mgr/mem_mgr.h"
49 #include "core/thread_state.h"
50 #include <nvacpitypes.h>
51 #include <platform/acpi_common.h>
52 #include <core/locks.h>
53 #include <ctrl/ctrl2080/ctrl2080gpu.h>
54 #include "virtualization/hypervisor/hypervisor.h"
55 #include "rmobjexportimport.h"
56 #include <nvi2c.h>
57 #include "rmapi/rs_utils.h"
58 #include "rmapi/client_resource.h"
59 #include "os/dce_rm_client_ipc.h"
60 #include "mem_mgr/mem.h"
61 #include "gpu/mem_mgr/virt_mem_allocator_common.h"
62 
63 #include <acpidsmguids.h>
64 #include <pex.h>
65 #include "gps.h"
66 #include "jt.h"
67 
68 
69 
70 
71 extern const char *ppOsBugCheckBugcodeStr[];
72 
73 
74 ct_assert(NV_RM_PAGE_SIZE == RM_PAGE_SIZE);
75 ct_assert(NV_RM_PAGE_MASK == RM_PAGE_MASK);
76 ct_assert(NV_RM_PAGE_SHIFT == RM_PAGE_SHIFT);
77 
78 typedef struct
79 {
80     NvU32 euid;
81     NvU32 pid;
82 } TOKEN_USER, *PTOKEN_USER;
83 
84 struct OS_RM_CAPS
85 {
86     NvU32 count;
87 
88     // This should be the last element
89     nv_cap_t **caps;
90 };
91 
92 NvBool osIsRaisedIRQL(void)
93 {
94     return (!os_semaphore_may_sleep());
95 }
96 
97 NvBool osIsISR(void)
98 {
99     return os_is_isr();
100 }
101 
102 NV_STATUS osGetDriverBlock
103 (
104     OS_GPU_INFO     *pOsGpuInfo,
105     OS_DRIVER_BLOCK *pBlock
106 )
107 {
108     return NV_ERR_NOT_SUPPORTED;
109 }
110 
111 NV_STATUS osGetCurrentTick(NvU64 *pTimeInNs)
112 {
113     *pTimeInNs = os_get_current_tick();
114     return NV_OK;
115 }
116 
117 NvU64 osGetTickResolution(void)
118 {
119     return os_get_tick_resolution();
120 }
121 
122 NV_STATUS osGetPerformanceCounter(NvU64 *pTimeInNs)
123 {
124     *pTimeInNs = os_get_current_tick_hr();
125     return NV_OK;
126 }
127 
128 NV_STATUS osGetCurrentTime(
129     NvU32 *pSeconds,
130     NvU32 *pMicroSeconds
131 )
132 {
133     return os_get_current_time(pSeconds, pMicroSeconds);
134 }
135 
136 /*!
137  * @brief Get timestamp for logging.
138  *
139  * Everything that logs a time stamp should use this routine for consistency.
140  *
141  * The returned value is OS dependent.  We want the time stamp to use
142  * KeQueryPerformanceCounter on Windows so it matches the DirectX timestamps.
143  * Linux uses microseconds since 1970 (osGetCurrentTime), since matching DirectX
144  * is not a priority.
145  *
146  * osGetTimestampFreq returns the frequency required to decode the time stamps.
147  *
148  * @returns   system dependent timestamp.
149  */
150 NvU64 osGetTimestamp(void)
151 {
152     NvU32 sec  = 0;
153     NvU32 usec = 0;
154     osGetCurrentTime(&sec, &usec);
155     return (NvU64)sec * 1000000 + usec;
156 }
157 
158 /*!
159  * @brief Get timestamp frequency.
160  *
161  * Timestamps are OS dependent.  This call returns the frequency
162  * required to decode them.
163  *
164  * @returns   Timestamp frequency.  For example, 1000000 for MHz.
165  */
166 NvU64 osGetTimestampFreq(void)
167 {
168     return 1000000;
169 }
170 
171 NV_STATUS osDelay(NvU32 milliseconds)
172 {
173     return os_delay(milliseconds);
174 }
175 
176 NV_STATUS osDelayUs(NvU32 microseconds)
177 {
178     return os_delay_us(microseconds);
179 }
180 
181 NV_STATUS osDelayNs(NvU32 nanoseconds)
182 {
183     NvU32 microseconds = NV_MAX(1, (nanoseconds / 1000));
184     return os_delay_us(microseconds);
185 }
186 
187 NvU32 osGetCpuFrequency(void)
188 {
189     /* convert os_get_cpu_frequency()'s return value from Hz to MHz */
190     return ((NvU32)(os_get_cpu_frequency() / 1000000ULL));
191 }
192 
193 void* osPciInitHandle(
194     NvU32  Domain,
195     NvU8   Bus,
196     NvU8   Slot,
197     NvU8   Function,
198     NvU16 *pVendor,
199     NvU16 *pDevice
200 )
201 {
202     //
203     // Check if the BDF is for a GPU that's already been attached, for which
204     // we should already have a handle cached. This won't catch devices that
205     // have been probed but not yet attached, but that shouldn't be a common
206     // occurrence.
207     //
208     // More importantly, having this check here means we don't need to check
209     // a global list of devices in the kernel interface layer, which could
210     // have the implication of taking another lock, causing hairy lock
211     // ordering issues.
212     //
213     if (Function == 0)
214     {
215         OBJGPU *pGpu = gpumgrGetGpuFromBusInfo(Domain, Bus, Slot);
216         if (pGpu != NULL)
217         {
218             nv_state_t *nv = NV_GET_NV_STATE(pGpu);
219             if (pVendor) *pVendor = nv->pci_info.vendor_id;
220             if (pDevice) *pDevice = nv->pci_info.device_id;
221             return nv->handle;
222         }
223     }
224 
225     return os_pci_init_handle(Domain, Bus, Slot, Function, pVendor, pDevice);
226 }
227 
228 NvU8 osPciReadByte(
229     void *pHandle,
230     NvU32 Offset
231 )
232 {
233     NvU8 val;
234     os_pci_read_byte(pHandle, Offset, &val);
235     return val;
236 }
237 
238 NvU16 osPciReadWord(
239     void *pHandle,
240     NvU32 Offset
241 )
242 {
243     NvU16 val;
244     os_pci_read_word(pHandle, Offset, &val);
245     return val;
246 }
247 
248 NvU32 osPciReadDword(
249     void *pHandle,
250     NvU32 Offset
251 )
252 {
253     NvU32 val;
254     os_pci_read_dword(pHandle, Offset, &val);
255     return val;
256 }
257 
258 void osPciWriteByte(
259     void *pHandle,
260     NvU32 Offset,
261     NvU8  Value
262 )
263 {
264     os_pci_write_byte(pHandle, Offset, Value);
265 }
266 
267 void osPciWriteWord(
268     void  *pHandle,
269     NvU32 Offset,
270     NvU16 Value
271 )
272 {
273     os_pci_write_word(pHandle, Offset, Value);
274 }
275 
276 void osPciWriteDword(
277     void  *pHandle,
278     NvU32 Offset,
279     NvU32 Value
280 )
281 {
282     os_pci_write_dword(pHandle, Offset, Value);
283 }
284 
285 void* osMapKernelSpace(
286     RmPhysAddr Start,
287     NvU64      Size,
288     NvU32      Mode,
289     NvU32      Protect
290 )
291 {
292     NvU64 offset;
293     NvU8 *ptr;
294 
295     if (0 == Size)
296     {
297         NV_ASSERT(Size != 0);
298         return NULL;
299     }
300 
301     offset = (Start & ~os_page_mask);
302     Start &= os_page_mask;
303 
304     if (!portSafeAddU64(Size, offset, &Size) ||
305         !portSafeAddU64(Size, ~os_page_mask, &Size))
306     {
307         return NULL;
308     }
309     Size &= os_page_mask;
310 
311     ptr = os_map_kernel_space(Start, Size, Mode);
312     if (ptr != NULL)
313         return (ptr + offset);
314 
315     return NULL;
316 }
317 
318 void osUnmapKernelSpace(
319     void *pAddress,
320     NvU64 Size
321 )
322 {
323     NvU64 offset;
324     NvUPtr ptr = (NvUPtr)pAddress;
325 
326     if (0 == Size)
327     {
328         NV_ASSERT(Size != 0);
329         return;
330     }
331 
332     offset = (ptr & ~os_page_mask);
333     ptr &= os_page_mask;
334     Size = ((Size + offset + ~os_page_mask) & os_page_mask);
335     os_unmap_kernel_space((void *)ptr, Size);
336 }
337 
338 void* osMapIOSpace(
339     RmPhysAddr Start,
340     NvU64      Size,
341     void **    pData,
342     NvU32      User,
343     NvU32      Mode,
344     NvU32      Protect
345 )
346 {
347 
348     NvU64 offset;
349     NvU8 *addr;
350 
351     if (0 == Size)
352     {
353         NV_ASSERT(Size != 0);
354         return NULL;
355     }
356 
357     offset = (Start & ~os_page_mask);
358     Start &= os_page_mask;
359     Size = ((Size + offset + ~os_page_mask) & os_page_mask);
360 
361     if (User)
362         addr = os_map_user_space(Start, Size, Mode, Protect, pData);
363     else
364         addr = os_map_kernel_space(Start, Size, Mode);
365     if (addr != NULL)
366         return (addr + offset);
367 
368     return addr;
369 }
370 
371 void osUnmapIOSpace(
372     void    *pAddress,
373     NvU64    Size,
374     void    *pData,
375     NvU32    User
376 )
377 {
378     NvU64 offset;
379     NvUPtr addr = (NvUPtr)pAddress;
380 
381     if (0 == Size)
382     {
383         NV_ASSERT(Size != 0);
384         return;
385     }
386 
387     offset = (addr & ~os_page_mask);
388     addr &= os_page_mask;
389     Size = ((Size + offset + ~os_page_mask) & os_page_mask);
390 
391     if (User)
392         os_unmap_user_space((void *)addr, Size, pData);
393     else
394         os_unmap_kernel_space((void *)addr, Size);
395 }
396 
397 static NV_STATUS setNumaPrivData
398 (
399     KernelMemorySystem      *pKernelMemorySystem,
400     nv_state_t              *nv,
401     MEMORY_DESCRIPTOR       *pMemDesc
402 )
403 {
404     NV_STATUS rmStatus = NV_OK;
405     void *pAllocPrivate = NULL;
406     NvU64 *addrArray = NULL;
407     NvU64 numOsPages = pMemDesc->PageCount;
408 
409     addrArray = pMemDesc->_pteArray;
410 
411     if (NV_RM_PAGE_SIZE < os_page_size)
412     {
413         NvU64 numPages;
414         NvU64 i;
415 
416         numPages = pMemDesc->PageCount;
417         addrArray = portMemAllocNonPaged(numPages * sizeof(NvU64));
418         if (addrArray == NULL)
419         {
420             return NV_ERR_NO_MEMORY;
421         }
422 
423         portMemCopy((void*)addrArray,
424                     (numPages * sizeof(NvU64)), (void*)pMemDesc->_pteArray,
425                     (numPages * sizeof(NvU64)));
426         RmDeflateRmToOsPageArray(addrArray, numPages);
427         numOsPages = NV_RM_PAGES_TO_OS_PAGES(numPages);
428 
429         for (i = 0; i < numOsPages; i++)
430         {
431             // Update GPA to system physical address
432             addrArray[i] += pKernelMemorySystem->coherentCpuFbBase;
433         }
434     }
435 
436     rmStatus = nv_register_phys_pages(nv, addrArray, numOsPages, NV_MEMORY_CACHED, &pAllocPrivate);
437     if (rmStatus != NV_OK)
438     {
439         goto errors;
440     }
441 
442     memdescSetMemData(pMemDesc, pAllocPrivate, NULL);
443 
444 errors:
445     if (NV_RM_PAGE_SIZE < os_page_size)
446     {
447         portMemFree(addrArray);
448     }
449 
450     return rmStatus;
451 }
452 
453 NV_STATUS osGetNumMemoryPages
454 (
455     MEMORY_DESCRIPTOR *pMemDesc,
456     NvU32 *pNumPages
457 )
458 {
459     void *pAllocPrivate = NULL;
460 
461     pAllocPrivate = memdescGetMemData(pMemDesc);
462     if (pAllocPrivate == NULL)
463     {
464         NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n");
465         return NV_ERR_INVALID_STATE;
466     }
467 
468     return nv_get_num_phys_pages(pAllocPrivate, pNumPages);
469 }
470 
471 NV_STATUS osGetMemoryPages
472 (
473     MEMORY_DESCRIPTOR *pMemDesc,
474     void *pPages,
475     NvU32 *pNumPages
476 )
477 {
478     void *pAllocPrivate = NULL;
479 
480     pAllocPrivate = memdescGetMemData(pMemDesc);
481     if (pAllocPrivate == NULL)
482     {
483         NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n");
484         return NV_ERR_INVALID_STATE;
485     }
486 
487     return nv_get_phys_pages(pAllocPrivate, pPages, pNumPages);
488 }
489 
490 NV_STATUS osMapSystemMemory
491 (
492     MEMORY_DESCRIPTOR *pMemDesc,
493     NvU64 Offset,
494     NvU64 Length,
495     NvBool Kernel,
496     NvU32 Protect,
497     NvP64 *ppAddress,
498     NvP64 *ppPrivate
499 )
500 {
501     OBJGPU *pGpu = pMemDesc->pGpu;
502 
503     RmPhysAddr userAddress;
504     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
505     NV_STATUS rmStatus = NV_OK;
506     void *pAllocPrivate = NULL;
507     void *pAddress;
508     void *pPrivate = NULL;
509     NvU64 pageIndex;
510     NvU32 pageOffset;
511 
512     if (pGpu != NULL &&
513         pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) &&
514         memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM)
515     {
516         KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
517 
518         rmStatus = setNumaPrivData(pKernelMemorySystem, nv, pMemDesc);
519         if (rmStatus != NV_OK)
520             return rmStatus;
521     }
522 
523     *ppAddress = NvP64_NULL;
524     *ppPrivate = NvP64_NULL;
525 
526     if ((Offset + Length) < Length)
527         return NV_ERR_INVALID_ARGUMENT;
528     if ((Offset + Length) > pMemDesc->Size)
529         return NV_ERR_INVALID_ARGUMENT;
530 
531     pageIndex = (Offset >> os_page_shift);
532     pageOffset = (Offset & ~os_page_mask);
533 
534     pAllocPrivate = memdescGetMemData(pMemDesc);
535     if (!pAllocPrivate)
536     {
537         NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n");
538         return NV_ERR_INVALID_STATE;
539     }
540 
541     if (Kernel)
542     {
543         pAddress = nv_alloc_kernel_mapping(nv, pAllocPrivate,
544                 pageIndex, pageOffset, Length, &pPrivate);
545         if (pAddress == NULL)
546         {
547             NV_PRINTF(LEVEL_ERROR,
548                       "failed to create system memory kernel mapping!\n");
549             rmStatus = NV_ERR_GENERIC;
550         }
551         else
552         {
553             *ppAddress = NV_PTR_TO_NvP64(pAddress);
554             *ppPrivate = NV_PTR_TO_NvP64(pPrivate);
555         }
556     }
557     else
558     {
559         rmStatus = nv_alloc_user_mapping(nv, pAllocPrivate,
560                 pageIndex, pageOffset, Length, Protect, &userAddress,
561                 &pPrivate);
562         if (rmStatus != NV_OK)
563         {
564             NV_PRINTF(LEVEL_ERROR,
565                       "failed to create system memory user mapping!\n");
566         }
567         else
568         {
569             *ppAddress = (NvP64)(userAddress);
570             *ppPrivate = NV_PTR_TO_NvP64(pPrivate);
571         }
572     }
573 
574     return rmStatus;
575 }
576 
577 void osUnmapSystemMemory
578 (
579     MEMORY_DESCRIPTOR *pMemDesc,
580     NvBool Kernel,
581     NvU32  ProcessId,
582     NvP64  pAddress,
583     NvP64  pPrivate
584 )
585 {
586     NV_STATUS status;
587     void *pAllocPrivate = memdescGetMemData(pMemDesc);
588     OBJGPU *pGpu = pMemDesc->pGpu;
589     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
590 
591     if (Kernel)
592     {
593         status = nv_free_kernel_mapping(nv, pAllocPrivate, NvP64_VALUE(pAddress),
594                 NvP64_VALUE(pPrivate));
595     }
596     else
597     {
598         status = nv_free_user_mapping(nv, pAllocPrivate, (NvU64)pAddress,
599                 NvP64_VALUE(pPrivate));
600     }
601 
602     if (pGpu != NULL &&
603         pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) &&
604         memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM &&
605         pAllocPrivate != NULL)
606     {
607         nv_unregister_phys_pages(nv, pAllocPrivate);
608         memdescSetMemData(pMemDesc, NULL, NULL);
609     }
610 
611     NV_ASSERT(status == NV_OK);
612 }
613 
614 void osIoWriteByte(
615     NvU32   Address,
616     NvU8    Value
617 )
618 {
619     os_io_write_byte(Address, Value);
620 }
621 
622 NvU16 osIoReadWord(
623     NvU32   Address
624 )
625 {
626     return os_io_read_word(Address);
627 }
628 
629 void osIoWriteWord(
630     NvU32 Address,
631     NvU16 Value
632 )
633 {
634     os_io_write_word(Address, Value);
635 }
636 
637 NvU8 osIoReadByte(
638     NvU32   Address
639 )
640 {
641     return os_io_read_byte(Address);
642 }
643 
644 NvBool osIsAdministrator(void)
645 {
646     return os_is_administrator();
647 }
648 
649 NvBool osAllowPriorityOverride(void)
650 {
651     return os_allow_priority_override();
652 }
653 
654 NvU32 osGetCurrentProcess(void)
655 {
656     return os_get_current_process();
657 }
658 
659 void osGetCurrentProcessName(char *ProcName, NvU32 Length)
660 {
661     return os_get_current_process_name(ProcName, Length);
662 }
663 
664 NV_STATUS osGetCurrentThread(OS_THREAD_HANDLE *pThreadId)
665 {
666     NV_STATUS rmStatus;
667     NvU64 threadId = 0;
668 
669     if (pThreadId == NULL)
670     {
671         return NV_ERR_INVALID_ARGUMENT;
672     }
673 
674     rmStatus = os_get_current_thread(&threadId);
675     if (rmStatus == NV_OK)
676     {
677         *pThreadId = threadId;
678     }
679     else
680     {
681         *pThreadId = 0;
682     }
683 
684     return rmStatus;
685 }
686 
687 NV_STATUS osAttachToProcess(void** ppProcessInfo, NvU32 ProcessId)
688 {
689     //
690     // This function is used by RmUnmapMemory() to attach to the
691     // process for which a given device memory mapping was
692     // created, in order to be able to unmap it. On Linux/UNIX
693     // platforms, we can't "attach" to a random process, but
694     // since we don't create/destroy user mappings in the RM, we
695     // don't need to, either.
696     //
697     // Report success to the caller to keep RmUnmapMemory() from
698     // failing, and memory from being leaked as a result.
699     //
700     *ppProcessInfo = NULL;
701     return NV_OK;
702 }
703 
704 void osDetachFromProcess(void* pProcessInfo)
705 {
706     // stub
707     return;
708 }
709 
710 NvBool osDbgBreakpointEnabled(void)
711 {
712     return NV_TRUE;
713 }
714 
715 NV_STATUS osAcquireRmSema(void *pSema)
716 {
717     return NV_OK;
718 }
719 
720 NV_STATUS osCondAcquireRmSema(void *pSema)
721 {
722     return NV_OK;
723 }
724 
725 NvU32 osReleaseRmSema(void *pSema, OBJGPU *pDpcGpu)
726 {
727     return NV_SEMA_RELEASE_SUCCEED;
728 }
729 
730 void osSpinLoop(void)
731 {
732     // Enable this code to get debug prints from Libos.
733 }
734 
735 NvU64 osGetMaxUserVa(void)
736 {
737     return os_get_max_user_va();
738 }
739 
740 NV_STATUS osSchedule(void)
741 {
742     return os_schedule();
743 }
744 
745 NV_STATUS osQueueWorkItemWithFlags(
746     OBJGPU *pGpu,
747     OSWorkItemFunction pFunction,
748     void *pParams,
749     NvU32 flags
750 )
751 {
752     nv_work_item_t *pWi;
753     nv_state_t *nv;
754     NV_STATUS status;
755 
756     pWi = portMemAllocNonPaged(sizeof(nv_work_item_t));
757 
758     if (NULL == pWi)
759     {
760         return NV_ERR_NO_MEMORY;
761     }
762 
763     pWi->flags = NV_WORK_ITEM_FLAGS_REQUIRES_GPU;
764     if (flags & OS_QUEUE_WORKITEM_FLAGS_DONT_FREE_PARAMS)
765         pWi->flags |= NV_WORK_ITEM_FLAGS_DONT_FREE_DATA;
766 
767     if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA)
768         pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA;
769     if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW)
770         pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW;
771     if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW)
772         pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW;
773     if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW)
774         pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW;
775     if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW)
776         pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW;
777 
778     if (flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY)
779         pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY;
780 
781     pWi->gpuInstance = gpuGetInstance(pGpu);
782     pWi->func.pGpuFunction = pFunction;
783     pWi->pData = pParams;
784     nv = NV_GET_NV_STATE(pGpu);
785 
786     status = os_queue_work_item(nv ? nv->queue : NULL, pWi);
787 
788     if (NV_OK != status)
789     {
790         portMemFree((void *)pWi);
791     }
792 
793     return status;
794 }
795 
796 NV_STATUS osQueueWorkItem(
797     OBJGPU *pGpu,
798     OSWorkItemFunction pFunction,
799     void *pParams
800 )
801 {
802     return osQueueWorkItemWithFlags(pGpu, pFunction, pParams, OS_QUEUE_WORKITEM_FLAGS_NONE);
803 }
804 
805 NV_STATUS osQueueSystemWorkItem(
806     OSSystemWorkItemFunction pFunction,
807     void *pParams
808 )
809 {
810     nv_work_item_t *pWi;
811     NV_STATUS status;
812 
813     pWi = portMemAllocNonPaged(sizeof(nv_work_item_t));
814 
815     if (NULL == pWi)
816     {
817         return NV_ERR_NO_MEMORY;
818     }
819 
820     pWi->flags = NV_WORK_ITEM_FLAGS_NONE;
821     pWi->func.pSystemFunction = pFunction;
822     pWi->pData = pParams;
823 
824     status = os_queue_work_item(NULL, pWi);
825 
826     if (NV_OK != status)
827     {
828         portMemFree((void *)pWi);
829     }
830 
831     return status;
832 }
833 
834 void osQueueMMUFaultHandler(OBJGPU *pGpu)
835 {
836     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
837 
838     nv_schedule_uvm_isr(nv);
839 }
840 
841 static inline nv_dma_device_t* osGetDmaDeviceForMemDesc(
842     OS_GPU_INFO *pOsGpuInfo,
843     MEMORY_DESCRIPTOR *pMemDesc
844 )
845 {
846     return (pOsGpuInfo->niso_dma_dev != NULL) &&
847            memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO) ?
848            pOsGpuInfo->niso_dma_dev : pOsGpuInfo->dma_dev;
849 }
850 
851 NV_STATUS osDmaMapPages(
852     OS_GPU_INFO       *pOsGpuInfo,
853     MEMORY_DESCRIPTOR *pMemDesc
854 )
855 {
856     return nv_dma_map_pages(
857         osGetDmaDeviceForMemDesc(pOsGpuInfo, pMemDesc),
858         NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
859         memdescGetPteArray(pMemDesc, AT_CPU),
860         memdescGetContiguity(pMemDesc, AT_CPU),
861         memdescGetCpuCacheAttrib(pMemDesc),
862         NULL);
863 }
864 
865 NV_STATUS osDmaUnmapPages(
866     OS_GPU_INFO       *pOsGpuInfo,
867     MEMORY_DESCRIPTOR *pMemDesc
868 )
869 {
870     return nv_dma_unmap_pages(
871         pOsGpuInfo->dma_dev,
872         NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
873         memdescGetPteArray(pMemDesc, AT_CPU),
874         NULL);
875 }
876 
877 //
878 // Set the DMA address size for the given GPU
879 //
880 // This is a global device setting and care would need to be taken if it was to
881 // be modified outside of GPU initialization. At least on Linux other drivers,
882 // like UVM, might be requesting its own DMA mappings for the same GPU after
883 // the GPU has been initialized.
884 //
885 void osDmaSetAddressSize(
886     OS_GPU_INFO *pOsGpuInfo,
887     NvU32        bits
888 )
889 {
890     nv_set_dma_address_size(pOsGpuInfo, bits);
891 }
892 
893 NV_STATUS osAllocPagesInternal(
894     MEMORY_DESCRIPTOR *pMemDesc
895 )
896 {
897     OBJSYS    *pSys = SYS_GET_INSTANCE();
898     OBJGPU *pGpu = pMemDesc->pGpu;
899     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
900     void *pMemData = NULL;
901     NV_STATUS status;
902     NvS32     nodeId = -1;
903 
904     memdescSetAddress(pMemDesc, NvP64_NULL);
905     memdescSetMemData(pMemDesc, NULL, NULL);
906 
907     NV_ASSERT_OR_RETURN(pMemDesc->PageCount > 0, NV_ERR_INVALID_ARGUMENT);
908 
909     if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED))
910     {
911         if (NV_RM_PAGE_SIZE < os_page_size &&
912             !memdescGetContiguity(pMemDesc, AT_CPU))
913         {
914             RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU),
915                                      pMemDesc->PageCount);
916         }
917 
918         status = nv_alias_pages(
919             NV_GET_NV_STATE(pGpu),
920             NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
921             memdescGetContiguity(pMemDesc, AT_CPU),
922             memdescGetCpuCacheAttrib(pMemDesc),
923             memdescGetGuestId(pMemDesc),
924             memdescGetPteArray(pMemDesc, AT_CPU),
925             &pMemData);
926     }
927     else
928     {
929         NvBool unencrypted = 0;
930 
931         if (nv && (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE)))
932             nv->force_dma32_alloc = NV_TRUE;
933 
934         {
935             status = nv_alloc_pages(
936                 NV_GET_NV_STATE(pGpu),
937                 NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
938                 memdescGetContiguity(pMemDesc, AT_CPU),
939                 memdescGetCpuCacheAttrib(pMemDesc),
940                 pSys->getProperty(pSys,
941                     PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
942                 unencrypted,
943                 nodeId,
944                 memdescGetPteArray(pMemDesc, AT_CPU),
945                 &pMemData);
946         }
947 
948         if (nv && nv->force_dma32_alloc)
949             nv->force_dma32_alloc = NV_FALSE;
950     }
951 
952     if (status != NV_OK)
953     {
954         return status;
955     }
956 
957     //
958     // If the OS layer doesn't think in RM page size, we need to inflate the
959     // PTE array into RM pages.
960     //
961     if (NV_RM_PAGE_SIZE < os_page_size &&
962         !memdescGetContiguity(pMemDesc, AT_CPU))
963     {
964         RmInflateOsToRmPageArray(memdescGetPteArray(pMemDesc, AT_CPU),
965                                  pMemDesc->PageCount);
966     }
967 
968     memdescSetMemData(pMemDesc, pMemData, NULL);
969 
970     return status;
971 }
972 
973 void osFreePagesInternal(
974     MEMORY_DESCRIPTOR *pMemDesc
975 )
976 {
977     OBJGPU *pGpu = pMemDesc->pGpu;
978     NV_STATUS rmStatus;
979 
980     if (NV_RM_PAGE_SIZE < os_page_size &&
981         !memdescGetContiguity(pMemDesc, AT_CPU))
982     {
983         RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU),
984                                  pMemDesc->PageCount);
985     }
986 
987     rmStatus = nv_free_pages(NV_GET_NV_STATE(pGpu),
988         NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
989         memdescGetContiguity(pMemDesc, AT_CPU),
990         memdescGetCpuCacheAttrib(pMemDesc),
991         memdescGetMemData(pMemDesc));
992     NV_ASSERT(rmStatus == NV_OK);
993 }
994 
995 NV_STATUS osLockMem(
996     MEMORY_DESCRIPTOR *pMemDesc
997 )
998 {
999     // Not supported on this OS.
1000     DBG_BREAKPOINT();
1001     return NV_ERR_NOT_SUPPORTED;
1002 }
1003 
1004 NV_STATUS osUnlockMem(
1005     MEMORY_DESCRIPTOR *pMemDesc
1006 )
1007 {
1008     // Not supported on this OS.
1009     DBG_BREAKPOINT();
1010     return NV_ERR_NOT_SUPPORTED;
1011 }
1012 
1013 NV_STATUS osMapPciMemoryUser(
1014     OS_GPU_INFO *pOsGpuInfo,
1015     RmPhysAddr   busAddress,
1016     NvU64        length,
1017     NvU32        Protect,
1018     NvP64       *pVirtualAddress,
1019     NvP64       *pPriv,
1020     NvU32        modeFlag
1021 )
1022 {
1023     void *addr;
1024     void *priv = NULL;
1025 
1026     addr = osMapIOSpace(busAddress, length, &priv, NV_TRUE, modeFlag, Protect);
1027 
1028     *pPriv = NV_PTR_TO_NvP64(priv);
1029     *pVirtualAddress = NV_PTR_TO_NvP64(addr);
1030 
1031     return (addr != NULL) ? NV_OK : NV_ERR_GENERIC;
1032 }
1033 
1034 void osUnmapPciMemoryUser(
1035     OS_GPU_INFO *pOsGpuInfo,
1036     NvP64        virtualAddress,
1037     NvU64        length,
1038     NvP64        pPriv
1039 )
1040 {
1041     void *addr, *priv;
1042 
1043     addr = NvP64_VALUE(virtualAddress);
1044     priv = NvP64_VALUE(pPriv);
1045 
1046     osUnmapIOSpace(addr, length, priv, NV_TRUE);
1047 }
1048 
1049 NV_STATUS osMapPciMemoryKernelOld
1050 (
1051     OBJGPU    *pGpu,
1052     RmPhysAddr busAddress,
1053     NvU64      length,
1054     NvU32      Protect,
1055     void     **pVirtualAddress,
1056     NvU32      modeFlag
1057 )
1058 {
1059     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
1060     nv_kern_mapping_t *mapping;
1061 
1062     if (pVirtualAddress == NULL)
1063         return NV_ERR_GENERIC;
1064 
1065     *pVirtualAddress = os_map_kernel_space(busAddress, length, modeFlag);
1066     if (*pVirtualAddress == NULL)
1067         return NV_ERR_GENERIC;
1068 
1069     mapping = portMemAllocNonPaged(sizeof(nv_kern_mapping_t));
1070     if (NULL == mapping)
1071     {
1072         os_unmap_kernel_space(*pVirtualAddress, length);
1073         *pVirtualAddress = 0;
1074         return NV_ERR_GENERIC;
1075     }
1076 
1077     mapping->addr = *pVirtualAddress;
1078     mapping->size = length;
1079     mapping->modeFlag = modeFlag;
1080 
1081     mapping->next = nv->kern_mappings;
1082     nv->kern_mappings = mapping;
1083 
1084     return NV_OK;
1085 }
1086 
1087 NV_STATUS osMapPciMemoryKernel64
1088 (
1089     OBJGPU    *pGpu,
1090     RmPhysAddr busAddress,
1091     NvU64      length,
1092     NvU32      Protect,
1093     NvP64     *pVirtualAddress,
1094     NvU32      modeFlag
1095 )
1096 {
1097     void *tmppVirtualAddress = NvP64_VALUE(pVirtualAddress);
1098     NV_STATUS rc;
1099 
1100     rc = osMapPciMemoryKernelOld(pGpu,
1101                                  busAddress,
1102                                  length,
1103                                  Protect,
1104                                  &tmppVirtualAddress,
1105                                  modeFlag);
1106 
1107     *pVirtualAddress = NV_PTR_TO_NvP64(tmppVirtualAddress);
1108 
1109     return rc;
1110 }
1111 
1112 void osUnmapPciMemoryKernelOld
1113 (
1114     OBJGPU *pGpu,
1115     void*   virtualAddress
1116 )
1117 {
1118     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
1119     nv_kern_mapping_t *mapping, *tmp;
1120 
1121     // this can happen, for example, during a call to RmShutdownAdapter()
1122     // from a failed RmInitAdapter()
1123     if (virtualAddress == NULL)
1124     {
1125         return;
1126     }
1127 
1128     tmp = mapping = nv->kern_mappings;
1129     while (mapping)
1130     {
1131         if (mapping->addr == virtualAddress)
1132         {
1133             if (mapping == nv->kern_mappings)
1134             {
1135                 nv->kern_mappings = mapping->next;
1136             }
1137             else
1138             {
1139                 tmp->next = mapping->next;
1140             }
1141 
1142             os_unmap_kernel_space(mapping->addr, mapping->size);
1143 
1144             portMemFree(mapping);
1145             return;
1146         }
1147         tmp = mapping;
1148         mapping = mapping->next;
1149     }
1150 
1151     DBG_BREAKPOINT();
1152 }
1153 
1154 void osUnmapPciMemoryKernel64
1155 (
1156     OBJGPU *pGpu,
1157     NvP64   virtualAddress
1158 )
1159 {
1160     osUnmapPciMemoryKernelOld(pGpu, NvP64_VALUE(virtualAddress));
1161 }
1162 
1163 NV_STATUS osMapGPU(
1164     OBJGPU        *pGpu,
1165     RS_PRIV_LEVEL  privLevel,
1166     NvU64          offset,
1167     NvU64          length,
1168     NvU32          Protect,
1169     NvP64         *pAddress,
1170     NvP64         *pPriv
1171 )
1172 {
1173     NV_STATUS rmStatus = NV_OK;
1174 
1175     if (privLevel >= RS_PRIV_LEVEL_KERNEL)
1176     {
1177         if (!portSafeAddU64((NvUPtr)pGpu->deviceMappings[0].gpuNvAddr, offset, (NvU64*)pAddress))
1178         {
1179             rmStatus = NV_ERR_INVALID_LIMIT;
1180         }
1181     }
1182     else
1183     {
1184         RmPhysAddr busAddress;
1185         if (!portSafeAddU64(pGpu->busInfo.gpuPhysAddr, offset, &busAddress))
1186         {
1187             rmStatus = NV_ERR_INVALID_LIMIT;
1188         }
1189         else
1190         {
1191             rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo,
1192                                           busAddress,
1193                                           length,
1194                                           Protect,
1195                                           pAddress,
1196                                           pPriv,
1197                                           NV_FALSE);
1198         }
1199     }
1200 
1201     return rmStatus;
1202 }
1203 
1204 void osUnmapGPU(
1205     OS_GPU_INFO   *pOsGpuInfo,
1206     RS_PRIV_LEVEL  privLevel,
1207     NvP64          address,
1208     NvU64          length,
1209     NvP64          priv
1210 )
1211 {
1212     if (privLevel < RS_PRIV_LEVEL_KERNEL)
1213     {
1214         osUnmapPciMemoryUser(pOsGpuInfo, address, length, priv);
1215     }
1216 }
1217 
1218 NV_STATUS osDeviceClassToDeviceName(
1219     NvU32 deviceInstance,
1220     NvU8 *szName
1221 )
1222 {
1223     return NV_ERR_GENERIC;
1224 }
1225 
1226 static void postEvent(
1227     nv_event_t *event,
1228     NvU32 hEvent,
1229     NvU32 notifyIndex,
1230     NvU32 info32,
1231     NvU16 info16,
1232     NvBool dataValid
1233 )
1234 {
1235     if (osReferenceObjectCount(event) != NV_OK)
1236         return;
1237     nv_post_event(event, hEvent, notifyIndex,
1238                   info32, info16, dataValid);
1239     osDereferenceObjectCount(event);
1240 }
1241 
1242 NvU32 osSetEvent
1243 (
1244     OBJGPU   *pGpu,
1245     NvP64     eventID
1246 )
1247 {
1248     nv_event_t *event = NvP64_VALUE(eventID);
1249     postEvent(event, 0, 0, 0, 0, NV_FALSE);
1250     return 1;
1251 }
1252 
1253 NV_STATUS osNotifyEvent(
1254     OBJGPU             *pGpu,
1255     PEVENTNOTIFICATION  NotifyEvent,
1256     NvU32               Method,
1257     NvU32               Data,
1258     NV_STATUS           Status
1259 )
1260 {
1261     NV_STATUS rmStatus = NV_OK;
1262 
1263     // notify the event
1264     switch (NotifyEvent->NotifyType)
1265     {
1266         case NV01_EVENT_OS_EVENT:
1267         {
1268             nv_event_t *event = NvP64_VALUE(NotifyEvent->Data);
1269             postEvent(event,
1270                       NotifyEvent->hEvent,
1271                       NotifyEvent->NotifyIndex,
1272                       0, 0,
1273                       NotifyEvent->bEventDataRequired);
1274             break;
1275         }
1276 
1277         // NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. please use NV01_EVENT_KERNEL_CALLBACK_EX.
1278         case NV01_EVENT_KERNEL_CALLBACK:
1279         {
1280             MINIPORT_CALLBACK callBackToMiniport =
1281                 (MINIPORT_CALLBACK)NvP64_VALUE(NotifyEvent->Data);
1282 
1283             // perform a direct callback to the miniport
1284             if (callBackToMiniport)
1285                 callBackToMiniport(NV_GET_NV_STATE(pGpu));
1286             break;
1287         }
1288 
1289         case NV01_EVENT_KERNEL_CALLBACK_EX:
1290         {
1291             NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(NotifyEvent->Data);
1292 
1293             // passes two arguments (arg, params) to the kernel callback instead of one (arg).
1294             if (kc && kc->func)
1295             {
1296                 kc->func(kc->arg, NULL, NotifyEvent->hEvent, Data, Status);
1297             }
1298             break;
1299         }
1300 
1301 
1302         default:
1303         {
1304             rmStatus = NV_ERR_GENERIC;
1305             break;
1306         }
1307     }
1308 
1309     return rmStatus;
1310 
1311 } // end of osNotifyEvent()
1312 
1313 // Allow CPL Events to be callback or events
1314 NV_STATUS osEventNotification
1315 (
1316     OBJGPU   *pGpu,
1317     PEVENTNOTIFICATION pNotifyEvent,
1318     NvU32 notifyIndex,
1319     void * pEventData,
1320     NvU32 eventDataSize
1321 )
1322 {
1323     return osEventNotificationWithInfo(pGpu, pNotifyEvent, notifyIndex, 0, 0,
1324                                        pEventData, eventDataSize);
1325 }
1326 
1327 NV_STATUS osEventNotificationWithInfo
1328 (
1329     OBJGPU   *pGpu,
1330     PEVENTNOTIFICATION pNotifyEvent,
1331     NvU32 notifyIndex,
1332     NvU32 info32,
1333     NvU16 info16,
1334     void * pEventData,
1335     NvU32 eventDataSize
1336 )
1337 {
1338     NV_STATUS rmStatus = NV_OK;
1339 
1340     // walk this object's event list and find any matches for this specific notify
1341     for (; pNotifyEvent; pNotifyEvent = pNotifyEvent->Next)
1342     {
1343         // notifyIndex must match if request isn't for all
1344         if ((notifyIndex != OS_EVENT_NOTIFICATION_INDEX_ALL) &&
1345             (pNotifyEvent->NotifyIndex != notifyIndex))
1346         {
1347             continue;
1348         }
1349 
1350         switch (pNotifyEvent->NotifyType)
1351         {
1352             case NV_EVENT_BUFFER_BIND:
1353             case NV01_EVENT_WIN32_EVENT:
1354             {
1355                 nv_event_t *event = NvP64_VALUE(pNotifyEvent->Data);
1356                 postEvent(event,
1357                           pNotifyEvent->hEvent,
1358                           pNotifyEvent->NotifyIndex,
1359                           info32, info16,
1360                           pNotifyEvent->bEventDataRequired);
1361                 break;
1362             }
1363 
1364             // NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. please use NV01_EVENT_KERNEL_CALLBACK_EX.
1365             case NV01_EVENT_KERNEL_CALLBACK:
1366             {
1367                 MINIPORT_CALLBACK callBackToMiniport =
1368                     (MINIPORT_CALLBACK)NvP64_VALUE(pNotifyEvent->Data);
1369 
1370                 // perform a direct callback to the miniport
1371                 if (callBackToMiniport)
1372                     callBackToMiniport(NV_GET_NV_STATE(pGpu));
1373                 break;
1374             }
1375 
1376             case NV01_EVENT_KERNEL_CALLBACK_EX:
1377             {
1378                 NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(pNotifyEvent->Data);
1379 
1380                 if (kc && kc->func)
1381                 {
1382                     kc->func(kc->arg, pEventData, pNotifyEvent->hEvent, 0, NV_OK);
1383                 }
1384                 break;
1385             }
1386 
1387             default:
1388                 break;
1389         }
1390     }
1391 
1392     return rmStatus;
1393 }
1394 
1395 // Allow CPL Events to be callback or events
1396 NV_STATUS osObjectEventNotification
1397 (
1398     NvHandle            hClient,
1399     NvHandle            hObject,
1400     NvU32               hClass,
1401     PEVENTNOTIFICATION  pNotifyEvent,
1402     NvU32               notifyIndex,
1403     void                *pEventData,
1404     NvU32               eventDataSize
1405 )
1406 {
1407     NV_STATUS rmStatus = NV_OK;
1408 
1409     NV_PRINTF(LEVEL_INFO, "%s()\n", __FUNCTION__);
1410     // walk this object's event list and find any matches for this specific notify
1411     for (; pNotifyEvent; pNotifyEvent = pNotifyEvent->Next)
1412     {
1413         // notifyIndex must match if request isn't for all
1414         if ((notifyIndex != OS_EVENT_NOTIFICATION_INDEX_ALL) &&
1415             (pNotifyEvent->NotifyIndex != notifyIndex))
1416         {
1417             continue;
1418         }
1419 
1420         switch (pNotifyEvent->NotifyType)
1421         {
1422             case NV01_EVENT_OS_EVENT:
1423             {
1424                 nv_event_t *event = NvP64_VALUE(pNotifyEvent->Data);
1425                 postEvent(event,
1426                           pNotifyEvent->hEvent,
1427                           pNotifyEvent->NotifyIndex,
1428                           0, 0,
1429                           pNotifyEvent->bEventDataRequired);
1430                 break;
1431             }
1432 
1433             case NV01_EVENT_KERNEL_CALLBACK_EX:
1434             {
1435                 NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(pNotifyEvent->Data);
1436 
1437                 if (kc && kc->func)
1438                 {
1439                     kc->func(kc->arg, pEventData, pNotifyEvent->hEvent, 0, NV_OK);
1440                 }
1441                 break;
1442             }
1443 
1444             default:
1445                 break;
1446         }
1447     }
1448 
1449     return rmStatus;
1450 }
1451 
1452 NV_STATUS osReferenceObjectCount(void *pEvent)
1453 {
1454     nv_state_t *nv = nv_get_ctl_state();
1455     nv_event_t *event = pEvent;
1456 
1457     portSyncSpinlockAcquire(nv->event_spinlock);
1458     // If event->active is false, don't allow any more reference
1459     if (!event->active)
1460     {
1461         portSyncSpinlockRelease(nv->event_spinlock);
1462         return NV_ERR_INVALID_EVENT;
1463     }
1464     ++event->refcount;
1465     portSyncSpinlockRelease(nv->event_spinlock);
1466     return NV_OK;
1467 }
1468 
1469 NV_STATUS osDereferenceObjectCount(void *pOSEvent)
1470 {
1471     nv_state_t *nv = nv_get_ctl_state();
1472     nv_event_t *event = pOSEvent;
1473 
1474     portSyncSpinlockAcquire(nv->event_spinlock);
1475     NV_ASSERT(event->refcount > 0);
1476     // If event->refcount == 0 but event->active is true, the client
1477     // has not yet freed the OS event.  free_os_event will free its
1478     // memory when they do, or else when the client itself is freed.
1479     if (--event->refcount == 0 && !event->active)
1480         portMemFree(event);
1481     portSyncSpinlockRelease(nv->event_spinlock);
1482 
1483     return NV_OK;
1484 }
1485 
1486 NV_STATUS osUserHandleToKernelPtr(NvHandle hClient, NvP64 hEvent, NvP64 *pEvent)
1487 {
1488     nv_state_t *nv = nv_get_ctl_state();
1489     NvU32 fd = (NvU64)hEvent;
1490     NV_STATUS result;
1491 
1492     portSyncSpinlockAcquire(nv->event_spinlock);
1493     nv_event_t *e = nv->event_list;
1494     while (e != NULL)
1495     {
1496         if (e->fd == fd && e->hParent == hClient)
1497             break;
1498         e = e->next;
1499     }
1500 
1501     if (e != NULL)
1502     {
1503         ++e->refcount;
1504         *pEvent = NV_PTR_TO_NvP64(e);
1505         result = NV_OK;
1506     }
1507     else
1508         result = NV_ERR_OBJECT_NOT_FOUND;
1509     portSyncSpinlockRelease(nv->event_spinlock);
1510 
1511     return result;
1512 }
1513 
1514 NV_STATUS osFlushCpuCache(void)
1515 {
1516     return os_flush_cpu_cache_all();
1517 }
1518 
1519 void osFlushCpuWriteCombineBuffer(void)
1520 {
1521     os_flush_cpu_write_combine_buffer();
1522 }
1523 
1524 
1525 //
1526 // Evict GPU memory range from the CPU caches.
1527 //
1528 // On some platforms (e.g. P9+V100), the CPU can coherently cache GPU memory
1529 // and RM takes advantage of that. Most everything is handled transparently,
1530 // but there are two exceptions that require explicitly flushing any CPU cache
1531 // lines of GPU memory. These are:
1532 //
1533 // 1) Flushing memory backing ACR regions before they get locked.
1534 //
1535 // Otherwise the cache could get flushed while the regions are locked causing a
1536 // region violation physical fault. See more details in
1537 // acrFlushRegionsFromGpuCoherentCpuCache_IMPL().
1538 //
1539 // 2) Flushing all of FB before GPU reset (NVLink going down)
1540 //
1541 // Leaving cache entries on the CPU causes fatal errors when the CPU tries
1542 // flushing them later while the link is down. See more details in
1543 // nvlinkStatePostUnload_IMPL().
1544 //
1545 void osFlushGpuCoherentCpuCacheRange
1546 (
1547     OS_GPU_INFO *pOsGpuInfo,
1548     NvU64        cpuVirtual,
1549     NvU64        size
1550 )
1551 {
1552     nv_flush_coherent_cpu_cache_range(pOsGpuInfo, cpuVirtual, size);
1553 }
1554 
1555 void osErrorLogV(OBJGPU *pGpu, NvU32 num, const char * pFormat, va_list arglist)
1556 {
1557     NV_STATUS        rmStatus;
1558     nv_state_t      *nv             = NV_GET_NV_STATE(pGpu);
1559 
1560     if ((pFormat == NULL) || (*pFormat == '\0'))
1561     {
1562         return;
1563     }
1564 
1565     rmStatus = nv_log_error(nv, num, pFormat, arglist);
1566     NV_ASSERT(rmStatus == NV_OK);
1567 }
1568 
1569 void osErrorLog(OBJGPU *pGpu, NvU32 num, const char* pFormat, ...)
1570 {
1571     va_list arglist;
1572     va_start(arglist, pFormat);
1573     osErrorLogV(pGpu, num, pFormat, arglist);
1574     va_end(arglist);
1575 }
1576 
1577 NvU32
1578 osPollHotkeyState
1579 (
1580     OBJGPU  *pGpu
1581 )
1582 {
1583     return 0;
1584 }
1585 
1586 void osDevWriteReg008(
1587      OBJGPU            *pGpu,
1588      DEVICE_MAPPING    *pMapping,
1589      NvU32              thisAddress,
1590      NvV8               thisValue
1591 )
1592 {
1593     if (thisAddress >= pMapping->gpuNvLength)
1594     {
1595         NV_ASSERT(thisAddress < pMapping->gpuNvLength);
1596         return;
1597     }
1598 
1599     NV_PRIV_REG_WR08(pMapping->gpuNvAddr, thisAddress, thisValue);
1600 }
1601 
1602 void osDevWriteReg016(
1603      OBJGPU            *pGpu,
1604      DEVICE_MAPPING    *pMapping,
1605      NvU32              thisAddress,
1606      NvV16              thisValue
1607 )
1608 {
1609     if (thisAddress >= pMapping->gpuNvLength)
1610     {
1611         NV_ASSERT(thisAddress < pMapping->gpuNvLength);
1612         return;
1613     }
1614 
1615     NV_PRIV_REG_WR16(pMapping->gpuNvAddr, thisAddress, thisValue);
1616 }
1617 
1618 void osDevWriteReg032(
1619      OBJGPU            *pGpu,
1620      DEVICE_MAPPING    *pMapping,
1621      NvU32              thisAddress,
1622      NvV32              thisValue
1623 )
1624 {
1625     NvBool vgpuHandled = NV_FALSE;
1626 
1627     vgpuDevWriteReg032(pGpu, thisAddress, thisValue, &vgpuHandled);
1628     if (vgpuHandled)
1629     {
1630         return;
1631     }
1632 
1633     if (thisAddress >= pMapping->gpuNvLength)
1634     {
1635         NV_ASSERT(thisAddress < pMapping->gpuNvLength);
1636         return;
1637     }
1638 
1639     NV_PRIV_REG_WR32(pMapping->gpuNvAddr, thisAddress, thisValue);
1640 }
1641 
1642 NvU8 osDevReadReg008(
1643     OBJGPU             *pGpu,
1644     DEVICE_MAPPING     *pMapping,
1645     NvU32               thisAddress
1646 )
1647 {
1648     NvU8 retval = 0;
1649 
1650     if (thisAddress >= pMapping->gpuNvLength)
1651     {
1652         NV_ASSERT(thisAddress < pMapping->gpuNvLength);
1653     }
1654     else
1655         retval = NV_PRIV_REG_RD08(pMapping->gpuNvAddr, thisAddress);
1656 
1657     return retval;
1658 }
1659 
1660 NvU16 osDevReadReg016(
1661     OBJGPU             *pGpu,
1662     DEVICE_MAPPING     *pMapping,
1663     NvU32               thisAddress
1664 )
1665 {
1666     NvU16 retval = 0;
1667 
1668     if (thisAddress >= pMapping->gpuNvLength)
1669     {
1670         NV_ASSERT(thisAddress < pMapping->gpuNvLength);
1671     }
1672     else
1673         retval = NV_PRIV_REG_RD16(pMapping->gpuNvAddr, thisAddress);
1674 
1675     return retval;
1676 }
1677 
1678 NvU32 osDevReadReg032(
1679     OBJGPU             *pGpu,
1680     DEVICE_MAPPING     *pMapping,
1681     NvU32               thisAddress
1682 )
1683 {
1684     NvU32 retval = 0;
1685     NvBool vgpuHandled = NV_FALSE;
1686 
1687     retval = vgpuDevReadReg032(pGpu, thisAddress, &vgpuHandled);
1688     if (vgpuHandled)
1689     {
1690         return retval;
1691     }
1692 
1693     if (thisAddress >= pMapping->gpuNvLength)
1694     {
1695         NV_ASSERT(thisAddress < pMapping->gpuNvLength);
1696     }
1697     else
1698         retval = NV_PRIV_REG_RD32(pMapping->gpuNvAddr, thisAddress);
1699 
1700     return retval;
1701 }
1702 
1703 NV_STATUS osReadRegistryDwordBase(
1704     OBJGPU     *pGpu,
1705     const char *regParmStr,
1706     NvU32      *Data
1707 )
1708 {
1709     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
1710     return RmReadRegistryDword(nv, regParmStr, Data);
1711 }
1712 
1713 NV_STATUS osWriteRegistryDword(
1714     OBJGPU     *pGpu,
1715     const char *regParmStr,
1716     NvU32       Data
1717 )
1718 {
1719     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
1720     return RmWriteRegistryDword(nv, regParmStr, Data);
1721 }
1722 
1723 NV_STATUS osReadRegistryBinary(
1724     OBJGPU     *pGpu,
1725     const char *regParmStr,
1726     NvU8       *Data,
1727     NvU32      *cbLen
1728 )
1729 {
1730     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
1731     return RmReadRegistryBinary(nv, regParmStr, Data, cbLen);
1732 }
1733 
1734 NV_STATUS osWriteRegistryBinary(
1735     OBJGPU     *pGpu,
1736     const char *regParmStr,
1737     NvU8       *Data,
1738     NvU32       cbLen
1739 )
1740 {
1741     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
1742     return RmWriteRegistryBinary(nv, regParmStr, Data, cbLen);
1743 }
1744 
1745 NV_STATUS osWriteRegistryVolatile(
1746     OBJGPU     *pGpu,
1747     const char *regParmStr,
1748     NvU8       *Data,
1749     NvU32       cbLen
1750 )
1751 {
1752     return NV_ERR_NOT_SUPPORTED;
1753 }
1754 
1755 NV_STATUS osReadRegistryVolatile
1756 (
1757     OBJGPU     *pGpu,
1758     const char *regParmStr,
1759     NvU8       *Data,
1760     NvU32       cbLen
1761 )
1762 {
1763     return NV_ERR_NOT_SUPPORTED;
1764 }
1765 
1766 NV_STATUS osReadRegistryVolatileSize
1767 (
1768     OBJGPU     *pGpu,
1769     const char *regParmStr,
1770     NvU32      *pSize
1771 )
1772 {
1773     return NV_ERR_NOT_SUPPORTED;
1774 }
1775 
1776 NV_STATUS osReadRegistryStringBase(
1777     OBJGPU     *pGpu,
1778     const char *regParmStr,
1779     NvU8       *buffer,
1780     NvU32      *pBufferLength
1781 )
1782 {
1783     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
1784     return RmReadRegistryString(nv, regParmStr, buffer, pBufferLength);
1785 }
1786 
1787 NV_STATUS osPackageRegistry(
1788     OBJGPU                 *pGpu,
1789     PACKED_REGISTRY_TABLE  *pRegTable,
1790     NvU32                  *pSize
1791 )
1792 {
1793     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
1794     return RmPackageRegistry(nv, pRegTable, pSize);
1795 }
1796 
1797 NvU32 osGetCpuCount(void)
1798 {
1799     return os_get_cpu_count();   // Total number of logical CPUs.
1800 }
1801 
1802 NvU32 osGetCurrentProcessorNumber(void)
1803 {
1804     return os_get_cpu_number();
1805 }
1806 
1807 void osGetTimeoutParams(OBJGPU *pGpu, NvU32 *pTimeoutUs, NvU32 *pScale, NvU32 *pFlags)
1808 {
1809     NvU32 gpuMode   = gpuGetMode(pGpu);
1810 
1811     NV_ASSERT((NV_GPU_MODE_GRAPHICS_MODE == gpuMode) ||
1812               (NV_GPU_MODE_COMPUTE_MODE  == gpuMode));
1813 
1814     if (hypervisorIsVgxHyper())
1815     {
1816         //
1817         // 1.8 seconds is chosen because it is 90% of the overall hard limit of 2.0
1818         // seconds, imposed by WDDM driver rules.
1819         // Currently primary use case of VGX is Windows, so setting 1.8 as default
1820         //
1821         *pTimeoutUs = 1.8 * 1000000;
1822     }
1823     else
1824     {
1825         switch (gpuMode)
1826         {
1827         default:
1828         case NV_GPU_MODE_GRAPHICS_MODE:
1829             *pTimeoutUs = 4 * 1000000;
1830             break;
1831 
1832         case NV_GPU_MODE_COMPUTE_MODE:
1833             *pTimeoutUs = 30 * 1000000;
1834             break;
1835         }
1836     }
1837 
1838     *pFlags = GPU_TIMEOUT_FLAGS_OSTIMER;
1839 
1840     *pScale = 1;
1841     if (IS_EMULATION(pGpu) || IS_SIMULATION(pGpu))
1842     {
1843         *pScale = 60;       // 1s -> 1m
1844     }
1845     return;
1846 }
1847 
1848 void osFlushLog(void)
1849 {
1850     // Not implemented
1851 }
1852 
1853 NvU32 osGetSimulationMode(void)
1854 {
1855     return NV_SIM_MODE_HARDWARE;
1856 }
1857 
1858 NV_STATUS
1859 cliresCtrlCmdOsUnixFlushUserCache_IMPL
1860 (
1861     RmClientResource *pRmCliRes,
1862     NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams
1863 )
1864 {
1865     Memory *pMemory;
1866     MEMORY_DESCRIPTOR *pMemDesc;
1867     NvU64 start, end;
1868     NvBool bInvalidateOnly;
1869 
1870     NV_CHECK_OK_OR_RETURN(LEVEL_SILENT,
1871         memGetByHandle(RES_GET_CLIENT(pRmCliRes),
1872                        pAddressSpaceParams->hObject,
1873                        &pMemory));
1874 
1875     pMemDesc = pMemory->pMemDesc;
1876 
1877     if (memdescGetAddressSpace(pMemDesc) != ADDR_SYSMEM)
1878     {
1879         NV_PRINTF(LEVEL_ERROR, "%s: wrong address space %d\n",
1880                   __FUNCTION__, memdescGetAddressSpace(pMemDesc));
1881         return NV_ERR_INVALID_COMMAND;
1882     }
1883 
1884     if (memdescGetCpuCacheAttrib(pMemDesc) != NV_MEMORY_CACHED)
1885     {
1886         NV_PRINTF(LEVEL_ERROR, "%s: wrong caching type %d\n",
1887                   __FUNCTION__, memdescGetCpuCacheAttrib(pMemDesc));
1888         return NV_ERR_INVALID_COMMAND;
1889     }
1890 
1891     start = pAddressSpaceParams->offset;
1892     end = start + pAddressSpaceParams->length;
1893 
1894     switch(pAddressSpaceParams->cacheOps)
1895     {
1896         case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE:
1897         case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH:
1898             bInvalidateOnly = NV_FALSE;
1899             break;
1900 
1901         case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_INVALIDATE:
1902             bInvalidateOnly = NV_TRUE;
1903             break;
1904 
1905         default:
1906             NV_PRINTF(LEVEL_ERROR, "%s: cacheOps not specified\n", __FUNCTION__);
1907             return NV_ERR_INVALID_COMMAND;
1908     }
1909 
1910     if ((end - start) > pMemory->Length)
1911     {
1912         NV_PRINTF(LEVEL_ERROR,
1913                   "%s: end address 0x%llx exceeded buffer length: 0x%llx\n",
1914                   __FUNCTION__, end, pMemory->Length);
1915         return NV_ERR_INVALID_LIMIT;
1916     }
1917 
1918     if (bInvalidateOnly)
1919     {
1920         //
1921         // XXX: this seems fishy - I'm not sure if invalidating by the kernel
1922         // VA only as nv_dma_cache_invalidate() does here is sufficient for
1923         // this control call.
1924         // pAddressSpaceParams->internalOnly is expected to be the RM client
1925         // VA for this control call; if we wanted to invalidate the user VA we
1926         // could do so using that.
1927         //
1928         // For I/O coherent platforms this won't actually do anything.
1929         // On non-I/O-coherent platforms, there's no need to do a second
1930         // invalidation after the full flush.
1931         //
1932         nv_state_t *nv = NV_GET_NV_STATE(pMemDesc->pGpu);
1933         if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE)
1934         {
1935             PIOVAMAPPING pIovaMapping = memdescGetIommuMap(pMemDesc, nv->iovaspace_id);
1936             //
1937             // This should only be called for devices that map memory descriptors
1938             // through the nv-dma library, where the memory descriptor data
1939             // contains all the kernel-specific context we need for the
1940             // invalidation.
1941             //
1942             // (These checks match those in osIovaUnmap() leading up to
1943             // nv_dma_unmap_alloc()).
1944             //
1945             if (pIovaMapping == NULL ||
1946                 pIovaMapping->pOsData == NULL ||
1947                 memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED) ||
1948                 memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM))
1949             {
1950                 return NV_ERR_INVALID_ARGUMENT;
1951             }
1952 
1953             nv_dma_cache_invalidate(nv->dma_dev, pIovaMapping->pOsData);
1954         }
1955         else
1956         {
1957             return NV_ERR_INVALID_ARGUMENT;
1958         }
1959     }
1960     else
1961     {
1962         return os_flush_user_cache();
1963     }
1964 
1965     return NV_OK;
1966 }
1967 
1968 static NV_STATUS
1969 _initializeExportObjectFd
1970 (
1971     nv_file_private_t *nvfp,
1972     NvHandle           hClient,
1973     NvHandle           hDevice,
1974     NvU16              maxObjects,
1975     NvU8              *metadata
1976 )
1977 {
1978     NV_STATUS      status;
1979     RsResourceRef *pResourceRef;
1980     Device        *pDevice;
1981     NvU32          deviceInstance = NV_MAX_DEVICES;
1982 
1983     if (nvfp->handles != NULL)
1984     {
1985         return NV_ERR_STATE_IN_USE;
1986     }
1987 
1988     if (hDevice != 0)
1989     {
1990         status = serverutilGetResourceRef(hClient, hDevice, &pResourceRef);
1991         if (status != NV_OK)
1992         {
1993             return status;
1994         }
1995 
1996         pDevice = dynamicCast(pResourceRef->pResource, Device);
1997         if (pDevice == NULL)
1998         {
1999             return NV_ERR_INVALID_PARAMETER;
2000         }
2001 
2002         deviceInstance = pDevice->deviceInst;
2003     }
2004 
2005     NV_ASSERT_OK_OR_RETURN(os_alloc_mem((void **)&nvfp->handles,
2006                            sizeof(nvfp->handles[0]) * maxObjects));
2007 
2008     os_mem_set(nvfp->handles, 0,
2009                sizeof(nvfp->handles[0]) * maxObjects);
2010 
2011     nvfp->maxHandles     = maxObjects;
2012     nvfp->deviceInstance = deviceInstance;
2013 
2014     if (metadata != NULL)
2015     {
2016         os_mem_copy(nvfp->metadata, metadata, sizeof(nvfp->metadata));
2017     }
2018 
2019     return NV_OK;
2020 }
2021 
2022 NV_STATUS
2023 cliresCtrlCmdOsUnixExportObjectToFd_IMPL
2024 (
2025     RmClientResource *pRmCliRes,
2026     NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams
2027 )
2028 {
2029     NvHandle           hClient = RES_GET_CLIENT_HANDLE(pRmCliRes);
2030     RmObjExportHandle  hExportHandle = 0;
2031     nv_file_private_t *nvfp = NULL;
2032     void              *priv = NULL;
2033     NV_STATUS          status = NV_OK;
2034 
2035     /*
2036      * This flag is intended to be implemented entirely in the rmapi library in
2037      * userspace, we should never encounter it here.
2038      */
2039     if (FLD_TEST_DRF(0000_CTRL, _OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS,
2040                      _EMPTY_FD, _TRUE, pParams->flags))
2041     {
2042         return NV_ERR_INVALID_PARAMETER;
2043     }
2044 
2045     if (pParams->object.type != NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM ||
2046         pParams->fd == -1)
2047     {
2048         return NV_ERR_INVALID_PARAMETER;
2049     }
2050 
2051     status = RmExportObject(hClient,
2052                             pParams->object.data.rmObject.hObject,
2053                             &hExportHandle, NULL);
2054     if (status != NV_OK)
2055     {
2056         goto done;
2057     }
2058     NV_ASSERT(hExportHandle != 0);
2059 
2060     nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv);
2061     if (nvfp == NULL)
2062     {
2063         NV_ASSERT(priv == NULL);
2064         status = NV_ERR_INVALID_PARAMETER;
2065         goto done;
2066     }
2067 
2068     status = _initializeExportObjectFd(nvfp, hClient,
2069                                        pParams->object.data.rmObject.hDevice,
2070                                        1, NULL);
2071     if (status != NV_OK)
2072     {
2073         goto done;
2074     }
2075 
2076     nvfp->handles[0] = hExportHandle;
2077 
2078 done:
2079 
2080     if (status != NV_OK && hExportHandle != 0)
2081     {
2082         RmFreeObjExportHandle(hExportHandle);
2083     }
2084 
2085     if (priv != NULL)
2086     {
2087         nv_put_file_private(priv);
2088     }
2089 
2090     return status;
2091 }
2092 
2093 // This control call has been deprecated. It will be deleted soon.
2094 NV_STATUS
2095 cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL
2096 (
2097     RmClientResource *pRmCliRes,
2098     NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams
2099 )
2100 {
2101     NV_STATUS          status;
2102     NvHandle           hClient = RES_GET_CLIENT_HANDLE(pRmCliRes);
2103     nv_file_private_t *nvfp = NULL;
2104     void              *priv = NULL;
2105 
2106     ct_assert(sizeof(nvfp->metadata) == sizeof(pParams->metadata));
2107 
2108     if (pParams->maxObjects == 0)
2109     {
2110         return NV_ERR_INVALID_PARAMETER;
2111     }
2112 
2113     nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv);
2114     if (nvfp == NULL)
2115     {
2116         NV_ASSERT(priv == NULL);
2117         status = NV_ERR_INVALID_PARAMETER;
2118         goto done;
2119     }
2120 
2121     status = _initializeExportObjectFd(nvfp, hClient, pParams->hDevice,
2122                                        pParams->maxObjects, pParams->metadata);
2123 
2124 done:
2125     if (priv != NULL)
2126     {
2127         nv_put_file_private(priv);
2128     }
2129 
2130     return status;
2131 }
2132 
2133 NV_STATUS
2134 cliresCtrlCmdOsUnixExportObjectsToFd_IMPL
2135 (
2136     RmClientResource *pRmCliRes,
2137     NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams
2138 )
2139 {
2140     NvHandle           hClient = RES_GET_CLIENT_HANDLE(pRmCliRes);
2141     RmObjExportHandle *pExportHandle;
2142     nv_file_private_t *nvfp = NULL;
2143     void              *priv = NULL;
2144     NV_STATUS          status = NV_OK;
2145     NvU32              i;
2146     NvU32              deviceInstance;
2147     NvU32              result;
2148     NvHandle          *exportHandles = NULL;
2149     NvBool             bFdSetup = NV_FALSE;
2150 
2151     nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv);
2152     if (nvfp == NULL)
2153     {
2154         NV_ASSERT(priv == NULL);
2155         status = NV_ERR_INVALID_PARAMETER;
2156         goto done;
2157     }
2158 
2159     ct_assert(sizeof(nvfp->metadata) == sizeof(pParams->metadata));
2160 
2161     /* Setup export FD if not done */
2162     if (nvfp->handles == NULL)
2163     {
2164         if (pParams->maxObjects == 0)
2165         {
2166             status = NV_ERR_INVALID_PARAMETER;
2167             goto done;
2168         }
2169 
2170         status = _initializeExportObjectFd(nvfp, hClient, pParams->hDevice,
2171                                            pParams->maxObjects,
2172                                            pParams->metadata);
2173         if (status != NV_OK)
2174         {
2175             goto done;
2176         }
2177 
2178         bFdSetup = NV_TRUE;
2179     }
2180 
2181     if ((nvfp->handles == NULL) ||
2182         (pParams->numObjects >
2183             NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_MAX_OBJECTS))
2184     {
2185         status = NV_ERR_INVALID_PARAMETER;
2186         goto done;
2187     }
2188 
2189     if (!portSafeAddU32(pParams->numObjects, pParams->index, &result) ||
2190         (result > nvfp->maxHandles))
2191     {
2192         status = NV_ERR_OUT_OF_RANGE;
2193         goto done;
2194     }
2195 
2196     status = os_alloc_mem((void **)&exportHandles,
2197                           sizeof(exportHandles[0]) *
2198                             pParams->numObjects);
2199     if (status != NV_OK)
2200     {
2201         goto done;
2202     }
2203 
2204     for (i = 0; i < pParams->numObjects; i++)
2205     {
2206         exportHandles[i] = 0;
2207 
2208         if (pParams->objects[i] == 0)
2209         {
2210             continue;
2211         }
2212 
2213         status = RmExportObject(hClient,
2214                                 pParams->objects[i],
2215                                 &exportHandles[i],
2216                                 &deviceInstance);
2217         if (status != NV_OK)
2218         {
2219             goto done;
2220         }
2221 
2222         NV_ASSERT(exportHandles[i] != 0);
2223 
2224         if (deviceInstance != nvfp->deviceInstance)
2225         {
2226             status = NV_ERR_INVALID_PARAMETER;
2227             goto done;
2228         }
2229     }
2230 
2231     for (i = 0; i < pParams->numObjects; i++)
2232     {
2233         pExportHandle = &nvfp->handles[i + pParams->index];
2234 
2235         // If the handle already exists in this position, free it
2236         if (*pExportHandle != 0)
2237         {
2238             RmFreeObjExportHandle(*pExportHandle);
2239             *pExportHandle = 0;
2240         }
2241 
2242         *pExportHandle = exportHandles[i];
2243     }
2244 
2245 done:
2246 
2247     if ((status != NV_OK) && (exportHandles != NULL))
2248     {
2249         for (i = 0; i < pParams->numObjects; i++)
2250         {
2251             if (exportHandles[i] != 0)
2252             {
2253                 RmFreeObjExportHandle(exportHandles[i]);
2254             }
2255         }
2256     }
2257 
2258     if (exportHandles != NULL)
2259     {
2260         os_free_mem(exportHandles);
2261     }
2262 
2263     if ((status != NV_OK) && bFdSetup)
2264     {
2265         os_free_mem(nvfp->handles);
2266         nvfp->handles = NULL;
2267         nvfp->maxHandles = 0;
2268     }
2269 
2270     if (priv != NULL)
2271     {
2272         nv_put_file_private(priv);
2273     }
2274 
2275     return status;
2276 }
2277 
2278 NV_STATUS
2279 cliresCtrlCmdOsUnixImportObjectFromFd_IMPL
2280 (
2281     RmClientResource *pRmCliRes,
2282     NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams
2283 )
2284 {
2285     NvHandle           hClient = RES_GET_CLIENT_HANDLE(pRmCliRes);
2286     nv_file_private_t *nvfp = NULL;
2287     void              *priv = NULL;
2288     NV_STATUS          status = NV_OK;
2289 
2290     if (pParams->object.type != NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM ||
2291         pParams->fd == -1)
2292     {
2293         return NV_ERR_INVALID_PARAMETER;
2294     }
2295 
2296     nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv);
2297     if (nvfp == NULL)
2298     {
2299         NV_ASSERT(priv == NULL);
2300         status = NV_ERR_INVALID_PARAMETER;
2301         goto done;
2302     }
2303 
2304     if ((nvfp->handles == NULL) || (nvfp->handles[0] == 0) ||
2305         (nvfp->maxHandles > 1))
2306     {
2307         status = NV_ERR_INVALID_PARAMETER;
2308         goto done;
2309     }
2310 
2311     status = RmImportObject(hClient,
2312                             pParams->object.data.rmObject.hParent,
2313                             &pParams->object.data.rmObject.hObject,
2314                             nvfp->handles[0], NULL);
2315 
2316 done:
2317     if (priv != NULL)
2318     {
2319         nv_put_file_private(priv);
2320     }
2321 
2322     return status;
2323 }
2324 
2325 NV_STATUS
2326 cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL
2327 (
2328     RmClientResource *pRmCliRes,
2329     NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams
2330 )
2331 {
2332     NvHandle           hClient = RES_GET_CLIENT_HANDLE(pRmCliRes);
2333     nv_file_private_t *nvfp = NULL;
2334     void              *priv = NULL;
2335     NV_STATUS          status = NV_OK;
2336     NvU32              i = 0;
2337     RmObjExportHandle  hImportHandle;
2338     NvU32              result;
2339     RM_API            *pRmApi;
2340 
2341     nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv);
2342     if (nvfp == NULL)
2343     {
2344         NV_ASSERT(priv == NULL);
2345         status = NV_ERR_INVALID_PARAMETER;
2346         goto done;
2347     }
2348 
2349     if ((nvfp->handles == NULL) ||
2350         (pParams->numObjects >
2351            NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS))
2352     {
2353         status = NV_ERR_INVALID_PARAMETER;
2354         goto done;
2355     }
2356 
2357     if (!portSafeAddU32(pParams->numObjects, pParams->index, &result) ||
2358         (result > nvfp->maxHandles))
2359     {
2360         status = NV_ERR_OUT_OF_RANGE;
2361         goto done;
2362     }
2363 
2364     for (i = 0; i < pParams->numObjects; i++)
2365     {
2366         hImportHandle = nvfp->handles[i + pParams->index];
2367 
2368         /* Nothing to import, just continue */
2369         if (hImportHandle == 0)
2370         {
2371             pParams->objectTypes[i] = \
2372                 NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_NONE;
2373             continue;
2374         }
2375 
2376         status = RmImportObject(hClient,
2377                                 pParams->hParent,
2378                                 &pParams->objects[i],
2379                                 hImportHandle,
2380                                 &pParams->objectTypes[i]);
2381         if (status != NV_OK)
2382         {
2383             NV_PRINTF(LEVEL_ERROR, "%s: Unable to import handle (%x, %x, %x)\n",
2384                 __FUNCTION__, pParams->hParent, pParams->objects[i], hImportHandle);
2385             goto done;
2386         }
2387     }
2388 
2389 done:
2390 
2391     if (status != NV_OK)
2392     {
2393         pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
2394         while (i > 0)
2395         {
2396             i--;
2397 
2398             if (pParams->objects[i] != 0)
2399             {
2400                 pRmApi->Free(pRmApi, hClient, pParams->objects[i]);
2401             }
2402         }
2403     }
2404 
2405     if (priv != NULL)
2406     {
2407         nv_put_file_private(priv);
2408     }
2409 
2410     return status;
2411 }
2412 
2413 NV_STATUS
2414 cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL
2415 (
2416     RmClientResource *pRmCliRes,
2417     NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams
2418 )
2419 {
2420     nv_file_private_t *nvfp = NULL;
2421     void              *priv = NULL;
2422     NV_STATUS         status = NV_OK;
2423 
2424     if (pParams->fd < 0)
2425     {
2426         return NV_ERR_INVALID_PARAMETER;
2427     }
2428 
2429     nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv);
2430     if (nvfp == NULL)
2431     {
2432         NV_ASSERT(priv == NULL);
2433         status = NV_ERR_INVALID_PARAMETER;
2434         goto done;
2435     }
2436 
2437     if (nvfp->handles == NULL)
2438     {
2439         status = NV_ERR_INVALID_PARAMETER;
2440         goto done;
2441     }
2442 
2443     pParams->maxObjects = nvfp->maxHandles;
2444     pParams->deviceInstance = nvfp->deviceInstance;
2445     os_mem_copy(pParams->metadata, nvfp->metadata, sizeof(nvfp->metadata));
2446 
2447 done:
2448     if (priv != NULL)
2449     {
2450         nv_put_file_private(priv);
2451     }
2452 
2453     return status;
2454 }
2455 
2456 /*!
2457  * osAcpiDsm
2458  *
2459  * @brief Handles os specific _DSM method function calls.
2460  *
2461  * Input parameters:
2462  * @param[in]     pGpu   : OBJGPU pointer
2463  * @param[in]     acpiDsmFunction    : ACPI DSM function
2464  * @param[in]     acpiDsmSubFunction : ACPI DSM subfunction
2465  * @param[in/out] pInOut : in/out buffer, caller should make sure the buffer is large enough.
2466  * @param[in]     pSize  : when input, size of data that the caller wants to read, in bytes.
2467  *                         when output, size of valid data in pInOuta in bytes.
2468  */
2469 NV_STATUS osCallACPI_DSM
2470 (
2471     OBJGPU            *pGpu,
2472     ACPI_DSM_FUNCTION  acpiDsmFunction,
2473     NvU32              acpiDsmSubFunction,
2474     NvU32             *pInOut,
2475     NvU16             *pSize
2476 )
2477 {
2478     NV_STATUS   status;
2479     NvU8       *pAcpiDsmGuid = NULL;
2480     NvU32       acpiDsmRev;
2481     nv_state_t *nv  = NV_GET_NV_STATE(pGpu);
2482     nv_priv_t  *nvp = NV_GET_NV_PRIV(nv);
2483     NvU16       acpiDsmInArgSize = 4;
2484     NvBool      acpiNvpcfDsmFunction = NV_FALSE;
2485 
2486     // do any handling/remapping of guid needed.
2487     status = checkDsmCall(pGpu,
2488                           (ACPI_DSM_FUNCTION *) &acpiDsmFunction,
2489                           &acpiDsmSubFunction,
2490                           pInOut,
2491                           pSize);
2492 
2493     // return if subfunction is not supported or we're returning cache data
2494     if (status != NV_WARN_MORE_PROCESSING_REQUIRED)
2495     {
2496         return status;
2497     }
2498 
2499     switch ((NvU32) acpiDsmFunction)
2500     {
2501         case ACPI_DSM_FUNCTION_NBSI:
2502             pAcpiDsmGuid = (NvU8 *) &NBSI_DSM_GUID;
2503             acpiDsmRev  = NBSI_REVISION_ID;
2504             break;
2505         case ACPI_DSM_FUNCTION_NVHG:
2506             pAcpiDsmGuid = (NvU8 *) &NVHG_DSM_GUID;
2507             acpiDsmRev  = NVHG_REVISION_ID;
2508             break;
2509         case ACPI_DSM_FUNCTION_MXM:
2510             pAcpiDsmGuid = (NvU8 *) &DSM_MXM_GUID;
2511             acpiDsmRev  = ACPI_MXM_REVISION_ID;
2512             break;
2513         case ACPI_DSM_FUNCTION_NBCI:
2514             pAcpiDsmGuid = (NvU8 *) &NBCI_DSM_GUID;
2515             acpiDsmRev  = NBCI_REVISION_ID;
2516             break;
2517         case ACPI_DSM_FUNCTION_NVOP:
2518             pAcpiDsmGuid = (NvU8 *) &NVOP_DSM_GUID;
2519             acpiDsmRev  = NVOP_REVISION_ID;
2520             break;
2521         case ACPI_DSM_FUNCTION_PCFG:
2522             pAcpiDsmGuid = (NvU8 *) &PCFG_DSM_GUID;
2523             acpiDsmRev  = PCFG_REVISION_ID;
2524             break;
2525         case ACPI_DSM_FUNCTION_PEX:
2526             pAcpiDsmGuid = (NvU8 *) &PEX_DSM_GUID;
2527             acpiDsmRev   = PEX_REVISION_ID;
2528             if (acpiDsmSubFunction == PEX_FUNC_SETLTRLATENCY)
2529             {
2530                 acpiDsmInArgSize = (3 + *pSize);
2531             }
2532             break;
2533         case (ACPI_DSM_FUNCTION_JT):
2534             pAcpiDsmGuid = (NvU8 *) &JT_DSM_GUID;
2535             acpiDsmRev = JT_REVISION_ID;
2536             break;
2537         case ACPI_DSM_FUNCTION_NVPCF:
2538             {
2539                 pAcpiDsmGuid = (NvU8 *)&NVPCF_ACPI_DSM_GUID;
2540                 acpiDsmRev = NVPCF_ACPI_DSM_REVISION_ID;
2541                 acpiDsmInArgSize = (*pSize);
2542                 acpiNvpcfDsmFunction = NV_TRUE;
2543                 break;
2544             }
2545         case ACPI_DSM_FUNCTION_NVPCF_2X:
2546             pAcpiDsmGuid = (NvU8 *)&NVPCF_ACPI_DSM_GUID;
2547             acpiDsmRev = NVPCF_2X_ACPI_DSM_REVISION_ID;
2548             acpiDsmInArgSize = (*pSize);
2549             if (!nv->nvpcf_dsm_in_gpu_scope)
2550             {
2551                 acpiNvpcfDsmFunction = NV_TRUE;
2552             }
2553             break;
2554 
2555         default:
2556             return NV_ERR_NOT_SUPPORTED;
2557             break;
2558     }
2559 
2560     status = nv_acpi_dsm_method(nv,
2561                                 pAcpiDsmGuid,
2562                                 acpiDsmRev,
2563                                 acpiNvpcfDsmFunction,
2564                                 acpiDsmSubFunction,
2565                                 pInOut,
2566                                 acpiDsmInArgSize,
2567                                 NULL,
2568                                 pInOut,
2569                                 pSize);
2570 
2571     if (status == NV_OK)
2572     {
2573         if (acpiDsmSubFunction == NV_ACPI_ALL_FUNC_SUPPORT)
2574         {
2575             // if handling get supported functions list... cache it for later calls
2576             cacheDsmSupportedFunction(pGpu, acpiDsmFunction, acpiDsmSubFunction, pInOut, *pSize);
2577         }
2578     }
2579     else if (nvp->b_mobile_config_enabled)
2580     {
2581         NV_PRINTF(LEVEL_ERROR,
2582                   "osCallACPI_DSM: Error during 0x%x DSM subfunction 0x%x! status=0x%x\n",
2583                   acpiDsmFunction, acpiDsmSubFunction, status);
2584     }
2585 
2586     return status;
2587 }
2588 
2589 NV_STATUS osCallACPI_DOD
2590 (
2591     OBJGPU  *pGpu,
2592     NvU32   *pOut,
2593     NvU32   *pSize
2594 )
2595 {
2596     NV_STATUS rmStatus;
2597     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
2598 
2599     if ((pOut == NULL) || (pSize == NULL))
2600     {
2601         return NV_ERR_INVALID_POINTER;
2602     }
2603 
2604     rmStatus = nv_acpi_dod_method(nv, pOut, pSize);
2605 
2606     return rmStatus;
2607 }
2608 
2609 //
2610 // osAcpiDdc
2611 //
2612 // Handles os specific _DDC method function calls. _DDC is to get EDID from SBIOS.
2613 //
2614 NV_STATUS osCallACPI_DDC
2615 (
2616     OBJGPU *pGpu,
2617     NvU32   ulAcpiId,
2618     NvU8   *pOutData,
2619     NvU32  *pOutSize,
2620     NvBool  bReadMultiBlock
2621 )
2622 {
2623     NV_STATUS rmStatus;
2624 
2625     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
2626 
2627     if ((pOutData == NULL) || (pOutSize == NULL))
2628     {
2629         return NV_ERR_INVALID_POINTER;
2630     }
2631 
2632     portMemSet(pOutData, 0, *pOutSize);
2633 
2634     rmStatus = nv_acpi_ddc_method(nv, pOutData, pOutSize, bReadMultiBlock);
2635 
2636     return rmStatus;
2637 }
2638 
2639 // osCallACPI_NVHG_ROM
2640 // Making ACPI Call into SBIOS with ROM method to get display device's ROM data.
2641 //
2642 NV_STATUS  osCallACPI_NVHG_ROM
2643 (
2644     OBJGPU *pGpu,
2645     NvU32 *pInData,
2646     NvU32 *pOutData
2647 )
2648 {
2649     NV_STATUS rmStatus;
2650     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
2651 
2652     if ((pOutData == NULL) || (pInData == NULL))
2653     {
2654         return NV_ERR_INVALID_POINTER;
2655     }
2656 
2657     if (pInData[1] > ROM_METHOD_MAX_RETURN_BUFFER_SIZE)
2658     {
2659         return NV_ERR_INVALID_ARGUMENT;
2660     }
2661 
2662     rmStatus = nv_acpi_rom_method(nv, pInData, pOutData);
2663 
2664     return rmStatus;
2665 }
2666 
2667 void osInitSystemStaticConfig(SYS_STATIC_CONFIG *pConfig)
2668 {
2669     pConfig->bIsNotebook = rm_is_system_notebook();
2670     pConfig->osType = nv_get_os_type();
2671     pConfig->osSevStatus = os_sev_status;
2672     pConfig->bOsSevEnabled = os_sev_enabled;
2673 }
2674 
2675 NvU32 osApiLockAcquireConfigureFlags(NvU32 flags)
2676 {
2677     return flags;
2678 }
2679 
2680 NV_STATUS osGpuLocksQueueRelease(OBJGPU *pGpu, NvU32 dpcGpuLocksRelease)
2681 {
2682     return NV_SEMA_RELEASE_FAILED;
2683 }
2684 
2685 void osSyncWithRmDestroy(void)
2686 {
2687 }
2688 
2689 void osSyncWithGpuDestroy(NvBool bEntry)
2690 {
2691 }
2692 
2693 void osModifyGpuSwStatePersistence
2694 (
2695     OS_GPU_INFO *pOsGpuInfo,
2696     NvBool       bEnable
2697 )
2698 {
2699     if (bEnable)
2700     {
2701         pOsGpuInfo->flags |= NV_FLAG_PERSISTENT_SW_STATE;
2702     }
2703     else
2704     {
2705         pOsGpuInfo->flags &= ~NV_FLAG_PERSISTENT_SW_STATE;
2706     }
2707 }
2708 
2709 NV_STATUS
2710 osSystemGetBatteryDrain(NvS32 *pChargeRate)
2711 {
2712     NV_PRINTF(LEVEL_WARNING, "%s: Platform not supported!\n", __FUNCTION__);
2713     return NV_ERR_NOT_SUPPORTED;
2714 }
2715 
2716 NV_STATUS
2717 osPexRecoveryCallback
2718 (
2719     OS_GPU_INFO           *pOsGpuInfo,
2720     OS_PEX_RECOVERY_STATUS Status
2721 )
2722 {
2723     NV_ASSERT_FAILED("Not supported");
2724     return NV_ERR_NOT_SUPPORTED;
2725 }
2726 
2727 //
2728 //osCallACPI_MXDS
2729 //
2730 //Handles OS specific MXDS function call.
2731 //
2732 NV_STATUS osCallACPI_MXDS
2733 (
2734     OBJGPU *pGpu,
2735     NvU32 acpiId,
2736     NvU32 *pInOut
2737 )
2738 {
2739     NV_STATUS rmStatus;
2740 
2741     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
2742 
2743     if (pInOut == NULL)
2744     {
2745         return NV_ERR_INVALID_POINTER;
2746     }
2747 
2748     rmStatus = nv_acpi_mux_method(nv, pInOut, acpiId, "MXDS");
2749 
2750     return rmStatus;
2751 }
2752 
2753 //
2754 //osCallACPI_MXDM
2755 //
2756 //Handles OS specific MXDM function call.
2757 //
2758 NV_STATUS osCallACPI_MXDM
2759 (
2760     OBJGPU *pGpu,
2761     NvU32 acpiId,
2762     NvU32 *pInOut
2763 )
2764 {
2765     NV_STATUS rmStatus;
2766 
2767     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
2768 
2769     if (pInOut == NULL)
2770     {
2771         return NV_ERR_INVALID_POINTER;
2772     }
2773 
2774     rmStatus = nv_acpi_mux_method(nv, pInOut, acpiId, "MXDM");
2775 
2776     return rmStatus;
2777 }
2778 
2779 #include "lib/protobuf/prb.h"
2780 #include "lib/protobuf/prb_util.h"
2781 #include "g_nvdebug_pb.h"
2782 
2783 NV_STATUS osGetVersionDump(void * pVoid)
2784 {
2785     PRB_ENCODER * pPrbEnc = (PRB_ENCODER *)pVoid;
2786     NV_STATUS rmStatus;
2787     os_version_info * pOsVersionInfo = NULL;
2788     const char NV_UNKNOWN_BUILD_VERSION[] = "Unknown build version";
2789     const char NV_UNKNOWN_BUILD_DATE[]    = "Unknown build date";
2790 
2791     NV_ASSERT_OK_OR_RETURN(os_alloc_mem((void**)&pOsVersionInfo,
2792                                         sizeof(os_version_info)));
2793     portMemSet(pOsVersionInfo, 0, sizeof(os_version_info));
2794 
2795     prbEncAddUInt32(pPrbEnc,
2796                     NVDEBUG_SYSTEMINFO_OSINFO_FAMILY,
2797                     NVDEBUG_OS_UNIX);
2798 
2799     rmStatus = os_get_version_info(pOsVersionInfo);
2800     if (rmStatus != NV_OK)
2801     {
2802         goto cleanup;
2803     }
2804 
2805     prbEncAddUInt32(pPrbEnc,
2806                     NVDEBUG_SYSTEMINFO_OSINFO_OSMAJORVERSION,
2807                     pOsVersionInfo->os_major_version);
2808 
2809     prbEncAddUInt32(pPrbEnc,
2810                     NVDEBUG_SYSTEMINFO_OSINFO_OSMINORVERSION,
2811                     pOsVersionInfo->os_minor_version);
2812 
2813     prbEncAddUInt32(pPrbEnc,
2814                     NVDEBUG_SYSTEMINFO_OSINFO_OSBLDNUM,
2815                     pOsVersionInfo->os_build_number);
2816 
2817     if (NULL == pOsVersionInfo->os_build_version_str)
2818     {
2819         pOsVersionInfo->os_build_version_str = NV_UNKNOWN_BUILD_VERSION;
2820     }
2821 
2822     prbEncAddString(pPrbEnc,
2823                     NVDEBUG_SYSTEMINFO_OSINFO_BUILDVERSION,
2824                     pOsVersionInfo->os_build_version_str);
2825 
2826     if (NULL == pOsVersionInfo->os_build_date_plus_str)
2827     {
2828         pOsVersionInfo->os_build_date_plus_str = NV_UNKNOWN_BUILD_DATE;
2829     }
2830 
2831     prbEncAddString(pPrbEnc,
2832                     NVDEBUG_SYSTEMINFO_OSINFO_BUILDDATEPLUS,
2833                     pOsVersionInfo->os_build_date_plus_str);
2834 
2835 cleanup:
2836     os_free_mem(pOsVersionInfo);
2837     return rmStatus;
2838 }
2839 
2840 NV_STATUS osGetVersion(NvU32 *majorVer, NvU32 *minorVer, NvU32 *buildNum, NvU16 *unusedPatchVersion, NvU16 *unusedProductType)
2841 {
2842     os_version_info osVersionInfo;
2843     NV_STATUS rmStatus;
2844 
2845     portMemSet(&osVersionInfo, 0, sizeof(osVersionInfo));
2846 
2847     rmStatus = os_get_version_info(&osVersionInfo);
2848     if (rmStatus == NV_OK)
2849     {
2850         if (majorVer)
2851             *majorVer = osVersionInfo.os_major_version;
2852         if (minorVer)
2853             *minorVer = osVersionInfo.os_minor_version;
2854         if (buildNum)
2855             *buildNum = osVersionInfo.os_build_number;
2856     }
2857 
2858     return rmStatus;
2859 }
2860 
2861 NV_STATUS
2862 osGetSystemCpuLogicalCoreCounts
2863 (
2864     NvU32 *pCpuCoreCount
2865 )
2866 {
2867     return NV_ERR_NOT_SUPPORTED;
2868 }
2869 
2870 NV_STATUS
2871 osGetSystemCpuC0AndAPerfCounters
2872 (
2873     NvU32                      coreIndex,
2874     POS_CPU_CORE_PERF_COUNTERS pCpuPerfData
2875 )
2876 {
2877     return NV_ERR_NOT_SUPPORTED;
2878 }
2879 
2880 void
2881 osEnableCpuPerformanceCounters
2882 (
2883     OBJOS *pOS
2884 )
2885 {
2886     NV_ASSERT_FAILED("Not supported");
2887     return;
2888 }
2889 
2890 NV_STATUS
2891 osCpuDpcObjInit
2892 (
2893     void  **ppCpuDpcObj,
2894     OBJGPU *pGpu,
2895     NvU32   coreCount
2896 )
2897 {
2898     NV_ASSERT_FAILED("Not supported");
2899     return NV_ERR_NOT_SUPPORTED;
2900 }
2901 
2902 void
2903 osCpuDpcObjQueue
2904 (
2905     void                     **ppCpuDpcObj,
2906     NvU32                      coreCount,
2907     POS_CPU_CORE_PERF_COUNTERS pCpuPerfData
2908 )
2909 {
2910     NV_ASSERT_FAILED("Not supported");
2911 }
2912 
2913 void
2914 osCpuDpcObjFree
2915 (
2916     void **ppCpuDpcObj
2917 )
2918 {
2919     NV_ASSERT_FAILED("Not supported");
2920 }
2921 
2922 NV_STATUS
2923 osGetCarveoutInfo
2924 (
2925     NvU64 *pAddr,
2926     NvU64 *pSize
2927 )
2928 {
2929     return NV_ERR_NOT_SUPPORTED;
2930 }
2931 
2932 NV_STATUS
2933 osGetVPRInfo
2934 (
2935     NvU64 *pAddr,
2936     NvU64 *pSize
2937 )
2938 {
2939     return NV_ERR_NOT_SUPPORTED;
2940 }
2941 
2942 NV_STATUS
2943 osAllocInVPR
2944 (
2945     MEMORY_DESCRIPTOR *pMemDesc
2946 )
2947 {
2948     return NV_ERR_NOT_SUPPORTED;
2949 }
2950 
2951 NV_STATUS
2952 osGetGenCarveout
2953 (
2954     NvU64  *pAddr,
2955     NvU64  *pSize,
2956     NvU32   id,
2957     NvU64   align
2958 )
2959 {
2960     return NV_ERR_NOT_SUPPORTED;
2961 }
2962 
2963 NV_STATUS
2964 osI2CClosePorts
2965 (
2966     OS_GPU_INFO *pOsGpuInfo,
2967     NvU32        numPorts
2968 )
2969 {
2970     return NV_ERR_NOT_SUPPORTED;
2971 }
2972 
2973 NV_STATUS
2974 osI2CTransfer
2975 (
2976     OBJGPU *pGpu,
2977     NvU32 Port,
2978     NvU8 Address,
2979     nv_i2c_msg_t *nv_i2c_msgs,
2980     NvU32 count
2981 )
2982 {
2983     return NV_ERR_NOT_SUPPORTED;
2984 }
2985 
2986 NV_STATUS
2987 osTegraI2CGetBusState
2988 (
2989     OS_GPU_INFO *pOsGpuInfo,
2990     NvU32 port,
2991     NvS32 *scl,
2992     NvS32 *sda
2993 )
2994 {
2995     return NV_ERR_NOT_SUPPORTED;
2996 }
2997 
2998 NV_STATUS
2999 osReadI2CBufferDirect
3000 (
3001     OBJGPU *pGpu,
3002     NvU32   Port,
3003     NvU8    Address,
3004     void   *pOutputBuffer,
3005     NvU32   OutputSize,
3006     void   *pInputBuffer,
3007     NvU32   InputSize
3008 )
3009 {
3010     return NV_ERR_NOT_SUPPORTED;
3011 }
3012 
3013 NV_STATUS
3014 osWriteI2CBufferDirect
3015 (
3016     OBJGPU  *pGpu,
3017     NvU32    Port,
3018     NvU8     Address,
3019     void    *pOutputBuffer0,
3020     NvU32    OutputSize0,
3021     void    *pOutputBuffer1,
3022     NvU32    OutputSize1
3023 )
3024 {
3025     return NV_ERR_NOT_SUPPORTED;
3026 }
3027 
3028 NV_STATUS
3029 osGC6PowerControl
3030 (
3031     OBJGPU *pGpu,
3032     NvU32   cmd,
3033     NvU32  *pOut
3034 )
3035 {
3036     return NV_ERR_NOT_SUPPORTED;
3037 }
3038 
3039 NvBool osTestPcieExtendedConfigAccess(void *handle, NvU32 offset)
3040 {
3041     OBJGPU    *pGpu;
3042     KernelBif *pKernelBif;
3043     NvU32      nvXveId       = 0;
3044     NvU32      nvXveVccapHdr = 0;
3045     NvU32      pciStart      = 0;
3046     NvU32      pcieStart     = 0;
3047 
3048     static NvBool retryAllowed = NV_TRUE;
3049     static NvBool configAccess = NV_FALSE;
3050 
3051     //
3052     // Return early for offset within PCI space
3053     // and does not requires extended config space access
3054     //
3055     if (offset < 0x100)
3056     {
3057         return NV_TRUE;
3058     }
3059 
3060     if (!retryAllowed)
3061     {
3062         return configAccess;
3063     }
3064 
3065     pGpu = gpumgrGetSomeGpu();
3066     if (pGpu == NULL)
3067     {
3068         return configAccess;
3069     }
3070 
3071     retryAllowed = NV_FALSE;
3072 
3073     pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
3074     if (pKernelBif == NULL || kbifGetBusIntfType_HAL(pKernelBif) !=
3075                                   NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS)
3076     {
3077         return configAccess;
3078     }
3079 
3080     // Now verify PCI and PCIe config start registers.
3081     kbifGetPcieConfigAccessTestRegisters_HAL(pGpu, pKernelBif, &pciStart, &pcieStart);
3082     os_pci_read_dword(handle, pciStart,  &nvXveId);
3083     os_pci_read_dword(handle, pcieStart, &nvXveVccapHdr);
3084 
3085     if (NV_OK == kbifVerifyPcieConfigAccessTestRegisters_HAL(pGpu,
3086                                                              pKernelBif,
3087                                                              nvXveId,
3088                                                              nvXveVccapHdr))
3089     {
3090         configAccess = NV_TRUE;
3091     }
3092 
3093     return configAccess;
3094 }
3095 
3096 /*!
3097  * @brief Map memory into an IOVA space according to the given mapping info.
3098  *
3099  * @param[in]   pIovaMapping    IOVA mapping info
3100  *
3101  * @return      NV_ERR_NOT_SUPPORTED
3102  */
3103 NV_STATUS
3104 osIovaMap
3105 (
3106     PIOVAMAPPING pIovaMapping
3107 )
3108 {
3109     OBJGPU *pGpu;
3110     nv_state_t *nv, *peer;
3111     NV_STATUS status;
3112     RmPhysAddr base;
3113     NvBool bIsBar0;
3114     PMEMORY_DESCRIPTOR pRootMemDesc;
3115     NvBool bIsFbOffset = NV_FALSE;
3116     NvBool bIsIndirectPeerMapping = NV_FALSE;
3117     NvBool bIsContig;
3118     NV_ADDRESS_SPACE addressSpace;
3119     NvU32 osPageCount;
3120 
3121     if (pIovaMapping == NULL)
3122     {
3123         return NV_ERR_INVALID_ARGUMENT;
3124     }
3125 
3126     pGpu = gpumgrGetGpuFromId(pIovaMapping->iovaspaceId);
3127     if (pGpu == NULL)
3128     {
3129         return NV_ERR_INVALID_ARGUMENT;
3130     }
3131 
3132     pRootMemDesc = memdescGetRootMemDesc(pIovaMapping->pPhysMemDesc, NULL);
3133     addressSpace = memdescGetAddressSpace(pIovaMapping->pPhysMemDesc);
3134     if (gpumgrCheckIndirectPeer(pGpu, pRootMemDesc->pGpu) &&
3135         (addressSpace == ADDR_FBMEM))
3136     {
3137         bIsIndirectPeerMapping = NV_TRUE;
3138     }
3139 
3140     if ((addressSpace != ADDR_SYSMEM) && !bIsIndirectPeerMapping)
3141     {
3142         NV_PRINTF(LEVEL_INFO,
3143                   "%s passed memory descriptor in an unsupported address space (%s)\n",
3144                   __FUNCTION__,
3145                   memdescGetApertureString(memdescGetAddressSpace(pIovaMapping->pPhysMemDesc)));
3146         return NV_ERR_NOT_SUPPORTED;
3147     }
3148 
3149     //
3150     // For guest-allocated memory, we don't actually want to do any remapping,
3151     // since the physical address is already the DMA address to be used by the
3152     // GPU.
3153     //
3154     if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED))
3155     {
3156         return NV_OK;
3157     }
3158 
3159     nv = NV_GET_NV_STATE(pGpu);
3160 
3161     //
3162     // Intercept peer IO type memory. These are contiguous allocations, so no
3163     // need to adjust pages.
3164     //
3165     if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM))
3166     {
3167         NV_ASSERT(memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU));
3168 
3169         status = nv_dma_map_mmio(nv->dma_dev,
3170             NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount),
3171             &pIovaMapping->iovaArray[0]);
3172 
3173         if (status != NV_OK)
3174         {
3175             NV_PRINTF(LEVEL_ERROR,
3176                       "%s: failed to map peer IO mem (status = 0x%x)\n",
3177                       __FUNCTION__, status);
3178         }
3179 
3180         return status;
3181     }
3182 
3183     //
3184     // We need to check against the "root" GPU, e.g., the GPU that owns this
3185     // allocation. If we're trying to map one of its BARs for a peer, we need
3186     // to handle it differently because it wouldn't have gone through our system
3187     // memory page allocation paths, obviously, and wouldn't have alloc private
3188     // data associated with it.
3189     //
3190     peer = NV_GET_NV_STATE(pRootMemDesc->pGpu);
3191     bIsContig = memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU);
3192     if (NV_RM_PAGE_SIZE < os_page_size && !bIsContig)
3193     {
3194         RmDeflateRmToOsPageArray(&pIovaMapping->iovaArray[0],
3195                                  pIovaMapping->pPhysMemDesc->PageCount);
3196     }
3197 
3198     base = memdescGetPhysAddr(pIovaMapping->pPhysMemDesc, AT_CPU, 0);
3199     bIsBar0 = IS_REG_OFFSET(peer, base, pIovaMapping->pPhysMemDesc->Size);
3200 
3201     bIsFbOffset = IS_FB_OFFSET(peer, base, pIovaMapping->pPhysMemDesc->Size);
3202 
3203     //
3204     // For indirect peers bIsFbOffset should be NV_TRUE
3205     // TODO:IS_FB_OFFSET macro is currently broken for P9 systems
3206     // Bug 2010857 tracks fixing this
3207     //
3208 #if defined(NVCPU_PPC64LE)
3209     KernelMemorySystem *pRootKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pRootMemDesc->pGpu);
3210     if (bIsIndirectPeerMapping)
3211     {
3212         NvU64 atsBase = base + pRootKernelMemorySystem->coherentCpuFbBase;
3213         if ((atsBase >= pRootKernelMemorySystem->coherentCpuFbBase) &&
3214              (atsBase + pIovaMapping->pPhysMemDesc->Size <=
3215               pRootKernelMemorySystem->coherentCpuFbEnd))
3216         {
3217             bIsFbOffset = NV_TRUE;
3218         }
3219         else
3220         {
3221             NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE);
3222         }
3223     }
3224 #endif
3225 
3226     void *pPriv = memdescGetMemData(pIovaMapping->pPhysMemDesc);
3227     osPageCount = NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount);
3228 
3229     if (!bIsBar0 && !bIsFbOffset)
3230     {
3231         if (pPriv == NULL)
3232         {
3233             return NV_ERR_INVALID_STATE;
3234         }
3235     }
3236     else if(bIsIndirectPeerMapping)
3237     {
3238         NV_ASSERT(!bIsBar0 && bIsFbOffset);
3239         //
3240         // TODO: Align onlined GPU memory allocation paths with system memory allocation
3241         //       That way pMemDesc->pMemData is setup correctly when we try to create mapping
3242         //       to onlined memory of indirect peer. After that we can also get rid of some
3243         //       extra code in nv_dma_map_alloc. See bug 190324 for details
3244         //
3245 
3246         status = memdescGetNvLinkGpa(pRootMemDesc->pGpu, (bIsContig ? 1 : osPageCount),
3247                                     &pIovaMapping->iovaArray[0]);
3248         if (status != NV_OK)
3249         {
3250             NV_PRINTF(LEVEL_ERROR, "%s Failed to get SPA\n", __FUNCTION__);
3251             return status;
3252         }
3253     }
3254 
3255     if (!bIsBar0 && (!bIsFbOffset || bIsIndirectPeerMapping))
3256     {
3257         status = nv_dma_map_alloc(
3258                     osGetDmaDeviceForMemDesc(nv, pIovaMapping->pPhysMemDesc),
3259                     osPageCount,
3260                     &pIovaMapping->iovaArray[0],
3261                     bIsContig, &pPriv);
3262         if (status != NV_OK)
3263         {
3264             NV_PRINTF(LEVEL_ERROR,
3265                       "%s: failed to map allocation (status = 0x%x)\n",
3266                       __FUNCTION__, status);
3267             return status;
3268         }
3269 
3270         pIovaMapping->pOsData = pPriv;
3271     }
3272     else if (peer != nv)
3273     {
3274         status = nv_dma_map_peer(nv->dma_dev, peer->dma_dev, bIsBar0 ? 0 : 1,
3275                                  osPageCount, &pIovaMapping->iovaArray[0]);
3276         if (status != NV_OK)
3277         {
3278             NV_PRINTF(LEVEL_INFO,
3279                       "%s: failed to map peer (base = 0x%llx, status = 0x%x)\n",
3280                       __FUNCTION__, base, status);
3281             return status;
3282         }
3283 
3284         //
3285         // pOsData must be NULL to distinguish a peer DMA mapping from a
3286         // system memory mapping in osIovaUnmap(), so make sure to set it
3287         // accordingly here.
3288         //
3289         pIovaMapping->pOsData = NULL;
3290     }
3291     else
3292     {
3293         NV_PRINTF(LEVEL_INFO, "cannot map a GPU's BAR to itself\n");
3294         return NV_ERR_NOT_SUPPORTED;
3295     }
3296 
3297     //
3298     // If the OS layer doesn't think in RM page size, we need to inflate the
3299     // PTE array into RM pages.
3300     //
3301     if (NV_RM_PAGE_SIZE < os_page_size && !bIsContig)
3302     {
3303         RmInflateOsToRmPageArray(&pIovaMapping->iovaArray[0],
3304                                  pIovaMapping->pPhysMemDesc->PageCount);
3305     }
3306 
3307     return NV_OK;
3308 }
3309 
3310 /*!
3311  * @brief Unmap memory from an IOVA space according to the given mapping info.
3312  *
3313  * This mapping info must have been previously mapped by osIovaMap().
3314  *
3315  * @param[in]   pIovaMapping    IOVA mapping info
3316  *
3317  */
3318 void
3319 osIovaUnmap
3320 (
3321     PIOVAMAPPING pIovaMapping
3322 )
3323 {
3324     OBJGPU *pGpu;
3325     nv_state_t *nv;
3326     void *pPriv;
3327     NV_STATUS status;
3328 
3329     if (pIovaMapping == NULL)
3330     {
3331         return;
3332     }
3333 
3334     pGpu = gpumgrGetGpuFromId(pIovaMapping->iovaspaceId);
3335     if (pGpu == NULL)
3336     {
3337         return;
3338     }
3339 
3340     //
3341     // For guest-allocated memory, we never actually remapped the memory, so we
3342     // shouldn't try to unmap it here.
3343     //
3344     if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED))
3345     {
3346         return;
3347     }
3348 
3349     nv = NV_GET_NV_STATE(pGpu);
3350 
3351     if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM))
3352     {
3353         nv_dma_unmap_mmio(nv->dma_dev,
3354             NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount),
3355             pIovaMapping->iovaArray[0]);
3356 
3357         return;
3358     }
3359 
3360     //
3361     // TODO: Formalize the interface with the OS layers so we can use a common
3362     // definition of OS_IOVA_MAPPING_DATA.
3363     //
3364     pPriv = (void *)pIovaMapping->pOsData;
3365 
3366     if (NV_RM_PAGE_SIZE < os_page_size &&
3367         !memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU))
3368     {
3369         RmDeflateRmToOsPageArray(&pIovaMapping->iovaArray[0],
3370                                  pIovaMapping->pPhysMemDesc->PageCount);
3371     }
3372 
3373     if (pPriv != NULL)
3374     {
3375         status = nv_dma_unmap_alloc(nv->dma_dev,
3376             NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount),
3377             &pIovaMapping->iovaArray[0], &pPriv);
3378         if (status != NV_OK)
3379         {
3380             NV_PRINTF(LEVEL_ERROR,
3381                       "%s: failed to unmap allocation (status = 0x%x)\n",
3382                       __FUNCTION__, status);
3383         }
3384     }
3385     else
3386     {
3387         nv_dma_unmap_peer(nv->dma_dev,
3388             NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount),
3389             pIovaMapping->iovaArray[0]);
3390     }
3391 
3392     //
3393     // If the OS layer doesn't think in RM page size, we need to fluff out the
3394     // PTE array into RM pages.
3395     //
3396     if (NV_RM_PAGE_SIZE < os_page_size &&
3397         !memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU))
3398     {
3399         RmInflateOsToRmPageArray(&pIovaMapping->iovaArray[0],
3400                                  pIovaMapping->pPhysMemDesc->PageCount);
3401     }
3402 
3403     pIovaMapping->pOsData = NULL;
3404 }
3405 
3406 /*!
3407  * @brief Set the GPU Rail Voltage in Tegra SoC. Currently not supported
3408  *
3409  * @param[in]  pGpu            GPU object pointer
3410  * @param[in]  reqVoltageuV    Rail Voltage requested in uV
3411  * @param[out] pSetVoltageuV   Rail Voltage set in uV
3412  *
3413  * @return    NV_ERR_NOT_SUPPORTED
3414  */
3415 NV_STATUS
3416 osSetGpuRailVoltage
3417 (
3418    OBJGPU   *pGpu,
3419    NvU32     reqVoltageuV,
3420    NvU32    *pSetVoltageuV
3421 )
3422 {
3423     return NV_ERR_NOT_SUPPORTED;
3424 }
3425 
3426 /*!
3427  * @brief Get the GPU Rail Voltage in Tegra SoC. Currently not supported
3428  *
3429  * @param[in]  pGpu        GPU object pointer
3430  * @param[out] voltageuV   Rail Voltage in uV
3431  *
3432  * @return    NV_ERR_NOT_SUPPORTED
3433  */
3434 NV_STATUS
3435 osGetGpuRailVoltage
3436 (
3437     OBJGPU   *pGpu,
3438     NvU32    *pVoltageuV
3439 )
3440 {
3441     return NV_ERR_NOT_SUPPORTED;
3442 }
3443 
3444 /*!
3445  * @brief Bring down system in a controlled manner on known error conditions.
3446  *
3447  * @bugCode[in] Error code / reason.
3448  */
3449 void osBugCheck(NvU32 bugCode)
3450 {
3451     if (bugCode > OS_BUG_CHECK_BUGCODE_LAST)
3452     {
3453         bugCode = OS_BUG_CHECK_BUGCODE_UNKNOWN;
3454     }
3455 
3456     os_bug_check(bugCode, ppOsBugCheckBugcodeStr[bugCode]);
3457 }
3458 
3459 /*!
3460  * @brief Perform an action at assertion failure.
3461  */
3462 void osAssertFailed(void)
3463 {
3464     os_dump_stack();
3465 }
3466 
3467 /*!
3468  * @brief Get the GPU Chip Info - Speedo and IDDQ values
3469  *
3470  *
3471  * @param[in]   pGpu           GPU object pointer
3472  * @param[out]  pGpuSpeedoHv   Pointer to GPU Speedo value at high voltage corner.
3473  * @param[out]  pGpuSpeedoLv   Pointer to GPU Speedo value at low voltage corner.
3474  * @param[out]  pGpuIddq       Pointer to GPU Iddq Value
3475  * @param[out]  pChipSkuId     SKU ID for the chip
3476  *
3477  * @return     NV_ERR_NOT_SUPPORTED
3478  */
3479 NV_STATUS
3480 osGetChipInfo
3481 (
3482     OBJGPU   *pGpu,
3483     NvU32    *pGpuSpeedoHv,
3484     NvU32    *pGpuSpeedoLv,
3485     NvU32    *pGpuIddq,
3486     NvU32    *pChipSkuId
3487 )
3488 {
3489     return NV_ERR_NOT_SUPPORTED;
3490 }
3491 
3492 /*
3493  * @brief Get the GPU Rail Voltage Info (i.e. Min, Max and StepSize) in Tegra SoC.
3494  *
3495  * @param[in]  pGpu            GPU object pointer
3496  * @param[out] pMinVoltageuV   Minimum Voltage supported on the Rail in Micro Volts
3497  * @param[out] pMaxVoltageuV   Maximum Voltage supported on the Rail in Micro Volts
3498  * @param[out] pStepVoltageuV  Voltage Step-size supported on the Rail in Micro Volts
3499  *
3500  * @return    NV_ERR_NOT_SUPPORTED
3501  */
3502 NV_STATUS
3503 osGetGpuRailVoltageInfo
3504 (
3505     OBJGPU    *pGpu,
3506     NvU32     *pMinVoltageuV,
3507     NvU32     *pMaxVoltageuV,
3508     NvU32     *pStepVoltageuV
3509 )
3510 {
3511     return NV_ERR_NOT_SUPPORTED;
3512 }
3513 
3514 /*!
3515  * @brief Get the current opaque security token.
3516  *
3517  * For Linux the security token is the effective UID of a process and process ID
3518  *
3519  * Note: This function allocates memory for the token. The onus is on the calling
3520  * function to free the memory associated with the token once its done with it.
3521  *
3522  * @return pointer to the security token.
3523  */
3524 PSECURITY_TOKEN
3525 osGetSecurityToken(void)
3526 {
3527     NV_STATUS rmStatus;
3528     TOKEN_USER *pTokenUser;
3529 
3530     pTokenUser = portMemAllocNonPaged(sizeof(TOKEN_USER));
3531     if (pTokenUser == NULL)
3532     {
3533         return NULL;
3534     }
3535     rmStatus = os_get_euid(&pTokenUser->euid);
3536     if (rmStatus != NV_OK)
3537     {
3538         portMemFree(pTokenUser);
3539         return NULL;
3540     }
3541 
3542     pTokenUser->pid = os_get_current_process();
3543 
3544     return (PSECURITY_TOKEN)pTokenUser;
3545 }
3546 
3547 PUID_TOKEN
3548 osGetCurrentUidToken(void)
3549 {
3550     NV_STATUS rmStatus;
3551     NvU32 *pUidToken;
3552 
3553     pUidToken = portMemAllocNonPaged(sizeof(NvU32));
3554     if (pUidToken == NULL)
3555     {
3556         return NULL;
3557     }
3558 
3559     rmStatus = os_get_euid(pUidToken);
3560     if (rmStatus != NV_OK)
3561     {
3562         portMemFree(pUidToken);
3563         return NULL;
3564     }
3565 
3566     return (PUID_TOKEN)pUidToken;
3567 }
3568 
3569 /*!
3570  * @brief Interface function to validate the token for the current client
3571  *
3572  * This function takes two tokens as parameters, validates them and checks
3573  * if either the PID or EUID from client database matches the current PID or EUID.
3574  *
3575  * @param[in] pClientSecurityToken   security token cached in the client db
3576  * @param[in] pCurrentSecurityToken  security token of the current client
3577  * @return             NV_OK    if the validation is successful
3578  *                     NV_ERR_INVALID_CLIENT if the tokens do not match
3579  *                     NV_ERR_INVALID_POINTER if the tokens are invalid
3580  */
3581 NV_STATUS
3582 osValidateClientTokens
3583 (
3584    PSECURITY_TOKEN pClientSecurityToken,
3585    PSECURITY_TOKEN pCurrentSecurityToken
3586 )
3587 {
3588     PTOKEN_USER  pClientTokenUser  = (PTOKEN_USER)pClientSecurityToken;
3589     PTOKEN_USER  pCurrentTokenUser = (PTOKEN_USER)pCurrentSecurityToken;
3590 
3591     NV_ASSERT_OR_RETURN((pClientTokenUser != NULL), NV_ERR_INVALID_POINTER);
3592     NV_ASSERT_OR_RETURN((pCurrentTokenUser != NULL), NV_ERR_INVALID_POINTER);
3593 
3594     if ((pClientTokenUser->euid != pCurrentTokenUser->euid) &&
3595         (pClientTokenUser->pid != pCurrentTokenUser->pid))
3596     {
3597         NV_PRINTF(LEVEL_INFO,
3598                   "NVRM: %s: Current security token doesn't match the one in the client database. "
3599                   "Current EUID: %d, PID: %d; Client DB EUID: %d, PID: %d\n",
3600                   __FUNCTION__, pCurrentTokenUser->euid, pCurrentTokenUser->pid,
3601                   pClientTokenUser->euid, pClientTokenUser->pid);
3602         return NV_ERR_INVALID_CLIENT;
3603     }
3604 
3605     return NV_OK;
3606 }
3607 
3608 /*!
3609  * @brief Interface function to compare the tokens for two client
3610  *
3611  * This function takes two tokens as parameters, validates them and checks
3612  * if the EUIDs of each token match.
3613  *
3614  * @param[in] pToken1  Token to compare
3615  * @param[in] pToken2  Token to compare
3616  * @return             NV_TRUE if the tokens match
3617  *                     NV_FALSE if the tokens do not match
3618  */
3619 NvBool
3620 osUidTokensEqual
3621 (
3622    PUID_TOKEN pUidToken1,
3623    PUID_TOKEN pUidToken2
3624 )
3625 {
3626     NvU32 * pTokenUser1 = (NvU32*)pUidToken1;
3627     NvU32 * pTokenUser2 = (NvU32*)pUidToken2;
3628 
3629     NV_ASSERT_OR_RETURN((pTokenUser1 != NULL), NV_FALSE);
3630     NV_ASSERT_OR_RETURN((pTokenUser2 != NULL), NV_FALSE);
3631 
3632     if (*pTokenUser1 != *pTokenUser2)
3633     {
3634         return NV_FALSE;
3635     }
3636 
3637     return NV_TRUE;
3638 }
3639 
3640 NvBool
3641 osRemoveGpuSupported
3642 (
3643     void
3644 )
3645 {
3646     return os_pci_remove_supported();
3647 }
3648 
3649 /*
3650  * @brief Get the address ranges assigned to local or peer GPUs on a system that
3651  * supports hardware address translation services (ATS) over NVLink/C2C.
3652  *
3653  * @note
3654  * - All address values are in the System Physical Address (SPA) space
3655  * - Targets can either be "Local" (bIsPeer=False) or for a specified "Peer"
3656  *   (bIsPeer=True, peerIndex=#) GPU
3657  * - Target address and mask values have a specified bit width, and represent
3658  *   the higher order bits above the target address granularity
3659  *
3660  * @param[in]   pGpu                GPU object pointer
3661  * @param[out]  pAddrSysPhys        Pointer to hold SPA
3662  * @param[out]  pAddrWidth          Address range width value pointer
3663  * @param[out]  pMask               Mask value pointer
3664  * @param[out]  pMaskWidth          Mask width value pointer
3665  * @param[in]   bIsPeer             NV_TRUE if this is a peer, local GPU otherwise
3666  * @param[in]   peerIndex           Peer index
3667  *
3668  * @return      NV_OK or NV_ERR_NOT_SUPPORTED
3669  *
3670  *              A return value of NV_ERR_NOT_SUPPORTED for the local GPU would
3671  *              indicate that the system does not support ATS over NVLink/C2C
3672  */
3673 NV_STATUS
3674 osGetAtsTargetAddressRange
3675 (
3676     OBJGPU *pGpu,
3677     NvU64   *pAddrSysPhys,
3678     NvU32   *pAddrWidth,
3679     NvU32   *pMask,
3680     NvU32   *pMaskWidth,
3681     NvBool  bIsPeer,
3682     NvU32   peerIndex
3683 )
3684 {
3685 #if RMCFG_MODULE_KERNEL_BIF && RMCFG_MODULE_KERNEL_NVLINK && (defined(NVCPU_PPC64LE) || defined(NVCPU_AARCH64))
3686     KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu);
3687     KernelBif    *pKernelBif    = GPU_GET_KERNEL_BIF(pGpu);
3688     nv_state_t   *nv;
3689     const int addrMaskWidth = 0x10;
3690 
3691     if (!pKernelNvlink && !pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_IS_C2C_LINK_UP))
3692         return NV_ERR_INVALID_ARGUMENT;
3693 
3694     nv = NV_GET_NV_STATE(pGpu);
3695 
3696     //
3697     // TODO : Bug 1848958 restricts peer device tree parsing. Return early if
3698     // peer values are requested. This should be fixed by passing correct pGpu
3699     // pointer of the peer GPU retrieved using peerIds.
3700     //
3701     if (bIsPeer)
3702     {
3703         const int addrWidth = 0x10;
3704 
3705         *pAddrSysPhys = 0;
3706         *pAddrWidth = addrWidth;
3707         *pMask = 0;
3708         *pMaskWidth = addrMaskWidth;
3709         return NV_OK;
3710     }
3711     else
3712     {
3713         NV_STATUS status = nv_get_device_memory_config(nv, pAddrSysPhys, NULL,
3714                                                        pAddrWidth, NULL);
3715         if (status == NV_OK)
3716         {
3717             *pMask = NVBIT(*pAddrWidth) - 1U;
3718             *pMaskWidth = addrMaskWidth;
3719         }
3720         return status;
3721     }
3722 
3723     return NV_OK;
3724 #endif
3725     return NV_ERR_NOT_SUPPORTED;
3726 }
3727 
3728 /*
3729  * @brief Get the physical address in CPU address map and NUMA node id
3730  * of the GPU memory.
3731  *
3732  * @note
3733  * - The physical address is System Physical Address (SPA) in baremetal/host
3734  *   and Intermediate Physical Address(IPA) or Guest Physical Address(GPA)
3735  *   inside a VM.
3736  *
3737  * @param[in]   pGpu             GPU object pointer
3738  * @param[out]  pAddrPhys        Pointer to hold the physical address of FB in
3739  *                               CPU address map
3740  * @param[out]  pNodeId          NUMA nodeID of respective GPU memory
3741  *
3742  * @return      NV_OK or NV_ERR_NOT_SUPPORTED
3743  *
3744  */
3745 NV_STATUS
3746 osGetFbNumaInfo
3747 (
3748     OBJGPU *pGpu,
3749     NvU64  *pAddrPhys,
3750     NvS32  *pNodeId
3751 )
3752 {
3753 #if RMCFG_MODULE_KERNEL_BIF && RMCFG_MODULE_KERNEL_NVLINK && (defined(NVCPU_PPC64LE) || defined(NVCPU_AARCH64))
3754     KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu);
3755     KernelBif    *pKernelBif    = GPU_GET_KERNEL_BIF(pGpu);
3756     nv_state_t   *nv;
3757 
3758     *pNodeId = NV0000_CTRL_NO_NUMA_NODE;
3759 
3760     if (!pKernelNvlink && !pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_IS_C2C_LINK_UP))
3761         return NV_ERR_INVALID_ARGUMENT;
3762 
3763     nv = NV_GET_NV_STATE(pGpu);
3764 
3765     NV_STATUS status = nv_get_device_memory_config(nv, NULL, pAddrPhys, NULL, pNodeId);
3766 
3767     return status;
3768 #endif
3769     return NV_ERR_NOT_SUPPORTED;
3770 }
3771 
3772 
3773 /*
3774  * @brief Verif only function to get the chiplib overrides for link connection
3775  *        state for all C2C links.
3776  *
3777  * If chiplib overrides exist, each link can either be enabled (1) or disabled (0)
3778  *
3779  * @param[in]   pGpu                GPU object pointer
3780  * @param[in]   maxLinks            Size of pLinkConnection array
3781  * @param[out]  pLinkConnection     array of pLinkConnection values to be populated by MODS
3782  *
3783  * @return      NV_OK or NV_ERR_NOT_SUPPORTED (no overrides available)
3784  */
3785 NV_STATUS
3786 osGetForcedC2CConnection
3787 (
3788     OBJGPU *pGpu,
3789     NvU32   maxLinks,
3790     NvU32   *pLinkConnection
3791 )
3792 {
3793     int i, ret;
3794     NV_STATUS status;
3795     char path[64];
3796     OBJOS *pOS;
3797     OBJSYS *pSys;
3798 
3799     NV_ASSERT_OR_RETURN((pLinkConnection != NULL), NV_ERR_INVALID_POINTER);
3800     NV_ASSERT_OR_RETURN((maxLinks > 0), NV_ERR_NOT_SUPPORTED);
3801     NV_ASSERT_OR_RETURN((pGpu != NULL), NV_ERR_INVALID_ARGUMENT);
3802 
3803     pSys = SYS_GET_INSTANCE();
3804     pOS = SYS_GET_OS(pSys);
3805     if (pOS == NULL || pOS->osSimEscapeRead == NULL)
3806     {
3807         NV_PRINTF(LEVEL_ERROR, "%s: escape reads not supported on platform\n",
3808                   __FUNCTION__);
3809         return NV_ERR_NOT_SUPPORTED;
3810     }
3811 
3812     for (i = 0; i < maxLinks; i++)
3813     {
3814         ret = os_snprintf(path, sizeof(path), "CPU_MODEL|CM_ATS_ADDRESS|C2C%u", i);
3815         NV_ASSERT((ret > 0) && (ret < (sizeof(path) - 1)));
3816 
3817         status = pOS->osSimEscapeRead(pGpu, path, 0, 4, &pLinkConnection[i]);
3818         if (status == NV_OK)
3819         {
3820             NV_PRINTF(LEVEL_INFO, "%s: %s=0x%X\n", __FUNCTION__,
3821                       path, pLinkConnection[i]);
3822         }
3823         else
3824         {
3825             NV_PRINTF(LEVEL_INFO, "%s: osSimEscapeRead for '%s' failed (%u)\n",
3826                       __FUNCTION__, path, status);
3827             return NV_ERR_NOT_SUPPORTED;
3828         }
3829     }
3830     return NV_OK;
3831 }
3832 
3833 static NV_STATUS
3834 osGetSmbiosTableInfo
3835 (
3836     const NvU8 *pMappedAddr,
3837     NvU64 *pBaseAddr,
3838     NvU64 *pLength,
3839     NvU64 *pNumSubTypes,
3840     NvU32 *pVersion
3841 )
3842 {
3843     *pBaseAddr = 0;
3844     *pLength = 0;
3845     *pNumSubTypes = 0;
3846     *pVersion = 0;
3847 
3848     if (portMemCmp(pMappedAddr, "_SM3_", 5) == 0)
3849     {
3850         *pVersion = (pMappedAddr[7] << 8) | pMappedAddr[8];
3851         portMemCopy(pLength, 4, pMappedAddr + 12, 4);
3852         portMemCopy(pBaseAddr, 8, pMappedAddr + 16, 8);
3853 
3854         *pNumSubTypes = *pLength / 4;
3855 
3856         return NV_OK;
3857     }
3858 
3859     if (portMemCmp(pMappedAddr, "_SM_", 4) == 0)
3860     {
3861         *pVersion = (pMappedAddr[6] << 8) | pMappedAddr[7];
3862 
3863         pMappedAddr += 16;
3864 
3865         if (portMemCmp(pMappedAddr, "_DMI_", 5) == 0)
3866         {
3867             portMemCopy(pLength, 2, pMappedAddr + 6, 2);
3868             portMemCopy(pBaseAddr, 4, pMappedAddr + 8, 4);
3869             portMemCopy(pNumSubTypes, 2, pMappedAddr + 12, 2);
3870 
3871             if (!*pVersion)
3872                 *pVersion = (pMappedAddr[14] & 0xF0) << 4 |
3873                             (pMappedAddr[14] & 0x0F);
3874 
3875             return NV_OK;
3876         }
3877     }
3878 
3879     return NV_ERR_INVALID_ADDRESS;
3880 }
3881 
3882 
3883 /*
3884  * @brief Function to export SMBIOS table. Also, maps table in kernel-space.
3885  *
3886  * @param[out]   ppBaseVAddr         Base virtual address of SMBIOS table.
3887  * @param[out]   pLength             Size of SMBIOS table.
3888  * @param[out]   pNumSubTypes        Count of structures (types) embedded in
3889  *                                   the SMBIOS table.
3890  * @param[out]   pVersion            SMBIOS version
3891  *
3892  * @return NV_OK, NV_ERR_INSUFFICIENT_RESOURCES or NV_ERR_INVALID_ADDRESS
3893  *          or errors from OS layer
3894  */
3895 NV_STATUS
3896 osGetSmbiosTable
3897 (
3898     void **ppBaseVAddr,
3899     NvU64 *pLength,
3900     NvU64 *pNumSubTypes,
3901     NvU32 *pVersion
3902 )
3903 {
3904     NV_STATUS status = NV_OK;
3905     NvU64 physSmbiosAddr = ~0ull;
3906     void *pMappedAddr = NULL;
3907     NvU64 basePAddr = 0;
3908 
3909     if (!NVCPU_IS_X86_64)
3910     {
3911         return NV_ERR_NOT_SUPPORTED;
3912     }
3913 
3914     status = os_get_smbios_header(&physSmbiosAddr);
3915     if (status != NV_OK)
3916     {
3917         NV_PRINTF(LEVEL_INFO, "%s: Failed query SMBIOS table with error: %x \n",
3918                   __FUNCTION__, status);
3919         return status;
3920     }
3921 
3922     NV_ASSERT(physSmbiosAddr != ~0ull);
3923 
3924     pMappedAddr = osMapKernelSpace(physSmbiosAddr,
3925                                    os_page_size,
3926                                    NV_MEMORY_CACHED,
3927                                    NV_PROTECT_READ_WRITE);
3928     if (!pMappedAddr)
3929     {
3930         return NV_ERR_INSUFFICIENT_RESOURCES;
3931     }
3932 
3933     status = osGetSmbiosTableInfo(pMappedAddr,
3934                                   &basePAddr,
3935                                   pLength,
3936                                   pNumSubTypes,
3937                                   pVersion);
3938 
3939     osUnmapKernelSpace(pMappedAddr, os_page_size);
3940 
3941     if (status != NV_OK)
3942     {
3943         return status;
3944     }
3945 
3946     *ppBaseVAddr = osMapKernelSpace(basePAddr,
3947                                     *pLength,
3948                                     NV_MEMORY_CACHED,
3949                                     NV_PROTECT_READ_WRITE);
3950     if (!*ppBaseVAddr)
3951     {
3952         return NV_ERR_INSUFFICIENT_RESOURCES;
3953     }
3954 
3955     return NV_OK;
3956 }
3957 
3958 /*
3959  * @brief Function to free SMBIOS table mappings
3960  *
3961  * @param[in]   pBaseVAddr        Base virtual address of SMBIOS table.
3962  * @param[in]   length            Size of SMBIOS table.
3963  *
3964  */
3965 void
3966 osPutSmbiosTable
3967 (
3968     void *pBaseVAddr,
3969     NvU64 length
3970 )
3971 {
3972     osUnmapKernelSpace(pBaseVAddr, length);
3973 }
3974 
3975 NV_STATUS
3976 osGetAcpiRsdpFromUefi
3977 (
3978     NvU32  *pRsdpAddr
3979 )
3980 {
3981     return os_get_acpi_rsdp_from_uefi(pRsdpAddr);
3982 }
3983 
3984 /*
3985  *  @brief Returns NV_TRUE if NvSwitch device is present in the system.
3986  */
3987 NvBool
3988 osIsNvswitchPresent
3989 (
3990     void
3991 )
3992 {
3993     return os_is_nvswitch_present();
3994 }
3995 
3996 /*
3997  *  @brief Function to add crashlog buffer entry.
3998  *
3999  *  @param[in] pBuffer              virt_addr of nvlog buffer
4000  *  @param[in] length               size of nvlog buffer
4001  */
4002 void
4003 osAddRecordForCrashLog
4004 (
4005     void *pBuffer,
4006     NvU32 length
4007 )
4008 {
4009     os_add_record_for_crashLog(pBuffer, length);
4010 }
4011 
4012 /*
4013  *  @brief Function to delete crashlog buffer entry.
4014  *
4015  *  @param[in] pBuffer              virt_addr of nvlog buffer
4016  */
4017 void
4018 osDeleteRecordForCrashLog
4019 (
4020     void *pBuffer
4021 )
4022 {
4023     os_delete_record_for_crashLog(pBuffer);
4024 }
4025 
4026 /*
4027  *  @brief Queries the sysfs interface to get memblock size
4028  *  @param[out] memblock_size Pointer to the memblock_size
4029  */
4030 NV_STATUS
4031 osNumaMemblockSize
4032 (
4033     NvU64 *memblock_size
4034 )
4035 {
4036     return os_numa_memblock_size(memblock_size);
4037 }
4038 
4039 NvBool
4040 osNumaOnliningEnabled
4041 (
4042     OS_GPU_INFO *pOsGpuInfo
4043 )
4044 {
4045     NvS32 numaNodeId = NV0000_CTRL_NO_NUMA_NODE;
4046 
4047     //
4048     // Note that this numaNodeId value fetched from Linux layer might not be
4049     // accurate since it is possible to overwrite it with regkey on some configs
4050     //
4051     if (nv_get_device_memory_config(pOsGpuInfo, NULL, NULL, NULL,
4052                                     &numaNodeId) != NV_OK)
4053     {
4054         return NV_FALSE;
4055     }
4056 
4057     return (numaNodeId != NV0000_CTRL_NO_NUMA_NODE);
4058 }
4059 
4060 /*
4061  *  @brief Function to call NUMA allocation entry.
4062  *
4063  *  @param[in]  nid       NUMA node id
4064  *  @param[in]  size      Allocation size
4065  *  @param[in]  flag      Allocation flags
4066  *  @param[out] pAddress  Ptr to the allocated physical address
4067  */
4068 NV_STATUS
4069 osAllocPagesNode
4070 (
4071     NvS32       nid,
4072     NvLength    size,
4073     NvU32       flag,
4074     NvU64      *pAddress
4075 )
4076 {
4077     NV_STATUS  status    = NV_OK;
4078     NvU32      localFlag = NV_ALLOC_PAGES_NODE_NONE;
4079 
4080     if (pAddress == NULL || nid < 0 || size > NV_U32_MAX)
4081     {
4082         return NV_ERR_INVALID_ARGUMENT;
4083     }
4084 
4085     // Translate the flags
4086     if (flag & OS_ALLOC_PAGES_NODE_SKIP_RECLAIM)
4087     {
4088         localFlag |= NV_ALLOC_PAGES_NODE_SKIP_RECLAIM;
4089     }
4090 
4091     status = os_alloc_pages_node(nid, (NvU32)size, localFlag, pAddress);
4092     return status;
4093 }
4094 
4095 NV_STATUS
4096 osAllocAcquirePage
4097 (
4098     NvU64      pAddress
4099 )
4100 {
4101     os_get_page(pAddress);
4102     return NV_OK;
4103 }
4104 
4105 NV_STATUS
4106 osAllocReleasePage
4107 (
4108     NvU64       pAddress
4109 )
4110 {
4111     os_put_page(pAddress);
4112     return NV_OK;
4113 }
4114 
4115 /*
4116  *  @brief Function to return refcount on a page
4117  *  @param[in] address  The physical address of the page
4118  */
4119 NvU32
4120 osGetPageRefcount
4121 (
4122     NvU64       pAddress
4123 )
4124 {
4125     return os_get_page_refcount(pAddress);
4126 }
4127 
4128 /*
4129  *  @brief Function to return the number of tail pages if the address is
4130  *  referring to a compound page; For non-compound pages, 1 is returned.
4131  *  @param[in] address  The physical address of the page
4132  */
4133 NvU32
4134 osCountTailPages
4135 (
4136     NvU64       pAddress
4137 )
4138 {
4139     return os_count_tail_pages(pAddress);
4140 }
4141 
4142 /*
4143  *  @brief Upon success, gets NPU register address range.
4144  *
4145  *  @param[in]  pOsGpuInfo       OS specific GPU information pointer
4146  *  @param[out] pBase            base (physical) of NPU register address range
4147  *  @param[out] pSize            size of NPU register address range
4148  */
4149 NV_STATUS
4150 osGetIbmnpuGenregInfo
4151 (
4152     OS_GPU_INFO *pOsGpuInfo,
4153     NvU64       *pBase,
4154     NvU64       *pSize
4155 )
4156 {
4157     return nv_get_ibmnpu_genreg_info(pOsGpuInfo, pBase, pSize, NULL);
4158 }
4159 
4160 /*
4161  *  @brief Upon success, gets NPU's relaxed ordering mode.
4162  *
4163  *  @param[in]  pOsGpuInfo       OS specific GPU information pointer
4164  *  @param[out] pMode            relaxed ordering mode
4165  */
4166 NV_STATUS
4167 osGetIbmnpuRelaxedOrderingMode
4168 (
4169     OS_GPU_INFO *pOsGpuInfo,
4170     NvBool      *pMode
4171 )
4172 {
4173     return nv_get_ibmnpu_relaxed_ordering_mode(pOsGpuInfo, pMode);
4174 }
4175 
4176 /*
4177  *  @brief Waits for NVLink HW flush on an NPU associated with a GPU.
4178  *
4179  *  @param[in]  pOsGpuInfo       OS specific GPU information pointer
4180  */
4181 void
4182 osWaitForIbmnpuRsync
4183 (
4184     OS_GPU_INFO *pOsGpuInfo
4185 )
4186 {
4187     nv_wait_for_ibmnpu_rsync(pOsGpuInfo);
4188 }
4189 
4190 NvU32
4191 osGetPageSize(void)
4192 {
4193     return os_page_size;
4194 }
4195 
4196 
4197 
4198 /*
4199  * @brief Opens a new temporary file for reading and writing
4200  *
4201  * @param[in]  ppFile      void double pointer
4202  *
4203  * @returns NV_STATUS, NV_OK if success,
4204                        NV_ERR_GENERIC, if error
4205                        NV_ERR_NOT_SUPPORTED, for unsupported platforms
4206  */
4207 NV_STATUS
4208 osOpenTemporaryFile
4209 (
4210     void **ppFile
4211 )
4212 {
4213     return os_open_temporary_file(ppFile);
4214 }
4215 
4216 /*
4217  * @brief Closes the specified temporary file
4218  *
4219  * @param[in]  pFile      Pointer to file
4220  *
4221  * @returns void
4222  */
4223 void
4224 osCloseFile
4225 (
4226     void *pFile
4227 )
4228 {
4229     os_close_file(pFile);
4230 }
4231 
4232 /*
4233  * @brief Writes the buffer to the specified file at the given offset
4234  *
4235  * @param[in]  pFile            Pointer to file (void)
4236  * @param[in]  pBuffer          Pointer to buffer from which to copy
4237  * @param[in]  size             Size of the copy
4238  * @parma[in]  offset           offset in the file
4239  *
4240  * @returns NV_STATUS, NV_OK if success,
4241                        NV_ERR_GENERIC, if error
4242                        NV_ERR_NOT_SUPPORTED, for unsupported platforms
4243  */
4244 NV_STATUS
4245 osWriteToFile
4246 (
4247     void *pFile,
4248     NvU8 *pBuffer,
4249     NvU64 size,
4250     NvU64 offset
4251 )
4252 {
4253     return os_write_file(pFile, pBuffer, size, offset);
4254 }
4255 
4256 /*
4257  * @brief Reads from the specified file at the given offset
4258  *
4259  * @param[in]  pFile            Pointer to file (void *)
4260  * @param[in]  pBuffer          Pointer to buffer to which the data is copied
4261  * @param[in]  size             Size of the copy
4262  * @parma[in]  offset           offset in the file
4263  *
4264  * @returns NV_STATUS, NV_OK if success,
4265                        NV_ERR_GENERIC, if error
4266                        NV_ERR_NOT_SUPPORTED, for unsupported platforms
4267  */
4268 NV_STATUS
4269 osReadFromFile
4270 (
4271     void *pFile,
4272     NvU8 *pBuffer,
4273     NvU64 size,
4274     NvU64 offset
4275 )
4276 {
4277     return os_read_file(pFile, pBuffer, size, offset);
4278 }
4279 
4280 /*
4281  * @brief Unregisters caps from the capability framework.
4282  *        The function assumes that the caps are allocated and stored in the
4283  *        hierarchical order. If they aren't, OS (Linux kernel) would warn and
4284  *        leak the caps.
4285  *
4286  * @param[in]  pOsRmCaps           caps of interest
4287  */
4288 void
4289 osRmCapUnregister
4290 (
4291     OS_RM_CAPS **ppOsRmCaps
4292 )
4293 {
4294     OS_RM_CAPS *pOsRmCaps = *ppOsRmCaps;
4295     NvS32 i;
4296 
4297     if (pOsRmCaps == NULL)
4298     {
4299         return;
4300     }
4301 
4302     for (i = pOsRmCaps->count - 1; i >= 0; i--)
4303     {
4304         if (pOsRmCaps->caps[i] != NULL)
4305         {
4306             os_nv_cap_destroy_entry(pOsRmCaps->caps[i]);
4307         }
4308     }
4309 
4310     os_free_mem(pOsRmCaps->caps);
4311     os_free_mem(pOsRmCaps);
4312 
4313     *ppOsRmCaps = NULL;
4314 }
4315 
4316 static NV_STATUS
4317 _allocOsRmCaps
4318 (
4319     OS_RM_CAPS **ppOsRmCaps,
4320     NvU32        count
4321 )
4322 {
4323     NV_STATUS   status;
4324     OS_RM_CAPS *pOsRmCaps;
4325 
4326     *ppOsRmCaps = NULL;
4327 
4328     status = os_alloc_mem((void**)&pOsRmCaps, sizeof(OS_RM_CAPS));
4329     if (status != NV_OK)
4330         return status;
4331 
4332     pOsRmCaps->count = count;
4333 
4334     status = os_alloc_mem((void**)&pOsRmCaps->caps, sizeof(pOsRmCaps->caps[0]) * count);
4335     if (status != NV_OK)
4336     {
4337         os_free_mem(pOsRmCaps);
4338         return status;
4339     }
4340 
4341     os_mem_set(pOsRmCaps->caps, 0, sizeof(pOsRmCaps->caps[0]) * count);
4342 
4343     *ppOsRmCaps = pOsRmCaps;
4344     return NV_OK;
4345 }
4346 
4347 #define OS_RM_CAP_GPU_DIR 0
4348 #define OS_RM_CAP_GPU_MIG_DIR 1
4349 #define OS_RM_CAP_GPU_COUNT 2
4350 
4351 /*
4352  * @brief Registers OBJGPU with the capability framework.
4353  *
4354  * @param[in]  pOsGpuInfo  OS specific GPU information pointer
4355  * @param[out] ppOsRmCaps  GPU OS specific capabilities pointer
4356  */
4357 NV_STATUS
4358 osRmCapRegisterGpu
4359 (
4360     OS_GPU_INFO   *pOsGpuInfo,
4361     OS_RM_CAPS   **ppOsRmCaps
4362 )
4363 {
4364     NvU32 minor = nv_get_dev_minor(pOsGpuInfo);
4365     char name[16];
4366     NV_STATUS   status;
4367     OS_RM_CAPS *pOsRmCaps;
4368     nv_cap_t   *parent;
4369     nv_cap_t   *cap;
4370 
4371     // Return success on the unsupported platforms.
4372     if (nvidia_caps_root == NULL)
4373     {
4374         return NV_OK;
4375     }
4376 
4377     if (*ppOsRmCaps != NULL)
4378     {
4379         return NV_ERR_INVALID_ARGUMENT;
4380     }
4381 
4382     status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_GPU_COUNT);
4383     if (status != NV_OK)
4384         return status;
4385 
4386     *ppOsRmCaps = pOsRmCaps;
4387 
4388     os_snprintf(name, sizeof(name), "gpu%u", minor);
4389     name[sizeof(name) - 1] = '\0';
4390     parent = nvidia_caps_root;
4391 
4392     cap = os_nv_cap_create_dir_entry(parent, name, (OS_RUGO | OS_XUGO));
4393     if (cap == NULL)
4394     {
4395         NV_PRINTF(LEVEL_ERROR, "Failed to setup gpu%u directory\n", minor);
4396         status = NV_ERR_OPERATING_SYSTEM;
4397         goto failed;
4398     }
4399 
4400     pOsRmCaps->caps[OS_RM_CAP_GPU_DIR] = cap;
4401     parent = cap;
4402 
4403     // TODO: Bug 2679591: Add MIG directory only if SMC is enabled.
4404     // For now, always add "mig" directory.
4405     cap = os_nv_cap_create_dir_entry(parent, "mig", (OS_RUGO | OS_XUGO));
4406     if (cap == NULL)
4407     {
4408         NV_PRINTF(LEVEL_ERROR, "Failed to setup mig directory\n");
4409         status = NV_ERR_OPERATING_SYSTEM;
4410         goto failed;
4411     }
4412 
4413     pOsRmCaps->caps[OS_RM_CAP_GPU_MIG_DIR] = cap;
4414 
4415     return NV_OK;
4416 
4417 failed:
4418     osRmCapUnregister(ppOsRmCaps);
4419 
4420     return status;
4421 }
4422 
4423 #define OS_RM_CAP_SMC_PART_DIR 0
4424 #define OS_RM_CAP_SMC_PART_ACCESS_FILE 1
4425 #define OS_RM_CAP_SMC_PART_COUNT 2
4426 
4427 /*
4428  * @brief Registers SMC partition (a.k.a. GPU instance) with the capability
4429  *        framework
4430  *
4431  * @param[in]  pGpuOsRmCaps         GPU OS specific capabilities pointer
4432  * @param[out] ppPartitionOsRmCaps  OS specific capabilities pointer for SMC partition
4433  * @param[in]  swizzId              SMC partition swizz ID
4434  */
4435 NV_STATUS
4436 osRmCapRegisterSmcPartition
4437 (
4438     OS_RM_CAPS  *pGpuOsRmCaps,
4439     OS_RM_CAPS **ppPartitionOsRmCaps,
4440     NvU32        swizzId
4441 )
4442 {
4443     char        name[16];
4444     NV_STATUS   status;
4445     nv_cap_t   *parent;
4446     nv_cap_t   *cap;
4447     OS_RM_CAPS *pOsRmCaps;
4448 
4449     // Return success as there is nothing to do.
4450     if (pGpuOsRmCaps == NULL)
4451     {
4452         return NV_OK;
4453     }
4454 
4455     if (*ppPartitionOsRmCaps != NULL || swizzId >= NV_U32_MAX)
4456     {
4457         return NV_ERR_INVALID_ARGUMENT;
4458     }
4459 
4460     parent = pGpuOsRmCaps->caps[OS_RM_CAP_GPU_MIG_DIR];
4461     if (parent == NULL)
4462     {
4463         return NV_ERR_INVALID_STATE;
4464     }
4465 
4466     status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SMC_PART_COUNT);
4467     if (status != NV_OK)
4468         return status;
4469 
4470     *ppPartitionOsRmCaps = pOsRmCaps;
4471 
4472     os_snprintf(name, sizeof(name), "gi%u", swizzId);
4473     name[sizeof(name) - 1] = '\0';
4474 
4475     cap = os_nv_cap_create_dir_entry(parent, name, OS_RUGO | OS_XUGO);
4476     if (cap == NULL)
4477     {
4478         NV_PRINTF(LEVEL_ERROR, "Failed to setup gi%u directory\n",
4479                   swizzId);
4480         status = NV_ERR_OPERATING_SYSTEM;
4481         goto failed;
4482     }
4483 
4484     pOsRmCaps->caps[OS_RM_CAP_SMC_PART_DIR] = cap;
4485     parent = cap;
4486 
4487     cap = os_nv_cap_create_file_entry(parent, "access", OS_RUGO);
4488     if (cap == NULL)
4489     {
4490         NV_PRINTF(LEVEL_ERROR, "Failed to setup access file for ID:%u\n",
4491                   swizzId);
4492         status = NV_ERR_OPERATING_SYSTEM;
4493         goto failed;
4494     }
4495 
4496     pOsRmCaps->caps[OS_RM_CAP_SMC_PART_ACCESS_FILE] = cap;
4497 
4498     return NV_OK;
4499 
4500 failed:
4501     osRmCapUnregister(ppPartitionOsRmCaps);
4502 
4503     return status;
4504 }
4505 
4506 #define OS_RM_CAP_SMC_EXEC_PART_DIR 0
4507 #define OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE 1
4508 #define OS_RM_CAP_SMC_EXEC_PART_COUNT 2
4509 
4510 /*
4511  * @brief Registers SMC execution partition (a.k.a. compute instance) with the
4512  *        capability framework
4513  *
4514  * @param[in]  pPartitionOsRmCaps       OS specific capabilities pointer for SMC partition
4515  * @param[out] ppExecPartitionOsRmCaps  OS specific capabilities pointer for SMC execution partition
4516  * @param[in]  execPartitionId          SMC execution partition ID
4517  */
4518 NV_STATUS
4519 osRmCapRegisterSmcExecutionPartition
4520 (
4521     OS_RM_CAPS  *pPartitionOsRmCaps,
4522     OS_RM_CAPS **ppExecPartitionOsRmCaps,
4523     NvU32        execPartitionId
4524 )
4525 {
4526     char        name[16];
4527     NV_STATUS   status;
4528     nv_cap_t   *parent;
4529     nv_cap_t   *cap;
4530     OS_RM_CAPS *pOsRmCaps;
4531 
4532     // Return success as there is nothing to do.
4533     if (pPartitionOsRmCaps == NULL)
4534     {
4535         return NV_OK;
4536     }
4537 
4538     if ((*ppExecPartitionOsRmCaps != NULL) || (execPartitionId >= NV_U32_MAX))
4539     {
4540         return NV_ERR_INVALID_ARGUMENT;
4541     }
4542 
4543     parent = pPartitionOsRmCaps->caps[OS_RM_CAP_SMC_PART_DIR];
4544     if (parent == NULL)
4545     {
4546         return NV_ERR_INVALID_STATE;
4547     }
4548 
4549     status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SMC_EXEC_PART_COUNT);
4550     if (status != NV_OK)
4551     {
4552         return status;
4553     }
4554 
4555     *ppExecPartitionOsRmCaps = pOsRmCaps;
4556 
4557     os_snprintf(name, sizeof(name), "ci%u", execPartitionId);
4558     name[sizeof(name) - 1] = '\0';
4559 
4560     cap = os_nv_cap_create_dir_entry(parent, name, OS_RUGO | OS_XUGO);
4561     if (cap == NULL)
4562     {
4563         NV_PRINTF(LEVEL_ERROR, "Failed to setup ci%u directory\n",
4564                   execPartitionId);
4565         status = NV_ERR_OPERATING_SYSTEM;
4566         goto failed;
4567     }
4568 
4569     pOsRmCaps->caps[OS_RM_CAP_SMC_EXEC_PART_DIR] = cap;
4570     parent = cap;
4571 
4572     cap = os_nv_cap_create_file_entry(parent, "access", OS_RUGO);
4573     if (cap == NULL)
4574     {
4575         NV_PRINTF(LEVEL_ERROR, "Failed to setup access file for ID:%u\n",
4576                   execPartitionId);
4577         status = NV_ERR_OPERATING_SYSTEM;
4578         goto failed;
4579     }
4580 
4581     pOsRmCaps->caps[OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE] = cap;
4582 
4583     return NV_OK;
4584 
4585 failed:
4586     osRmCapUnregister(ppExecPartitionOsRmCaps);
4587 
4588     return status;
4589 }
4590 
4591 /*
4592  * @brief Release the acquired capability
4593  *
4594  * @param[in]  dupedCapDescriptor  descriptor to be released
4595  */
4596 void
4597 osRmCapRelease
4598 (
4599     NvU64 dupedCapDescriptor
4600 )
4601 {
4602     if (dupedCapDescriptor == NV_U64_MAX)
4603     {
4604         return;
4605     }
4606 
4607     os_nv_cap_close_fd((int)dupedCapDescriptor);
4608 }
4609 
4610 #define OS_RM_CAP_SYS_MIG_DIR                   0
4611 #define OS_RM_CAP_SYS_SMC_CONFIG_FILE           1
4612 #define OS_RM_CAP_SYS_SMC_MONITOR_FILE          2
4613 #define OS_RM_CAP_SYS_COUNT                     3
4614 
4615 NV_STATUS
4616 osRmCapRegisterSys
4617 (
4618     OS_RM_CAPS **ppOsRmCaps
4619 )
4620 {
4621     nv_cap_t  **ppCaps;
4622     nv_cap_t   *parent;
4623     nv_cap_t   *cap;
4624     NV_STATUS   status;
4625     OS_RM_CAPS *pOsRmCaps;
4626 
4627     if (nvidia_caps_root == NULL)
4628         return NV_ERR_NOT_SUPPORTED;
4629 
4630     status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SYS_COUNT);
4631     if (status != NV_OK)
4632         return status;
4633 
4634     *ppOsRmCaps = pOsRmCaps;
4635 
4636     ppCaps = pOsRmCaps->caps;
4637 
4638     parent = os_nv_cap_create_dir_entry(nvidia_caps_root, "mig", OS_RUGO | OS_XUGO);
4639     if (parent == NULL)
4640     {
4641         NV_PRINTF(LEVEL_ERROR, "Failed to create mig directory\n");
4642         status = NV_ERR_OPERATING_SYSTEM;
4643         goto failed;
4644     }
4645     ppCaps[OS_RM_CAP_SYS_MIG_DIR] = parent;
4646 
4647     cap = os_nv_cap_create_file_entry(parent, "config", OS_RUSR);
4648     if (cap == NULL)
4649     {
4650         NV_PRINTF(LEVEL_ERROR, "Failed to create mig config file\n");
4651         status = NV_ERR_OPERATING_SYSTEM;
4652         goto failed;
4653     }
4654     ppCaps[OS_RM_CAP_SYS_SMC_CONFIG_FILE] = cap;
4655 
4656     cap = os_nv_cap_create_file_entry(parent, "monitor", OS_RUGO);
4657     if (cap == NULL)
4658     {
4659         NV_PRINTF(LEVEL_ERROR, "Failed to create mig monitor file\n");
4660         status = NV_ERR_OPERATING_SYSTEM;
4661         goto failed;
4662     }
4663     ppCaps[OS_RM_CAP_SYS_SMC_MONITOR_FILE] = cap;
4664 
4665     return NV_OK;
4666 
4667 failed:
4668     osRmCapUnregister(ppOsRmCaps);
4669     return status;
4670 }
4671 
4672 /*
4673  * @brief Acquire the requested capability
4674  *
4675  * @param[in]  pOsRmCaps           opaque pointer to the caps.
4676  * @param[in]  rmCap               the capability to be acquired.
4677  * @param[in]  capDescriptor       descriptor to be used for validation
4678  * @param[out] dupedCapDescriptor  returns duplicated descriptor if validation
4679  *                                 is successful
4680  *
4681  * Note: On Linux, duplicating fd is helpful to let administrators know about
4682  * the capability users. See https://linux.die.net/man/8/lsof usage.
4683  */
4684 NV_STATUS
4685 osRmCapAcquire
4686 (
4687     OS_RM_CAPS *pOsRmCaps,
4688     NvU32       rmCap,
4689     NvU64       capDescriptor,
4690     NvU64      *dupedCapDescriptor
4691 )
4692 {
4693     nv_cap_t *cap;
4694     int fd = (int)capDescriptor;
4695     int duped_fd;
4696     NvU32 index;
4697     NV_STATUS status;
4698 
4699     *dupedCapDescriptor = NV_U64_MAX;
4700 
4701     switch (rmCap)
4702     {
4703         case NV_RM_CAP_SMC_PARTITION_ACCESS:
4704         {
4705             index = OS_RM_CAP_SMC_PART_ACCESS_FILE;
4706             break;
4707         }
4708         case NV_RM_CAP_EXT_FABRIC_MGMT:
4709         {
4710             status = nv_acquire_fabric_mgmt_cap(fd, &duped_fd);
4711             if (status != NV_OK)
4712             {
4713                 return status;
4714             }
4715 
4716             goto done;
4717         }
4718         case NV_RM_CAP_SMC_EXEC_PARTITION_ACCESS:
4719         {
4720             index = OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE;
4721             break;
4722         }
4723         case NV_RM_CAP_SYS_SMC_CONFIG:
4724         {
4725             index = OS_RM_CAP_SYS_SMC_CONFIG_FILE;
4726             break;
4727         }
4728         case NV_RM_CAP_SYS_SMC_MONITOR:
4729         {
4730             index = OS_RM_CAP_SYS_SMC_MONITOR_FILE;
4731             break;
4732         }
4733         default:
4734         {
4735             return NV_ERR_INVALID_ARGUMENT;
4736         }
4737     }
4738 
4739     if (pOsRmCaps == NULL)
4740     {
4741         return NV_ERR_NOT_SUPPORTED;
4742     }
4743 
4744     if (index >= pOsRmCaps->count)
4745     {
4746         return NV_ERR_INVALID_ARGUMENT;
4747     }
4748 
4749     cap = pOsRmCaps->caps[index];
4750 
4751     duped_fd = os_nv_cap_validate_and_dup_fd(cap, fd);
4752     if (duped_fd < 0)
4753     {
4754         return NV_ERR_INSUFFICIENT_PERMISSIONS;
4755     }
4756 
4757 done:
4758     *dupedCapDescriptor = duped_fd;
4759 
4760     return NV_OK;
4761 }
4762 
4763 /*
4764  * @brief Initializes capability descriptor
4765  *
4766  * @param[out] pCapDescriptor        descriptor to be used
4767  *
4768  */
4769 void
4770 osRmCapInitDescriptor
4771 (
4772     NvU64 *pCapDescriptor
4773 )
4774 {
4775     *pCapDescriptor = NV_U64_MAX;
4776 }
4777 
4778 /*
4779  * @brief Generates random bytes which can be used as a universally unique
4780  *        identifier.
4781  *
4782  * @param[out] pBytes        Array of random bytes
4783  * @param[in]  numBytes      Size of the array
4784  */
4785 NV_STATUS
4786 osGetRandomBytes
4787 (
4788     NvU8 *pBytes,
4789     NvU16 numBytes
4790 )
4791 {
4792     os_get_random_bytes(pBytes, numBytes);
4793 
4794     return NV_OK;
4795 }
4796 
4797 /*
4798  * @brief Allocate wait queue
4799  *
4800  * @param[out] ppWq        Wait queue
4801  */
4802 NV_STATUS
4803 osAllocWaitQueue
4804 (
4805     OS_WAIT_QUEUE **ppWq
4806 )
4807 {
4808     return os_alloc_wait_queue(ppWq);
4809 }
4810 
4811 /*
4812  * @brief Free wait queue
4813  *
4814  * @param[in] pWq        Wait queue
4815  */
4816 void
4817 osFreeWaitQueue
4818 (
4819     OS_WAIT_QUEUE *pWq
4820 )
4821 {
4822     os_free_wait_queue(pWq);
4823 }
4824 
4825 /*
4826  * @brief Put thread to uninterruptible sleep
4827  *
4828  * @param[in] pWq        Wait queue
4829  */
4830 void
4831 osWaitUninterruptible
4832 (
4833     OS_WAIT_QUEUE *pWq
4834 )
4835 {
4836     os_wait_uninterruptible(pWq);
4837 }
4838 
4839 /*
4840  * @brief Put thread to interruptible sleep
4841  *
4842  * @param[in] pWq        Wait queue
4843  */
4844 void
4845 osWaitInterruptible
4846 (
4847     OS_WAIT_QUEUE *pWq
4848 )
4849 {
4850     os_wait_interruptible(pWq);
4851 }
4852 
4853 /*
4854  * @brief Wake up thread from uninterruptible sleep
4855  *
4856  * @param[in] pWq        Wait queue
4857  */
4858 void
4859 osWakeUp
4860 (
4861     OS_WAIT_QUEUE *pWq
4862 )
4863 {
4864     os_wake_up(pWq);
4865 }
4866 
4867 NV_STATUS
4868 osReadPFPciConfigInVF
4869 (
4870     NvU32 addr,
4871     NvU32 *data
4872 )
4873 {
4874     return NV_ERR_NOT_SUPPORTED;
4875 }
4876 
4877 /*!
4878  * @brief Returns IMP-relevant data collected from other modules
4879  *
4880  * This function is basically a wrapper to call the unix/linux layer.
4881  *
4882  * @param[out]  pTegraImpImportData  Structure to receive the data
4883  *
4884  * @returns NV_OK if successful,
4885  *          NV_ERR_BUFFER_TOO_SMALL if the array in TEGRA_IMP_IMPORT_DATA is
4886  *            too small,
4887  *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
4888  *          other errors as may be returned by subfunctions.
4889  */
4890 NV_STATUS
4891 osTegraSocGetImpImportData
4892 (
4893     TEGRA_IMP_IMPORT_DATA *pTegraImpImportData
4894 )
4895 {
4896     return NV_ERR_NOT_SUPPORTED;
4897 }
4898 
4899 /*!
4900  * @brief Tells BPMP whether or not RFL is valid
4901  *
4902  * Display HW generates an ok_to_switch signal which asserts when mempool
4903  * occupancy is high enough to be able to turn off memory long enough to
4904  * execute a dramclk frequency switch without underflowing display output.
4905  * ok_to_switch drives the RFL ("request for latency") signal in the memory
4906  * unit, and the switch sequencer waits for this signal to go active before
4907  * starting a dramclk switch.  However, if the signal is not valid (e.g., if
4908  * display HW or SW has not been initialized yet), the switch sequencer ignores
4909  * the signal.  This API tells BPMP whether or not the signal is valid.
4910  *
4911  * @param[in] pOsGpuInfo    Per GPU Linux state
4912  * @param[in] bEnable       True if RFL will be valid; false if invalid
4913  *
4914  * @returns NV_OK if successful,
4915  *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
4916  *          NV_ERR_GENERIC if some other kind of error occurred.
4917  */
4918 NV_STATUS
4919 osTegraSocEnableDisableRfl
4920 (
4921     OS_GPU_INFO *pOsGpuInfo,
4922     NvBool       bEnable
4923 )
4924 {
4925     return NV_ERR_NOT_SUPPORTED;
4926 }
4927 
4928 /*!
4929  * @brief Allocates a specified amount of ISO memory bandwidth for display
4930  *
4931  * floorBandwidthKBPS is the minimum required (i.e., floor) dramclk frequency
4932  * multiplied by the width of the pipe over which the display data will travel.
4933  * (It is understood that the bandwidth calculated by multiplying the clock
4934  * frequency by the pipe width will not be realistically achievable, due to
4935  * overhead in the memory subsystem.  The infrastructure will not actually use
4936  * the bandwidth value, except to reverse the calculation to get the required
4937  * dramclk frequency.)
4938  *
4939  * This function is basically a wrapper to call the unix/linux layer.
4940  *
4941  * @param[in]   pOsGpuInfo            OS specific GPU information pointer
4942  * @param[in]   averageBandwidthKBPS  Amount of ISO memory bandwidth requested
4943  * @param[in]   floorBandwidhtKBPS    Min required dramclk freq * pipe width
4944  *
4945  * @returns NV_OK if successful,
4946  *          NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too
4947  *            high, and bandwidth cannot be allocated,
4948  *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
4949  *          NV_ERR_GENERIC if some other kind of error occurred.
4950  */
4951 NV_STATUS
4952 osTegraAllocateDisplayBandwidth
4953 (
4954     OS_GPU_INFO *pOsGpuInfo,
4955     NvU32        averageBandwidthKBPS,
4956     NvU32        floorBandwidthKBPS
4957 )
4958 {
4959     return NV_ERR_NOT_SUPPORTED;
4960 }
4961 
4962 /*!
4963  * @brief Creates or sets up platform specific nano second resolution timer
4964  *
4965  * @param[in]     pOsGpuInfo     OS specific GPU information pointer
4966  * @param[in]     pTmrEvent      Pointer to timer event information
4967  * @param[in/out] pTimer         pointer to hold high resolution timer object
4968  */
4969 NV_STATUS
4970 osCreateNanoTimer
4971 (
4972     OS_GPU_INFO *pOsGpuInfo,
4973     void *pTmrEvent,
4974     void **pTimer
4975 )
4976 {
4977     nv_create_nano_timer(pOsGpuInfo, pTmrEvent, (nv_nano_timer_t **)pTimer);
4978     return NV_OK;
4979 }
4980 
4981 /*!
4982  * @brief Starts platform specific nano second resolution timer
4983  *
4984  * @param[in] pOsGpuInfo     OS specific GPU information pointer
4985  * @param[in] pTimer         pointer to high resolution timer object
4986  * @param[in] timens         time in nano seconds
4987  */
4988 NV_STATUS
4989 osStartNanoTimer
4990 (
4991     OS_GPU_INFO *pOsGpuInfo,
4992     void *pTimer,
4993     NvU64 timeNs
4994 )
4995 {
4996     nv_start_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer, timeNs);
4997     return NV_OK;
4998 }
4999 
5000 /*!
5001  * @brief Cancels platform specific nano second resolution timer
5002  *
5003  * @param[in] pOsGpuInfo     OS specific GPU information pointer
5004  * @param[in] pTimer       pointer to timer object
5005  */
5006 NV_STATUS
5007 osCancelNanoTimer
5008 (
5009     OS_GPU_INFO *pOsGpuInfo,
5010     void *pTimer
5011 )
5012 {
5013     nv_cancel_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer);
5014     return NV_OK;
5015 }
5016 
5017 /*!
5018  * @brief Destroys & cancels platform specific nano second resolution timer
5019  *
5020  *
5021  * @param[in] pGpu         Device of interest
5022  * @param[in] pTimer       pointer to timer object
5023  */
5024 NV_STATUS
5025 osDestroyNanoTimer
5026 (
5027     OS_GPU_INFO *pOsGpuInfo,
5028     void *pTimer
5029 )
5030 {
5031     nv_destroy_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer);
5032     return NV_OK;
5033 }
5034 
5035 /*!
5036  * @brief Get number of dpAux instances.
5037  * It is wrapper function to call unix/linux layer.
5038  *
5039  * @param[in]  pGpu           GPU object pointer
5040  * @param[out]  pNumIntances  Number of dpAux instances.
5041  *
5042  * @returns NV_STATUS, NV_OK if success,
5043  *                     NV_ERR_GENERIC, if error
5044  *                     NV_ERR_NOT_SUPPORTED, for unsupported platforms
5045  */
5046 
5047 NV_STATUS
5048 osGetTegraNumDpAuxInstances
5049 (
5050     OS_GPU_INFO *pOsGpuInfo,
5051     NvU32  *pNumIntances
5052 )
5053 {
5054     return NV_ERR_NOT_SUPPORTED;
5055 }
5056 
5057 /*
5058  * @brief Return the priv Data of current IRQ.
5059  * It is wrapper function to call unix/linux layer.
5060  *
5061  * @param[in]  pGpu           Device of interest
5062  * @param[out] pPrivData      privData of current IRQ
5063  *
5064  * @returns NV_STATUS, NV_OK if success,
5065  *                     NV_ERR_GENERIC, if error
5066  *                     NV_ERR_NOT_SUPPORTED, for unsupported platforms
5067  */
5068 NV_STATUS
5069 osGetCurrentIrqPrivData
5070 (
5071     OS_GPU_INFO *pOsGpuInfo,
5072     NvU32  *pPrivData
5073 )
5074 {
5075     return NV_ERR_NOT_SUPPORTED;
5076 }
5077 
5078 /*!
5079  * @brief Get the brightness level
5080  * It is wrapper function to call unix/linux layer.
5081  *
5082  * @param[in]  pGpu           GPU object pointer
5083  * @param[out] brightness     Pointer to brightness level
5084  *
5085  * @returns NV_STATUS, NV_OK if success,
5086  *                     NV_ERR_GENERIC, if error
5087  *                     NV_ERR_NOT_SUPPORTED, for unsupported platforms
5088  */
5089 NV_STATUS
5090 osGetTegraBrightnessLevel
5091 (
5092     OS_GPU_INFO *pOsGpuInfo,
5093     NvU32 *brightness
5094 )
5095 {
5096     return NV_ERR_NOT_SUPPORTED;
5097 }
5098 
5099 /*!
5100  * @brief Set the brightness level
5101  * It is wrapper function to call unix/linux layer.
5102  *
5103  * @param[in]  pGpu           GPU object pointer
5104  * @param[out] brightness     brightness level
5105  *
5106  * @returns NV_STATUS, NV_OK if success,
5107  *                     NV_ERR_GENERIC, if error
5108  *                     NV_ERR_NOT_SUPPORTED, for unsupported platforms
5109  */
5110 NV_STATUS
5111 osSetTegraBrightnessLevel
5112 (
5113     OS_GPU_INFO *pOsGpuInfo,
5114     NvU32 brightness
5115 )
5116 {
5117     return NV_ERR_NOT_SUPPORTED;
5118 }
5119 
5120 /* @brief Gets syncpoint aperture information
5121  *
5122  * @param[in] OS_GPU_INFO OS specific GPU information pointer
5123  * @param[in]  syncpointId
5124  * @param[out] *physAddr
5125  * @param[out] *limit
5126  * @param[out] *offset
5127  */
5128 NV_STATUS
5129 osGetSyncpointAperture
5130 (
5131     OS_GPU_INFO *pOsGpuInfo,
5132     NvU32 syncpointId,
5133     NvU64 *physAddr,
5134     NvU64 *limit,
5135     NvU32 *offset
5136 )
5137 {
5138     return NV_ERR_NOT_SUPPORTED;
5139 }
5140 
5141 /*!
5142  * @brief Enable PCIe AtomicOp Requester Enable and return
5143  * the completer side capabilities that the requester can send.
5144  *
5145  * @param[in]    pOsGpuInfo   OS_GPU_INFO OS specific GPU information pointer
5146  * @param[out]   pMask        mask of supported atomic size, including one or more of:
5147  *                            OS_PCIE_CAP_MASK_REQ_ATOMICS_32
5148  *                            OS_PCIE_CAP_MASK_REQ_ATOMICS_64
5149  *                            OS_PCIE_CAP_MASK_REQ_ATOMICS_128
5150  *
5151  * @returns NV_STATUS, NV_OK if success
5152  *                     NV_ERR_NOT_SUPPORTED if platform doesn't support this
5153  *                     feature.
5154  *                     NV_ERR_GENERIC for any other error
5155  */
5156 
5157 NV_STATUS
5158 osConfigurePcieReqAtomics
5159 (
5160     OS_GPU_INFO *pOsGpuInfo,
5161     NvU32       *pMask
5162 )
5163 {
5164     if (pMask)
5165     {
5166         *pMask = 0U;
5167         if (pOsGpuInfo)
5168         {
5169             if (os_enable_pci_req_atomics(pOsGpuInfo->handle,
5170                                           OS_INTF_PCIE_REQ_ATOMICS_32BIT) == NV_OK)
5171                 *pMask |= OS_PCIE_CAP_MASK_REQ_ATOMICS_32;
5172             if (os_enable_pci_req_atomics(pOsGpuInfo->handle,
5173                                           OS_INTF_PCIE_REQ_ATOMICS_64BIT) == NV_OK)
5174                 *pMask |= OS_PCIE_CAP_MASK_REQ_ATOMICS_64;
5175             if (os_enable_pci_req_atomics(pOsGpuInfo->handle,
5176                                           OS_INTF_PCIE_REQ_ATOMICS_128BIT) == NV_OK)
5177                 *pMask |= OS_PCIE_CAP_MASK_REQ_ATOMICS_128;
5178 
5179             if (*pMask != 0)
5180                 return NV_OK;
5181         }
5182     }
5183     return NV_ERR_NOT_SUPPORTED;
5184 }
5185 
5186 /*!
5187  * @brief Check GPU is accessible or not
5188  *
5189  * @param[in]  pGpu           GPU object pointer
5190  *
5191  * @returns NVBool, Returns TRUE if the GPU is accessible,
5192  *                  FALSE, if error
5193  */
5194 NvBool
5195 osIsGpuAccessible
5196 (
5197     OBJGPU *pGpu
5198 )
5199 {
5200     return nv_is_gpu_accessible(NV_GET_NV_STATE(pGpu));
5201 }
5202 
5203 /*!
5204  * @brief Check whether GPU has received a shutdown notification from the OS
5205  */
5206 NvBool
5207 osIsGpuShutdown
5208 (
5209     OBJGPU *pGpu
5210 )
5211 {
5212     nv_state_t *nv = NV_GET_NV_STATE(pGpu);
5213     return nv ? nv->is_shutdown : NV_TRUE;
5214 }
5215 
5216 /*!
5217  * @brief Check GPU OS info matches
5218  *
5219  * @param[in]  pGpu           GPU object pointer
5220  *
5221  * @returns NVBool, Returns TRUE if matched.
5222  */
5223 NvBool
5224 osMatchGpuOsInfo
5225 (
5226     OBJGPU *pGpu,
5227     void   *pOsInfo
5228 )
5229 {
5230     return nv_match_gpu_os_info(NV_GET_NV_STATE(pGpu), pOsInfo);
5231 }
5232 
5233 /*!
5234  * @brief Release GPU OS info.
5235  *
5236  * @param[in]  pOsInfo        GPU OS info pointer
5237  *
5238  * @returns void
5239  */
5240 void
5241 osReleaseGpuOsInfo
5242 (
5243     void   *pOsInfo
5244 )
5245 {
5246     nv_put_file_private(pOsInfo);
5247 }
5248 
5249 NvBool
5250 osDmabufIsSupported(void)
5251 {
5252     return os_dma_buf_enabled;
5253 }
5254 
5255