1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 
25 #ifndef _NV_H_
26 #define _NV_H_
27 
28 
29 
30 #include <nvlimits.h>
31 
32 #if defined(NV_KERNEL_INTERFACE_LAYER) && defined(__FreeBSD__)
33   #include <sys/stddef.h>   // NULL
34 #elif defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
35   #include <linux/stddef.h> // NULL
36 #else
37   #include <stddef.h>       // NULL
38 #endif
39 
40 #include <nvstatus.h>
41 #include "nv_stdarg.h"
42 #include <nv-caps.h>
43 #include <nv-firmware.h>
44 #include <nv-ioctl.h>
45 #include <nv-ioctl-numa.h>
46 #include <nvmisc.h>
47 
48 extern nv_cap_t *nvidia_caps_root;
49 
50 extern const NvBool nv_is_rm_firmware_supported_os;
51 
52 #include <nv-kernel-interface-api.h>
53 
54 #define GPU_UUID_LEN    (16)
55 
56 /*
57  * Buffer size for an ASCII UUID: We need 2 digits per byte, plus space
58  * for "GPU", 5 dashes, and '\0' termination:
59  */
60 #define GPU_UUID_ASCII_LEN  (GPU_UUID_LEN * 2 + 9)
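
/*
 * For example, the resulting 41-byte buffer holds a string of the form
 * "GPU-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx": 3 characters for "GPU",
 * 5 dashes, 32 hex digits (2 per byte), and the terminating '\0'.
 */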
61 
62 /*
63  * #define an absolute maximum used as a sanity check for the
64  * NV_ESC_IOCTL_XFER_CMD ioctl() size argument.
65  */
66 #define NV_ABSOLUTE_MAX_IOCTL_SIZE  16384
67 
68 /*
69  * Solaris provides no more than 8 bits for the argument size in
70  * the ioctl() command encoding; make sure we don't exceed this
71  * limit.
72  */
73 #define __NV_IOWR_ASSERT(type) ((sizeof(type) <= NV_PLATFORM_MAX_IOCTL_SIZE) ? 1 : -1)
74 #define __NV_IOWR(nr, type) ({                                        \
75     typedef char __NV_IOWR_TYPE_SIZE_ASSERT[__NV_IOWR_ASSERT(type)];  \
76     _IOWR(NV_IOCTL_MAGIC, (nr), type);                                \
77 })
78 
79 #define NV_PCI_DEV_FMT          "%04x:%02x:%02x.%x"
80 #define NV_PCI_DEV_FMT_ARGS(nv) (nv)->pci_info.domain, (nv)->pci_info.bus, \
81                                 (nv)->pci_info.slot, (nv)->pci_info.function
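
/*
 * Illustrative use with any printf-style function, assuming "nv" points to
 * an nv_state_t:
 *
 *     printk("NVRM: probing device " NV_PCI_DEV_FMT "\n", NV_PCI_DEV_FMT_ARGS(nv));
 */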
82 
83 #define NV_RM_DEVICE_INTR_ADDRESS 0x100
84 
85 /*!
86  * @brief The order of the display clocks in the below defined enum
87  * should be synced with below mapping array and macro.
88  * All four should be updated simultaneously in case
89  * of removal or addition of clocks in below order.
90  * Also, TEGRASOC_WHICH_CLK_MAX is used in various places
91  * in below mentioned files.
92  * arch/nvalloc/unix/Linux/nv-linux.h
93  *
94  * arch/nvalloc/unix/src/os.c
95  * dispClkMapRmToOsArr[] = {...};
96  *
97  * arch/nvalloc/unix/Linux/nv-clk.c
98  * osMapClk[] = {...};
99  *
100  */
101 typedef enum _TEGRASOC_WHICH_CLK
102 {
103     TEGRASOC_WHICH_CLK_NVDISPLAYHUB,
104     TEGRASOC_WHICH_CLK_NVDISPLAY_DISP,
105     TEGRASOC_WHICH_CLK_NVDISPLAY_P0,
106     TEGRASOC_WHICH_CLK_NVDISPLAY_P1,
107     TEGRASOC_WHICH_CLK_DPAUX0,
108     TEGRASOC_WHICH_CLK_FUSE,
109     TEGRASOC_WHICH_CLK_DSIPLL_VCO,
110     TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN,
111     TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA,
112     TEGRASOC_WHICH_CLK_SPPLL0_VCO,
113     TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN,
114     TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA,
115     TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB,
116     TEGRASOC_WHICH_CLK_SPPLL0_DIV10,
117     TEGRASOC_WHICH_CLK_SPPLL0_DIV25,
118     TEGRASOC_WHICH_CLK_SPPLL0_DIV27,
119     TEGRASOC_WHICH_CLK_SPPLL1_VCO,
120     TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN,
121     TEGRASOC_WHICH_CLK_SPPLL1_DIV27,
122     TEGRASOC_WHICH_CLK_VPLL0_REF,
123     TEGRASOC_WHICH_CLK_VPLL0,
124     TEGRASOC_WHICH_CLK_VPLL1,
125     TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF,
126     TEGRASOC_WHICH_CLK_RG0,
127     TEGRASOC_WHICH_CLK_RG1,
128     TEGRASOC_WHICH_CLK_DISPPLL,
129     TEGRASOC_WHICH_CLK_DISPHUBPLL,
130     TEGRASOC_WHICH_CLK_DSI_LP,
131     TEGRASOC_WHICH_CLK_DSI_CORE,
132     TEGRASOC_WHICH_CLK_DSI_PIXEL,
133     TEGRASOC_WHICH_CLK_PRE_SOR0,
134     TEGRASOC_WHICH_CLK_PRE_SOR1,
135     TEGRASOC_WHICH_CLK_DP_LINK_REF,
136     TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT,
137     TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO,
138     TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M,
139     TEGRASOC_WHICH_CLK_RG0_M,
140     TEGRASOC_WHICH_CLK_RG1_M,
141     TEGRASOC_WHICH_CLK_SOR0_M,
142     TEGRASOC_WHICH_CLK_SOR1_M,
143     TEGRASOC_WHICH_CLK_PLLHUB,
144     TEGRASOC_WHICH_CLK_SOR0,
145     TEGRASOC_WHICH_CLK_SOR1,
146     TEGRASOC_WHICH_CLK_SOR_PAD_INPUT,
147     TEGRASOC_WHICH_CLK_PRE_SF0,
148     TEGRASOC_WHICH_CLK_SF0,
149     TEGRASOC_WHICH_CLK_SF1,
150     TEGRASOC_WHICH_CLK_DSI_PAD_INPUT,
151     TEGRASOC_WHICH_CLK_PRE_SOR0_REF,
152     TEGRASOC_WHICH_CLK_PRE_SOR1_REF,
153     TEGRASOC_WHICH_CLK_SOR0_PLL_REF,
154     TEGRASOC_WHICH_CLK_SOR1_PLL_REF,
155     TEGRASOC_WHICH_CLK_SOR0_REF,
156     TEGRASOC_WHICH_CLK_SOR1_REF,
157     TEGRASOC_WHICH_CLK_OSC,
158     TEGRASOC_WHICH_CLK_DSC,
159     TEGRASOC_WHICH_CLK_MAUD,
160     TEGRASOC_WHICH_CLK_AZA_2XBIT,
161     TEGRASOC_WHICH_CLK_AZA_BIT,
162     TEGRASOC_WHICH_CLK_MIPI_CAL,
163     TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL,
164     TEGRASOC_WHICH_CLK_SOR0_DIV,
165     TEGRASOC_WHICH_CLK_DISP_ROOT,
166     TEGRASOC_WHICH_CLK_HUB_ROOT,
167     TEGRASOC_WHICH_CLK_PLLA_DISP,
168     TEGRASOC_WHICH_CLK_PLLA_DISPHUB,
169     TEGRASOC_WHICH_CLK_PLLA,
170     TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only.
171 } TEGRASOC_WHICH_CLK;
172 
173 #ifdef NVRM
174 
175 extern const char *pNVRM_ID;
176 
177 /*
178  * ptr arithmetic convenience
179  */
180 
181 typedef union
182 {
183     volatile NvV8 Reg008[1];
184     volatile NvV16 Reg016[1];
185     volatile NvV32 Reg032[1];
186 } nv_hwreg_t, * nv_phwreg_t;
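
/*
 * Illustrative sketch ("aperture" and "off" are hypothetical): given a mapped
 * nv_aperture_t (defined below), its map_u pointer permits width-specific
 * access, e.g. a 32-bit read at byte offset "off" could be written as
 *
 *     NvV32 val = aperture->map_u->Reg032[off / sizeof(NvV32)];
 */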
187 
188 
189 #define NVRM_PCICFG_NUM_BARS            6
190 #define NVRM_PCICFG_BAR_OFFSET(i)       (0x10 + (i) * 4)
191 #define NVRM_PCICFG_BAR_REQTYPE_MASK    0x00000001
192 #define NVRM_PCICFG_BAR_REQTYPE_MEMORY  0x00000000
193 #define NVRM_PCICFG_BAR_MEMTYPE_MASK    0x00000006
194 #define NVRM_PCICFG_BAR_MEMTYPE_64BIT   0x00000004
195 #define NVRM_PCICFG_BAR_ADDR_MASK       0xfffffff0
196 
197 #define NVRM_PCICFG_NUM_DWORDS          16
198 
199 #define NV_GPU_NUM_BARS                 3
200 #define NV_GPU_BAR_INDEX_REGS           0
201 #define NV_GPU_BAR_INDEX_FB             1
202 #define NV_GPU_BAR_INDEX_IMEM           2
203 
204 typedef struct
205 {
206     NvU64 cpu_address;
207     NvU64 size;
208     NvU32 offset;
209     NvU32 *map;
210     nv_phwreg_t map_u;
211 } nv_aperture_t;
212 
213 typedef struct
214 {
215     char *name;
216     NvU32 *data;
217 } nv_parm_t;
218 
219 #define NV_RM_PAGE_SHIFT    12
220 #define NV_RM_PAGE_SIZE     (1 << NV_RM_PAGE_SHIFT)
221 #define NV_RM_PAGE_MASK     (NV_RM_PAGE_SIZE - 1)
222 
223 #define NV_RM_TO_OS_PAGE_SHIFT      (os_page_shift - NV_RM_PAGE_SHIFT)
224 #define NV_RM_PAGES_TO_OS_PAGES(count) \
225     ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \
226      ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0))
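
/*
 * Illustrative example: with 64KiB OS pages (os_page_shift == 16),
 * NV_RM_TO_OS_PAGE_SHIFT is 4, so 33 RM pages round up to
 * (33 >> 4) + 1 == 3 OS pages -- a partial OS page still counts as a
 * whole one.
 */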
227 
228 #if defined(NVCPU_X86_64)
229 #define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 3)
230 #else
231 #define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 2)
232 #endif
233 
234 typedef struct nvidia_stack_s
235 {
236     NvU32 size;
237     void *top;
238     NvU8  stack[NV_STACK_SIZE-16] __attribute__ ((aligned(16)));
239 } nvidia_stack_t;
240 
241 /*
242  * TODO: Remove once all UNIX layers have been converted to use nvidia_stack_t
243  */
244 typedef nvidia_stack_t nv_stack_t;
245 
246 typedef struct nv_file_private_t nv_file_private_t;
247 
248 /*
249  * this is a wrapper for unix events
250  * unlike the events that will be returned to clients, this includes
251  * kernel-specific data, such as file pointer, etc..
252  */
253 typedef struct nv_event_s
254 {
255     NvHandle            hParent;
256     NvHandle            hObject;
257     NvU32               index;
258     NvU32               info32;
259     NvU16               info16;
260     nv_file_private_t  *nvfp;  /* per file-descriptor data pointer */
261     NvU32               fd;
262     NvBool              active; /* whether the event should be signaled */
263     NvU32               refcount; /* count of associated RM events */
264     struct nv_event_s  *next;
265 } nv_event_t;
266 
267 typedef struct nv_kern_mapping_s
268 {
269     void  *addr;
270     NvU64 size;
271     NvU32 modeFlag;
272     struct nv_kern_mapping_s *next;
273 } nv_kern_mapping_t;
274 
275 typedef struct nv_usermap_access_params_s
276 {
277     NvU64    addr;
278     NvU64    size;
279     NvU64    offset;
280     NvU64   *page_array;
281     NvU64    num_pages;
282     NvU64    mmap_start;
283     NvU64    mmap_size;
284     NvU64    access_start;
285     NvU64    access_size;
286     NvU64    remap_prot_extra;
287     NvBool   contig;
288     NvU32    caching;
289 } nv_usermap_access_params_t;
290 
291 /*
292  * It stores mapping context per mapping
293  */
294 typedef struct nv_alloc_mapping_context_s {
295     void  *alloc;
296     NvU64  page_index;
297     NvU64 *page_array;
298     NvU64  num_pages;
299     NvU64  mmap_start;
300     NvU64  mmap_size;
301     NvU64  access_start;
302     NvU64  access_size;
303     NvU64  remap_prot_extra;
304     NvU32  prot;
305     NvBool valid;
306     NvU32  caching;
307 } nv_alloc_mapping_context_t;
308 
309 typedef enum
310 {
311     NV_SOC_IRQ_DISPLAY_TYPE = 0x1,
312     NV_SOC_IRQ_DPAUX_TYPE,
313     NV_SOC_IRQ_GPIO_TYPE,
314     NV_SOC_IRQ_HDACODEC_TYPE,
315     NV_SOC_IRQ_TCPC2DISP_TYPE,
316     NV_SOC_IRQ_INVALID_TYPE
317 } nv_soc_irq_type_t;
318 
319 /*
320  * It stores interrupt numbers and interrupt type and private data
321  */
322 typedef struct nv_soc_irq_info_s {
323     NvU32 irq_num;
324     nv_soc_irq_type_t irq_type;
325     NvBool bh_pending;
326     union {
327         NvU32 gpio_num;
328         NvU32 dpaux_instance;
329     } irq_data;
330     NvS32 ref_count;
331 } nv_soc_irq_info_t;
332 
333 #define NV_MAX_SOC_IRQS              6
334 #define NV_MAX_DPAUX_NUM_DEVICES     4
335 #define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING
336 
337 #define NV_IGPU_LEGACY_STALL_IRQ     70
338 #define NV_IGPU_MAX_STALL_IRQS       3
339 #define NV_IGPU_MAX_NONSTALL_IRQS    1
340 /*
341  * per device state
342  */
343 
344 /* DMA-capable device data, defined by kernel interface layer */
345 typedef struct nv_dma_device nv_dma_device_t;
346 
347 typedef struct nv_phys_addr_range
348 {
349     NvU64 addr;
350     NvU64 len;
351 } nv_phys_addr_range_t;
352 
353 typedef struct nv_state_t
354 {
355     void  *priv;                    /* private data */
356     void  *os_state;                /* os-specific device state */
357 
358     int    flags;
359 
360     /* PCI config info */
361     nv_pci_info_t pci_info;
362     NvU16 subsystem_id;
363     NvU16 subsystem_vendor;
364     NvU32 gpu_id;
365     NvU32 iovaspace_id;
366     struct
367     {
368         NvBool         valid;
369         NvU8           uuid[GPU_UUID_LEN];
370     } nv_uuid_cache;
371     void *handle;
372 
373     NvU32 pci_cfg_space[NVRM_PCICFG_NUM_DWORDS];
374 
375     /* physical characteristics */
376     nv_aperture_t bars[NV_GPU_NUM_BARS];
377     nv_aperture_t *regs;
378     nv_aperture_t *dpaux[NV_MAX_DPAUX_NUM_DEVICES];
379     nv_aperture_t *hdacodec_regs;
380     nv_aperture_t *mipical_regs;
381     nv_aperture_t *fb, ud;
382     nv_aperture_t *simregs;
383     nv_aperture_t *emc_regs;
384 
385     NvU32  num_dpaux_instance;
386     NvU32  interrupt_line;
387     NvU32  dpaux_irqs[NV_MAX_DPAUX_NUM_DEVICES];
388     nv_soc_irq_info_t soc_irq_info[NV_MAX_SOC_IRQS];
389     NvS32 current_soc_irq;
390     NvU32 num_soc_irqs;
391     NvU32 hdacodec_irq;
392     NvU32 tcpc2disp_irq;
393     NvU8 *soc_dcb_blob;
394     NvU32 soc_dcb_size;
395     NvU32 disp_sw_soc_chip_id;
396     NvBool soc_is_dpalt_mode_supported;
397 
398     NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
399     NvU32 igpu_nonstall_irq;
400     NvU32 num_stall_irqs;
401     NvU64 dma_mask;
402 
403     NvBool primary_vga;
404 
405     NvU32 sim_env;
406 
407     NvU32 rc_timer_enabled;
408 
409     /* list of events allocated for this device */
410     nv_event_t *event_list;
411 
412     /* lock to protect event_list */
413     void *event_spinlock;
414 
415     nv_kern_mapping_t *kern_mappings;
416 
417     /* Kernel interface DMA device data */
418     nv_dma_device_t *dma_dev;
419     nv_dma_device_t *niso_dma_dev;
420 
421     /*
422      * Per-GPU queue.  The actual queue object is usually allocated in the
423      * arch-specific parent structure (e.g. nv_linux_state_t), and this
424      * pointer just points to it.
425      */
426     struct os_work_queue *queue;
427 
428     /* For loading RM as a firmware (DCE or GSP) client */
429     NvBool request_firmware;                /* request firmware from the OS */
    NvBool request_fw_client_rm;            /* attempt to init RM as a FW client */
431     NvBool allow_fallback_to_monolithic_rm; /* allow fallback to monolithic RM if FW client RM doesn't work out */
432     NvBool enable_firmware_logs;            /* attempt to enable firmware log decoding/printing */
433 
    /* Tracks whether nvidia_remove has been called */
435     NvBool removed;
436 
437     NvBool console_device;
438 
    /* Tracks whether the GPU is an external GPU */
440     NvBool is_external_gpu;
441 
    /* Tracks whether the regkey PreserveVideoMemoryAllocations is set */
443     NvBool preserve_vidmem_allocations;
444 
445     /* Variable to force allocation of 32-bit addressable memory */
446     NvBool force_dma32_alloc;
447 
448     /* PCI power state should be D0 during system suspend */
449     NvBool d0_state_in_suspend;
450 
451     /* Current cyclestats client and context */
452     NvU32 profiler_owner;
453     void *profiler_context;
454 
455     /*
456      * RMAPI objects to use in the OS layer to talk to core RM.
457      *
458      * Note that we only need to store one subdevice handle: in SLI, we will
459      * have a separate nv_state_t per physical GPU.
460      */
461     struct {
462         NvHandle hClient;
463         NvHandle hDevice;
464         NvHandle hSubDevice;
465         NvHandle hI2C;
466         NvHandle hDisp;
467     } rmapi;
468 
469     /* Bool to check if dma-buf is supported */
470     NvBool dma_buf_supported;
471 
472     /* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
473     NvBool nvpcf_dsm_in_gpu_scope;
474 
475     /* Bool to check if the device received a shutdown notification */
476     NvBool is_shutdown;
477 
478     /* Bool to check if the GPU has a coherent sysmem link */
479     NvBool coherent;
480 
481     /*
482      * NUMA node ID of the CPU to which the GPU is attached.
483      * Holds NUMA_NO_NODE on platforms that don't support NUMA configuration.
484      */
485     NvS32 cpu_numa_node_id;
486 
487     struct {
        /* Bool to check if the ISO iommu is enabled */
        NvBool iso_iommu_present;
        /* Bool to check if the NISO iommu is enabled */
        NvBool niso_iommu_present;
492         /* Display SMMU Stream IDs */
493         NvU32 dispIsoStreamId;
494         NvU32 dispNisoStreamId;
495     } iommus;
496 } nv_state_t;
497 
// These defines need to be in sync with the defines in system.h
499 #define OS_TYPE_LINUX   0x1
500 #define OS_TYPE_FREEBSD 0x2
501 #define OS_TYPE_SUNOS   0x3
502 #define OS_TYPE_VMWARE  0x4
503 
504 #define NVFP_TYPE_NONE       0x0
505 #define NVFP_TYPE_REFCOUNTED 0x1
506 #define NVFP_TYPE_REGISTERED 0x2
507 
508 struct nv_file_private_t
509 {
510     NvHandle *handles;
511     NvU16 maxHandles;
512     NvU32 deviceInstance;
513     NvU32 gpuInstanceId;
514     NvU8 metadata[64];
515 
516     nv_file_private_t *ctl_nvfp;
517     void *ctl_nvfp_priv;
518     NvU32 register_or_refcount;
519 
520     //
521     // True if a client or an event was ever allocated on this fd.
522     // If false, RMAPI cleanup is skipped.
523     //
524     NvBool bCleanupRmapi;
525 };
526 
// Forward declarations for the gpu ops structures
528 typedef struct gpuSession                           *nvgpuSessionHandle_t;
529 typedef struct gpuDevice                            *nvgpuDeviceHandle_t;
530 typedef struct gpuAddressSpace                      *nvgpuAddressSpaceHandle_t;
531 typedef struct gpuTsg                               *nvgpuTsgHandle_t;
532 typedef struct UvmGpuTsgAllocParams_tag              nvgpuTsgAllocParams_t;
533 typedef struct gpuChannel                           *nvgpuChannelHandle_t;
534 typedef struct UvmGpuChannelInfo_tag                *nvgpuChannelInfo_t;
535 typedef struct UvmGpuChannelAllocParams_tag          nvgpuChannelAllocParams_t;
536 typedef struct UvmGpuCaps_tag                       *nvgpuCaps_t;
537 typedef struct UvmGpuCopyEnginesCaps_tag            *nvgpuCesCaps_t;
538 typedef struct UvmGpuAddressSpaceInfo_tag           *nvgpuAddressSpaceInfo_t;
539 typedef struct UvmGpuAllocInfo_tag                  *nvgpuAllocInfo_t;
540 typedef struct UvmGpuP2PCapsParams_tag              *nvgpuP2PCapsParams_t;
541 typedef struct UvmGpuFbInfo_tag                     *nvgpuFbInfo_t;
542 typedef struct UvmGpuEccInfo_tag                    *nvgpuEccInfo_t;
543 typedef struct UvmGpuFaultInfo_tag                  *nvgpuFaultInfo_t;
544 typedef struct UvmGpuAccessCntrInfo_tag             *nvgpuAccessCntrInfo_t;
545 typedef struct UvmGpuAccessCntrConfig_tag           *nvgpuAccessCntrConfig_t;
546 typedef struct UvmGpuInfo_tag                       nvgpuInfo_t;
547 typedef struct UvmGpuClientInfo_tag                 nvgpuClientInfo_t;
548 typedef struct UvmPmaAllocationOptions_tag          *nvgpuPmaAllocationOptions_t;
549 typedef struct UvmPmaStatistics_tag                 *nvgpuPmaStatistics_t;
550 typedef struct UvmGpuMemoryInfo_tag                 *nvgpuMemoryInfo_t;
551 typedef struct UvmGpuExternalMappingInfo_tag        *nvgpuExternalMappingInfo_t;
552 typedef struct UvmGpuChannelResourceInfo_tag        *nvgpuChannelResourceInfo_t;
553 typedef struct UvmGpuChannelInstanceInfo_tag        *nvgpuChannelInstanceInfo_t;
554 typedef struct UvmGpuChannelResourceBindParams_tag  *nvgpuChannelResourceBindParams_t;
555 typedef struct UvmGpuPagingChannelAllocParams_tag    nvgpuPagingChannelAllocParams_t;
556 typedef struct UvmGpuPagingChannel_tag              *nvgpuPagingChannelHandle_t;
557 typedef struct UvmGpuPagingChannelInfo_tag          *nvgpuPagingChannelInfo_t;
558 typedef enum   UvmPmaGpuMemoryType_tag               nvgpuGpuMemoryType_t;
559 typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU64, NvU64 *, NvU32, NvU64, NvU64, nvgpuGpuMemoryType_t);
560 typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemoryType_t);
561 
562 /*
563  * flags
564  */
565 
566 #define NV_FLAG_OPEN                   0x0001
567 #define NV_FLAG_EXCLUDE                0x0002
568 #define NV_FLAG_CONTROL                0x0004
569 // Unused                              0x0008
570 #define NV_FLAG_SOC_DISPLAY            0x0010
571 #define NV_FLAG_USES_MSI               0x0020
572 #define NV_FLAG_USES_MSIX              0x0040
573 #define NV_FLAG_PASSTHRU               0x0080
574 #define NV_FLAG_SUSPENDED              0x0100
575 #define NV_FLAG_SOC_IGPU               0x0200
576 // Unused                              0x0400
577 #define NV_FLAG_PERSISTENT_SW_STATE    0x0800
578 #define NV_FLAG_IN_RECOVERY            0x1000
579 // Unused                              0x2000
580 #define NV_FLAG_UNBIND_LOCK            0x4000
581 /* To be set when GPU is not present on the bus, to help device teardown */
582 #define NV_FLAG_IN_SURPRISE_REMOVAL    0x8000
583 
584 typedef enum
585 {
586     NV_PM_ACTION_HIBERNATE,
587     NV_PM_ACTION_STANDBY,
588     NV_PM_ACTION_RESUME
589 } nv_pm_action_t;
590 
591 typedef enum
592 {
593     NV_PM_ACTION_DEPTH_DEFAULT,
594     NV_PM_ACTION_DEPTH_MODESET,
595     NV_PM_ACTION_DEPTH_UVM
596 } nv_pm_action_depth_t;
597 
598 typedef enum
599 {
600     NV_DYNAMIC_PM_NEVER,
601     NV_DYNAMIC_PM_COARSE,
602     NV_DYNAMIC_PM_FINE
603 } nv_dynamic_power_mode_t;
604 
605 typedef enum
606 {
607     NV_POWER_STATE_IN_HIBERNATE,
608     NV_POWER_STATE_IN_STANDBY,
609     NV_POWER_STATE_RUNNING
610 } nv_power_state_t;
611 
612 #define NV_PRIMARY_VGA(nv)      ((nv)->primary_vga)
613 
614 #define NV_IS_CTL_DEVICE(nv)    ((nv)->flags & NV_FLAG_CONTROL)
615 #define NV_IS_SOC_DISPLAY_DEVICE(nv)    \
616         ((nv)->flags & NV_FLAG_SOC_DISPLAY)
617 
618 #define NV_IS_SOC_IGPU_DEVICE(nv)    \
619         ((nv)->flags & NV_FLAG_SOC_IGPU)
620 
621 #define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)    \
622         (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)
623 
624 /*
625  * For console setup by EFI GOP, the base address is BAR1.
626  * For console setup by VBIOS, the base address is BAR2 + 16MB.
627  */
628 #define NV_IS_CONSOLE_MAPPED(nv, addr)  \
629         (((addr) == (nv)->bars[NV_GPU_BAR_INDEX_FB].cpu_address) || \
630          ((addr) == ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000)))
631 
632 #define NV_SOC_IS_ISO_IOMMU_PRESENT(nv)     \
633         ((nv)->iommus.iso_iommu_present)
634 
635 #define NV_SOC_IS_NISO_IOMMU_PRESENT(nv)     \
636         ((nv)->iommus.niso_iommu_present)
637 /*
638  * GPU add/remove events
639  */
640 #define NV_SYSTEM_GPU_ADD_EVENT             0x9001
641 #define NV_SYSTEM_GPU_REMOVE_EVENT          0x9002
642 
643 /*
 * NVIDIA ACPI sub-event IDs (event types) to be passed to the
 * core NVIDIA driver for ACPI events.
646  */
647 #define NV_SYSTEM_ACPI_EVENT_VALUE_DISPLAY_SWITCH_DEFAULT    0
648 #define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_UNDOCKED       0
649 #define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_DOCKED         1
650 
651 #define NV_ACPI_NVIF_HANDLE_PRESENT 0x01
652 #define NV_ACPI_DSM_HANDLE_PRESENT  0x02
653 #define NV_ACPI_WMMX_HANDLE_PRESENT 0x04
654 
655 #define NV_EVAL_ACPI_METHOD_NVIF     0x01
656 #define NV_EVAL_ACPI_METHOD_WMMX     0x02
657 
658 typedef enum {
659     NV_I2C_CMD_READ = 1,
660     NV_I2C_CMD_WRITE,
661     NV_I2C_CMD_SMBUS_READ,
662     NV_I2C_CMD_SMBUS_WRITE,
663     NV_I2C_CMD_SMBUS_QUICK_WRITE,
664     NV_I2C_CMD_SMBUS_QUICK_READ,
665     NV_I2C_CMD_SMBUS_BLOCK_READ,
666     NV_I2C_CMD_SMBUS_BLOCK_WRITE,
667     NV_I2C_CMD_BLOCK_READ,
668     NV_I2C_CMD_BLOCK_WRITE
669 } nv_i2c_cmd_t;
670 
671 // Flags needed by OSAllocPagesNode
672 #define NV_ALLOC_PAGES_NODE_NONE                0x0
673 #define NV_ALLOC_PAGES_NODE_SKIP_RECLAIM        0x1
674 
675 /*
676 ** where we hide our nv_state_t * ...
677 */
678 #define NV_SET_NV_STATE(pgpu,p) ((pgpu)->pOsGpuInfo = (p))
679 #define NV_GET_NV_STATE(pGpu) \
680     (nv_state_t *)((pGpu) ? (pGpu)->pOsGpuInfo : NULL)
681 
/*
 * Range checks for mapped apertures. The "(offset + (length - 1)) >= offset"
 * term in each check below guards against NvU64 overflow for pathological
 * offset/length combinations.
 */
static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
    return ((offset >= nv->regs->cpu_address) &&
            ((offset + (length - 1)) >= offset) &&
            ((offset + (length - 1)) <= (nv->regs->cpu_address + (nv->regs->size - 1))));
}
688 
689 static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
690 {
691     return  ((nv->fb) && (nv->fb->size != 0) &&
692              (offset >= nv->fb->cpu_address) &&
693              ((offset + (length - 1)) >= offset) &&
694              ((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1))));
695 }
696 
697 static inline NvBool IS_UD_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
698 {
699     return ((nv->ud.cpu_address != 0) && (nv->ud.size != 0) &&
700             (offset >= nv->ud.cpu_address) &&
701             ((offset + (length - 1)) >= offset) &&
702             ((offset + (length - 1)) <= (nv->ud.cpu_address + (nv->ud.size - 1))));
703 }
704 
705 static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
706 {
707     return ((nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) &&
708             (nv->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) &&
709             (offset >= nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) &&
710             ((offset + (length - 1)) >= offset) &&
711             ((offset + (length - 1)) <= (nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address +
712                                          (nv->bars[NV_GPU_BAR_INDEX_IMEM].size - 1))));
713 }
714 
715 #define NV_RM_MAX_MSIX_LINES  8
716 
717 #define NV_MAX_ISR_DELAY_US           20000
718 #define NV_MAX_ISR_DELAY_MS           (NV_MAX_ISR_DELAY_US / 1000)
719 
720 #define NV_TIMERCMP(a, b, CMP)                                              \
721     (((a)->tv_sec == (b)->tv_sec) ?                                         \
722         ((a)->tv_usec CMP (b)->tv_usec) : ((a)->tv_sec CMP (b)->tv_sec))
723 
724 #define NV_TIMERADD(a, b, result)                                           \
725     {                                                                       \
726         (result)->tv_sec = (a)->tv_sec + (b)->tv_sec;                       \
727         (result)->tv_usec = (a)->tv_usec + (b)->tv_usec;                    \
728         if ((result)->tv_usec >= 1000000)                                   \
729         {                                                                   \
730             ++(result)->tv_sec;                                             \
731             (result)->tv_usec -= 1000000;                                   \
732         }                                                                   \
733     }
734 
735 #define NV_TIMERSUB(a, b, result)                                           \
736     {                                                                       \
737         (result)->tv_sec = (a)->tv_sec - (b)->tv_sec;                       \
738         (result)->tv_usec = (a)->tv_usec - (b)->tv_usec;                    \
739         if ((result)->tv_usec < 0)                                          \
740         {                                                                   \
741           --(result)->tv_sec;                                               \
742           (result)->tv_usec += 1000000;                                     \
743         }                                                                   \
744     }
745 
746 #define NV_TIMEVAL_TO_US(tv)    ((NvU64)(tv).tv_sec * 1000000 + (tv).tv_usec)
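
/*
 * Illustrative usage sketch (variable names are hypothetical): given two
 * timeval-style samples "start" and "now", the elapsed time in microseconds
 * could be computed as
 *
 *     struct timeval delta;
 *     NV_TIMERSUB(&now, &start, &delta);
 *     NvU64 elapsed_us = NV_TIMEVAL_TO_US(delta);
 *
 * and NV_TIMERCMP(&now, &deadline, >) would test whether "deadline" has passed.
 */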
747 
748 #ifndef NV_ALIGN_UP
749 #define NV_ALIGN_UP(v,g) (((v) + ((g) - 1)) & ~((g) - 1))
750 #endif
751 #ifndef NV_ALIGN_DOWN
752 #define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1))
753 #endif
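
/*
 * Both alignment macros assume the granularity "g" is a power of two, e.g.
 * NV_ALIGN_UP(0x1234, 0x1000) == 0x2000 and NV_ALIGN_DOWN(0x1234, 0x1000) == 0x1000.
 */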
754 
755 /*
756  * driver internal interfaces
757  */
758 
759 /*
760  * ---------------------------------------------------------------------------
761  *
762  * Function prototypes for UNIX specific OS interface.
763  *
764  * ---------------------------------------------------------------------------
765  */
766 
767 NvU32      NV_API_CALL  nv_get_dev_minor         (nv_state_t *);
768 void*      NV_API_CALL  nv_alloc_kernel_mapping  (nv_state_t *, void *, NvU64, NvU32, NvU64, void **);
769 NV_STATUS  NV_API_CALL  nv_free_kernel_mapping   (nv_state_t *, void *, void *, void *);
770 NV_STATUS  NV_API_CALL  nv_alloc_user_mapping    (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **);
771 NV_STATUS  NV_API_CALL  nv_free_user_mapping     (nv_state_t *, void *, NvU64, void *);
772 NV_STATUS  NV_API_CALL  nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32);
773 
774 NvU64  NV_API_CALL  nv_get_kern_phys_address     (NvU64);
775 NvU64  NV_API_CALL  nv_get_user_phys_address     (NvU64);
776 nv_state_t*  NV_API_CALL  nv_get_adapter_state   (NvU32, NvU8, NvU8);
777 nv_state_t*  NV_API_CALL  nv_get_ctl_state       (void);
778 
779 void   NV_API_CALL  nv_set_dma_address_size      (nv_state_t *, NvU32 );
780 
781 NV_STATUS  NV_API_CALL  nv_alias_pages           (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **);
782 NV_STATUS  NV_API_CALL  nv_alloc_pages           (nv_state_t *, NvU32, NvU64, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
783 NV_STATUS  NV_API_CALL  nv_free_pages            (nv_state_t *, NvU32, NvBool, NvU32, void *);
784 
785 NV_STATUS  NV_API_CALL  nv_register_user_pages   (nv_state_t *, NvU64, NvU64 *, void *, void **);
786 void       NV_API_CALL  nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **);
787 
788 NV_STATUS NV_API_CALL   nv_register_peer_io_mem  (nv_state_t *, NvU64 *, NvU64, void **);
789 void      NV_API_CALL   nv_unregister_peer_io_mem(nv_state_t *, void *);
790 
791 struct sg_table;
792 
793 NV_STATUS NV_API_CALL   nv_register_sgt          (nv_state_t *, NvU64 *, NvU64, NvU32, void **, struct sg_table *, void *);
794 void      NV_API_CALL   nv_unregister_sgt        (nv_state_t *, struct sg_table **, void **, void *);
795 NV_STATUS NV_API_CALL   nv_register_phys_pages   (nv_state_t *, NvU64 *, NvU64, NvU32, void **);
796 void      NV_API_CALL   nv_unregister_phys_pages (nv_state_t *, void *);
797 
798 NV_STATUS  NV_API_CALL  nv_dma_map_sgt           (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **);
799 
800 NV_STATUS  NV_API_CALL  nv_dma_map_alloc         (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **);
801 NV_STATUS  NV_API_CALL  nv_dma_unmap_alloc       (nv_dma_device_t *, NvU64, NvU64 *, void **);
802 
803 NV_STATUS  NV_API_CALL  nv_dma_map_peer          (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *);
804 void       NV_API_CALL  nv_dma_unmap_peer        (nv_dma_device_t *, NvU64, NvU64);
805 
806 NV_STATUS  NV_API_CALL  nv_dma_map_mmio          (nv_dma_device_t *, NvU64, NvU64 *);
807 void       NV_API_CALL  nv_dma_unmap_mmio        (nv_dma_device_t *, NvU64, NvU64);
808 
809 void       NV_API_CALL  nv_dma_cache_invalidate  (nv_dma_device_t *, void *);
810 void       NV_API_CALL  nv_dma_enable_nvlink     (nv_dma_device_t *);
811 
812 NvS32  NV_API_CALL  nv_start_rc_timer            (nv_state_t *);
813 NvS32  NV_API_CALL  nv_stop_rc_timer             (nv_state_t *);
814 
815 void   NV_API_CALL  nv_post_event                (nv_event_t *, NvHandle, NvU32, NvU32, NvU16, NvBool);
816 NvS32  NV_API_CALL  nv_get_event                 (nv_file_private_t *, nv_event_t *, NvU32 *);
817 
818 void*  NV_API_CALL  nv_i2c_add_adapter           (nv_state_t *, NvU32);
819 void   NV_API_CALL  nv_i2c_del_adapter           (nv_state_t *, void *);
820 
821 void   NV_API_CALL  nv_acpi_methods_init         (NvU32 *);
822 void   NV_API_CALL  nv_acpi_methods_uninit       (void);
823 
824 NV_STATUS  NV_API_CALL  nv_acpi_method           (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
825 NV_STATUS  NV_API_CALL  nv_acpi_dsm_method       (nv_state_t *, NvU8 *, NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
826 NV_STATUS  NV_API_CALL  nv_acpi_ddc_method       (nv_state_t *, void *, NvU32 *, NvBool);
827 NV_STATUS  NV_API_CALL  nv_acpi_dod_method       (nv_state_t *, NvU32 *, NvU32 *);
828 NV_STATUS  NV_API_CALL  nv_acpi_rom_method       (nv_state_t *, NvU32 *, NvU32 *);
829 NV_STATUS  NV_API_CALL  nv_acpi_get_powersource  (NvU32 *);
830 NvBool     NV_API_CALL  nv_acpi_is_battery_present(void);
831 
832 NV_STATUS  NV_API_CALL  nv_acpi_mux_method       (nv_state_t *, NvU32 *, NvU32, const char *);
833 
834 NV_STATUS  NV_API_CALL  nv_log_error             (nv_state_t *, NvU32, const char *, va_list);
835 
836 NvU64      NV_API_CALL  nv_get_dma_start_address (nv_state_t *);
837 NV_STATUS  NV_API_CALL  nv_set_primary_vga_status(nv_state_t *);
838 NV_STATUS  NV_API_CALL  nv_pci_trigger_recovery  (nv_state_t *);
839 NvBool     NV_API_CALL  nv_requires_dma_remap    (nv_state_t *);
840 
841 NvBool     NV_API_CALL  nv_is_rm_firmware_active(nv_state_t *);
842 const void*NV_API_CALL  nv_get_firmware(nv_state_t *, nv_firmware_type_t, nv_firmware_chip_family_t, const void **, NvU32 *);
843 void       NV_API_CALL  nv_put_firmware(const void *);
844 
845 nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
846 void               NV_API_CALL nv_put_file_private(void *);
847 
848 NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
849 NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);
850 
851 NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
852 NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode);
853 
854 void      NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv);
855 
856 void      NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64, NvU64);
857 
858 void      NV_API_CALL nv_p2p_free_platform_data(void *data);
859 
860 #if defined(NVCPU_PPC64LE)
861 NV_STATUS NV_API_CALL nv_get_nvlink_line_rate    (nv_state_t *, NvU32 *);
862 #endif
863 
864 NV_STATUS NV_API_CALL nv_revoke_gpu_mappings     (nv_state_t *);
865 void      NV_API_CALL nv_acquire_mmap_lock       (nv_state_t *);
866 void      NV_API_CALL nv_release_mmap_lock       (nv_state_t *);
867 NvBool    NV_API_CALL nv_get_all_mappings_revoked_locked (nv_state_t *);
868 void      NV_API_CALL nv_set_safe_to_mmap_locked (nv_state_t *, NvBool);
869 
870 NV_STATUS NV_API_CALL nv_indicate_idle           (nv_state_t *);
871 NV_STATUS NV_API_CALL nv_indicate_not_idle       (nv_state_t *);
872 void      NV_API_CALL nv_idle_holdoff            (nv_state_t *);
873 
874 NvBool    NV_API_CALL nv_dynamic_power_available (nv_state_t *);
875 void      NV_API_CALL nv_audio_dynamic_power     (nv_state_t *);
876 
877 void      NV_API_CALL nv_control_soc_irqs        (nv_state_t *, NvBool bEnable);
878 NV_STATUS NV_API_CALL nv_get_current_irq_priv_data(nv_state_t *, NvU32 *);
879 
880 NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap (int, int*);
881 int       NV_API_CALL nv_cap_drv_init(void);
882 void      NV_API_CALL nv_cap_drv_exit(void);
883 NvBool    NV_API_CALL nv_is_gpu_accessible(nv_state_t *);
884 NvBool    NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);
885 
886 NvU32     NV_API_CALL nv_get_os_type(void);
887 
888 void      NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end);
889 void      NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU64 *);
890 
891 struct dma_buf;
892 typedef struct nv_dma_buf nv_dma_buf_t;
893 struct drm_gem_object;
894 
895 NV_STATUS NV_API_CALL nv_dma_import_sgt  (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *);
896 void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *);
897 NV_STATUS NV_API_CALL nv_dma_import_dma_buf      (nv_dma_device_t *, struct dma_buf *, NvU32 *, struct sg_table **, nv_dma_buf_t **);
898 NV_STATUS NV_API_CALL nv_dma_import_from_fd      (nv_dma_device_t *, NvS32, NvU32 *, struct sg_table **, nv_dma_buf_t **);
899 void      NV_API_CALL nv_dma_release_dma_buf     (nv_dma_buf_t *);
900 
901 void      NV_API_CALL nv_schedule_uvm_isr        (nv_state_t *);
902 
903 NvBool    NV_API_CALL nv_platform_supports_s0ix  (void);
904 NvBool    NV_API_CALL nv_s2idle_pm_configured    (void);
905 
906 NvBool    NV_API_CALL nv_is_chassis_notebook      (void);
907 void      NV_API_CALL nv_allow_runtime_suspend    (nv_state_t *nv);
908 void      NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv);
909 
910 typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *);
911 
912 NV_STATUS NV_API_CALL nv_get_num_phys_pages      (void *, NvU32 *);
913 NV_STATUS NV_API_CALL nv_get_phys_pages          (void *, void *, NvU32 *);
914 
915 void      NV_API_CALL nv_get_disp_smmu_stream_ids (nv_state_t *, NvU32 *, NvU32 *);
916 
917 /*
918  * ---------------------------------------------------------------------------
919  *
920  * Function prototypes for Resource Manager interface.
921  *
922  * ---------------------------------------------------------------------------
923  */
924 
925 NvBool     NV_API_CALL  rm_init_rm               (nvidia_stack_t *);
926 void       NV_API_CALL  rm_shutdown_rm           (nvidia_stack_t *);
927 NvBool     NV_API_CALL  rm_init_private_state    (nvidia_stack_t *, nv_state_t *);
928 void       NV_API_CALL  rm_free_private_state    (nvidia_stack_t *, nv_state_t *);
929 NvBool     NV_API_CALL  rm_init_adapter          (nvidia_stack_t *, nv_state_t *);
930 void       NV_API_CALL  rm_disable_adapter       (nvidia_stack_t *, nv_state_t *);
931 void       NV_API_CALL  rm_shutdown_adapter      (nvidia_stack_t *, nv_state_t *);
932 NV_STATUS  NV_API_CALL  rm_exclude_adapter       (nvidia_stack_t *, nv_state_t *);
933 NV_STATUS  NV_API_CALL  rm_acquire_api_lock      (nvidia_stack_t *);
934 NV_STATUS  NV_API_CALL  rm_release_api_lock      (nvidia_stack_t *);
935 NV_STATUS  NV_API_CALL  rm_acquire_gpu_lock      (nvidia_stack_t *, nv_state_t *);
936 NV_STATUS  NV_API_CALL  rm_release_gpu_lock      (nvidia_stack_t *, nv_state_t *);
937 NV_STATUS  NV_API_CALL  rm_acquire_all_gpus_lock (nvidia_stack_t *);
938 NV_STATUS  NV_API_CALL  rm_release_all_gpus_lock (nvidia_stack_t *);
939 NV_STATUS  NV_API_CALL  rm_ioctl                 (nvidia_stack_t *, nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32);
940 NvBool     NV_API_CALL  rm_isr                   (nvidia_stack_t *, nv_state_t *, NvU32 *);
941 void       NV_API_CALL  rm_isr_bh                (nvidia_stack_t *, nv_state_t *);
942 void       NV_API_CALL  rm_isr_bh_unlocked       (nvidia_stack_t *, nv_state_t *);
943 NvBool     NV_API_CALL  rm_is_msix_allowed       (nvidia_stack_t *, nv_state_t *);
944 NV_STATUS  NV_API_CALL  rm_power_management      (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
945 NV_STATUS  NV_API_CALL  rm_stop_user_channels    (nvidia_stack_t *, nv_state_t *);
946 NV_STATUS  NV_API_CALL  rm_restart_user_channels (nvidia_stack_t *, nv_state_t *);
947 NV_STATUS  NV_API_CALL  rm_save_low_res_mode     (nvidia_stack_t *, nv_state_t *);
948 void       NV_API_CALL  rm_get_vbios_version     (nvidia_stack_t *, nv_state_t *, char *);
949 char*      NV_API_CALL  rm_get_gpu_uuid          (nvidia_stack_t *, nv_state_t *);
950 const NvU8* NV_API_CALL rm_get_gpu_uuid_raw      (nvidia_stack_t *, nv_state_t *);
951 void       NV_API_CALL  rm_set_rm_firmware_requested(nvidia_stack_t *, nv_state_t *);
952 void       NV_API_CALL  rm_get_firmware_version  (nvidia_stack_t *, nv_state_t *, char *, NvLength);
953 void       NV_API_CALL  rm_cleanup_file_private  (nvidia_stack_t *, nv_state_t *, nv_file_private_t *);
954 void       NV_API_CALL  rm_unbind_lock           (nvidia_stack_t *, nv_state_t *);
955 NV_STATUS  NV_API_CALL  rm_read_registry_dword   (nvidia_stack_t *, nv_state_t *, const char *, NvU32 *);
956 NV_STATUS  NV_API_CALL  rm_write_registry_dword  (nvidia_stack_t *, nv_state_t *, const char *, NvU32);
957 NV_STATUS  NV_API_CALL  rm_write_registry_binary (nvidia_stack_t *, nv_state_t *, const char *, NvU8 *, NvU32);
958 NV_STATUS  NV_API_CALL  rm_write_registry_string (nvidia_stack_t *, nv_state_t *, const char *, const char *, NvU32);
959 void       NV_API_CALL  rm_parse_option_string   (nvidia_stack_t *, const char *);
960 char*      NV_API_CALL  rm_remove_spaces         (const char *);
961 char*      NV_API_CALL  rm_string_token          (char **, const char);
962 void       NV_API_CALL  rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool);
963 NV_STATUS  NV_API_CALL  rm_get_adapter_status_external(nvidia_stack_t *, nv_state_t *);
964 
965 NV_STATUS  NV_API_CALL  rm_run_rc_callback       (nvidia_stack_t *, nv_state_t *);
966 void       NV_API_CALL  rm_execute_work_item     (nvidia_stack_t *, void *);
967 const char* NV_API_CALL rm_get_device_name       (NvU16, NvU16, NvU16);
968 
969 NV_STATUS  NV_API_CALL  rm_is_supported_device   (nvidia_stack_t *, nv_state_t *);
970 NvBool     NV_API_CALL  rm_is_supported_pci_device(NvU8   pci_class,
971                                                    NvU8   pci_subclass,
972                                                    NvU16  vendor,
973                                                    NvU16  device,
974                                                    NvU16  subsystem_vendor,
975                                                    NvU16  subsystem_device,
976                                                    NvBool print_legacy_warning);
977 
978 void       NV_API_CALL  rm_i2c_remove_adapters    (nvidia_stack_t *, nv_state_t *);
979 NvBool     NV_API_CALL  rm_i2c_is_smbus_capable   (nvidia_stack_t *, nv_state_t *, void *);
980 NV_STATUS  NV_API_CALL  rm_i2c_transfer           (nvidia_stack_t *, nv_state_t *, void *, nv_i2c_cmd_t, NvU8, NvU8, NvU32, NvU8 *);
981 
982 NV_STATUS  NV_API_CALL  rm_perform_version_check  (nvidia_stack_t *, void *, NvU32);
983 
984 void       NV_API_CALL  rm_power_source_change_event        (nvidia_stack_t *, NvU32);
985 
986 void       NV_API_CALL  rm_request_dnotifier_state          (nvidia_stack_t *, nv_state_t *);
987 
988 void       NV_API_CALL  rm_disable_gpu_state_persistence    (nvidia_stack_t *sp, nv_state_t *);
989 NV_STATUS  NV_API_CALL  rm_p2p_init_mapping       (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *);
990 NV_STATUS  NV_API_CALL  rm_p2p_destroy_mapping    (nvidia_stack_t *, NvU64);
991 NV_STATUS  NV_API_CALL  rm_p2p_get_pages          (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *);
992 NV_STATUS  NV_API_CALL  rm_p2p_get_gpu_info       (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **);
993 NV_STATUS  NV_API_CALL  rm_p2p_get_pages_persistent (nvidia_stack_t *,  NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *);
994 NV_STATUS  NV_API_CALL  rm_p2p_register_callback  (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
995 NV_STATUS  NV_API_CALL  rm_p2p_put_pages          (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
996 NV_STATUS  NV_API_CALL  rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *);
997 NV_STATUS  NV_API_CALL  rm_p2p_dma_map_pages      (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **);
998 NV_STATUS  NV_API_CALL  rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **);
999 void       NV_API_CALL  rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
1000 NV_STATUS  NV_API_CALL  rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, void *, nv_phys_addr_range_t **, NvU32 *);
1001 void       NV_API_CALL  rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, nv_phys_addr_range_t **, NvU32);
1002 NV_STATUS  NV_API_CALL  rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **, NvBool *);
1003 void       NV_API_CALL  rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
1004 NV_STATUS  NV_API_CALL  rm_log_gpu_crash          (nv_stack_t *, nv_state_t *);
1005 
1006 void       NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
1007 NvBool     NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
1008 NV_STATUS  NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
1009 NV_STATUS  NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
1010 NvBool     NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
1011 NvBool     NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);
1012 NvBool     NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *);
1013 void       NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *);
1014 NV_STATUS  NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, nv_ioctl_numa_info_t *);
1015 NV_STATUS  NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *);
1016 NV_STATUS  NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *);
1017 NvBool     NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *);
1018 void       NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *);
1019 NV_STATUS  NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool);
1020 NV_STATUS  NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *);
1021 NvBool     NV_API_CALL rm_is_iommu_needed_for_sriov(nvidia_stack_t *, nv_state_t *);
1022 NvBool     NV_API_CALL rm_disable_iomap_wc(void);
1023 
1024 void       NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool);
1025 void       NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
1026 void       NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
1027 NV_STATUS  NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
1028 void       NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
1029 NV_STATUS  NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool, NvBool *);
1030 const char* NV_API_CALL rm_get_vidmem_power_status(nvidia_stack_t *, nv_state_t *);
1031 const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *, nv_state_t *);
1032 const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool);
1033 
1034 void       NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
1035 void       NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
1036 
1037 NvBool     NV_API_CALL rm_is_altstack_in_use(void);
1038 
1039 /* vGPU VFIO specific functions */
1040 NV_STATUS  NV_API_CALL  nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
1041 NV_STATUS  NV_API_CALL  nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
1042 NV_STATUS  NV_API_CALL  nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
1043 NV_STATUS  NV_API_CALL  nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
1044 NV_STATUS  NV_API_CALL  nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *, NvBool *);
1045 NV_STATUS  NV_API_CALL  nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
1046 NV_STATUS  NV_API_CALL  nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
1047 NV_STATUS  NV_API_CALL  nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *);
1048 NV_STATUS  NV_API_CALL  nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
1049 NV_STATUS  NV_API_CALL  nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *);
1050 NV_STATUS  NV_API_CALL  nv_gpu_bind_event(nvidia_stack_t *);
1051 
1052 NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*);
1053 nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*);
1054 void       NV_API_CALL  nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size);
1055 
1056 /* Callbacks should occur roughly every 10ms. */
1057 #define NV_SNAPSHOT_TIMER_HZ 100
1058 void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context));
1059 void NV_API_CALL nv_flush_snapshot_timer(void);
1060 void NV_API_CALL nv_stop_snapshot_timer(void);
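
/*
 * Illustrative usage sketch (the callback name is hypothetical):
 *
 *     void my_snapshot_cb(void *context);
 *
 *     nv_start_snapshot_timer(my_snapshot_cb);  // invoked ~NV_SNAPSHOT_TIMER_HZ times per second
 *     ...
 *     nv_stop_snapshot_timer();
 */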
1061 
1062 static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv)
1063 {
1064     return nv->nv_uuid_cache.valid ? nv->nv_uuid_cache.uuid : NULL;
1065 }
1066 
/* nanosecond-resolution timer callback structure */
1068 typedef struct nv_nano_timer nv_nano_timer_t;
1069 
/* nanosecond timer functions */
1071 void        NV_API_CALL nv_create_nano_timer(nv_state_t *, void *pTmrEvent, nv_nano_timer_t **);
1072 void        NV_API_CALL nv_start_nano_timer(nv_state_t *nv, nv_nano_timer_t *, NvU64 timens);
1073 NV_STATUS   NV_API_CALL rm_run_nano_timer_callback(nvidia_stack_t *, nv_state_t *, void *pTmrEvent);
1074 void        NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *);
1075 void        NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *);
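
/*
 * Illustrative lifecycle sketch (names are hypothetical, and the timeout is
 * assumed here to be relative, in nanoseconds):
 *
 *     nv_nano_timer_t *timer;
 *     nv_create_nano_timer(nv, pTmrEvent, &timer);
 *     nv_start_nano_timer(nv, timer, 500000);    // ~500us
 *     ...
 *     nv_cancel_nano_timer(nv, timer);
 *     nv_destroy_nano_timer(nv, timer);
 */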
1076 
1077 #if defined(NVCPU_X86_64)
1078 
/*
 * Read the CPU time-stamp counter: RDTSC returns the counter in EDX:EAX,
 * and the inline assembly folds the two halves into a single 64-bit value.
 */
static inline NvU64 nv_rdtsc(void)
{
    NvU64 val;
    __asm__ __volatile__ ("rdtsc               \t\n"
                          "shlq   $0x20,%%rdx  \t\n"
                          "orq    %%rdx,%%rax  \t\n"
                          : "=A" (val));
    return val;
}
1088 
1089 #endif
1090 
1091 #endif /* NVRM */
1092 
/*
 * Parallel (SWAR) population count: adjacent fields of 1, 2, 4, 8, 16 and
 * then 32 bits are summed in turn, yielding the number of set bits in "word".
 */
static inline int nv_count_bits(NvU64 word)
{
    NvU64 bits;

    bits = (word & 0x5555555555555555ULL) + ((word >>  1) & 0x5555555555555555ULL);
    bits = (bits & 0x3333333333333333ULL) + ((bits >>  2) & 0x3333333333333333ULL);
    bits = (bits & 0x0f0f0f0f0f0f0f0fULL) + ((bits >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    bits = (bits & 0x00ff00ff00ff00ffULL) + ((bits >>  8) & 0x00ff00ff00ff00ffULL);
    bits = (bits & 0x0000ffff0000ffffULL) + ((bits >> 16) & 0x0000ffff0000ffffULL);
    bits = (bits & 0x00000000ffffffffULL) + ((bits >> 32) & 0x00000000ffffffffULL);

    return (int)(bits);
}
1106 
1107 #endif
1108