1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 
25 #ifndef _NV_H_
26 #define _NV_H_
27 
28 
29 
30 #include <nvlimits.h>
31 
32 #if defined(NV_KERNEL_INTERFACE_LAYER) && defined(__FreeBSD__)
33   #include <sys/stddef.h>   // NULL
34 #elif defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
35   #include <linux/stddef.h> // NULL
36 #else
37   #include <stddef.h>       // NULL
38 #endif
39 
40 #include <nvstatus.h>
41 #include "nv_stdarg.h"
42 #include <nv-caps.h>
43 #include <nv-firmware.h>
44 #include <nv-ioctl.h>
45 #include <nvmisc.h>
46 
47 extern nv_cap_t *nvidia_caps_root;
48 
49 extern const NvBool nv_is_rm_firmware_supported_os;
50 
51 #include <nv-kernel-interface-api.h>
52 
53 /* NVIDIA's reserved major character device number (Linux). */
54 #define NV_MAJOR_DEVICE_NUMBER 195
55 
56 #define GPU_UUID_LEN    (16)
57 
/*
 * Buffer size for an ASCII UUID: we need two hex digits per byte, plus
 * space for the "GPU" prefix, 5 dashes, and '\0' termination.
 */
62 #define GPU_UUID_ASCII_LEN  (GPU_UUID_LEN * 2 + 9)
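/*
 * Worked example (assuming the usual "GPU-" prefixed ASCII form,
 * "GPU-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"): 32 hex digits plus
 * 3 characters for "GPU", 5 dashes and 1 '\0' gives 16 * 2 + 9 = 41 bytes.
 */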
63 
64 /*
65  * #define an absolute maximum used as a sanity check for the
66  * NV_ESC_IOCTL_XFER_CMD ioctl() size argument.
67  */
68 #define NV_ABSOLUTE_MAX_IOCTL_SIZE  16384
69 
70 /*
71  * Solaris provides no more than 8 bits for the argument size in
72  * the ioctl() command encoding; make sure we don't exceed this
73  * limit.
74  */
75 #define __NV_IOWR_ASSERT(type) ((sizeof(type) <= NV_PLATFORM_MAX_IOCTL_SIZE) ? 1 : -1)
76 #define __NV_IOWR(nr, type) ({                                        \
77     typedef char __NV_IOWR_TYPE_SIZE_ASSERT[__NV_IOWR_ASSERT(type)];  \
78     _IOWR(NV_IOCTL_MAGIC, (nr), type);                                \
79 })
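/*
 * The typedef'd char array gets a negative size whenever sizeof(type)
 * exceeds NV_PLATFORM_MAX_IOCTL_SIZE, turning an oversized ioctl argument
 * type into a compile-time error. Illustrative sketch (hypothetical
 * request number and parameter struct):
 *
 *   typedef struct { NvU32 in; NvU32 out; } example_params_t;
 *   #define NV_ESC_EXAMPLE_CMD __NV_IOWR(0x42, example_params_t)
 */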
80 
81 #define NV_PCI_DEV_FMT          "%04x:%02x:%02x.%x"
82 #define NV_PCI_DEV_FMT_ARGS(nv) (nv)->pci_info.domain, (nv)->pci_info.bus, \
83                                 (nv)->pci_info.slot, (nv)->pci_info.function
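/*
 * Usage sketch (hypothetical caller; any printf-style logging function
 * works with these macros):
 *
 *   nv_state_t *nv = ...;
 *   printf("probing GPU " NV_PCI_DEV_FMT "\n", NV_PCI_DEV_FMT_ARGS(nv));
 */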
84 
85 #define NV_RM_DEVICE_INTR_ADDRESS 0x100
86 
/*!
 * @brief The order of the display clocks in the enum defined below must be
 * kept in sync with the mapping arrays and the macro listed here; update
 * all four together whenever a clock is added or removed.
 * Note that TEGRASOC_WHICH_CLK_MAX is also used in various places in the
 * files mentioned below.
 *
 * arch/nvalloc/unix/Linux/nv-linux.h
 *
 * arch/nvalloc/unix/src/os.c
 * dispClkMapRmToOsArr[] = {...};
 *
 * arch/nvalloc/unix/Linux/nv-clk.c
 * osMapClk[] = {...};
 */
103 typedef enum _TEGRASOC_WHICH_CLK
104 {
105     TEGRASOC_WHICH_CLK_NVDISPLAYHUB,
106     TEGRASOC_WHICH_CLK_NVDISPLAY_DISP,
107     TEGRASOC_WHICH_CLK_NVDISPLAY_P0,
108     TEGRASOC_WHICH_CLK_NVDISPLAY_P1,
109     TEGRASOC_WHICH_CLK_DPAUX0,
110     TEGRASOC_WHICH_CLK_FUSE,
111     TEGRASOC_WHICH_CLK_DSIPLL_VCO,
112     TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN,
113     TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA,
114     TEGRASOC_WHICH_CLK_SPPLL0_VCO,
115     TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN,
116     TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA,
117     TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB,
118     TEGRASOC_WHICH_CLK_SPPLL0_DIV10,
119     TEGRASOC_WHICH_CLK_SPPLL0_DIV25,
120     TEGRASOC_WHICH_CLK_SPPLL0_DIV27,
121     TEGRASOC_WHICH_CLK_SPPLL1_VCO,
122     TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN,
123     TEGRASOC_WHICH_CLK_SPPLL1_DIV27,
124     TEGRASOC_WHICH_CLK_VPLL0_REF,
125     TEGRASOC_WHICH_CLK_VPLL0,
126     TEGRASOC_WHICH_CLK_VPLL1,
127     TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF,
128     TEGRASOC_WHICH_CLK_RG0,
129     TEGRASOC_WHICH_CLK_RG1,
130     TEGRASOC_WHICH_CLK_DISPPLL,
131     TEGRASOC_WHICH_CLK_DISPHUBPLL,
132     TEGRASOC_WHICH_CLK_DSI_LP,
133     TEGRASOC_WHICH_CLK_DSI_CORE,
134     TEGRASOC_WHICH_CLK_DSI_PIXEL,
135     TEGRASOC_WHICH_CLK_PRE_SOR0,
136     TEGRASOC_WHICH_CLK_PRE_SOR1,
137     TEGRASOC_WHICH_CLK_DP_LINK_REF,
138     TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT,
139     TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO,
140     TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M,
141     TEGRASOC_WHICH_CLK_RG0_M,
142     TEGRASOC_WHICH_CLK_RG1_M,
143     TEGRASOC_WHICH_CLK_SOR0_M,
144     TEGRASOC_WHICH_CLK_SOR1_M,
145     TEGRASOC_WHICH_CLK_PLLHUB,
146     TEGRASOC_WHICH_CLK_SOR0,
147     TEGRASOC_WHICH_CLK_SOR1,
148     TEGRASOC_WHICH_CLK_SOR_PAD_INPUT,
149     TEGRASOC_WHICH_CLK_PRE_SF0,
150     TEGRASOC_WHICH_CLK_SF0,
151     TEGRASOC_WHICH_CLK_SF1,
152     TEGRASOC_WHICH_CLK_DSI_PAD_INPUT,
153     TEGRASOC_WHICH_CLK_PRE_SOR0_REF,
154     TEGRASOC_WHICH_CLK_PRE_SOR1_REF,
155     TEGRASOC_WHICH_CLK_SOR0_PLL_REF,
156     TEGRASOC_WHICH_CLK_SOR1_PLL_REF,
157     TEGRASOC_WHICH_CLK_SOR0_REF,
158     TEGRASOC_WHICH_CLK_SOR1_REF,
159     TEGRASOC_WHICH_CLK_OSC,
160     TEGRASOC_WHICH_CLK_DSC,
161     TEGRASOC_WHICH_CLK_MAUD,
162     TEGRASOC_WHICH_CLK_AZA_2XBIT,
163     TEGRASOC_WHICH_CLK_AZA_BIT,
164     TEGRASOC_WHICH_CLK_MIPI_CAL,
165     TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL,
166     TEGRASOC_WHICH_CLK_SOR0_DIV,
167     TEGRASOC_WHICH_CLK_DISP_ROOT,
168     TEGRASOC_WHICH_CLK_HUB_ROOT,
169     TEGRASOC_WHICH_CLK_PLLA_DISP,
170     TEGRASOC_WHICH_CLK_PLLA_DISPHUB,
171     TEGRASOC_WHICH_CLK_PLLA,
172     TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only.
173 } TEGRASOC_WHICH_CLK;
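/*
 * TEGRASOC_WHICH_CLK_MAX is intended only for bounds checks, e.g.
 * (illustrative sketch, hypothetical helper name):
 *
 *   static inline NvBool tegrasoc_clk_is_valid(TEGRASOC_WHICH_CLK clk)
 *   {
 *       return clk < TEGRASOC_WHICH_CLK_MAX;
 *   }
 */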
174 
175 #ifdef NVRM
176 
177 extern const char *pNVRM_ID;
178 
179 /*
180  * ptr arithmetic convenience
181  */
182 
183 typedef union
184 {
185     volatile NvV8 Reg008[1];
186     volatile NvV16 Reg016[1];
187     volatile NvV32 Reg032[1];
188 } nv_hwreg_t, * nv_phwreg_t;
189 
190 
191 #define NVRM_PCICFG_NUM_BARS            6
192 #define NVRM_PCICFG_BAR_OFFSET(i)       (0x10 + (i) * 4)
193 #define NVRM_PCICFG_BAR_REQTYPE_MASK    0x00000001
194 #define NVRM_PCICFG_BAR_REQTYPE_MEMORY  0x00000000
195 #define NVRM_PCICFG_BAR_MEMTYPE_MASK    0x00000006
196 #define NVRM_PCICFG_BAR_MEMTYPE_64BIT   0x00000004
197 #define NVRM_PCICFG_BAR_ADDR_MASK       0xfffffff0
198 
199 #define NVRM_PCICFG_NUM_DWORDS          16
200 
201 #define NV_GPU_NUM_BARS                 3
202 #define NV_GPU_BAR_INDEX_REGS           0
203 #define NV_GPU_BAR_INDEX_FB             1
204 #define NV_GPU_BAR_INDEX_IMEM           2
205 
206 typedef struct
207 {
208     NvU64 cpu_address;
209     NvU64 size;
210     NvU32 offset;
211     NvU32 *map;
212     nv_phwreg_t map_u;
213 } nv_aperture_t;
214 
215 typedef struct
216 {
217     char *name;
218     NvU32 *data;
219 } nv_parm_t;
220 
221 #define NV_RM_PAGE_SHIFT    12
222 #define NV_RM_PAGE_SIZE     (1 << NV_RM_PAGE_SHIFT)
223 #define NV_RM_PAGE_MASK     (NV_RM_PAGE_SIZE - 1)
224 
225 #define NV_RM_TO_OS_PAGE_SHIFT      (os_page_shift - NV_RM_PAGE_SHIFT)
226 #define NV_RM_PAGES_PER_OS_PAGE     (1U << NV_RM_TO_OS_PAGE_SHIFT)
227 #define NV_RM_PAGES_TO_OS_PAGES(count) \
228     ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \
229      ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0))
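/*
 * Worked example (assuming 64 KiB OS pages, i.e. os_page_shift == 16):
 * NV_RM_TO_OS_PAGE_SHIFT is 4, so 20 RM pages round up to
 * NV_RM_PAGES_TO_OS_PAGES(20) == (20 >> 4) + 1 == 2 OS pages.
 */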
230 
231 #if defined(NVCPU_X86_64)
232 #define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 3)
233 #else
234 #define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 2)
235 #endif
236 
237 typedef struct nvidia_stack_s
238 {
239     NvU32 size;
240     void *top;
241     NvU8  stack[NV_STACK_SIZE-16] __attribute__ ((aligned(16)));
242 } nvidia_stack_t;
243 
244 /*
245  * TODO: Remove once all UNIX layers have been converted to use nvidia_stack_t
246  */
247 typedef nvidia_stack_t nv_stack_t;
248 
249 typedef struct nv_file_private_t nv_file_private_t;
250 
/*
 * Wrapper for UNIX events: unlike the events returned to clients, this
 * structure also carries kernel-specific data, such as the per-fd private
 * data pointer.
 */
256 typedef struct nv_event_s
257 {
258     NvHandle            hParent;
259     NvHandle            hObject;
260     NvU32               index;
261     NvU32               info32;
262     NvU16               info16;
263     nv_file_private_t  *nvfp;  /* per file-descriptor data pointer */
264     NvU32               fd;
265     NvBool              active; /* whether the event should be signaled */
266     NvU32               refcount; /* count of associated RM events */
267     struct nv_event_s  *next;
268 } nv_event_t;
269 
270 typedef struct nv_kern_mapping_s
271 {
272     void  *addr;
273     NvU64 size;
274     NvU32 modeFlag;
275     struct nv_kern_mapping_s *next;
276 } nv_kern_mapping_t;
277 
278 typedef struct nv_usermap_access_params_s
279 {
280     NvU64    addr;
281     NvU64    size;
282     NvU64    offset;
283     NvU64   *page_array;
284     NvU64    num_pages;
285     NvU64    mmap_start;
286     NvU64    mmap_size;
287     NvU64    access_start;
288     NvU64    access_size;
289     NvU64    remap_prot_extra;
290     NvBool   contig;
291     NvU32    caching;
292 } nv_usermap_access_params_t;
293 
/*
 * Stores the mapping context for each mapping.
 */
297 typedef struct nv_alloc_mapping_context_s {
298     void  *alloc;
299     NvU64  page_index;
300     NvU64 *page_array;
301     NvU64  num_pages;
302     NvU64  mmap_start;
303     NvU64  mmap_size;
304     NvU64  access_start;
305     NvU64  access_size;
306     NvU64  remap_prot_extra;
307     NvU32  prot;
308     NvBool valid;
309     NvU32  caching;
310 } nv_alloc_mapping_context_t;
311 
312 typedef enum
313 {
314     NV_SOC_IRQ_DISPLAY_TYPE = 0x1,
315     NV_SOC_IRQ_DPAUX_TYPE,
316     NV_SOC_IRQ_GPIO_TYPE,
317     NV_SOC_IRQ_HDACODEC_TYPE,
318     NV_SOC_IRQ_TCPC2DISP_TYPE,
319     NV_SOC_IRQ_INVALID_TYPE
320 } nv_soc_irq_type_t;
321 
/*
 * Stores the interrupt number, interrupt type, and private data for each
 * SOC interrupt.
 */
325 typedef struct nv_soc_irq_info_s {
326     NvU32 irq_num;
327     nv_soc_irq_type_t irq_type;
328     NvBool bh_pending;
329     union {
330         NvU32 gpio_num;
331         NvU32 dpaux_instance;
332     } irq_data;
333     NvS32 ref_count;
334 } nv_soc_irq_info_t;
335 
336 #define NV_MAX_SOC_IRQS              6
337 #define NV_MAX_DPAUX_NUM_DEVICES     4
338 #define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING
339 
340 #define NV_IGPU_LEGACY_STALL_IRQ     70
341 #define NV_IGPU_MAX_STALL_IRQS       3
342 #define NV_IGPU_MAX_NONSTALL_IRQS    1
343 /*
344  * per device state
345  */
346 
347 /* DMA-capable device data, defined by kernel interface layer */
348 typedef struct nv_dma_device nv_dma_device_t;
349 
350 typedef struct nv_phys_addr_range
351 {
352     NvU64 addr;
353     NvU64 len;
354 } nv_phys_addr_range_t;
355 
356 typedef struct nv_state_t
357 {
358     void  *priv;                    /* private data */
359     void  *os_state;                /* os-specific device state */
360 
361     int    flags;
362 
363     /* PCI config info */
364     nv_pci_info_t pci_info;
365     NvU16 subsystem_id;
366     NvU16 subsystem_vendor;
367     NvU32 gpu_id;
368     NvU32 iovaspace_id;
369     struct
370     {
371         NvBool         valid;
372         NvU8           uuid[GPU_UUID_LEN];
373     } nv_uuid_cache;
374     void *handle;
375 
376     NvU32 pci_cfg_space[NVRM_PCICFG_NUM_DWORDS];
377 
378     /* physical characteristics */
379     nv_aperture_t bars[NV_GPU_NUM_BARS];
380     nv_aperture_t *regs;
381     nv_aperture_t *dpaux[NV_MAX_DPAUX_NUM_DEVICES];
382     nv_aperture_t *hdacodec_regs;
383     nv_aperture_t *mipical_regs;
384     nv_aperture_t *fb, ud;
385     nv_aperture_t *simregs;
386     nv_aperture_t *emc_regs;
387 
388     NvU32  num_dpaux_instance;
389     NvU32  interrupt_line;
390     NvU32  dpaux_irqs[NV_MAX_DPAUX_NUM_DEVICES];
391     nv_soc_irq_info_t soc_irq_info[NV_MAX_SOC_IRQS];
392     NvS32 current_soc_irq;
393     NvU32 num_soc_irqs;
394     NvU32 hdacodec_irq;
395     NvU32 tcpc2disp_irq;
396     NvU8 *soc_dcb_blob;
397     NvU32 soc_dcb_size;
398     NvU32 disp_sw_soc_chip_id;
399     NvBool soc_is_dpalt_mode_supported;
400 
401     NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
402     NvU32 igpu_nonstall_irq;
403     NvU32 num_stall_irqs;
404     NvU64 dma_mask;
405 
406     NvBool primary_vga;
407 
408     NvU32 sim_env;
409 
410     NvU32 rc_timer_enabled;
411 
412     /* list of events allocated for this device */
413     nv_event_t *event_list;
414 
415     /* lock to protect event_list */
416     void *event_spinlock;
417 
418     nv_kern_mapping_t *kern_mappings;
419 
420     /* Kernel interface DMA device data */
421     nv_dma_device_t *dma_dev;
422     nv_dma_device_t *niso_dma_dev;
423 
424     /*
425      * Per-GPU queue.  The actual queue object is usually allocated in the
426      * arch-specific parent structure (e.g. nv_linux_state_t), and this
427      * pointer just points to it.
428      */
429     struct os_work_queue *queue;
430 
431     /* For loading RM as a firmware (DCE or GSP) client */
432     NvBool request_firmware;                /* request firmware from the OS */
    NvBool request_fw_client_rm;            /* attempt to init RM as a FW client */
434     NvBool allow_fallback_to_monolithic_rm; /* allow fallback to monolithic RM if FW client RM doesn't work out */
435     NvBool enable_firmware_logs;            /* attempt to enable firmware log decoding/printing */
436 
    /* Tracks whether nvidia_remove has been called */
438     NvBool removed;
439 
440     NvBool console_device;
441 
    /* Tracks whether the GPU is an external GPU */
443     NvBool is_external_gpu;
444 
    /* Tracks whether the regkey PreserveVideoMemoryAllocations is set */
446     NvBool preserve_vidmem_allocations;
447 
448     /* Variable to force allocation of 32-bit addressable memory */
449     NvBool force_dma32_alloc;
450 
451     /* PCI power state should be D0 during system suspend */
452     NvBool d0_state_in_suspend;
453 
454     /* Current cyclestats client and context */
455     NvU32 profiler_owner;
456     void *profiler_context;
457 
458     /*
459      * RMAPI objects to use in the OS layer to talk to core RM.
460      *
461      * Note that we only need to store one subdevice handle: in SLI, we will
462      * have a separate nv_state_t per physical GPU.
463      */
464     struct {
465         NvHandle hClient;
466         NvHandle hDevice;
467         NvHandle hSubDevice;
468         NvHandle hI2C;
469         NvHandle hDisp;
470     } rmapi;
471 
    /* True if the ISO IOMMU is enabled */
    NvBool iso_iommu_present;

    /* True if the NISO IOMMU is enabled */
    NvBool niso_iommu_present;

    /* True if dma-buf is supported */
    NvBool dma_buf_supported;
480 
481     NvBool printed_openrm_enable_unsupported_gpus_error;
482 
483     /* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
484     NvBool nvpcf_dsm_in_gpu_scope;
485 
486     /* Bool to check if the device received a shutdown notification */
487     NvBool is_shutdown;
488 
489     /* Bool to check if the GPU has a coherent sysmem link */
490     NvBool coherent;
491 } nv_state_t;
492 
// These defines need to be kept in sync with the defines in system.h
494 #define OS_TYPE_LINUX   0x1
495 #define OS_TYPE_FREEBSD 0x2
496 #define OS_TYPE_SUNOS   0x3
497 #define OS_TYPE_VMWARE  0x4
498 
499 #define NVFP_TYPE_NONE       0x0
500 #define NVFP_TYPE_REFCOUNTED 0x1
501 #define NVFP_TYPE_REGISTERED 0x2
502 
503 struct nv_file_private_t
504 {
505     NvHandle *handles;
506     NvU16 maxHandles;
507     NvU32 deviceInstance;
508     NvU8 metadata[64];
509 
510     nv_file_private_t *ctl_nvfp;
511     void *ctl_nvfp_priv;
512     NvU32 register_or_refcount;
513 };
514 
// Forward declarations of the GPU ops structures
516 typedef struct gpuSession                           *nvgpuSessionHandle_t;
517 typedef struct gpuDevice                            *nvgpuDeviceHandle_t;
518 typedef struct gpuAddressSpace                      *nvgpuAddressSpaceHandle_t;
519 typedef struct gpuTsg                               *nvgpuTsgHandle_t;
520 typedef struct UvmGpuTsgAllocParams_tag              nvgpuTsgAllocParams_t;
521 typedef struct gpuChannel                           *nvgpuChannelHandle_t;
522 typedef struct UvmGpuChannelInfo_tag                *nvgpuChannelInfo_t;
523 typedef struct UvmGpuChannelAllocParams_tag          nvgpuChannelAllocParams_t;
524 typedef struct UvmGpuCaps_tag                       *nvgpuCaps_t;
525 typedef struct UvmGpuCopyEnginesCaps_tag            *nvgpuCesCaps_t;
526 typedef struct UvmGpuAddressSpaceInfo_tag           *nvgpuAddressSpaceInfo_t;
527 typedef struct UvmGpuAllocInfo_tag                  *nvgpuAllocInfo_t;
528 typedef struct UvmGpuP2PCapsParams_tag              *nvgpuP2PCapsParams_t;
529 typedef struct UvmGpuFbInfo_tag                     *nvgpuFbInfo_t;
530 typedef struct UvmGpuEccInfo_tag                    *nvgpuEccInfo_t;
531 typedef struct UvmGpuFaultInfo_tag                  *nvgpuFaultInfo_t;
532 typedef struct UvmGpuAccessCntrInfo_tag             *nvgpuAccessCntrInfo_t;
533 typedef struct UvmGpuAccessCntrConfig_tag           *nvgpuAccessCntrConfig_t;
534 typedef struct UvmGpuInfo_tag                       nvgpuInfo_t;
535 typedef struct UvmGpuClientInfo_tag                 nvgpuClientInfo_t;
536 typedef struct UvmPmaAllocationOptions_tag          *nvgpuPmaAllocationOptions_t;
537 typedef struct UvmPmaStatistics_tag                 *nvgpuPmaStatistics_t;
538 typedef struct UvmGpuMemoryInfo_tag                 *nvgpuMemoryInfo_t;
539 typedef struct UvmGpuExternalMappingInfo_tag        *nvgpuExternalMappingInfo_t;
540 typedef struct UvmGpuChannelResourceInfo_tag        *nvgpuChannelResourceInfo_t;
541 typedef struct UvmGpuChannelInstanceInfo_tag        *nvgpuChannelInstanceInfo_t;
542 typedef struct UvmGpuChannelResourceBindParams_tag  *nvgpuChannelResourceBindParams_t;
543 typedef struct UvmGpuPagingChannelAllocParams_tag    nvgpuPagingChannelAllocParams_t;
544 typedef struct UvmGpuPagingChannel_tag              *nvgpuPagingChannelHandle_t;
545 typedef struct UvmGpuPagingChannelInfo_tag          *nvgpuPagingChannelInfo_t;
546 typedef enum   UvmPmaGpuMemoryType_tag               nvgpuGpuMemoryType_t;
547 typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU64, NvU64 *, NvU32, NvU64, NvU64, nvgpuGpuMemoryType_t);
548 typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemoryType_t);
549 
550 /*
551  * flags
552  */
553 
554 #define NV_FLAG_OPEN                   0x0001
555 #define NV_FLAG_EXCLUDE                0x0002
556 #define NV_FLAG_CONTROL                0x0004
557 // Unused                              0x0008
558 #define NV_FLAG_SOC_DISPLAY            0x0010
559 #define NV_FLAG_USES_MSI               0x0020
560 #define NV_FLAG_USES_MSIX              0x0040
561 #define NV_FLAG_PASSTHRU               0x0080
562 #define NV_FLAG_SUSPENDED              0x0100
563 #define NV_FLAG_SOC_IGPU               0x0200
564 // Unused                              0x0400
565 #define NV_FLAG_PERSISTENT_SW_STATE    0x0800
566 #define NV_FLAG_IN_RECOVERY            0x1000
567 // Unused                              0x2000
568 #define NV_FLAG_UNBIND_LOCK            0x4000
/* Set when the GPU is no longer present on the bus, to help with device teardown */
570 #define NV_FLAG_IN_SURPRISE_REMOVAL    0x8000
571 
572 typedef enum
573 {
574     NV_PM_ACTION_HIBERNATE,
575     NV_PM_ACTION_STANDBY,
576     NV_PM_ACTION_RESUME
577 } nv_pm_action_t;
578 
579 typedef enum
580 {
581     NV_PM_ACTION_DEPTH_DEFAULT,
582     NV_PM_ACTION_DEPTH_MODESET,
583     NV_PM_ACTION_DEPTH_UVM
584 } nv_pm_action_depth_t;
585 
586 typedef enum
587 {
588     NV_DYNAMIC_PM_NEVER,
589     NV_DYNAMIC_PM_COARSE,
590     NV_DYNAMIC_PM_FINE
591 } nv_dynamic_power_mode_t;
592 
593 typedef enum
594 {
595     NV_POWER_STATE_IN_HIBERNATE,
596     NV_POWER_STATE_IN_STANDBY,
597     NV_POWER_STATE_RUNNING
598 } nv_power_state_t;
599 
600 #define NV_PRIMARY_VGA(nv)      ((nv)->primary_vga)
601 
602 #define NV_IS_CTL_DEVICE(nv)    ((nv)->flags & NV_FLAG_CONTROL)
603 #define NV_IS_SOC_DISPLAY_DEVICE(nv)    \
604         ((nv)->flags & NV_FLAG_SOC_DISPLAY)
605 
606 #define NV_IS_SOC_IGPU_DEVICE(nv)    \
607         ((nv)->flags & NV_FLAG_SOC_IGPU)
608 
609 #define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)    \
610         (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)
611 
612 #define NV_SOC_IS_ISO_IOMMU_PRESENT(nv)     \
613         ((nv)->iso_iommu_present)
614 
615 #define NV_SOC_IS_NISO_IOMMU_PRESENT(nv)     \
616         ((nv)->niso_iommu_present)
617 /*
618  * GPU add/remove events
619  */
620 #define NV_SYSTEM_GPU_ADD_EVENT             0x9001
621 #define NV_SYSTEM_GPU_REMOVE_EVENT          0x9002
622 
/*
 * NVIDIA ACPI sub-event IDs (event types) to be passed to the core
 * NVIDIA driver for ACPI events.
 */
627 #define NV_SYSTEM_ACPI_EVENT_VALUE_DISPLAY_SWITCH_DEFAULT    0
628 #define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_UNDOCKED       0
629 #define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_DOCKED         1
630 
631 #define NV_ACPI_NVIF_HANDLE_PRESENT 0x01
632 #define NV_ACPI_DSM_HANDLE_PRESENT  0x02
633 #define NV_ACPI_WMMX_HANDLE_PRESENT 0x04
634 
635 #define NV_EVAL_ACPI_METHOD_NVIF     0x01
636 #define NV_EVAL_ACPI_METHOD_WMMX     0x02
637 
638 typedef enum {
639     NV_I2C_CMD_READ = 1,
640     NV_I2C_CMD_WRITE,
641     NV_I2C_CMD_SMBUS_READ,
642     NV_I2C_CMD_SMBUS_WRITE,
643     NV_I2C_CMD_SMBUS_QUICK_WRITE,
644     NV_I2C_CMD_SMBUS_QUICK_READ,
645     NV_I2C_CMD_SMBUS_BLOCK_READ,
646     NV_I2C_CMD_SMBUS_BLOCK_WRITE,
647     NV_I2C_CMD_BLOCK_READ,
648     NV_I2C_CMD_BLOCK_WRITE
649 } nv_i2c_cmd_t;
650 
651 // Flags needed by OSAllocPagesNode
652 #define NV_ALLOC_PAGES_NODE_NONE                0x0
653 #define NV_ALLOC_PAGES_NODE_SKIP_RECLAIM        0x1
654 
655 /*
656 ** where we hide our nv_state_t * ...
657 */
658 #define NV_SET_NV_STATE(pgpu,p) ((pgpu)->pOsGpuInfo = (p))
659 #define NV_GET_NV_STATE(pGpu) \
660     (nv_state_t *)((pGpu) ? (pGpu)->pOsGpuInfo : NULL)
661 
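/*
 * Range-check helpers: each verifies that [offset, offset + length - 1]
 * lies entirely within the corresponding aperture. The middle
 * (offset + (length - 1)) >= offset term rejects ranges whose end would
 * wrap around past the top of the 64-bit address space.
 */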
662 static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
663 {
664     return ((offset >= nv->regs->cpu_address) &&
665             ((offset + (length - 1)) >= offset) &&
666             ((offset + (length - 1)) <= (nv->regs->cpu_address + (nv->regs->size - 1))));
667 }
668 
669 static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
670 {
671     return  ((nv->fb) && (nv->fb->size != 0) &&
672              (offset >= nv->fb->cpu_address) &&
673              ((offset + (length - 1)) >= offset) &&
674              ((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1))));
675 }
676 
677 static inline NvBool IS_UD_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
678 {
679     return ((nv->ud.cpu_address != 0) && (nv->ud.size != 0) &&
680             (offset >= nv->ud.cpu_address) &&
681             ((offset + (length - 1)) >= offset) &&
682             ((offset + (length - 1)) <= (nv->ud.cpu_address + (nv->ud.size - 1))));
683 }
684 
685 static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
686 {
687     return ((nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) &&
688             (nv->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) &&
689             (offset >= nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) &&
690             ((offset + (length - 1)) >= offset) &&
691             ((offset + (length - 1)) <= (nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address +
692                                          (nv->bars[NV_GPU_BAR_INDEX_IMEM].size - 1))));
693 }
694 
695 #define NV_RM_MAX_MSIX_LINES  8
696 
697 #define NV_MAX_ISR_DELAY_US           20000
698 #define NV_MAX_ISR_DELAY_MS           (NV_MAX_ISR_DELAY_US / 1000)
699 
700 #define NV_TIMERCMP(a, b, CMP)                                              \
701     (((a)->tv_sec == (b)->tv_sec) ?                                         \
702         ((a)->tv_usec CMP (b)->tv_usec) : ((a)->tv_sec CMP (b)->tv_sec))
703 
704 #define NV_TIMERADD(a, b, result)                                           \
705     {                                                                       \
706         (result)->tv_sec = (a)->tv_sec + (b)->tv_sec;                       \
707         (result)->tv_usec = (a)->tv_usec + (b)->tv_usec;                    \
708         if ((result)->tv_usec >= 1000000)                                   \
709         {                                                                   \
710             ++(result)->tv_sec;                                             \
711             (result)->tv_usec -= 1000000;                                   \
712         }                                                                   \
713     }
714 
715 #define NV_TIMERSUB(a, b, result)                                           \
716     {                                                                       \
717         (result)->tv_sec = (a)->tv_sec - (b)->tv_sec;                       \
718         (result)->tv_usec = (a)->tv_usec - (b)->tv_usec;                    \
719         if ((result)->tv_usec < 0)                                          \
720         {                                                                   \
721           --(result)->tv_sec;                                               \
722           (result)->tv_usec += 1000000;                                     \
723         }                                                                   \
724     }
725 
726 #define NV_TIMEVAL_TO_US(tv)    ((NvU64)(tv).tv_sec * 1000000 + (tv).tv_usec)
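/*
 * Usage sketch for the timeval helpers (hypothetical caller, assuming a
 * structure with tv_sec/tv_usec members such as struct timeval):
 *
 *   struct timeval start, now, elapsed;
 *   ...
 *   NV_TIMERSUB(&now, &start, &elapsed);
 *   if (NV_TIMEVAL_TO_US(elapsed) > NV_MAX_ISR_DELAY_US)
 *       ...
 */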
727 
728 #ifndef NV_ALIGN_UP
729 #define NV_ALIGN_UP(v,g) (((v) + ((g) - 1)) & ~((g) - 1))
730 #endif
731 #ifndef NV_ALIGN_DOWN
732 #define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1))
733 #endif
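/*
 * Both alignment helpers assume the granularity 'g' is a power of two,
 * e.g. NV_ALIGN_UP(0x1001, 0x1000) == 0x2000 and
 * NV_ALIGN_DOWN(0x1fff, 0x1000) == 0x1000.
 */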
734 
735 /*
736  * driver internal interfaces
737  */
738 
739 /*
740  * ---------------------------------------------------------------------------
741  *
742  * Function prototypes for UNIX specific OS interface.
743  *
744  * ---------------------------------------------------------------------------
745  */
746 
747 NvU32      NV_API_CALL  nv_get_dev_minor         (nv_state_t *);
748 void*      NV_API_CALL  nv_alloc_kernel_mapping  (nv_state_t *, void *, NvU64, NvU32, NvU64, void **);
749 NV_STATUS  NV_API_CALL  nv_free_kernel_mapping   (nv_state_t *, void *, void *, void *);
750 NV_STATUS  NV_API_CALL  nv_alloc_user_mapping    (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **);
751 NV_STATUS  NV_API_CALL  nv_free_user_mapping     (nv_state_t *, void *, NvU64, void *);
752 NV_STATUS  NV_API_CALL  nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32);
753 
754 NvU64  NV_API_CALL  nv_get_kern_phys_address     (NvU64);
755 NvU64  NV_API_CALL  nv_get_user_phys_address     (NvU64);
756 nv_state_t*  NV_API_CALL  nv_get_adapter_state   (NvU32, NvU8, NvU8);
757 nv_state_t*  NV_API_CALL  nv_get_ctl_state       (void);
758 
759 void   NV_API_CALL  nv_set_dma_address_size      (nv_state_t *, NvU32 );
760 
761 NV_STATUS  NV_API_CALL  nv_alias_pages           (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **);
762 NV_STATUS  NV_API_CALL  nv_alloc_pages           (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
763 NV_STATUS  NV_API_CALL  nv_free_pages            (nv_state_t *, NvU32, NvBool, NvU32, void *);
764 
765 NV_STATUS  NV_API_CALL  nv_register_user_pages   (nv_state_t *, NvU64, NvU64 *, void *, void **);
766 void       NV_API_CALL  nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **);
767 
768 NV_STATUS NV_API_CALL   nv_register_peer_io_mem  (nv_state_t *, NvU64 *, NvU64, void **);
769 void      NV_API_CALL   nv_unregister_peer_io_mem(nv_state_t *, void *);
770 
771 struct sg_table;
772 
773 NV_STATUS NV_API_CALL   nv_register_sgt          (nv_state_t *, NvU64 *, NvU64, NvU32, void **, struct sg_table *, void *);
774 void      NV_API_CALL   nv_unregister_sgt        (nv_state_t *, struct sg_table **, void **, void *);
775 NV_STATUS NV_API_CALL   nv_register_phys_pages   (nv_state_t *, NvU64 *, NvU64, NvU32, void **);
776 void      NV_API_CALL   nv_unregister_phys_pages (nv_state_t *, void *);
777 
778 NV_STATUS  NV_API_CALL  nv_dma_map_sgt           (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **);
779 NV_STATUS  NV_API_CALL  nv_dma_map_pages         (nv_dma_device_t *, NvU64, NvU64 *, NvBool, NvU32, void **);
780 NV_STATUS  NV_API_CALL  nv_dma_unmap_pages       (nv_dma_device_t *, NvU64, NvU64 *, void **);
781 
782 NV_STATUS  NV_API_CALL  nv_dma_map_alloc         (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **);
783 NV_STATUS  NV_API_CALL  nv_dma_unmap_alloc       (nv_dma_device_t *, NvU64, NvU64 *, void **);
784 
785 NV_STATUS  NV_API_CALL  nv_dma_map_peer          (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *);
786 void       NV_API_CALL  nv_dma_unmap_peer        (nv_dma_device_t *, NvU64, NvU64);
787 
788 NV_STATUS  NV_API_CALL  nv_dma_map_mmio          (nv_dma_device_t *, NvU64, NvU64 *);
789 void       NV_API_CALL  nv_dma_unmap_mmio        (nv_dma_device_t *, NvU64, NvU64);
790 
791 void       NV_API_CALL  nv_dma_cache_invalidate  (nv_dma_device_t *, void *);
792 void       NV_API_CALL  nv_dma_enable_nvlink     (nv_dma_device_t *);
793 
794 NvS32  NV_API_CALL  nv_start_rc_timer            (nv_state_t *);
795 NvS32  NV_API_CALL  nv_stop_rc_timer             (nv_state_t *);
796 
797 void   NV_API_CALL  nv_post_event                (nv_event_t *, NvHandle, NvU32, NvU32, NvU16, NvBool);
798 NvS32  NV_API_CALL  nv_get_event                 (nv_file_private_t *, nv_event_t *, NvU32 *);
799 
800 void*  NV_API_CALL  nv_i2c_add_adapter           (nv_state_t *, NvU32);
801 void   NV_API_CALL  nv_i2c_del_adapter           (nv_state_t *, void *);
802 
803 void   NV_API_CALL  nv_acpi_methods_init         (NvU32 *);
804 void   NV_API_CALL  nv_acpi_methods_uninit       (void);
805 
806 NV_STATUS  NV_API_CALL  nv_acpi_method           (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
807 NV_STATUS  NV_API_CALL  nv_acpi_dsm_method       (nv_state_t *, NvU8 *, NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
808 NV_STATUS  NV_API_CALL  nv_acpi_ddc_method       (nv_state_t *, void *, NvU32 *, NvBool);
809 NV_STATUS  NV_API_CALL  nv_acpi_dod_method       (nv_state_t *, NvU32 *, NvU32 *);
810 NV_STATUS  NV_API_CALL  nv_acpi_rom_method       (nv_state_t *, NvU32 *, NvU32 *);
811 NV_STATUS  NV_API_CALL  nv_acpi_get_powersource  (NvU32 *);
812 NvBool     NV_API_CALL  nv_acpi_is_battery_present(void);
813 
814 NV_STATUS  NV_API_CALL  nv_acpi_mux_method       (nv_state_t *, NvU32 *, NvU32, const char *);
815 
816 NV_STATUS  NV_API_CALL  nv_log_error             (nv_state_t *, NvU32, const char *, va_list);
817 
818 NvU64      NV_API_CALL  nv_get_dma_start_address (nv_state_t *);
819 NV_STATUS  NV_API_CALL  nv_set_primary_vga_status(nv_state_t *);
820 NV_STATUS  NV_API_CALL  nv_pci_trigger_recovery  (nv_state_t *);
821 NvBool     NV_API_CALL  nv_requires_dma_remap    (nv_state_t *);
822 
823 NvBool     NV_API_CALL  nv_is_rm_firmware_active(nv_state_t *);
824 const void*NV_API_CALL  nv_get_firmware(nv_state_t *, nv_firmware_type_t, nv_firmware_chip_family_t, const void **, NvU32 *);
825 void       NV_API_CALL  nv_put_firmware(const void *);
826 
827 nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
828 void               NV_API_CALL nv_put_file_private(void *);
829 
830 NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
831 NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);
832 
833 NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
834 NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode);
835 
836 void      NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv);
837 
838 void      NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64, NvU64);
839 
840 void      NV_API_CALL nv_p2p_free_platform_data(void *data);
841 
842 #if defined(NVCPU_PPC64LE)
843 NV_STATUS NV_API_CALL nv_get_nvlink_line_rate    (nv_state_t *, NvU32 *);
844 #endif
845 
846 NV_STATUS NV_API_CALL nv_revoke_gpu_mappings     (nv_state_t *);
847 void      NV_API_CALL nv_acquire_mmap_lock       (nv_state_t *);
848 void      NV_API_CALL nv_release_mmap_lock       (nv_state_t *);
849 NvBool    NV_API_CALL nv_get_all_mappings_revoked_locked (nv_state_t *);
850 void      NV_API_CALL nv_set_safe_to_mmap_locked (nv_state_t *, NvBool);
851 
852 NV_STATUS NV_API_CALL nv_indicate_idle           (nv_state_t *);
853 NV_STATUS NV_API_CALL nv_indicate_not_idle       (nv_state_t *);
854 void      NV_API_CALL nv_idle_holdoff            (nv_state_t *);
855 
856 NvBool    NV_API_CALL nv_dynamic_power_available (nv_state_t *);
857 void      NV_API_CALL nv_audio_dynamic_power     (nv_state_t *);
858 
859 void      NV_API_CALL nv_control_soc_irqs        (nv_state_t *, NvBool bEnable);
860 NV_STATUS NV_API_CALL nv_get_current_irq_priv_data(nv_state_t *, NvU32 *);
861 
862 NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap (int, int*);
863 int       NV_API_CALL nv_cap_drv_init(void);
864 void      NV_API_CALL nv_cap_drv_exit(void);
865 NvBool    NV_API_CALL nv_is_gpu_accessible(nv_state_t *);
866 NvBool    NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);
867 
868 NvU32     NV_API_CALL nv_get_os_type(void);
869 
870 void      NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end);
871 struct dma_buf;
872 typedef struct nv_dma_buf nv_dma_buf_t;
873 struct drm_gem_object;
874 
875 NV_STATUS NV_API_CALL nv_dma_import_sgt  (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *);
876 void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *);
877 NV_STATUS NV_API_CALL nv_dma_import_dma_buf      (nv_dma_device_t *, struct dma_buf *, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **);
878 NV_STATUS NV_API_CALL nv_dma_import_from_fd      (nv_dma_device_t *, NvS32, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **);
879 void      NV_API_CALL nv_dma_release_dma_buf     (void *, nv_dma_buf_t *);
880 
881 void      NV_API_CALL nv_schedule_uvm_isr        (nv_state_t *);
882 
883 NvBool    NV_API_CALL nv_platform_supports_s0ix  (void);
884 NvBool    NV_API_CALL nv_s2idle_pm_configured    (void);
885 
886 NvBool    NV_API_CALL nv_is_chassis_notebook      (void);
887 void      NV_API_CALL nv_allow_runtime_suspend    (nv_state_t *nv);
888 void      NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv);
889 
890 typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *);
891 
892 NV_STATUS NV_API_CALL nv_get_num_phys_pages      (void *, NvU32 *);
893 NV_STATUS NV_API_CALL nv_get_phys_pages          (void *, void *, NvU32 *);
894 
895 /*
896  * ---------------------------------------------------------------------------
897  *
898  * Function prototypes for Resource Manager interface.
899  *
900  * ---------------------------------------------------------------------------
901  */
902 
903 NvBool     NV_API_CALL  rm_init_rm               (nvidia_stack_t *);
904 void       NV_API_CALL  rm_shutdown_rm           (nvidia_stack_t *);
905 NvBool     NV_API_CALL  rm_init_private_state    (nvidia_stack_t *, nv_state_t *);
906 void       NV_API_CALL  rm_free_private_state    (nvidia_stack_t *, nv_state_t *);
907 NvBool     NV_API_CALL  rm_init_adapter          (nvidia_stack_t *, nv_state_t *);
908 void       NV_API_CALL  rm_disable_adapter       (nvidia_stack_t *, nv_state_t *);
909 void       NV_API_CALL  rm_shutdown_adapter      (nvidia_stack_t *, nv_state_t *);
910 NV_STATUS  NV_API_CALL  rm_exclude_adapter       (nvidia_stack_t *, nv_state_t *);
911 NV_STATUS  NV_API_CALL  rm_acquire_api_lock      (nvidia_stack_t *);
912 NV_STATUS  NV_API_CALL  rm_release_api_lock      (nvidia_stack_t *);
913 NV_STATUS  NV_API_CALL  rm_acquire_gpu_lock      (nvidia_stack_t *, nv_state_t *);
914 NV_STATUS  NV_API_CALL  rm_release_gpu_lock      (nvidia_stack_t *, nv_state_t *);
915 NV_STATUS  NV_API_CALL  rm_acquire_all_gpus_lock (nvidia_stack_t *);
916 NV_STATUS  NV_API_CALL  rm_release_all_gpus_lock (nvidia_stack_t *);
917 NV_STATUS  NV_API_CALL  rm_ioctl                 (nvidia_stack_t *, nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32);
918 NvBool     NV_API_CALL  rm_isr                   (nvidia_stack_t *, nv_state_t *, NvU32 *);
919 void       NV_API_CALL  rm_isr_bh                (nvidia_stack_t *, nv_state_t *);
920 void       NV_API_CALL  rm_isr_bh_unlocked       (nvidia_stack_t *, nv_state_t *);
921 NV_STATUS  NV_API_CALL  rm_power_management      (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
922 NV_STATUS  NV_API_CALL  rm_stop_user_channels    (nvidia_stack_t *, nv_state_t *);
923 NV_STATUS  NV_API_CALL  rm_restart_user_channels (nvidia_stack_t *, nv_state_t *);
924 NV_STATUS  NV_API_CALL  rm_save_low_res_mode     (nvidia_stack_t *, nv_state_t *);
925 void       NV_API_CALL  rm_get_vbios_version     (nvidia_stack_t *, nv_state_t *, char *);
926 char*      NV_API_CALL  rm_get_gpu_uuid          (nvidia_stack_t *, nv_state_t *);
927 const NvU8* NV_API_CALL rm_get_gpu_uuid_raw      (nvidia_stack_t *, nv_state_t *);
928 void       NV_API_CALL  rm_set_rm_firmware_requested(nvidia_stack_t *, nv_state_t *);
929 void       NV_API_CALL  rm_get_firmware_version  (nvidia_stack_t *, nv_state_t *, char *, NvLength);
930 void       NV_API_CALL  rm_cleanup_file_private  (nvidia_stack_t *, nv_state_t *, nv_file_private_t *);
931 void       NV_API_CALL  rm_unbind_lock           (nvidia_stack_t *, nv_state_t *);
932 NV_STATUS  NV_API_CALL  rm_read_registry_dword   (nvidia_stack_t *, nv_state_t *, const char *, NvU32 *);
933 NV_STATUS  NV_API_CALL  rm_write_registry_dword  (nvidia_stack_t *, nv_state_t *, const char *, NvU32);
934 NV_STATUS  NV_API_CALL  rm_write_registry_binary (nvidia_stack_t *, nv_state_t *, const char *, NvU8 *, NvU32);
935 NV_STATUS  NV_API_CALL  rm_write_registry_string (nvidia_stack_t *, nv_state_t *, const char *, const char *, NvU32);
936 void       NV_API_CALL  rm_parse_option_string   (nvidia_stack_t *, const char *);
937 char*      NV_API_CALL  rm_remove_spaces         (const char *);
938 char*      NV_API_CALL  rm_string_token          (char **, const char);
939 void       NV_API_CALL  rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool);
940 
941 NV_STATUS  NV_API_CALL  rm_run_rc_callback       (nvidia_stack_t *, nv_state_t *);
942 void       NV_API_CALL  rm_execute_work_item     (nvidia_stack_t *, void *);
943 const char* NV_API_CALL rm_get_device_name       (NvU16, NvU16, NvU16);
944 
945 NV_STATUS  NV_API_CALL  rm_is_supported_device   (nvidia_stack_t *, nv_state_t *);
946 NvBool     NV_API_CALL  rm_is_supported_pci_device(NvU8   pci_class,
947                                                    NvU8   pci_subclass,
948                                                    NvU16  vendor,
949                                                    NvU16  device,
950                                                    NvU16  subsystem_vendor,
951                                                    NvU16  subsystem_device,
952                                                    NvBool print_legacy_warning);
953 
954 void       NV_API_CALL  rm_i2c_remove_adapters    (nvidia_stack_t *, nv_state_t *);
955 NvBool     NV_API_CALL  rm_i2c_is_smbus_capable   (nvidia_stack_t *, nv_state_t *, void *);
956 NV_STATUS  NV_API_CALL  rm_i2c_transfer           (nvidia_stack_t *, nv_state_t *, void *, nv_i2c_cmd_t, NvU8, NvU8, NvU32, NvU8 *);
957 
958 NV_STATUS  NV_API_CALL  rm_perform_version_check  (nvidia_stack_t *, void *, NvU32);
959 
960 void       NV_API_CALL  rm_power_source_change_event        (nvidia_stack_t *, NvU32);
961 
962 void       NV_API_CALL  rm_disable_gpu_state_persistence    (nvidia_stack_t *sp, nv_state_t *);
963 NV_STATUS  NV_API_CALL  rm_p2p_init_mapping       (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *);
964 NV_STATUS  NV_API_CALL  rm_p2p_destroy_mapping    (nvidia_stack_t *, NvU64);
965 NV_STATUS  NV_API_CALL  rm_p2p_get_pages          (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *);
966 NV_STATUS  NV_API_CALL  rm_p2p_get_gpu_info       (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **);
967 NV_STATUS  NV_API_CALL  rm_p2p_get_pages_persistent (nvidia_stack_t *,  NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *);
968 NV_STATUS  NV_API_CALL  rm_p2p_register_callback  (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
969 NV_STATUS  NV_API_CALL  rm_p2p_put_pages          (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
970 NV_STATUS  NV_API_CALL  rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *);
971 NV_STATUS  NV_API_CALL  rm_p2p_dma_map_pages      (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **);
972 NV_STATUS  NV_API_CALL  rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **);
973 void       NV_API_CALL  rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
974 NV_STATUS  NV_API_CALL  rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, void *, nv_phys_addr_range_t **, NvU32 *);
975 void       NV_API_CALL  rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, nv_phys_addr_range_t **, NvU32);
976 NV_STATUS  NV_API_CALL  rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **, NvBool *);
977 void       NV_API_CALL  rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
978 NV_STATUS  NV_API_CALL  rm_log_gpu_crash          (nv_stack_t *, nv_state_t *);
979 
980 void       NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
981 NvBool     NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
982 NV_STATUS  NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
983 NV_STATUS  NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
984 NvBool     NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
985 NvBool     NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);
986 NvBool     NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *);
987 void       NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *);
988 NV_STATUS  NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, NvS32 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *);
989 NV_STATUS  NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *);
990 NV_STATUS  NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *);
991 NvBool     NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *);
992 void       NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *);
993 NV_STATUS  NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool);
994 NV_STATUS  NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *);
995 NvBool     NV_API_CALL rm_is_iommu_needed_for_sriov(nvidia_stack_t *, nv_state_t *);
996 NvBool     NV_API_CALL rm_disable_iomap_wc(void);
997 
998 void       NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool);
999 void       NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
1000 void       NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
1001 NV_STATUS  NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
1002 void       NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
1003 NV_STATUS  NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool);
1004 const char* NV_API_CALL rm_get_vidmem_power_status(nvidia_stack_t *, nv_state_t *);
1005 const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *, nv_state_t *);
1006 const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool);
1007 
1008 void       NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
1009 void       NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
1010 
1011 NvBool     NV_API_CALL rm_is_altstack_in_use(void);
1012 
1013 /* vGPU VFIO specific functions */
1014 NV_STATUS  NV_API_CALL  nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
1015 NV_STATUS  NV_API_CALL  nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
1016 NV_STATUS  NV_API_CALL  nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
1017 NV_STATUS  NV_API_CALL  nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
1018 NV_STATUS  NV_API_CALL  nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *);
1019 NV_STATUS  NV_API_CALL  nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
1020 NV_STATUS  NV_API_CALL  nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *);
1021 NV_STATUS  NV_API_CALL  nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
1022 NV_STATUS  NV_API_CALL  nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *);
1023 NV_STATUS  NV_API_CALL  nv_gpu_bind_event(nvidia_stack_t *);
1024 
1025 NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*);
1026 nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*);
1027 void       NV_API_CALL  nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size);
1028 
1029 /* Callbacks should occur roughly every 10ms. */
1030 #define NV_SNAPSHOT_TIMER_HZ 100
1031 void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context));
1032 void NV_API_CALL nv_flush_snapshot_timer(void);
1033 void NV_API_CALL nv_stop_snapshot_timer(void);
1034 
1035 static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv)
1036 {
1037     return nv->nv_uuid_cache.valid ? nv->nv_uuid_cache.uuid : NULL;
1038 }
1039 
/* nanosecond-resolution timer callback structure */
1041 typedef struct nv_nano_timer nv_nano_timer_t;
1042 
/* nanosecond timer functions */
1044 void        NV_API_CALL nv_create_nano_timer(nv_state_t *, void *pTmrEvent, nv_nano_timer_t **);
1045 void        NV_API_CALL nv_start_nano_timer(nv_state_t *nv, nv_nano_timer_t *, NvU64 timens);
1046 NV_STATUS   NV_API_CALL rm_run_nano_timer_callback(nvidia_stack_t *, nv_state_t *, void *pTmrEvent);
1047 void        NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *);
1048 void        NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *);
1049 
1050 #if defined(NVCPU_X86_64)
1051 
static inline NvU64 nv_rdtsc(void)
{
    NvU64 lo, hi;

    /* RDTSC returns the 64-bit TSC split across EDX:EAX. */
    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
    return (hi << 32) | lo;
}
1061 
1062 #endif
1063 
1064 #endif /* NVRM */
1065 
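/*
 * Parallel (SWAR) population count: each step sums adjacent groups of
 * bits in place, doubling the group width (1, 2, 4, 8, 16, 32 bits), so
 * the result is the number of bits set in 'word'.
 */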
1066 static inline int nv_count_bits(NvU64 word)
1067 {
1068     NvU64 bits;
1069 
1070     bits = (word & 0x5555555555555555ULL) + ((word >>  1) & 0x5555555555555555ULL);
1071     bits = (bits & 0x3333333333333333ULL) + ((bits >>  2) & 0x3333333333333333ULL);
1072     bits = (bits & 0x0f0f0f0f0f0f0f0fULL) + ((bits >>  4) & 0x0f0f0f0f0f0f0f0fULL);
1073     bits = (bits & 0x00ff00ff00ff00ffULL) + ((bits >>  8) & 0x00ff00ff00ff00ffULL);
1074     bits = (bits & 0x0000ffff0000ffffULL) + ((bits >> 16) & 0x0000ffff0000ffffULL);
1075     bits = (bits & 0x00000000ffffffffULL) + ((bits >> 32) & 0x00000000ffffffffULL);
1076 
1077     return (int)(bits);
1078 }
1079 
1080 #endif
1081