/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _NV_H_
#define _NV_H_



#include <nvlimits.h>

#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(__FreeBSD__)
  #include <sys/stddef.h>   // NULL
#elif defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
  #include <linux/stddef.h> // NULL
#else
  #include <stddef.h>       // NULL
#endif

#include <nvstatus.h>
#include "nv_stdarg.h"
#include <nv-caps.h>
#include <nv-firmware.h>
#include <nv-ioctl.h>
#include <nv-ioctl-numa.h>
#include <nvmisc.h>

extern nv_cap_t *nvidia_caps_root;

extern const NvBool nv_is_rm_firmware_supported_os;

#include <nv-kernel-interface-api.h>

#define GPU_UUID_LEN    (16)

/*
 * Buffer size for an ASCII UUID: We need 2 hex digits per byte, plus space
 * for "GPU", 5 dashes, and '\0' termination:
 */
#define GPU_UUID_ASCII_LEN  (GPU_UUID_LEN * 2 + 9)
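
/*
 * Illustrative sketch (not part of the driver interface; the function name
 * is hypothetical): format a raw 16-byte UUID into the
 * "GPU-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" form that the
 * GPU_UUID_ASCII_LEN budget above accounts for.
 */
static inline void nv_example_format_gpu_uuid(const NvU8 *uuid, char *out)
{
    static const char hex[] = "0123456789abcdef";
    int i, pos = 0;

    out[pos++] = 'G'; out[pos++] = 'P'; out[pos++] = 'U';

    for (i = 0; i < GPU_UUID_LEN; i++)
    {
        /* Dashes precede bytes 0, 4, 6, 8 and 10 (8-4-4-4-12 grouping). */
        if (i == 0 || i == 4 || i == 6 || i == 8 || i == 10)
            out[pos++] = '-';
        out[pos++] = hex[uuid[i] >> 4];
        out[pos++] = hex[uuid[i] & 0xf];
    }

    out[pos] = '\0'; /* 3 + 5 + 32 + 1 = GPU_UUID_ASCII_LEN characters */
}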

/*
 * #define an absolute maximum used as a sanity check for the
 * NV_ESC_IOCTL_XFER_CMD ioctl() size argument.
 */
#define NV_ABSOLUTE_MAX_IOCTL_SIZE  16384

/*
 * Solaris provides no more than 8 bits for the argument size in
 * the ioctl() command encoding; make sure we don't exceed this
 * limit.
 */
#define __NV_IOWR_ASSERT(type) ((sizeof(type) <= NV_PLATFORM_MAX_IOCTL_SIZE) ? 1 : -1)
#define __NV_IOWR(nr, type) ({                                        \
    typedef char __NV_IOWR_TYPE_SIZE_ASSERT[__NV_IOWR_ASSERT(type)];  \
    _IOWR(NV_IOCTL_MAGIC, (nr), type);                                \
})
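
/*
 * Usage sketch (the escape number and payload below are hypothetical, for
 * illustration only): when sizeof(type) exceeds NV_PLATFORM_MAX_IOCTL_SIZE,
 * __NV_IOWR_ASSERT() evaluates to -1, the typedef inside __NV_IOWR() gets a
 * negative array size, and the build fails instead of the size check failing
 * at runtime.
 */
typedef struct
{
    NvU32 in;
    NvU32 out;
} nv_example_xfer_params_t;

#define NV_ESC_EXAMPLE_XFER __NV_IOWR(0x7f, nv_example_xfer_params_t)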

#define NV_PCI_DEV_FMT          "%04x:%02x:%02x.%x"
#define NV_PCI_DEV_FMT_ARGS(nv) (nv)->pci_info.domain, (nv)->pci_info.bus, \
                                (nv)->pci_info.slot, (nv)->pci_info.function
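
/*
 * Usage sketch (assuming the printf-style nv_printf() logger and debug
 * levels declared in the os-interface headers):
 *
 *     nv_printf(NV_DBG_INFO, "NVRM: GPU at " NV_PCI_DEV_FMT "\n",
 *               NV_PCI_DEV_FMT_ARGS(nv));
 */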

#define NV_RM_DEVICE_INTR_ADDRESS 0x100

/*!
 * @brief The order of the display clocks in the enum defined below must be
 * kept in sync with the mapping arrays and macro listed here; all of them
 * must be updated together whenever a clock is added or removed.
 * Note that TEGRASOC_WHICH_CLK_MAX is also used in various places in
 * the following files:
 *
 * arch/nvalloc/unix/Linux/nv-linux.h
 *
 * arch/nvalloc/unix/src/os.c
 * dispClkMapRmToOsArr[] = {...};
 *
 * arch/nvalloc/unix/Linux/nv-clk.c
 * osMapClk[] = {...};
 */
typedef enum _TEGRASOC_WHICH_CLK
{
    TEGRASOC_WHICH_CLK_NVDISPLAYHUB,
    TEGRASOC_WHICH_CLK_NVDISPLAY_DISP,
    TEGRASOC_WHICH_CLK_NVDISPLAY_P0,
    TEGRASOC_WHICH_CLK_NVDISPLAY_P1,
    TEGRASOC_WHICH_CLK_DPAUX0,
    TEGRASOC_WHICH_CLK_FUSE,
    TEGRASOC_WHICH_CLK_DSIPLL_VCO,
    TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN,
    TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA,
    TEGRASOC_WHICH_CLK_SPPLL0_VCO,
    TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN,
    TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA,
    TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB,
    TEGRASOC_WHICH_CLK_SPPLL0_DIV10,
    TEGRASOC_WHICH_CLK_SPPLL0_DIV25,
    TEGRASOC_WHICH_CLK_SPPLL0_DIV27,
    TEGRASOC_WHICH_CLK_SPPLL1_VCO,
    TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN,
    TEGRASOC_WHICH_CLK_SPPLL1_DIV27,
    TEGRASOC_WHICH_CLK_VPLL0_REF,
    TEGRASOC_WHICH_CLK_VPLL0,
    TEGRASOC_WHICH_CLK_VPLL1,
    TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF,
    TEGRASOC_WHICH_CLK_RG0,
    TEGRASOC_WHICH_CLK_RG1,
    TEGRASOC_WHICH_CLK_DISPPLL,
    TEGRASOC_WHICH_CLK_DISPHUBPLL,
    TEGRASOC_WHICH_CLK_DSI_LP,
    TEGRASOC_WHICH_CLK_DSI_CORE,
    TEGRASOC_WHICH_CLK_DSI_PIXEL,
    TEGRASOC_WHICH_CLK_PRE_SOR0,
    TEGRASOC_WHICH_CLK_PRE_SOR1,
    TEGRASOC_WHICH_CLK_DP_LINK_REF,
    TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT,
    TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO,
    TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M,
    TEGRASOC_WHICH_CLK_RG0_M,
    TEGRASOC_WHICH_CLK_RG1_M,
    TEGRASOC_WHICH_CLK_SOR0_M,
    TEGRASOC_WHICH_CLK_SOR1_M,
    TEGRASOC_WHICH_CLK_PLLHUB,
    TEGRASOC_WHICH_CLK_SOR0,
    TEGRASOC_WHICH_CLK_SOR1,
    TEGRASOC_WHICH_CLK_SOR_PAD_INPUT,
    TEGRASOC_WHICH_CLK_PRE_SF0,
    TEGRASOC_WHICH_CLK_SF0,
    TEGRASOC_WHICH_CLK_SF1,
    TEGRASOC_WHICH_CLK_DSI_PAD_INPUT,
    TEGRASOC_WHICH_CLK_PRE_SOR0_REF,
    TEGRASOC_WHICH_CLK_PRE_SOR1_REF,
    TEGRASOC_WHICH_CLK_SOR0_PLL_REF,
    TEGRASOC_WHICH_CLK_SOR1_PLL_REF,
    TEGRASOC_WHICH_CLK_SOR0_REF,
    TEGRASOC_WHICH_CLK_SOR1_REF,
    TEGRASOC_WHICH_CLK_OSC,
    TEGRASOC_WHICH_CLK_DSC,
    TEGRASOC_WHICH_CLK_MAUD,
    TEGRASOC_WHICH_CLK_AZA_2XBIT,
    TEGRASOC_WHICH_CLK_AZA_BIT,
    TEGRASOC_WHICH_CLK_MIPI_CAL,
    TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL,
    TEGRASOC_WHICH_CLK_SOR0_DIV,
    TEGRASOC_WHICH_CLK_DISP_ROOT,
    TEGRASOC_WHICH_CLK_HUB_ROOT,
    TEGRASOC_WHICH_CLK_PLLA_DISP,
    TEGRASOC_WHICH_CLK_PLLA_DISPHUB,
    TEGRASOC_WHICH_CLK_PLLA,
    TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only.
} TEGRASOC_WHICH_CLK;

#ifdef NVRM

extern const char *pNVRM_ID;

/*
 * ptr arithmetic convenience
 */

typedef union
{
    volatile NvV8 Reg008[1];
    volatile NvV16 Reg016[1];
    volatile NvV32 Reg032[1];
} nv_hwreg_t, * nv_phwreg_t;


#define NVRM_PCICFG_NUM_BARS            6
#define NVRM_PCICFG_BAR_OFFSET(i)       (0x10 + (i) * 4)
#define NVRM_PCICFG_BAR_REQTYPE_MASK    0x00000001
#define NVRM_PCICFG_BAR_REQTYPE_MEMORY  0x00000000
#define NVRM_PCICFG_BAR_MEMTYPE_MASK    0x00000006
#define NVRM_PCICFG_BAR_MEMTYPE_64BIT   0x00000004
#define NVRM_PCICFG_BAR_ADDR_MASK       0xfffffff0

#define NVRM_PCICFG_NUM_DWORDS          16
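
/*
 * Illustrative helper (not part of the RM interface; the function name is
 * hypothetical): decode a raw BAR register value, as read from PCI config
 * space offset NVRM_PCICFG_BAR_OFFSET(i), using the masks above.  A 64-bit
 * memory BAR has the memory request type and the 64-bit memory type; its
 * base address bits are (bar & NVRM_PCICFG_BAR_ADDR_MASK).
 */
static inline NvBool nv_example_bar_is_64bit_memory(NvU32 bar)
{
    return (((bar & NVRM_PCICFG_BAR_REQTYPE_MASK) == NVRM_PCICFG_BAR_REQTYPE_MEMORY) &&
            ((bar & NVRM_PCICFG_BAR_MEMTYPE_MASK) == NVRM_PCICFG_BAR_MEMTYPE_64BIT));
}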

#define NV_GPU_NUM_BARS                 3
#define NV_GPU_BAR_INDEX_REGS           0
#define NV_GPU_BAR_INDEX_FB             1
#define NV_GPU_BAR_INDEX_IMEM           2

typedef struct
{
    NvU64 cpu_address;
    NvU64 size;
    NvU32 offset;
    NvU32 *map;
    nv_phwreg_t map_u;
} nv_aperture_t;

typedef struct
{
    char *name;
    NvU32 *data;
} nv_parm_t;

#define NV_RM_PAGE_SHIFT    12
#define NV_RM_PAGE_SIZE     (1 << NV_RM_PAGE_SHIFT)
#define NV_RM_PAGE_MASK     (NV_RM_PAGE_SIZE - 1)

#define NV_RM_TO_OS_PAGE_SHIFT      (os_page_shift - NV_RM_PAGE_SHIFT)
#define NV_RM_PAGES_PER_OS_PAGE     (1U << NV_RM_TO_OS_PAGE_SHIFT)
#define NV_RM_PAGES_TO_OS_PAGES(count) \
    ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \
     ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0))
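
/*
 * Worked example (assuming 64 KiB OS pages, i.e. os_page_shift == 16):
 * NV_RM_TO_OS_PAGE_SHIFT is 4, so one OS page holds 16 RM (4 KiB) pages and
 * NV_RM_PAGES_TO_OS_PAGES(33) rounds up to 3 OS pages.  With 4 KiB OS pages
 * the shift is 0 and the conversion is the identity.
 */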

#if defined(NVCPU_X86_64)
#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 3)
#else
#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 2)
#endif

typedef struct nvidia_stack_s
{
    NvU32 size;
    void *top;
    NvU8  stack[NV_STACK_SIZE-16] __attribute__ ((aligned(16)));
} nvidia_stack_t;

/*
 * TODO: Remove once all UNIX layers have been converted to use nvidia_stack_t
 */
typedef nvidia_stack_t nv_stack_t;

typedef struct nv_file_private_t nv_file_private_t;

/*
 * Wrapper for UNIX events: unlike the events returned to clients, this
 * includes kernel-specific data such as the file pointer, etc.
 */
typedef struct nv_event_s
{
    NvHandle            hParent;
    NvHandle            hObject;
    NvU32               index;
    NvU32               info32;
    NvU16               info16;
    nv_file_private_t  *nvfp;  /* per file-descriptor data pointer */
    NvU32               fd;
    NvBool              active; /* whether the event should be signaled */
    NvU32               refcount; /* count of associated RM events */
    struct nv_event_s  *next;
} nv_event_t;

typedef struct nv_kern_mapping_s
{
    void  *addr;
    NvU64 size;
    NvU32 modeFlag;
    struct nv_kern_mapping_s *next;
} nv_kern_mapping_t;

typedef struct nv_usermap_access_params_s
{
    NvU64    addr;
    NvU64    size;
    NvU64    offset;
    NvU64   *page_array;
    NvU64    num_pages;
    NvU64    mmap_start;
    NvU64    mmap_size;
    NvU64    access_start;
    NvU64    access_size;
    NvU64    remap_prot_extra;
    NvBool   contig;
    NvU32    caching;
} nv_usermap_access_params_t;

/*
 * Stores the mapping context for each mapping.
 */
typedef struct nv_alloc_mapping_context_s {
    void  *alloc;
    NvU64  page_index;
    NvU64 *page_array;
    NvU64  num_pages;
    NvU64  mmap_start;
    NvU64  mmap_size;
    NvU64  access_start;
    NvU64  access_size;
    NvU64  remap_prot_extra;
    NvU32  prot;
    NvBool valid;
    NvU32  caching;
} nv_alloc_mapping_context_t;

typedef enum
{
    NV_SOC_IRQ_DISPLAY_TYPE = 0x1,
    NV_SOC_IRQ_DPAUX_TYPE,
    NV_SOC_IRQ_GPIO_TYPE,
    NV_SOC_IRQ_HDACODEC_TYPE,
    NV_SOC_IRQ_TCPC2DISP_TYPE,
    NV_SOC_IRQ_INVALID_TYPE
} nv_soc_irq_type_t;

/*
 * Stores the interrupt number, interrupt type, and private data.
 */
typedef struct nv_soc_irq_info_s {
    NvU32 irq_num;
    nv_soc_irq_type_t irq_type;
    NvBool bh_pending;
    union {
        NvU32 gpio_num;
        NvU32 dpaux_instance;
    } irq_data;
    NvS32 ref_count;
} nv_soc_irq_info_t;

#define NV_MAX_SOC_IRQS              6
#define NV_MAX_DPAUX_NUM_DEVICES     4
#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING

#define NV_IGPU_LEGACY_STALL_IRQ     70
#define NV_IGPU_MAX_STALL_IRQS       3
#define NV_IGPU_MAX_NONSTALL_IRQS    1
/*
 * per device state
 */

/* DMA-capable device data, defined by kernel interface layer */
typedef struct nv_dma_device nv_dma_device_t;

typedef struct nv_phys_addr_range
{
    NvU64 addr;
    NvU64 len;
} nv_phys_addr_range_t;

typedef struct nv_state_t
{
    void  *priv;                    /* private data */
    void  *os_state;                /* os-specific device state */

    int    flags;

    /* PCI config info */
    nv_pci_info_t pci_info;
    NvU16 subsystem_id;
    NvU16 subsystem_vendor;
    NvU32 gpu_id;
    NvU32 iovaspace_id;
    struct
    {
        NvBool         valid;
        NvU8           uuid[GPU_UUID_LEN];
    } nv_uuid_cache;
    void *handle;

    NvU32 pci_cfg_space[NVRM_PCICFG_NUM_DWORDS];

    /* physical characteristics */
    nv_aperture_t bars[NV_GPU_NUM_BARS];
    nv_aperture_t *regs;
    nv_aperture_t *dpaux[NV_MAX_DPAUX_NUM_DEVICES];
    nv_aperture_t *hdacodec_regs;
    nv_aperture_t *mipical_regs;
    nv_aperture_t *fb, ud;
    nv_aperture_t *simregs;
    nv_aperture_t *emc_regs;

    NvU32  num_dpaux_instance;
    NvU32  interrupt_line;
    NvU32  dpaux_irqs[NV_MAX_DPAUX_NUM_DEVICES];
    nv_soc_irq_info_t soc_irq_info[NV_MAX_SOC_IRQS];
    NvS32 current_soc_irq;
    NvU32 num_soc_irqs;
    NvU32 hdacodec_irq;
    NvU32 tcpc2disp_irq;
    NvU8 *soc_dcb_blob;
    NvU32 soc_dcb_size;
    NvU32 disp_sw_soc_chip_id;
    NvBool soc_is_dpalt_mode_supported;

    NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
    NvU32 igpu_nonstall_irq;
    NvU32 num_stall_irqs;
    NvU64 dma_mask;

    NvBool primary_vga;

    NvU32 sim_env;

    NvU32 rc_timer_enabled;

    /* list of events allocated for this device */
    nv_event_t *event_list;

    /* lock to protect event_list */
    void *event_spinlock;

    nv_kern_mapping_t *kern_mappings;

    /* Kernel interface DMA device data */
    nv_dma_device_t *dma_dev;
    nv_dma_device_t *niso_dma_dev;

    /*
     * Per-GPU queue.  The actual queue object is usually allocated in the
     * arch-specific parent structure (e.g. nv_linux_state_t), and this
     * pointer just points to it.
     */
    struct os_work_queue *queue;

    /* For loading RM as a firmware (DCE or GSP) client */
    NvBool request_firmware;                /* request firmware from the OS */
    NvBool request_fw_client_rm;            /* attempt to init RM as a FW client */
    NvBool allow_fallback_to_monolithic_rm; /* allow fallback to monolithic RM if FW client RM doesn't work out */
    NvBool enable_firmware_logs;            /* attempt to enable firmware log decoding/printing */

    /* Tracks whether nvidia_remove has been called */
    NvBool removed;

    NvBool console_device;

    /* Tracks whether the GPU is an external GPU */
    NvBool is_external_gpu;

    /* Tracks whether the regkey PreserveVideoMemoryAllocations is set */
    NvBool preserve_vidmem_allocations;

    /* Variable to force allocation of 32-bit addressable memory */
    NvBool force_dma32_alloc;

    /* PCI power state should be D0 during system suspend */
    NvBool d0_state_in_suspend;

    /* Current cyclestats client and context */
    NvU32 profiler_owner;
    void *profiler_context;

    /*
     * RMAPI objects to use in the OS layer to talk to core RM.
     *
     * Note that we only need to store one subdevice handle: in SLI, we will
     * have a separate nv_state_t per physical GPU.
     */
    struct {
        NvHandle hClient;
        NvHandle hDevice;
        NvHandle hSubDevice;
        NvHandle hI2C;
        NvHandle hDisp;
    } rmapi;

    /* Bool to check if the ISO IOMMU is enabled */
    NvBool iso_iommu_present;

    /* Bool to check if the NISO IOMMU is enabled */
    NvBool niso_iommu_present;

    /* Bool to check if dma-buf is supported */
    NvBool dma_buf_supported;

    /* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
    NvBool nvpcf_dsm_in_gpu_scope;

    /* Bool to check if the device received a shutdown notification */
    NvBool is_shutdown;

    /* Bool to check if the GPU has a coherent sysmem link */
    NvBool coherent;
} nv_state_t;

// These defines need to be in sync with the defines in system.h
#define OS_TYPE_LINUX   0x1
#define OS_TYPE_FREEBSD 0x2
#define OS_TYPE_SUNOS   0x3
#define OS_TYPE_VMWARE  0x4

#define NVFP_TYPE_NONE       0x0
#define NVFP_TYPE_REFCOUNTED 0x1
#define NVFP_TYPE_REGISTERED 0x2

struct nv_file_private_t
{
    NvHandle *handles;
    NvU16 maxHandles;
    NvU32 deviceInstance;
    NvU32 gpuInstanceId;
    NvU8 metadata[64];

    nv_file_private_t *ctl_nvfp;
    void *ctl_nvfp_priv;
    NvU32 register_or_refcount;

    //
    // True if a client or an event was ever allocated on this fd.
    // If false, RMAPI cleanup is skipped.
    //
    NvBool bCleanupRmapi;
};

// Forward declarations of the GPU ops structures
typedef struct gpuSession                           *nvgpuSessionHandle_t;
typedef struct gpuDevice                            *nvgpuDeviceHandle_t;
typedef struct gpuAddressSpace                      *nvgpuAddressSpaceHandle_t;
typedef struct gpuTsg                               *nvgpuTsgHandle_t;
typedef struct UvmGpuTsgAllocParams_tag              nvgpuTsgAllocParams_t;
typedef struct gpuChannel                           *nvgpuChannelHandle_t;
typedef struct UvmGpuChannelInfo_tag                *nvgpuChannelInfo_t;
typedef struct UvmGpuChannelAllocParams_tag          nvgpuChannelAllocParams_t;
typedef struct UvmGpuCaps_tag                       *nvgpuCaps_t;
typedef struct UvmGpuCopyEnginesCaps_tag            *nvgpuCesCaps_t;
typedef struct UvmGpuAddressSpaceInfo_tag           *nvgpuAddressSpaceInfo_t;
typedef struct UvmGpuAllocInfo_tag                  *nvgpuAllocInfo_t;
typedef struct UvmGpuP2PCapsParams_tag              *nvgpuP2PCapsParams_t;
typedef struct UvmGpuFbInfo_tag                     *nvgpuFbInfo_t;
typedef struct UvmGpuEccInfo_tag                    *nvgpuEccInfo_t;
typedef struct UvmGpuFaultInfo_tag                  *nvgpuFaultInfo_t;
typedef struct UvmGpuAccessCntrInfo_tag             *nvgpuAccessCntrInfo_t;
typedef struct UvmGpuAccessCntrConfig_tag           *nvgpuAccessCntrConfig_t;
typedef struct UvmGpuInfo_tag                       nvgpuInfo_t;
typedef struct UvmGpuClientInfo_tag                 nvgpuClientInfo_t;
typedef struct UvmPmaAllocationOptions_tag          *nvgpuPmaAllocationOptions_t;
typedef struct UvmPmaStatistics_tag                 *nvgpuPmaStatistics_t;
typedef struct UvmGpuMemoryInfo_tag                 *nvgpuMemoryInfo_t;
typedef struct UvmGpuExternalMappingInfo_tag        *nvgpuExternalMappingInfo_t;
typedef struct UvmGpuChannelResourceInfo_tag        *nvgpuChannelResourceInfo_t;
typedef struct UvmGpuChannelInstanceInfo_tag        *nvgpuChannelInstanceInfo_t;
typedef struct UvmGpuChannelResourceBindParams_tag  *nvgpuChannelResourceBindParams_t;
typedef struct UvmGpuPagingChannelAllocParams_tag    nvgpuPagingChannelAllocParams_t;
typedef struct UvmGpuPagingChannel_tag              *nvgpuPagingChannelHandle_t;
typedef struct UvmGpuPagingChannelInfo_tag          *nvgpuPagingChannelInfo_t;
typedef enum   UvmPmaGpuMemoryType_tag               nvgpuGpuMemoryType_t;
typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU64, NvU64 *, NvU32, NvU64, NvU64, nvgpuGpuMemoryType_t);
typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemoryType_t);

/*
 * flags
 */

#define NV_FLAG_OPEN                   0x0001
#define NV_FLAG_EXCLUDE                0x0002
#define NV_FLAG_CONTROL                0x0004
// Unused                              0x0008
#define NV_FLAG_SOC_DISPLAY            0x0010
#define NV_FLAG_USES_MSI               0x0020
#define NV_FLAG_USES_MSIX              0x0040
#define NV_FLAG_PASSTHRU               0x0080
#define NV_FLAG_SUSPENDED              0x0100
#define NV_FLAG_SOC_IGPU               0x0200
// Unused                              0x0400
#define NV_FLAG_PERSISTENT_SW_STATE    0x0800
#define NV_FLAG_IN_RECOVERY            0x1000
// Unused                              0x2000
#define NV_FLAG_UNBIND_LOCK            0x4000
/* Set when the GPU is not present on the bus, to aid device teardown */
#define NV_FLAG_IN_SURPRISE_REMOVAL    0x8000

typedef enum
{
    NV_PM_ACTION_HIBERNATE,
    NV_PM_ACTION_STANDBY,
    NV_PM_ACTION_RESUME
} nv_pm_action_t;

typedef enum
{
    NV_PM_ACTION_DEPTH_DEFAULT,
    NV_PM_ACTION_DEPTH_MODESET,
    NV_PM_ACTION_DEPTH_UVM
} nv_pm_action_depth_t;

typedef enum
{
    NV_DYNAMIC_PM_NEVER,
    NV_DYNAMIC_PM_COARSE,
    NV_DYNAMIC_PM_FINE
} nv_dynamic_power_mode_t;

typedef enum
{
    NV_POWER_STATE_IN_HIBERNATE,
    NV_POWER_STATE_IN_STANDBY,
    NV_POWER_STATE_RUNNING
} nv_power_state_t;

#define NV_PRIMARY_VGA(nv)      ((nv)->primary_vga)

#define NV_IS_CTL_DEVICE(nv)    ((nv)->flags & NV_FLAG_CONTROL)
#define NV_IS_SOC_DISPLAY_DEVICE(nv)    \
        ((nv)->flags & NV_FLAG_SOC_DISPLAY)

#define NV_IS_SOC_IGPU_DEVICE(nv)    \
        ((nv)->flags & NV_FLAG_SOC_IGPU)

#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)    \
        (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)

#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv)     \
        ((nv)->iso_iommu_present)

#define NV_SOC_IS_NISO_IOMMU_PRESENT(nv)     \
        ((nv)->niso_iommu_present)
/*
 * GPU add/remove events
 */
#define NV_SYSTEM_GPU_ADD_EVENT             0x9001
#define NV_SYSTEM_GPU_REMOVE_EVENT          0x9002

/*
 * NVIDIA ACPI sub-event IDs (event types) to be passed to the
 * core NVIDIA driver for ACPI events.
 */
#define NV_SYSTEM_ACPI_EVENT_VALUE_DISPLAY_SWITCH_DEFAULT    0
#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_UNDOCKED       0
#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_DOCKED         1

#define NV_ACPI_NVIF_HANDLE_PRESENT 0x01
#define NV_ACPI_DSM_HANDLE_PRESENT  0x02
#define NV_ACPI_WMMX_HANDLE_PRESENT 0x04

#define NV_EVAL_ACPI_METHOD_NVIF     0x01
#define NV_EVAL_ACPI_METHOD_WMMX     0x02

typedef enum {
    NV_I2C_CMD_READ = 1,
    NV_I2C_CMD_WRITE,
    NV_I2C_CMD_SMBUS_READ,
    NV_I2C_CMD_SMBUS_WRITE,
    NV_I2C_CMD_SMBUS_QUICK_WRITE,
    NV_I2C_CMD_SMBUS_QUICK_READ,
    NV_I2C_CMD_SMBUS_BLOCK_READ,
    NV_I2C_CMD_SMBUS_BLOCK_WRITE,
    NV_I2C_CMD_BLOCK_READ,
    NV_I2C_CMD_BLOCK_WRITE
} nv_i2c_cmd_t;

// Flags needed by OSAllocPagesNode
#define NV_ALLOC_PAGES_NODE_NONE                0x0
#define NV_ALLOC_PAGES_NODE_SKIP_RECLAIM        0x1

/*
** where we hide our nv_state_t * ...
*/
#define NV_SET_NV_STATE(pgpu,p) ((pgpu)->pOsGpuInfo = (p))
#define NV_GET_NV_STATE(pGpu) \
    (nv_state_t *)((pGpu) ? (pGpu)->pOsGpuInfo : NULL)

static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
    return ((offset >= nv->regs->cpu_address) &&
            ((offset + (length - 1)) >= offset) &&
            ((offset + (length - 1)) <= (nv->regs->cpu_address + (nv->regs->size - 1))));
}

static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
    return  ((nv->fb) && (nv->fb->size != 0) &&
             (offset >= nv->fb->cpu_address) &&
             ((offset + (length - 1)) >= offset) &&
             ((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1))));
}

static inline NvBool IS_UD_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
    return ((nv->ud.cpu_address != 0) && (nv->ud.size != 0) &&
            (offset >= nv->ud.cpu_address) &&
            ((offset + (length - 1)) >= offset) &&
            ((offset + (length - 1)) <= (nv->ud.cpu_address + (nv->ud.size - 1))));
}

static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
    return ((nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) &&
            (nv->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) &&
            (offset >= nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) &&
            ((offset + (length - 1)) >= offset) &&
            ((offset + (length - 1)) <= (nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address +
                                         (nv->bars[NV_GPU_BAR_INDEX_IMEM].size - 1))));
}

#define NV_RM_MAX_MSIX_LINES  8

#define NV_MAX_ISR_DELAY_US           20000
#define NV_MAX_ISR_DELAY_MS           (NV_MAX_ISR_DELAY_US / 1000)

#define NV_TIMERCMP(a, b, CMP)                                              \
    (((a)->tv_sec == (b)->tv_sec) ?                                         \
        ((a)->tv_usec CMP (b)->tv_usec) : ((a)->tv_sec CMP (b)->tv_sec))

#define NV_TIMERADD(a, b, result)                                           \
    {                                                                       \
        (result)->tv_sec = (a)->tv_sec + (b)->tv_sec;                       \
        (result)->tv_usec = (a)->tv_usec + (b)->tv_usec;                    \
        if ((result)->tv_usec >= 1000000)                                   \
        {                                                                   \
            ++(result)->tv_sec;                                             \
            (result)->tv_usec -= 1000000;                                   \
        }                                                                   \
    }

#define NV_TIMERSUB(a, b, result)                                           \
    {                                                                       \
        (result)->tv_sec = (a)->tv_sec - (b)->tv_sec;                       \
        (result)->tv_usec = (a)->tv_usec - (b)->tv_usec;                    \
        if ((result)->tv_usec < 0)                                          \
        {                                                                   \
          --(result)->tv_sec;                                               \
          (result)->tv_usec += 1000000;                                     \
        }                                                                   \
    }

#define NV_TIMEVAL_TO_US(tv)    ((NvU64)(tv).tv_sec * 1000000 + (tv).tv_usec)
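
/*
 * Usage sketch (assumes a timeval-like type with tv_sec/tv_usec members, as
 * provided by the OS headers in the kernel interface layer):
 *
 *     struct timeval now, deadline, delta;
 *     ...
 *     if (NV_TIMERCMP(&now, &deadline, <))
 *     {
 *         NV_TIMERSUB(&deadline, &now, &delta);
 *         // NV_TIMEVAL_TO_US(delta) microseconds remain until the deadline
 *     }
 */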

#ifndef NV_ALIGN_UP
#define NV_ALIGN_UP(v,g) (((v) + ((g) - 1)) & ~((g) - 1))
#endif
#ifndef NV_ALIGN_DOWN
#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1))
#endif
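
/*
 * Note that the alignment 'g' must be a power of two.  For example,
 * NV_ALIGN_UP(0x1001, 0x1000) == 0x2000 and
 * NV_ALIGN_DOWN(0x1fff, 0x1000) == 0x1000.
 */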

/*
 * driver internal interfaces
 */

/*
 * ---------------------------------------------------------------------------
 *
 * Function prototypes for UNIX specific OS interface.
 *
 * ---------------------------------------------------------------------------
 */

NvU32      NV_API_CALL  nv_get_dev_minor         (nv_state_t *);
void*      NV_API_CALL  nv_alloc_kernel_mapping  (nv_state_t *, void *, NvU64, NvU32, NvU64, void **);
NV_STATUS  NV_API_CALL  nv_free_kernel_mapping   (nv_state_t *, void *, void *, void *);
NV_STATUS  NV_API_CALL  nv_alloc_user_mapping    (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **);
NV_STATUS  NV_API_CALL  nv_free_user_mapping     (nv_state_t *, void *, NvU64, void *);
NV_STATUS  NV_API_CALL  nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32);

NvU64  NV_API_CALL  nv_get_kern_phys_address     (NvU64);
NvU64  NV_API_CALL  nv_get_user_phys_address     (NvU64);
nv_state_t*  NV_API_CALL  nv_get_adapter_state   (NvU32, NvU8, NvU8);
nv_state_t*  NV_API_CALL  nv_get_ctl_state       (void);

void   NV_API_CALL  nv_set_dma_address_size      (nv_state_t *, NvU32 );

NV_STATUS  NV_API_CALL  nv_alias_pages           (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **);
NV_STATUS  NV_API_CALL  nv_alloc_pages           (nv_state_t *, NvU32, NvU64, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
NV_STATUS  NV_API_CALL  nv_free_pages            (nv_state_t *, NvU32, NvBool, NvU32, void *);

NV_STATUS  NV_API_CALL  nv_register_user_pages   (nv_state_t *, NvU64, NvU64 *, void *, void **);
void       NV_API_CALL  nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **);

NV_STATUS NV_API_CALL   nv_register_peer_io_mem  (nv_state_t *, NvU64 *, NvU64, void **);
void      NV_API_CALL   nv_unregister_peer_io_mem(nv_state_t *, void *);

struct sg_table;

NV_STATUS NV_API_CALL   nv_register_sgt          (nv_state_t *, NvU64 *, NvU64, NvU32, void **, struct sg_table *, void *);
void      NV_API_CALL   nv_unregister_sgt        (nv_state_t *, struct sg_table **, void **, void *);
NV_STATUS NV_API_CALL   nv_register_phys_pages   (nv_state_t *, NvU64 *, NvU64, NvU32, void **);
void      NV_API_CALL   nv_unregister_phys_pages (nv_state_t *, void *);

NV_STATUS  NV_API_CALL  nv_dma_map_sgt           (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **);
NV_STATUS  NV_API_CALL  nv_dma_map_pages         (nv_dma_device_t *, NvU64, NvU64 *, NvBool, NvU32, void **);
NV_STATUS  NV_API_CALL  nv_dma_unmap_pages       (nv_dma_device_t *, NvU64, NvU64 *, void **);

NV_STATUS  NV_API_CALL  nv_dma_map_alloc         (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **);
NV_STATUS  NV_API_CALL  nv_dma_unmap_alloc       (nv_dma_device_t *, NvU64, NvU64 *, void **);

NV_STATUS  NV_API_CALL  nv_dma_map_peer          (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *);
void       NV_API_CALL  nv_dma_unmap_peer        (nv_dma_device_t *, NvU64, NvU64);

NV_STATUS  NV_API_CALL  nv_dma_map_mmio          (nv_dma_device_t *, NvU64, NvU64 *);
void       NV_API_CALL  nv_dma_unmap_mmio        (nv_dma_device_t *, NvU64, NvU64);

void       NV_API_CALL  nv_dma_cache_invalidate  (nv_dma_device_t *, void *);
void       NV_API_CALL  nv_dma_enable_nvlink     (nv_dma_device_t *);

NvS32  NV_API_CALL  nv_start_rc_timer            (nv_state_t *);
NvS32  NV_API_CALL  nv_stop_rc_timer             (nv_state_t *);

void   NV_API_CALL  nv_post_event                (nv_event_t *, NvHandle, NvU32, NvU32, NvU16, NvBool);
NvS32  NV_API_CALL  nv_get_event                 (nv_file_private_t *, nv_event_t *, NvU32 *);

void*  NV_API_CALL  nv_i2c_add_adapter           (nv_state_t *, NvU32);
void   NV_API_CALL  nv_i2c_del_adapter           (nv_state_t *, void *);

void   NV_API_CALL  nv_acpi_methods_init         (NvU32 *);
void   NV_API_CALL  nv_acpi_methods_uninit       (void);

NV_STATUS  NV_API_CALL  nv_acpi_method           (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
NV_STATUS  NV_API_CALL  nv_acpi_dsm_method       (nv_state_t *, NvU8 *, NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
NV_STATUS  NV_API_CALL  nv_acpi_ddc_method       (nv_state_t *, void *, NvU32 *, NvBool);
NV_STATUS  NV_API_CALL  nv_acpi_dod_method       (nv_state_t *, NvU32 *, NvU32 *);
NV_STATUS  NV_API_CALL  nv_acpi_rom_method       (nv_state_t *, NvU32 *, NvU32 *);
NV_STATUS  NV_API_CALL  nv_acpi_get_powersource  (NvU32 *);
NvBool     NV_API_CALL  nv_acpi_is_battery_present(void);

NV_STATUS  NV_API_CALL  nv_acpi_mux_method       (nv_state_t *, NvU32 *, NvU32, const char *);

NV_STATUS  NV_API_CALL  nv_log_error             (nv_state_t *, NvU32, const char *, va_list);

NvU64      NV_API_CALL  nv_get_dma_start_address (nv_state_t *);
NV_STATUS  NV_API_CALL  nv_set_primary_vga_status(nv_state_t *);
NV_STATUS  NV_API_CALL  nv_pci_trigger_recovery  (nv_state_t *);
NvBool     NV_API_CALL  nv_requires_dma_remap    (nv_state_t *);

NvBool     NV_API_CALL  nv_is_rm_firmware_active(nv_state_t *);
const void*NV_API_CALL  nv_get_firmware(nv_state_t *, nv_firmware_type_t, nv_firmware_chip_family_t, const void **, NvU32 *);
void       NV_API_CALL  nv_put_firmware(const void *);

nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
void               NV_API_CALL nv_put_file_private(void *);

NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);

NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode);

void      NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv);

void      NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64, NvU64);

void      NV_API_CALL nv_p2p_free_platform_data(void *data);

#if defined(NVCPU_PPC64LE)
NV_STATUS NV_API_CALL nv_get_nvlink_line_rate    (nv_state_t *, NvU32 *);
#endif

NV_STATUS NV_API_CALL nv_revoke_gpu_mappings     (nv_state_t *);
void      NV_API_CALL nv_acquire_mmap_lock       (nv_state_t *);
void      NV_API_CALL nv_release_mmap_lock       (nv_state_t *);
NvBool    NV_API_CALL nv_get_all_mappings_revoked_locked (nv_state_t *);
void      NV_API_CALL nv_set_safe_to_mmap_locked (nv_state_t *, NvBool);

NV_STATUS NV_API_CALL nv_indicate_idle           (nv_state_t *);
NV_STATUS NV_API_CALL nv_indicate_not_idle       (nv_state_t *);
void      NV_API_CALL nv_idle_holdoff            (nv_state_t *);

NvBool    NV_API_CALL nv_dynamic_power_available (nv_state_t *);
void      NV_API_CALL nv_audio_dynamic_power     (nv_state_t *);

void      NV_API_CALL nv_control_soc_irqs        (nv_state_t *, NvBool bEnable);
NV_STATUS NV_API_CALL nv_get_current_irq_priv_data(nv_state_t *, NvU32 *);

NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap (int, int*);
int       NV_API_CALL nv_cap_drv_init(void);
void      NV_API_CALL nv_cap_drv_exit(void);
NvBool    NV_API_CALL nv_is_gpu_accessible(nv_state_t *);
NvBool    NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);

NvU32     NV_API_CALL nv_get_os_type(void);

void      NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end);
struct dma_buf;
typedef struct nv_dma_buf nv_dma_buf_t;
struct drm_gem_object;

NV_STATUS NV_API_CALL nv_dma_import_sgt  (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *);
void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *);
NV_STATUS NV_API_CALL nv_dma_import_dma_buf      (nv_dma_device_t *, struct dma_buf *, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_from_fd      (nv_dma_device_t *, NvS32, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **);
void      NV_API_CALL nv_dma_release_dma_buf     (void *, nv_dma_buf_t *);

void      NV_API_CALL nv_schedule_uvm_isr        (nv_state_t *);

NvBool    NV_API_CALL nv_platform_supports_s0ix  (void);
NvBool    NV_API_CALL nv_s2idle_pm_configured    (void);

NvBool    NV_API_CALL nv_is_chassis_notebook      (void);
void      NV_API_CALL nv_allow_runtime_suspend    (nv_state_t *nv);
void      NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv);

typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *);

NV_STATUS NV_API_CALL nv_get_num_phys_pages      (void *, NvU32 *);
NV_STATUS NV_API_CALL nv_get_phys_pages          (void *, void *, NvU32 *);

/*
 * ---------------------------------------------------------------------------
 *
 * Function prototypes for Resource Manager interface.
 *
 * ---------------------------------------------------------------------------
 */

NvBool     NV_API_CALL  rm_init_rm               (nvidia_stack_t *);
void       NV_API_CALL  rm_shutdown_rm           (nvidia_stack_t *);
NvBool     NV_API_CALL  rm_init_private_state    (nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL  rm_free_private_state    (nvidia_stack_t *, nv_state_t *);
NvBool     NV_API_CALL  rm_init_adapter          (nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL  rm_disable_adapter       (nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL  rm_shutdown_adapter      (nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL  rm_exclude_adapter       (nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL  rm_acquire_api_lock      (nvidia_stack_t *);
NV_STATUS  NV_API_CALL  rm_release_api_lock      (nvidia_stack_t *);
NV_STATUS  NV_API_CALL  rm_acquire_gpu_lock      (nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL  rm_release_gpu_lock      (nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL  rm_acquire_all_gpus_lock (nvidia_stack_t *);
NV_STATUS  NV_API_CALL  rm_release_all_gpus_lock (nvidia_stack_t *);
NV_STATUS  NV_API_CALL  rm_ioctl                 (nvidia_stack_t *, nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32);
NvBool     NV_API_CALL  rm_isr                   (nvidia_stack_t *, nv_state_t *, NvU32 *);
void       NV_API_CALL  rm_isr_bh                (nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL  rm_isr_bh_unlocked       (nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL  rm_power_management      (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
NV_STATUS  NV_API_CALL  rm_stop_user_channels    (nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL  rm_restart_user_channels (nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL  rm_save_low_res_mode     (nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL  rm_get_vbios_version     (nvidia_stack_t *, nv_state_t *, char *);
char*      NV_API_CALL  rm_get_gpu_uuid          (nvidia_stack_t *, nv_state_t *);
const NvU8* NV_API_CALL rm_get_gpu_uuid_raw      (nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL  rm_set_rm_firmware_requested(nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL  rm_get_firmware_version  (nvidia_stack_t *, nv_state_t *, char *, NvLength);
void       NV_API_CALL  rm_cleanup_file_private  (nvidia_stack_t *, nv_state_t *, nv_file_private_t *);
void       NV_API_CALL  rm_unbind_lock           (nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL  rm_read_registry_dword   (nvidia_stack_t *, nv_state_t *, const char *, NvU32 *);
NV_STATUS  NV_API_CALL  rm_write_registry_dword  (nvidia_stack_t *, nv_state_t *, const char *, NvU32);
NV_STATUS  NV_API_CALL  rm_write_registry_binary (nvidia_stack_t *, nv_state_t *, const char *, NvU8 *, NvU32);
NV_STATUS  NV_API_CALL  rm_write_registry_string (nvidia_stack_t *, nv_state_t *, const char *, const char *, NvU32);
void       NV_API_CALL  rm_parse_option_string   (nvidia_stack_t *, const char *);
char*      NV_API_CALL  rm_remove_spaces         (const char *);
char*      NV_API_CALL  rm_string_token          (char **, const char);
void       NV_API_CALL  rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool);

NV_STATUS  NV_API_CALL  rm_run_rc_callback       (nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL  rm_execute_work_item     (nvidia_stack_t *, void *);
const char* NV_API_CALL rm_get_device_name       (NvU16, NvU16, NvU16);

NV_STATUS  NV_API_CALL  rm_is_supported_device   (nvidia_stack_t *, nv_state_t *);
NvBool     NV_API_CALL  rm_is_supported_pci_device(NvU8   pci_class,
                                                   NvU8   pci_subclass,
                                                   NvU16  vendor,
                                                   NvU16  device,
                                                   NvU16  subsystem_vendor,
                                                   NvU16  subsystem_device,
                                                   NvBool print_legacy_warning);

void       NV_API_CALL  rm_i2c_remove_adapters    (nvidia_stack_t *, nv_state_t *);
NvBool     NV_API_CALL  rm_i2c_is_smbus_capable   (nvidia_stack_t *, nv_state_t *, void *);
NV_STATUS  NV_API_CALL  rm_i2c_transfer           (nvidia_stack_t *, nv_state_t *, void *, nv_i2c_cmd_t, NvU8, NvU8, NvU32, NvU8 *);

NV_STATUS  NV_API_CALL  rm_perform_version_check  (nvidia_stack_t *, void *, NvU32);

void       NV_API_CALL  rm_power_source_change_event        (nvidia_stack_t *, NvU32);

void       NV_API_CALL  rm_request_dnotifier_state          (nvidia_stack_t *, nv_state_t *);

void       NV_API_CALL  rm_disable_gpu_state_persistence    (nvidia_stack_t *sp, nv_state_t *);
NV_STATUS  NV_API_CALL  rm_p2p_init_mapping       (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *);
NV_STATUS  NV_API_CALL  rm_p2p_destroy_mapping    (nvidia_stack_t *, NvU64);
NV_STATUS  NV_API_CALL  rm_p2p_get_pages          (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *);
NV_STATUS  NV_API_CALL  rm_p2p_get_gpu_info       (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **);
NV_STATUS  NV_API_CALL  rm_p2p_get_pages_persistent (nvidia_stack_t *,  NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *);
NV_STATUS  NV_API_CALL  rm_p2p_register_callback  (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
NV_STATUS  NV_API_CALL  rm_p2p_put_pages          (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
NV_STATUS  NV_API_CALL  rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *);
NV_STATUS  NV_API_CALL  rm_p2p_dma_map_pages      (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **);
NV_STATUS  NV_API_CALL  rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **);
void       NV_API_CALL  rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
NV_STATUS  NV_API_CALL  rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, void *, nv_phys_addr_range_t **, NvU32 *);
void       NV_API_CALL  rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, nv_phys_addr_range_t **, NvU32);
NV_STATUS  NV_API_CALL  rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **, NvBool *);
void       NV_API_CALL  rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
NV_STATUS  NV_API_CALL  rm_log_gpu_crash          (nv_stack_t *, nv_state_t *);

void       NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
NvBool     NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
NV_STATUS  NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NV_STATUS  NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NvBool     NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
NvBool     NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);
NvBool     NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, nv_ioctl_numa_info_t *);
NV_STATUS  NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *);
NvBool     NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool);
NV_STATUS  NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *);
NvBool     NV_API_CALL rm_is_iommu_needed_for_sriov(nvidia_stack_t *, nv_state_t *);
NvBool     NV_API_CALL rm_disable_iomap_wc(void);

void       NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool);
void       NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
NV_STATUS  NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
void       NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
NV_STATUS  NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool, NvBool *);
const char* NV_API_CALL rm_get_vidmem_power_status(nvidia_stack_t *, nv_state_t *);
const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *, nv_state_t *);
const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool);

void       NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
void       NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);

NvBool     NV_API_CALL rm_is_altstack_in_use(void);

/* vGPU VFIO specific functions */
NV_STATUS  NV_API_CALL  nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
NV_STATUS  NV_API_CALL  nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS  NV_API_CALL  nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS  NV_API_CALL  nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS  NV_API_CALL  nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *, NvBool *);
NV_STATUS  NV_API_CALL  nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
NV_STATUS  NV_API_CALL  nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
NV_STATUS  NV_API_CALL  nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *);
NV_STATUS  NV_API_CALL  nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
NV_STATUS  NV_API_CALL  nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *);
NV_STATUS  NV_API_CALL  nv_gpu_bind_event(nvidia_stack_t *);

NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*);
nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*);
void       NV_API_CALL  nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size);

/* Callbacks should occur roughly every 10ms. */
#define NV_SNAPSHOT_TIMER_HZ 100
void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context));
void NV_API_CALL nv_flush_snapshot_timer(void);
void NV_API_CALL nv_stop_snapshot_timer(void);

static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv)
{
    return nv->nv_uuid_cache.valid ? nv->nv_uuid_cache.uuid : NULL;
}

/* nanosecond-resolution timer callback structure */
typedef struct nv_nano_timer nv_nano_timer_t;

/* nano timer functions */
void        NV_API_CALL nv_create_nano_timer(nv_state_t *, void *pTmrEvent, nv_nano_timer_t **);
void        NV_API_CALL nv_start_nano_timer(nv_state_t *nv, nv_nano_timer_t *, NvU64 timens);
NV_STATUS   NV_API_CALL rm_run_nano_timer_callback(nvidia_stack_t *, nv_state_t *, void *pTmrEvent);
void        NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *);
void        NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *);
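
/*
 * Typical lifecycle sketch (an assumption based on the prototypes above;
 * pTmrEvent is the opaque RM timer event handed back to
 * rm_run_nano_timer_callback() when the timer fires):
 *
 *     nv_nano_timer_t *timer;
 *     nv_create_nano_timer(nv, pTmrEvent, &timer);
 *     nv_start_nano_timer(nv, timer, timens);  // fires after 'timens' nanoseconds
 *     ...
 *     nv_cancel_nano_timer(nv, timer);
 *     nv_destroy_nano_timer(nv, timer);
 */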

#if defined(NVCPU_X86_64)

static inline NvU64 nv_rdtsc(void)
{
    NvU32 lo, hi;

    /* RDTSC returns the timestamp counter in EDX:EAX. */
    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
    return ((NvU64)hi << 32) | lo;
}

#endif

#endif /* NVRM */

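/*
 * Parallel (SWAR) population count: each step sums adjacent groups of bits
 * of twice the previous width, yielding the number of set bits in 'word'.
 */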
static inline int nv_count_bits(NvU64 word)
{
    NvU64 bits;

    bits = (word & 0x5555555555555555ULL) + ((word >>  1) & 0x5555555555555555ULL);
    bits = (bits & 0x3333333333333333ULL) + ((bits >>  2) & 0x3333333333333333ULL);
    bits = (bits & 0x0f0f0f0f0f0f0f0fULL) + ((bits >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    bits = (bits & 0x00ff00ff00ff00ffULL) + ((bits >>  8) & 0x00ff00ff00ff00ffULL);
    bits = (bits & 0x0000ffff0000ffffULL) + ((bits >> 16) & 0x0000ffff0000ffffULL);
    bits = (bits & 0x00000000ffffffffULL) + ((bits >> 32) & 0x00000000ffffffffULL);

    return (int)(bits);
}

#endif