1 #ifndef _G_KERNEL_FIFO_NVOC_H_
2 #define _G_KERNEL_FIFO_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_kernel_fifo_nvoc.h"
33 
34 #ifndef _KERNELFIFO_H_
35 #define _KERNELFIFO_H_
36 
/**************** Resource Manager Defines and Structures ******************\
*                                                                           *
* Module: KernelFifo.h                                                      *
*       Defines and structures used for the KernelFifo Object.              *
\***************************************************************************/
42 
43 #include "kernel/gpu/eng_state.h"
44 #include "kernel/gpu/gpu_halspec.h"
45 #include "kernel/gpu/fifo/channel_descendant.h"
46 #include "kernel/gpu/fifo/engine_info.h"
47 #include "kernel/gpu/gpu_engine_type.h"
48 
49 #include "containers/eheap_old.h"
50 #include "containers/map.h"
51 #include "utils/nvbitvector.h"
52 #include "gpu/mem_mgr/mem_desc.h"
53 #include "nvoc/utility.h"
54 
55 #include "ctrl/ctrl2080/ctrl2080gpu.h"  // NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS
56 #include "ctrl/ctrl2080/ctrl2080fifo.h" // NV2080_CTRL_FIFO_MEM_INFO
57 #include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_*
58 #include "ctrl/ctrl906f.h"
59 
60 #include "class/clc369.h" // MMU_FAULT_BUFFER
61 
//
// NVOC forward declarations: an opaque struct tag, a guarded typedef, and
// the NVOC class ID for each class this header refers to by pointer only
// (the full definitions live in their own generated headers).
//
struct KernelChannel;

#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__
#define __NVOC_CLASS_KernelChannel_TYPEDEF__
typedef struct KernelChannel KernelChannel;
#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelChannel
#define __nvoc_class_id_KernelChannel 0x5d8d70
#endif /* __nvoc_class_id_KernelChannel */


struct KernelChannelGroup;

#ifndef __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
#define __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
typedef struct KernelChannelGroup KernelChannelGroup;
#endif /* __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelChannelGroup
#define __nvoc_class_id_KernelChannelGroup 0xec6de1
#endif /* __nvoc_class_id_KernelChannelGroup */


struct KernelSchedMgr;

#ifndef __NVOC_CLASS_KernelSchedMgr_TYPEDEF__
#define __NVOC_CLASS_KernelSchedMgr_TYPEDEF__
typedef struct KernelSchedMgr KernelSchedMgr;
#endif /* __NVOC_CLASS_KernelSchedMgr_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelSchedMgr
#define __nvoc_class_id_KernelSchedMgr 0xea0970
#endif /* __nvoc_class_id_KernelSchedMgr */



// Opaque; defined by the vGPU host support code
struct HOST_VGPU_DEVICE;
100 
// Pre-Ampere runlist ID to pass to kfifoGetChidMgr
#define CHIDMGR_RUNLIST_ID_LEGACY  0

// Sentinel: no valid channel ID
#define INVALID_CHID               0xFFFFFFFF

// Sentinel: no valid runlist ID
#define INVALID_RUNLIST_ID         0xFFFFFFFFU

/*! We use 32-bit process ID for now (all-ones value held in a 64-bit constant) */
#define KERNEL_PID (0xFFFFFFFFULL)

/*! cap at 64 for now, can extend when needed */
#define MAX_NUM_RUNLISTS           NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_ID
#define NUM_BUFFERS_PER_RUNLIST   (NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS)
// One valid-bit per possible runlist ID
MAKE_BITVECTOR(CHID_MGR_VALID_BIT_VECTOR, MAX_NUM_RUNLISTS);

//
// Matches GET_PUSHBUFFER_CAPABILITIES bit positions
//
#define VID_PB_ALLOWED                      0x1
#define PCI_PB_ALLOWED                      0x2

// 2^(bit-width of the C369 fault-buffer engine-ID field) possible fault IDs
#define PBDMA_FAULT_MAX_ID  (0x1 << DRF_SIZE_MW(NVC369_BUF_ENTRY_ENGINE_ID))
MAKE_BITVECTOR(PBDMA_ID_BITVECTOR, PBDMA_FAULT_MAX_ID);
124 
125 /*!
126  * USERD isolation domain
127  *
128  * USERD allocated by different domains should not be put into the same physical page.
129  * This provides the basic security isolation because a physical page is the unit of
130  * granularity at which OS can provide isolation between processes.
131  *
132  *    GUEST_USER:     USERD allocated by guest user process
133  *    GUEST_KERNEL:   USERD allocated by guest kernel process
134  *    GUEST_INSECURE: USERD allocated by guest/kernel process,
135  *                    INSECURE means there is no isolation between guest user and guest kernel
136  *    HOST_USER:      USERD allocated by host user process
137  *    HOST_KERNEL:    USERD allocated by host kernel process
138  *
139  * Please refer to RM_USERD_Isolation wiki for more details
140  */
141 typedef enum _def_fifo_isolation_domain
142 {
143     GUEST_USER = 0x0,
144     GUEST_KERNEL,
145     GUEST_INSECURE,
146     HOST_USER,
147     HOST_KERNEL
148 } FIFO_ISOLATION_DOMAIN;
149 
150 /*!
151  * USERD isolation ID
152  *
153  * In vGPU environment, sub process means the guest user/kernel process running within a single VM.
154  * It also refers to any sub process (or sub-sub process) within a parent process.
155  *
156  * Please refer to Resource Server for more details about sub process concept
157  */
158 typedef struct _def_fifo_isolation_id
159 {
160     FIFO_ISOLATION_DOMAIN domain;
161     NvU64                 processID;
162     NvU64                 subProcessID;
163 } FIFO_ISOLATIONID, *PFIFO_ISOLATIONID;
164 
/*! Used for calls to kfifoChannelGetFifoContextMemDesc:
 *  selects which per-channel context memory descriptor to return. */
typedef enum
{
    FIFO_CTX_RAMFC = 0,        // channel's RAMFC (FIFO context RAM)
    FIFO_CTX_INST_BLOCK = 1,   // channel's instance block
} FIFO_CTX;
171 
/*!
 * Snapshot of one MMU fault, as passed to the kfifo client-ID/exception
 * reporting helpers (see kfifoGetClientIdString* vtable entries below).
 */
typedef struct _fifo_mmu_exception_data
{
    NvU32  addrLo;      // faulting address, low 32 bits
    NvU32  addrHi;      // faulting address, high 32 bits
    NvU32  faultType;
    NvU32  clientId;    // HW client that faulted
    NvBool bGpc;        // NOTE(review): presumably "clientId is a GPC client" (gpcId then valid) -- confirm with producers
    NvU32  gpcId;
    NvU32  accessType;
    NvU32  faultEngineId;
    NvU64  faultedShaderProgramVA[NV906F_CTRL_MMU_FAULT_SHADER_TYPES]; // per shader type (see ctrl906f.h)
} FIFO_MMU_EXCEPTION_DATA;
184 
/*! Used for calls to kchannelAllocHwID */
typedef enum
{
    CHANNEL_HW_ID_ALLOC_MODE_GROW_DOWN,  // allocate from the top of the ID range, downward
    CHANNEL_HW_ID_ALLOC_MODE_GROW_UP,    // allocate from the bottom of the ID range, upward
    CHANNEL_HW_ID_ALLOC_MODE_PROVIDED,   // caller supplies the exact HW ID to reserve
} CHANNEL_HW_ID_ALLOC_MODE;
192 
/*! Simple bitmap allocator state for channel(-group) HW IDs. */
typedef struct _fifo_hw_id
{
    /*!
     * Bitfield of HW IDs. 1 = reserved, 0 = available.
     * A reserved ID may not be allocated but it can't be used for any
     * future allocations.
     */
    NvU32 *pHwIdInUse;

    /*!
     * Number of elements in pHwIdInUse (each NvU32 tracks 32 IDs)
     */
    NvU32 hwIdInUseSz;
} FIFO_HW_ID;
207 
// Intrusive map of KernelChannelGroup entries (see containers/map.h)
DECLARE_INTRUSIVE_MAP(KernelChannelGroupMap);

/*!
 * Per-runlist channel-ID manager: ChID allocation heaps plus the channel
 * groups scheduled on one runlist.
 */
typedef struct _chid_mgr
{
    /*!
     * Runlist managed by this CHID_MGR.
     */
    NvU32 runlistId;

    /*!
     * Heap to manage pFifoData for all channels.
     */
    OBJEHEAP *pFifoDataHeap;

    /*!
     * Global ChID heap - manages channel IDs and isolation IDs. In non-SRIOV
     * systems, allocations/frees in this heap mirror those in pFifoDataHeap.
     * When SRIOV is enabled, we reserve/free channel IDs for the guest in
     * chunks from this heap when the VM starts/shuts down. ChID allocations
     * during channel construction from the guest ChID space are from the
     * virtual ChID heap for that guest.
     */
    OBJEHEAP *pGlobalChIDHeap;

    /*!
     * Until FIFO code for SR-IOV moves to guest RM, this virtual ChID heap
     * manages channel IDs allocated to a guest.
     */
    OBJEHEAP **ppVirtualChIDHeap;

    /*!
     * Number of channels managed by this CHID_MGR
     */
    NvU32 numChannels;

    /*! HW ID allocator for channel groups on this runlist */
    FIFO_HW_ID  channelGrpMgr;

    /*!
     * Channel group pointers
     */
    KernelChannelGroupMap *pChanGrpTree;

} CHID_MGR;
251 
/*! Typedef for the @ref channel_iterator structure */
typedef struct channel_iterator CHANNEL_ITERATOR;
typedef struct channel_iterator *PCHANNEL_ITERATOR;

/*!
 * Generic Linked-list of Channel pointers to be used where ever multiple channels
 * are managed.
 * TODO: Remove as part of Jira CORERM-2658
 */
typedef struct _channel_node
{
    struct KernelChannel *pKernelChannel;  // channel carried by this node
    struct _channel_node *pNext;           // next node in the list
} CHANNEL_NODE, *PCHANNEL_NODE;
266 
267 /*!
268  * This structure represents an iterator for all channels.
269  * It is created by function @ref kfifoGetChannelIterator.
270  */
271 struct channel_iterator
272 {
273     NvU32 numChannels;
274     NvU32 numRunlists;
275     NvU32 physicalChannelID;
276     NvU32 runlistId;
277     EMEMBLOCK *pFifoDataBlock;
278     CHANNEL_NODE channelNode;
279 };
280 
// Maximum number of pbdma IDs for a given engine
#define FIFO_ENGINE_MAX_NUM_PBDMA       2

// Maximum size (including null terminator) of an engine name
#define FIFO_ENGINE_NAME_MAX_SIZE       16

/*! Per-engine entry of the FIFO engine info table */
typedef struct _def_fifo_engine_list
{
    NvU32 engineData[ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE];  // values indexed by ENGINE_INFO_TYPE
    NvU32 pbdmaIds[FIFO_ENGINE_MAX_NUM_PBDMA];                  // PBDMA IDs serving this engine
    NvU32 pbdmaFaultIds[FIFO_ENGINE_MAX_NUM_PBDMA];             // fault IDs of those PBDMAs
    NvU32 numPbdmas;                                            // valid entries in the two arrays above
    char engineName[FIFO_ENGINE_NAME_MAX_SIZE];                 // NUL-terminated engine name
} FIFO_ENGINE_LIST;
295 
/*! Maps a client-visible NV2080 engine type to its MC index */
typedef struct
{
    NvU32 nv2080EngineType;  // NV2080_ENGINE_TYPE_* value
    NvU32 mcIdx;             // corresponding MC engine index
} FIFO_GUEST_ENGINE_TABLE;
301 
/*! Aggregated engine/runlist/PBDMA topology discovered for this GPU */
typedef struct _def_engine_info
{
    NvU32 maxNumPbdmas;                           // upper bound on PBDMA count
    PBDMA_ID_BITVECTOR  validEngineIdsForPbdmas;  // set bit = engine ID backed by a PBDMA
    //
    // The highest runlist ID. Valid runlist IDs are < maxNumRunlists
    // However, the entire [0, maxNumRunlists) range is not valid. There are
    // missing runlist IDs in this range.
    //
    NvU32 maxNumRunlists;
    //
    // Multiple engines may have the same runlist ID. This is the total number
    // of engines with a runlist which is equal to the number of Esched driven
    // engines and does not include the SW engine.
    //
    NvU32 numRunlists;
    NvU32 engineInfoListSize;         // number of entries in engineInfoList
    FIFO_ENGINE_LIST *engineInfoList; // array of per-engine entries
} ENGINE_INFO;
321 
// Fully qualified instance block address
typedef struct _inst_block_desc
{
    NvU64   address;        // Physical address or IOVA (unshifted)
    NvU32   aperture;       // INST_BLOCK_APERTURE (one of the defines below)
    NvU32   gfid;           // Valid in PF when SR-IOV is enabled
} INST_BLOCK_DESC;
329 
/*! Linked list of channels, tracked by head and tail pointers */
typedef struct _channel_list
{
    CHANNEL_NODE *pHead;  // first node, NULL when list is empty
    CHANNEL_NODE *pTail;  // last node
} CHANNEL_LIST, *PCHANNEL_LIST;
335 
/*! State for the preallocated USERD region and its BAR1 mapping */
typedef struct _def_preallocated_userd_info
{
    NvU32      userdAperture;            // default aperture for USERD
    NvU32      userdAttr;                // default attr for USERD
    MEMORY_DESCRIPTOR *userdPhysDesc[NV_MAX_SUBDEVICES];    // <a> base phys addr of contiguous USERD
    NvU64      userdBar1MapStartOffset;  // <b> base offset of <a>'s BAR1 map
    NvU32      userdBar1MapSize;         // <c> sizeof <b>'s map
    NvU8      *userdBar1CpuPtr;          // <d> cpu map of <b>
    NvU32      userdBar1RefMask;         // mask of GPUs referencing userD
} PREALLOCATED_USERD_INFO;
346 
347 
// Scheduling enable/disable handlers
/*! Callback signature; pData receives the value registered with the handler */
typedef NV_STATUS (*PFifoSchedulingHandler)(OBJGPU *pGpu, void *pData);
typedef struct FifoSchedulingHandlerEntry
{
    PFifoSchedulingHandler pCallback;  // callback to invoke
    void *pCallbackParam;              // opaque argument passed as pData
    NvBool bHandled;                   // NOTE(review): presumably marks entries already run -- confirm with list users
} FifoSchedulingHandlerEntry;

MAKE_LIST(FifoSchedulingHandlerEntryList, FifoSchedulingHandlerEntry);
358 
359 //
360 // This define indicates legacy pdb in instance block.
361 //
362 #define FIFO_PDB_IDX_BASE             (0xFFFFFFFF)
363 
364 //
365 // Aperture defines must match NV_MMU_PTE_APERTURE HW defines
366 // We do not support instance memory in peer (1).
367 //
368 #define INST_BLOCK_APERTURE_VIDEO_MEMORY                     0x00000000
369 #define INST_BLOCK_APERTURE_RESERVED                         0x00000001
370 #define INST_BLOCK_APERTURE_SYSTEM_COHERENT_MEMORY           0x00000002
371 #define INST_BLOCK_APERTURE_SYSTEM_NON_COHERENT_MEMORY       0x00000003
372 
373 // Macro to verify HW and class defines are compatible
374 #define VERIFY_INST_BLOCK_APERTURE(vid, coh, ncoh)                  \
375     ct_assert((vid) == INST_BLOCK_APERTURE_VIDEO_MEMORY);           \
376     ct_assert((coh) == INST_BLOCK_APERTURE_SYSTEM_COHERENT_MEMORY); \
377     ct_assert((ncoh) == INST_BLOCK_APERTURE_SYSTEM_NON_COHERENT_MEMORY)
378 
379 //
380 // The actual GPU object definition
381 //
382 
383 // Private field names are wrapped in PRIVATE_FIELD, which does nothing for
384 // the matching C source file, but causes diagnostics to be issued if another
385 // source file references the field.
386 #ifdef NVOC_KERNEL_FIFO_H_PRIVATE_ACCESS_ALLOWED
387 #define PRIVATE_FIELD(x) x
388 #else
389 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
390 #endif
391 
//
// KernelFifo: kernel-side FIFO engine object. Layout is produced by the
// NVOC code generator -- field order and names must not be changed here.
//
struct KernelFifo {
    // NVOC runtime type info and base-class/ancestor pointers
    const struct NVOC_RTTI *__nvoc_rtti;
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct KernelFifo *__nvoc_pbase_KernelFifo;
    // Per-object virtual function table (HAL and OBJENGSTATE overrides);
    // called through the kfifoXxx_DISPATCH macros below
    NV_STATUS (*__kfifoConstructEngine__)(struct OBJGPU *, struct KernelFifo *, ENGDESCRIPTOR);
    NV_STATUS (*__kfifoStateLoad__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStateUnload__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStateInitLocked__)(struct OBJGPU *, struct KernelFifo *);
    void (*__kfifoStateDestroy__)(struct OBJGPU *, struct KernelFifo *);
    NV_STATUS (*__kfifoStatePostLoad__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStatePreUnload__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoCheckChannelAllocAddrSpaces__)(struct KernelFifo *, NV_ADDRESS_SPACE, NV_ADDRESS_SPACE, NV_ADDRESS_SPACE);
    NV_STATUS (*__kfifoConstructUsermodeMemdescs__)(struct OBJGPU *, struct KernelFifo *);
    NvU32 (*__kfifoChannelGroupGetLocalMaxSubcontext__)(struct OBJGPU *, struct KernelFifo *, struct KernelChannelGroup *, NvBool);
    void (*__kfifoGetCtxBufferMapFlags__)(struct OBJGPU *, struct KernelFifo *, NvU32, NvU32 *);
    NV_STATUS (*__kfifoEngineInfoXlate__)(struct OBJGPU *, struct KernelFifo *, ENGINE_INFO_TYPE, NvU32, ENGINE_INFO_TYPE, NvU32 *);
    NV_STATUS (*__kfifoGenerateWorkSubmitToken__)(struct OBJGPU *, struct KernelFifo *, struct KernelChannel *, NvU32 *, NvBool);
    NV_STATUS (*__kfifoUpdateUsermodeDoorbell__)(struct OBJGPU *, struct KernelFifo *, NvU32, NvU32);
    NvU32 (*__kfifoRunlistGetBaseShift__)(struct KernelFifo *);
    NvU64 (*__kfifoGetUserdBar1MapStartOffset__)(struct OBJGPU *, struct KernelFifo *);
    NvU32 (*__kfifoGetMaxCeChannelGroups__)(struct OBJGPU *, struct KernelFifo *);
    NV_STATUS (*__kfifoGetVChIdForSChId__)(struct OBJGPU *, struct KernelFifo *, NvU32, NvU32, NvU32, NvU32 *);
    NV_STATUS (*__kfifoProgramChIdTable__)(struct OBJGPU *, struct KernelFifo *, CHID_MGR *, NvU32, NvU32, NvU32, struct Device *, NvU32, FIFO_ENGINE_LIST *);
    NV_STATUS (*__kfifoRecoverAllChannels__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoGetEnginePbdmaFaultIds__)(struct OBJGPU *, struct KernelFifo *, ENGINE_INFO_TYPE, NvU32, NvU32 **, NvU32 *);
    NvU32 (*__kfifoGetNumPBDMAs__)(struct OBJGPU *, struct KernelFifo *);
    const char *(*__kfifoPrintPbdmaId__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    const char *(*__kfifoPrintInternalEngine__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    const char *(*__kfifoPrintInternalEngineCheck__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    const char *(*__kfifoGetClientIdStringCommon__)(struct OBJGPU *, struct KernelFifo *, FIFO_MMU_EXCEPTION_DATA *);
    const char *(*__kfifoGetClientIdString__)(struct OBJGPU *, struct KernelFifo *, FIFO_MMU_EXCEPTION_DATA *);
    const char *(*__kfifoGetClientIdStringCheck__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStatePreLoad__)(POBJGPU, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStatePostUnload__)(POBJGPU, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStateInitUnlocked__)(POBJGPU, struct KernelFifo *);
    void (*__kfifoInitMissing__)(POBJGPU, struct KernelFifo *);
    NV_STATUS (*__kfifoStatePreInitLocked__)(POBJGPU, struct KernelFifo *);
    NV_STATUS (*__kfifoStatePreInitUnlocked__)(POBJGPU, struct KernelFifo *);
    NvBool (*__kfifoIsPresent__)(POBJGPU, struct KernelFifo *);
    // Data members
    struct KernelSchedMgr *pKernelSchedMgr;
    CHID_MGR **ppChidMgr;                          // per-runlist channel-ID managers
    NvU32 numChidMgrs;                             // entries in ppChidMgr
    union CHID_MGR_VALID_BIT_VECTOR chidMgrValid;  // bit set when the matching ppChidMgr entry is valid
    ENGINE_INFO engineInfo;
    PREALLOCATED_USERD_INFO userdInfo;
    NvU32 maxSubcontextCount;
    FifoSchedulingHandlerEntryList postSchedulingEnableHandlerList;
    FifoSchedulingHandlerEntryList preSchedulingDisableHandlerList;
    NvU32 maxSec2SecureChannels;
    NvU32 maxCeSecureChannels;
    NvBool bUseChidHeap;
    NvBool bUsePerRunlistChram;
    NvBool bDisableChidIsolation;
    NvBool bIsPerRunlistChramSupportedInHw;
    NvBool bHostEngineExpansion;
    NvBool bHostHasLbOverflow;
    NvBool bSubcontextSupported;
    NvBool bMixedInstmemApertureDefAllowed;
    NvBool bIsZombieSubctxWarEnabled;
    NvBool bIsSchedSupported;
    NvBool bGuestGenenratesWorkSubmitToken;  // [sic] "Genenrates" typo is codegen-owned; renaming requires regenerating all users
    NvBool bWddmInterleavingPolicyEnabled;
    NvBool bUserdInSystemMemory;
    NvBool bUserdMapDmaSupported;
    NvBool bPerRunlistChramOverride;
    NvBool bNumChannelsOverride;             // when set, numChannelsOverride below applies
    NvU32 numChannelsOverride;
    NvBool bInstProtectedMem;
    NvU32 InstAttr;
    const NV_ADDRESS_SPACE *pInstAllocList;
    MEMORY_DESCRIPTOR *pDummyPageMemDesc;
    MEMORY_DESCRIPTOR *pBar1VF;
    MEMORY_DESCRIPTOR *pBar1PrivVF;
    MEMORY_DESCRIPTOR *pRegVF;
    CTX_BUF_POOL_INFO *pRunlistBufPool[64];  // NOTE(review): magic 64 -- presumably MAX_NUM_RUNLISTS; confirm against codegen
    MEMORY_DESCRIPTOR ***pppRunlistBufMemDesc;
};
471 
// Guarded typedef and NVOC class ID for KernelFifo itself
#ifndef __NVOC_CLASS_KernelFifo_TYPEDEF__
#define __NVOC_CLASS_KernelFifo_TYPEDEF__
typedef struct KernelFifo KernelFifo;
#endif /* __NVOC_CLASS_KernelFifo_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelFifo
#define __nvoc_class_id_KernelFifo 0xf3e155
#endif /* __nvoc_class_id_KernelFifo */

// Class descriptor emitted by the NVOC code generator
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFifo;

// Upcast: resolved through the stored ancestor pointer
#define __staticCast_KernelFifo(pThis) \
    ((pThis)->__nvoc_pbase_KernelFifo)

// Downcast: RTTI-checked; evaluates to NULL when KernelFifo is compiled out
#ifdef __nvoc_kernel_fifo_h_disabled
#define __dynamicCast_KernelFifo(pThis) ((KernelFifo*)NULL)
#else //__nvoc_kernel_fifo_h_disabled
#define __dynamicCast_KernelFifo(pThis) \
    ((KernelFifo*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelFifo)))
#endif //__nvoc_kernel_fifo_h_disabled

// PDB property aliasing onto the OBJENGSTATE base class
#define PDB_PROP_KFIFO_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_KFIFO_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING

NV_STATUS __nvoc_objCreateDynamic_KernelFifo(KernelFifo**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelFifo(KernelFifo**, Dynamic*, NvU32);
#define __objCreate_KernelFifo(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelFifo((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
501 
//
// NVOC dispatch wrappers: each kfifoXxx(...) macro routes the call through
// the object's vtable pointer (__kfifoXxx__); the _HAL variants resolve to
// the same dispatch.
//
#define kfifoConstructEngine(pGpu, pKernelFifo, engDesc) kfifoConstructEngine_DISPATCH(pGpu, pKernelFifo, engDesc)
#define kfifoStateLoad(pGpu, pKernelFifo, flags) kfifoStateLoad_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStateLoad_HAL(pGpu, pKernelFifo, flags) kfifoStateLoad_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStateUnload(pGpu, pKernelFifo, flags) kfifoStateUnload_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStateUnload_HAL(pGpu, pKernelFifo, flags) kfifoStateUnload_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStateInitLocked(pGpu, pKernelFifo) kfifoStateInitLocked_DISPATCH(pGpu, pKernelFifo)
#define kfifoStateDestroy(pGpu, pKernelFifo) kfifoStateDestroy_DISPATCH(pGpu, pKernelFifo)
#define kfifoStatePostLoad(pGpu, pKernelFifo, flags) kfifoStatePostLoad_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStatePostLoad_HAL(pGpu, pKernelFifo, flags) kfifoStatePostLoad_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStatePreUnload(pGpu, pKernelFifo, flags) kfifoStatePreUnload_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStatePreUnload_HAL(pGpu, pKernelFifo, flags) kfifoStatePreUnload_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoCheckChannelAllocAddrSpaces(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace) kfifoCheckChannelAllocAddrSpaces_DISPATCH(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace)
#define kfifoCheckChannelAllocAddrSpaces_HAL(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace) kfifoCheckChannelAllocAddrSpaces_DISPATCH(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace)
#define kfifoConstructUsermodeMemdescs(pGpu, pKernelFifo) kfifoConstructUsermodeMemdescs_DISPATCH(pGpu, pKernelFifo)
#define kfifoConstructUsermodeMemdescs_HAL(pGpu, pKernelFifo) kfifoConstructUsermodeMemdescs_DISPATCH(pGpu, pKernelFifo)
#define kfifoChannelGroupGetLocalMaxSubcontext(pGpu, pKernelFifo, arg0, arg1) kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(pGpu, pKernelFifo, arg0, arg1)
#define kfifoChannelGroupGetLocalMaxSubcontext_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(pGpu, pKernelFifo, arg0, arg1)
#define kfifoGetCtxBufferMapFlags(pGpu, pKernelFifo, engine, pFlags) kfifoGetCtxBufferMapFlags_DISPATCH(pGpu, pKernelFifo, engine, pFlags)
#define kfifoGetCtxBufferMapFlags_HAL(pGpu, pKernelFifo, engine, pFlags) kfifoGetCtxBufferMapFlags_DISPATCH(pGpu, pKernelFifo, engine, pFlags)
#define kfifoEngineInfoXlate(pGpu, pKernelFifo, inType, inVal, outType, pOutVal) kfifoEngineInfoXlate_DISPATCH(pGpu, pKernelFifo, inType, inVal, outType, pOutVal)
#define kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, inType, inVal, outType, pOutVal) kfifoEngineInfoXlate_DISPATCH(pGpu, pKernelFifo, inType, inVal, outType, pOutVal)
#define kfifoGenerateWorkSubmitToken(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost) kfifoGenerateWorkSubmitToken_DISPATCH(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost)
#define kfifoGenerateWorkSubmitToken_HAL(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost) kfifoGenerateWorkSubmitToken_DISPATCH(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost)
// Dispatch wrapper for the usermode-doorbell update vtable entry.
// Fixed typo in the macro parameter name (runlisId -> runlistId); the
// parameter is local to the macro, so every expansion is unchanged.
#define kfifoUpdateUsermodeDoorbell(arg0, arg1, workSubmitToken, runlistId) kfifoUpdateUsermodeDoorbell_DISPATCH(arg0, arg1, workSubmitToken, runlistId)
#define kfifoUpdateUsermodeDoorbell_HAL(arg0, arg1, workSubmitToken, runlistId) kfifoUpdateUsermodeDoorbell_DISPATCH(arg0, arg1, workSubmitToken, runlistId)
// NVOC dispatch wrappers, continued (see comment block above).
#define kfifoRunlistGetBaseShift(pKernelFifo) kfifoRunlistGetBaseShift_DISPATCH(pKernelFifo)
#define kfifoRunlistGetBaseShift_HAL(pKernelFifo) kfifoRunlistGetBaseShift_DISPATCH(pKernelFifo)
#define kfifoGetUserdBar1MapStartOffset(pGpu, pKernelFifo) kfifoGetUserdBar1MapStartOffset_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetUserdBar1MapStartOffset_HAL(pGpu, pKernelFifo) kfifoGetUserdBar1MapStartOffset_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetMaxCeChannelGroups(pGpu, pKernelFifo) kfifoGetMaxCeChannelGroups_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetMaxCeChannelGroups_HAL(pGpu, pKernelFifo) kfifoGetMaxCeChannelGroups_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetVChIdForSChId(pGpu, pKernelFifo, chId, gfid, engineId, pVChid) kfifoGetVChIdForSChId_DISPATCH(pGpu, pKernelFifo, chId, gfid, engineId, pVChid)
#define kfifoGetVChIdForSChId_HAL(pGpu, pKernelFifo, chId, gfid, engineId, pVChid) kfifoGetVChIdForSChId_DISPATCH(pGpu, pKernelFifo, chId, gfid, engineId, pVChid)
#define kfifoProgramChIdTable(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pMigDevice, engineFifoListNumEntries, pEngineFifoList) kfifoProgramChIdTable_DISPATCH(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pMigDevice, engineFifoListNumEntries, pEngineFifoList)
#define kfifoProgramChIdTable_HAL(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pMigDevice, engineFifoListNumEntries, pEngineFifoList) kfifoProgramChIdTable_DISPATCH(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pMigDevice, engineFifoListNumEntries, pEngineFifoList)
#define kfifoRecoverAllChannels(pGpu, pKernelFifo, gfid) kfifoRecoverAllChannels_DISPATCH(pGpu, pKernelFifo, gfid)
#define kfifoRecoverAllChannels_HAL(pGpu, pKernelFifo, gfid) kfifoRecoverAllChannels_DISPATCH(pGpu, pKernelFifo, gfid)
#define kfifoGetEnginePbdmaFaultIds(pGpu, pKernelFifo, arg0, arg1, arg2, arg3) kfifoGetEnginePbdmaFaultIds_DISPATCH(pGpu, pKernelFifo, arg0, arg1, arg2, arg3)
#define kfifoGetEnginePbdmaFaultIds_HAL(pGpu, pKernelFifo, arg0, arg1, arg2, arg3) kfifoGetEnginePbdmaFaultIds_DISPATCH(pGpu, pKernelFifo, arg0, arg1, arg2, arg3)
#define kfifoGetNumPBDMAs(pGpu, pKernelFifo) kfifoGetNumPBDMAs_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetNumPBDMAs_HAL(pGpu, pKernelFifo) kfifoGetNumPBDMAs_DISPATCH(pGpu, pKernelFifo)
#define kfifoPrintPbdmaId(pGpu, pKernelFifo, pbdmaId) kfifoPrintPbdmaId_DISPATCH(pGpu, pKernelFifo, pbdmaId)
#define kfifoPrintPbdmaId_HAL(pGpu, pKernelFifo, pbdmaId) kfifoPrintPbdmaId_DISPATCH(pGpu, pKernelFifo, pbdmaId)
#define kfifoPrintInternalEngine(pGpu, pKernelFifo, arg0) kfifoPrintInternalEngine_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoPrintInternalEngine_HAL(pGpu, pKernelFifo, arg0) kfifoPrintInternalEngine_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoPrintInternalEngineCheck(pGpu, pKernelFifo, arg0) kfifoPrintInternalEngineCheck_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoPrintInternalEngineCheck_HAL(pGpu, pKernelFifo, arg0) kfifoPrintInternalEngineCheck_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdStringCommon(pGpu, pKernelFifo, arg0) kfifoGetClientIdStringCommon_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdStringCommon_HAL(pGpu, pKernelFifo, arg0) kfifoGetClientIdStringCommon_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdString(pGpu, pKernelFifo, arg0) kfifoGetClientIdString_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdString_HAL(pGpu, pKernelFifo, arg0) kfifoGetClientIdString_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdStringCheck(pGpu, pKernelFifo, arg0) kfifoGetClientIdStringCheck_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdStringCheck_HAL(pGpu, pKernelFifo, arg0) kfifoGetClientIdStringCheck_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoStatePreLoad(pGpu, pEngstate, arg0) kfifoStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kfifoStatePostUnload(pGpu, pEngstate, arg0) kfifoStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kfifoStateInitUnlocked(pGpu, pEngstate) kfifoStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kfifoInitMissing(pGpu, pEngstate) kfifoInitMissing_DISPATCH(pGpu, pEngstate)
#define kfifoStatePreInitLocked(pGpu, pEngstate) kfifoStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define kfifoStatePreInitUnlocked(pGpu, pEngstate) kfifoStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kfifoIsPresent(pGpu, pEngstate) kfifoIsPresent_DISPATCH(pGpu, pEngstate)
// Constructor helper, implemented in the GM107+ HAL
NV_STATUS kfifoConstructHal_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out of this build:
// asserts at pre-compile check and reports unsupported
static inline NV_STATUS kfifoConstructHal(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoConstructHal(pGpu, pKernelFifo) kfifoConstructHal_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoConstructHal_HAL(pGpu, pKernelFifo) kfifoConstructHal(pGpu, pKernelFifo)
575 
// Generated no-op implementation: succeeds without programming anything
static inline NV_STATUS kfifoChannelGroupSetTimesliceSched_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out of this build
static inline NV_STATUS kfifoChannelGroupSetTimesliceSched(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelGroupSetTimesliceSched(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimesliceSched_56cd7a(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoChannelGroupSetTimesliceSched_HAL(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimesliceSched(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit)
591 
// Query the channel count for a runlist; kernel-RM implementation
NvU32 kfifoRunlistQueryNumChannels_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out: asserts, returns 0 channels
static inline NvU32 kfifoRunlistQueryNumChannels(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistQueryNumChannels(pGpu, pKernelFifo, runlistId) kfifoRunlistQueryNumChannels_KERNEL(pGpu, pKernelFifo, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistQueryNumChannels_HAL(pGpu, pKernelFifo, runlistId) kfifoRunlistQueryNumChannels(pGpu, pKernelFifo, runlistId)
605 
// Idle the given channels (parallel arrays of client/device/channel handles,
// numChannels entries each); kernel-RM implementation
NV_STATUS kfifoIdleChannelsPerDevice_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvHandle *phClients, NvHandle *phDevices, NvHandle *phChannels, NvU32 numChannels, NvU32 flags, NvU32 timeout);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out of this build
static inline NV_STATUS kfifoIdleChannelsPerDevice(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvHandle *phClients, NvHandle *phDevices, NvHandle *phChannels, NvU32 numChannels, NvU32 flags, NvU32 timeout) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoIdleChannelsPerDevice(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout) kfifoIdleChannelsPerDevice_KERNEL(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoIdleChannelsPerDevice_HAL(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout) kfifoIdleChannelsPerDevice(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout)
619 
620 NvU64 kfifoChannelGroupGetDefaultTimeslice_GV100(struct KernelFifo *pKernelFifo);
621 
622 
623 #ifdef __nvoc_kernel_fifo_h_disabled
624 static inline NvU64 kfifoChannelGroupGetDefaultTimeslice(struct KernelFifo *pKernelFifo) {
625     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
626     return 0;
627 }
628 #else //__nvoc_kernel_fifo_h_disabled
629 #define kfifoChannelGroupGetDefaultTimeslice(pKernelFifo) kfifoChannelGroupGetDefaultTimeslice_GV100(pKernelFifo)
630 #endif //__nvoc_kernel_fifo_h_disabled
631 
632 #define kfifoChannelGroupGetDefaultTimeslice_HAL(pKernelFifo) kfifoChannelGroupGetDefaultTimeslice(pKernelFifo)
633 
634 static inline NvU64 kfifoRunlistGetMinTimeSlice_4a4dee(struct KernelFifo *pKernelFifo) {
635     return 0;
636 }
637 
638 
639 #ifdef __nvoc_kernel_fifo_h_disabled
640 static inline NvU64 kfifoRunlistGetMinTimeSlice(struct KernelFifo *pKernelFifo) {
641     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
642     return 0;
643 }
644 #else //__nvoc_kernel_fifo_h_disabled
645 #define kfifoRunlistGetMinTimeSlice(pKernelFifo) kfifoRunlistGetMinTimeSlice_4a4dee(pKernelFifo)
646 #endif //__nvoc_kernel_fifo_h_disabled
647 
648 #define kfifoRunlistGetMinTimeSlice_HAL(pKernelFifo) kfifoRunlistGetMinTimeSlice(pKernelFifo)
649 
// HAL accessor group (NVOC-generated): declaration of the bound
// implementation, assert-stub under __nvoc_kernel_fifo_h_disabled, macro
// binding otherwise, and a _HAL forwarder.

// Instance-memory parameters (size/alignment/protection/attr/alloc list are
// all out-params); GM107+ binding.  Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoGetInstMemInfo_GM107(struct KernelFifo *pKernelFifo, NvU64 *pSize, NvU64 *pAlignment, NvBool *pbInstProtectedMem, NvU32 *pInstAttr, const NV_ADDRESS_SPACE **ppInstAllocList);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetInstMemInfo(struct KernelFifo *pKernelFifo, NvU64 *pSize, NvU64 *pAlignment, NvBool *pbInstProtectedMem, NvU32 *pInstAttr, const NV_ADDRESS_SPACE **ppInstAllocList) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetInstMemInfo(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList) kfifoGetInstMemInfo_GM107(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetInstMemInfo_HAL(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList) kfifoGetInstMemInfo(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList)

// Instance-block size and alignment shift (out-params); GM107+ binding.
// Disabled stub is a no-op (void return).
void kfifoGetInstBlkSizeAlign_GM107(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pShift);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoGetInstBlkSizeAlign(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pShift) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetInstBlkSizeAlign(pKernelFifo, pSize, pShift) kfifoGetInstBlkSizeAlign_GM107(pKernelFifo, pSize, pShift)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetInstBlkSizeAlign_HAL(pKernelFifo, pSize, pShift) kfifoGetInstBlkSizeAlign(pKernelFifo, pSize, pShift)

// Default runlist ID for an RM engine type; GM107+ binding.  Disabled stub: 0.
NvU32 kfifoGetDefaultRunlist_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetDefaultRunlist(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetDefaultRunlist(pGpu, pKernelFifo, rmEngineType) kfifoGetDefaultRunlist_GM107(pGpu, pKernelFifo, rmEngineType)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetDefaultRunlist_HAL(pGpu, pKernelFifo, rmEngineType) kfifoGetDefaultRunlist(pGpu, pKernelFifo, rmEngineType)
690 
// HAL accessor group (NVOC-generated): implementation declaration,
// assert-stub under __nvoc_kernel_fifo_h_disabled, macro binding otherwise,
// and a _HAL forwarder.  The three kfifoValidate* predicates below return
// NV_FALSE from their disabled stubs.

// Validate an (SCG type, runqueue) combination; GP102+ binding.
NvBool kfifoValidateSCGTypeAndRunqueue_GP102(struct KernelFifo *pKernelFifo, NvU32 scgType, NvU32 runqueue);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoValidateSCGTypeAndRunqueue(struct KernelFifo *pKernelFifo, NvU32 scgType, NvU32 runqueue) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoValidateSCGTypeAndRunqueue(pKernelFifo, scgType, runqueue) kfifoValidateSCGTypeAndRunqueue_GP102(pKernelFifo, scgType, runqueue)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoValidateSCGTypeAndRunqueue_HAL(pKernelFifo, scgType, runqueue) kfifoValidateSCGTypeAndRunqueue(pKernelFifo, scgType, runqueue)

// Validate an (engine descriptor, runqueue) combination; GP102+ binding.
NvBool kfifoValidateEngineAndRunqueue_GP102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 runqueue);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoValidateEngineAndRunqueue(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 runqueue) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoValidateEngineAndRunqueue(pGpu, pKernelFifo, engDesc, runqueue) kfifoValidateEngineAndRunqueue_GP102(pGpu, pKernelFifo, engDesc, runqueue)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoValidateEngineAndRunqueue_HAL(pGpu, pKernelFifo, engDesc, runqueue) kfifoValidateEngineAndRunqueue(pGpu, pKernelFifo, engDesc, runqueue)

// Validate an (engine descriptor, subcontext type) combination; GP102+ binding.
NvBool kfifoValidateEngineAndSubctxType_GP102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 subctxType);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoValidateEngineAndSubctxType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 subctxType) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoValidateEngineAndSubctxType(pGpu, pKernelFifo, engDesc, subctxType) kfifoValidateEngineAndSubctxType_GP102(pGpu, pKernelFifo, engDesc, subctxType)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoValidateEngineAndSubctxType_HAL(pGpu, pKernelFifo, engDesc, subctxType) kfifoValidateEngineAndSubctxType(pGpu, pKernelFifo, engDesc, subctxType)

// RM-control path for fetching a channel's work-submit token (out-param);
// GV100+ binding.  Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoRmctrlGetWorkSubmitToken_GV100(struct KernelFifo *pKernelFifo, NvHandle hClient, NvHandle hChannel, NvU32 *pWorkSubmitToken);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoRmctrlGetWorkSubmitToken(struct KernelFifo *pKernelFifo, NvHandle hClient, NvHandle hChannel, NvU32 *pWorkSubmitToken) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRmctrlGetWorkSubmitToken(pKernelFifo, hClient, hChannel, pWorkSubmitToken) kfifoRmctrlGetWorkSubmitToken_GV100(pKernelFifo, hClient, hChannel, pWorkSubmitToken)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRmctrlGetWorkSubmitToken_HAL(pKernelFifo, hClient, hChannel, pWorkSubmitToken) kfifoRmctrlGetWorkSubmitToken(pKernelFifo, hClient, hChannel, pWorkSubmitToken)
746 
// HAL accessor group (NVOC-generated): implementation declaration,
// assert-stub under __nvoc_kernel_fifo_h_disabled, macro binding otherwise,
// and a _HAL forwarder.

// Memory descriptor for a channel's FIFO context state (out-param
// ppMemdesc); GM107+ binding.  Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoChannelGetFifoContextMemDesc_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, FIFO_CTX engState, MEMORY_DESCRIPTOR **ppMemdesc);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelGetFifoContextMemDesc(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, FIFO_CTX engState, MEMORY_DESCRIPTOR **ppMemdesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelGetFifoContextMemDesc(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc) kfifoChannelGetFifoContextMemDesc_GM107(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoChannelGetFifoContextMemDesc_HAL(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc) kfifoChannelGetFifoContextMemDesc(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc)

// Resolve an instance-block descriptor (arg0) to its KernelChannel (out-param
// arg1); GM107+ binding.  Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoConvertInstToKernelChannel_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, INST_BLOCK_DESC *arg0, struct KernelChannel **arg1);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoConvertInstToKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, INST_BLOCK_DESC *arg0, struct KernelChannel **arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoConvertInstToKernelChannel(pGpu, pKernelFifo, arg0, arg1) kfifoConvertInstToKernelChannel_GM107(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoConvertInstToKernelChannel_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoConvertInstToKernelChannel(pGpu, pKernelFifo, arg0, arg1)

// USERMODE mapping info (NvU64/NvU32 out-params arg0/arg1); GV100+ binding.
// Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoGetUsermodeMapInfo_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *arg0, NvU32 *arg1);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetUsermodeMapInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUsermodeMapInfo(pGpu, pKernelFifo, arg0, arg1) kfifoGetUsermodeMapInfo_GV100(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoGetUsermodeMapInfo(pGpu, pKernelFifo, arg0, arg1)
788 
// HAL accessor group (NVOC-generated): implementation declaration,
// assert-stub under __nvoc_kernel_fifo_h_disabled, macro binding otherwise,
// and a _HAL forwarder.  All four NvU32 getters below return 0 from their
// disabled stubs.

// Maximum subcontext count; GV100+ binding.
NvU32 kfifoGetMaxSubcontext_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxSubcontext(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxSubcontext(pGpu, pKernelFifo, arg0) kfifoGetMaxSubcontext_GV100(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, arg0) kfifoGetMaxSubcontext(pGpu, pKernelFifo, arg0)

// Maximum subcontext count as reported by graphics (per the name — TODO
// confirm against the _KERNEL implementation).
NvU32 kfifoGetMaxSubcontextFromGr_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernel);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxSubcontextFromGr(struct OBJGPU *pGpu, struct KernelFifo *pKernel) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxSubcontextFromGr(pGpu, pKernel) kfifoGetMaxSubcontextFromGr_KERNEL(pGpu, pKernel)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetMaxSubcontextFromGr_HAL(pGpu, pKernel) kfifoGetMaxSubcontextFromGr(pGpu, pKernel)

// Number of runqueues; this configuration binds a constant-2 inline (the
// hash suffix _adde13 encodes the generated constant-return body).
static inline NvU32 kfifoGetNumRunqueues_adde13(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return 2;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetNumRunqueues(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetNumRunqueues(pGpu, pKernelFifo) kfifoGetNumRunqueues_adde13(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo) kfifoGetNumRunqueues(pGpu, pKernelFifo)

// Maximum channels per channel group (TSG); GV100+ binding.
NvU32 kfifoGetMaxChannelGroupSize_GV100(struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxChannelGroupSize(struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxChannelGroupSize(pKernelFifo) kfifoGetMaxChannelGroupSize_GV100(pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetMaxChannelGroupSize_HAL(pKernelFifo) kfifoGetMaxChannelGroupSize(pKernelFifo)
846 
// HAL accessor group (NVOC-generated).  kfifoAddObject/kfifoDeleteObject are
// bound to no-op inlines in this configuration (the hash suffix _56cd7a
// encodes the generated "return NV_OK" body) — tracking of ChannelDescendant
// objects is not needed here.

// No-op add of a channel-descendant object; always NV_OK.
static inline NV_STATUS kfifoAddObject_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoAddObject(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoAddObject(pGpu, pKernelFifo, pObject) kfifoAddObject_56cd7a(pGpu, pKernelFifo, pObject)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoAddObject_HAL(pGpu, pKernelFifo, pObject) kfifoAddObject(pGpu, pKernelFifo, pObject)

// No-op delete of a channel-descendant object; always NV_OK.
static inline NV_STATUS kfifoDeleteObject_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoDeleteObject(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoDeleteObject(pGpu, pKernelFifo, pObject) kfifoDeleteObject_56cd7a(pGpu, pKernelFifo, pObject)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoDeleteObject_HAL(pGpu, pKernelFifo, pObject) kfifoDeleteObject(pGpu, pKernelFifo, pObject)

// Build the FIFO engine list; bound to the _KERNEL implementation.
// Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoConstructEngineList_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoConstructEngineList(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoConstructEngineList(pGpu, pKernelFifo) kfifoConstructEngineList_KERNEL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoConstructEngineList_HAL(pGpu, pKernelFifo) kfifoConstructEngineList(pGpu, pKernelFifo)
892 
// HAL accessor group (NVOC-generated): implementation declaration,
// assert-stub under __nvoc_kernel_fifo_h_disabled, macro binding otherwise,
// and a _HAL forwarder.

// Host device-info table (fills pEngineInfo, optionally scoped to a MIG
// device); bound to _KERNEL.  Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoGetHostDeviceInfoTable_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO *pEngineInfo, struct Device *pMigDevice);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetHostDeviceInfoTable(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO *pEngineInfo, struct Device *pMigDevice) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetHostDeviceInfoTable(pGpu, pKernelFifo, pEngineInfo, pMigDevice) kfifoGetHostDeviceInfoTable_KERNEL(pGpu, pKernelFifo, pEngineInfo, pMigDevice)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetHostDeviceInfoTable_HAL(pGpu, pKernelFifo, pEngineInfo, pMigDevice) kfifoGetHostDeviceInfoTable(pGpu, pKernelFifo, pEngineInfo, pMigDevice)

// Subcontext type of a channel (NvU32 out-param arg1); GV100+ binding.
// Disabled stub is a no-op (void return).
void kfifoGetSubctxType_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 *arg1);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoGetSubctxType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetSubctxType(pGpu, pKernelFifo, arg0, arg1) kfifoGetSubctxType_GV100(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetSubctxType_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoGetSubctxType(pGpu, pKernelFifo, arg0, arg1)

// Internal work-submit token generation.  Two generated "unsupported"
// inline variants (_c04480 and _5baef9 both assert and return
// NV_ERR_NOT_SUPPORTED) plus a real GA100+ implementation.  Note the
// enabled binding below maps the public name to the _c04480 stub, i.e.
// this API is not supported in this header's configuration.
static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken_c04480(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

NV_STATUS kfifoGenerateInternalWorkSubmitToken_GA100(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1);

static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken_5baef9(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGenerateInternalWorkSubmitToken(pGpu, arg0, arg1) kfifoGenerateInternalWorkSubmitToken_c04480(pGpu, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGenerateInternalWorkSubmitToken_HAL(pGpu, arg0, arg1) kfifoGenerateInternalWorkSubmitToken(pGpu, arg0, arg1)
941 
942 static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode_c04480(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
943     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
944 }
945 
946 NV_STATUS kfifoUpdateInternalDoorbellForUsermode_GA100(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId);
947 
948 static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode_5baef9(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
949     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
950 }
951 
952 
953 #ifdef __nvoc_kernel_fifo_h_disabled
954 static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
955     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
956     return NV_ERR_NOT_SUPPORTED;
957 }
958 #else //__nvoc_kernel_fifo_h_disabled
959 #define kfifoUpdateInternalDoorbellForUsermode(arg0, arg1, workSubmitToken, runlisId) kfifoUpdateInternalDoorbellForUsermode_c04480(arg0, arg1, workSubmitToken, runlisId)
960 #endif //__nvoc_kernel_fifo_h_disabled
961 
962 #define kfifoUpdateInternalDoorbellForUsermode_HAL(arg0, arg1, workSubmitToken, runlisId) kfifoUpdateInternalDoorbellForUsermode(arg0, arg1, workSubmitToken, runlisId)
963 
// HAL accessor group (NVOC-generated): implementation declaration,
// assert-stub under __nvoc_kernel_fifo_h_disabled, macro binding otherwise,
// and a _HAL forwarder.

// Lite mode predicate; this configuration binds a constant-NV_FALSE inline
// (the hash suffix _491d52 encodes the generated "(0 != 0)" body).
static inline NvBool kfifoIsLiteModeEnabled_491d52(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return ((NvBool)(0 != 0));
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoIsLiteModeEnabled(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoIsLiteModeEnabled(pGpu, pKernelFifo) kfifoIsLiteModeEnabled_491d52(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoIsLiteModeEnabled_HAL(pGpu, pKernelFifo) kfifoIsLiteModeEnabled(pGpu, pKernelFifo)

// Engine count; GM107+ binding.  Disabled stub: 0.
NvU32 kfifoGetNumEngines_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetNumEngines(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetNumEngines(pGpu, pKernelFifo) kfifoGetNumEngines_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetNumEngines_HAL(pGpu, pKernelFifo) kfifoGetNumEngines(pGpu, pKernelFifo)

// Engine name for an (info type, value) lookup; GM107+ binding.  Disabled
// stub: NULL.
const char *kfifoGetEngineName_GM107(struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline const char *kfifoGetEngineName(struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEngineName(pKernelFifo, inType, inVal) kfifoGetEngineName_GM107(pKernelFifo, inType, inVal)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetEngineName_HAL(pKernelFifo, inType, inVal) kfifoGetEngineName(pKernelFifo, inType, inVal)

// Maximum number of runlists; GM107+ binding.  Disabled stub: 0.
NvU32 kfifoGetMaxNumRunlists_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxNumRunlists(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxNumRunlists(pGpu, pKernelFifo) kfifoGetMaxNumRunlists_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetMaxNumRunlists_HAL(pGpu, pKernelFifo) kfifoGetMaxNumRunlists(pGpu, pKernelFifo)

// PBDMA IDs serving an engine (out-params: array pointer + count); GM107+
// binding.  Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoGetEnginePbdmaIds_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE type, NvU32 val, NvU32 **ppPbdmaIds, NvU32 *pNumPbdmas);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetEnginePbdmaIds(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE type, NvU32 val, NvU32 **ppPbdmaIds, NvU32 *pNumPbdmas) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEnginePbdmaIds(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas) kfifoGetEnginePbdmaIds_GM107(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetEnginePbdmaIds_HAL(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas) kfifoGetEnginePbdmaIds(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas)
1035 
// HAL accessor group (NVOC-generated): implementation declaration,
// assert-stub under __nvoc_kernel_fifo_h_disabled, macro binding otherwise,
// and a _HAL forwarder.

// PBDMA fault-ID reservation; this configuration binds a no-op inline that
// always returns NV_OK (hash suffix _56cd7a encodes that generated body).
static inline NV_STATUS kfifoReservePbdmaFaultIds_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_ENGINE_LIST *arg0, NvU32 arg1) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoReservePbdmaFaultIds(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_ENGINE_LIST *arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoReservePbdmaFaultIds(pGpu, pKernelFifo, arg0, arg1) kfifoReservePbdmaFaultIds_56cd7a(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoReservePbdmaFaultIds_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoReservePbdmaFaultIds(pGpu, pKernelFifo, arg0, arg1)

// Fill the NV2080 engine-partnerlist control params; GM107+ binding.
// Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoGetEnginePartnerList_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pParams);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetEnginePartnerList(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pParams) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEnginePartnerList(pGpu, pKernelFifo, pParams) kfifoGetEnginePartnerList_GM107(pGpu, pKernelFifo, pParams)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetEnginePartnerList_HAL(pGpu, pKernelFifo, pParams) kfifoGetEnginePartnerList(pGpu, pKernelFifo, pParams)

// Runlist TSG-header support; this configuration binds a constant-NV_TRUE
// inline (hash suffix _cbe027 encodes the generated "(0 == 0)" body).
static inline NvBool kfifoRunlistIsTsgHeaderSupported_cbe027(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    return ((NvBool)(0 == 0));
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoRunlistIsTsgHeaderSupported(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistIsTsgHeaderSupported(pGpu, pKernelFifo, arg0) kfifoRunlistIsTsgHeaderSupported_cbe027(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistIsTsgHeaderSupported_HAL(pGpu, pKernelFifo, arg0) kfifoRunlistIsTsgHeaderSupported(pGpu, pKernelFifo, arg0)

// Size of one runlist entry; GV100+ binding.  Disabled stub: 0.
NvU32 kfifoRunlistGetEntrySize_GV100(struct KernelFifo *arg0);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoRunlistGetEntrySize(struct KernelFifo *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistGetEntrySize(arg0) kfifoRunlistGetEntrySize_GV100(arg0)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistGetEntrySize_HAL(arg0) kfifoRunlistGetEntrySize(arg0)
1095 
// HAL accessor group (NVOC-generated): implementation declaration,
// assert-stub under __nvoc_kernel_fifo_h_disabled, macro binding otherwise,
// and a _HAL forwarder.  This run covers the USERD (user doorbell region)
// setup/teardown helpers.

// BAR1 USERD snoop setup; this configuration binds a generated no-op inline
// (hash suffix _b3696a encodes the empty "return;" body).
static inline void kfifoSetupBar1UserdSnoop_b3696a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bEnable, NvU64 offset) {
    return;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoSetupBar1UserdSnoop(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bEnable, NvU64 offset) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoSetupBar1UserdSnoop(pGpu, pKernelFifo, bEnable, offset) kfifoSetupBar1UserdSnoop_b3696a(pGpu, pKernelFifo, bEnable, offset)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoSetupBar1UserdSnoop_HAL(pGpu, pKernelFifo, bEnable, offset) kfifoSetupBar1UserdSnoop(pGpu, pKernelFifo, bEnable, offset)

// Pre-allocate USERD; GM107+ binding.  Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoPreAllocUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoPreAllocUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoPreAllocUserD(pGpu, pKernelFifo) kfifoPreAllocUserD_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoPreAllocUserD_HAL(pGpu, pKernelFifo) kfifoPreAllocUserD(pGpu, pKernelFifo)

// Free the pre-allocated USERD (counterpart of kfifoPreAllocUserD); GM107+
// binding.  Disabled stub is a no-op (void return).
void kfifoFreePreAllocUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoFreePreAllocUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoFreePreAllocUserD(pGpu, pKernelFifo) kfifoFreePreAllocUserD_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoFreePreAllocUserD_HAL(pGpu, pKernelFifo) kfifoFreePreAllocUserD(pGpu, pKernelFifo)

// BAR1 mapping offset/size of USERD (out-params); GM107+ binding.
// Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoGetUserdBar1MapInfo_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *bar1Offset, NvU32 *bar1MapSize);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetUserdBar1MapInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *bar1Offset, NvU32 *bar1MapSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUserdBar1MapInfo(pGpu, pKernelFifo, bar1Offset, bar1MapSize) kfifoGetUserdBar1MapInfo_GM107(pGpu, pKernelFifo, bar1Offset, bar1MapSize)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUserdBar1MapInfo_HAL(pGpu, pKernelFifo, bar1Offset, bar1MapSize) kfifoGetUserdBar1MapInfo(pGpu, pKernelFifo, bar1Offset, bar1MapSize)
1151 
// HAL accessor group (NVOC-generated): implementation declaration,
// assert-stub under __nvoc_kernel_fifo_h_disabled, macro binding otherwise,
// and a _HAL forwarder.

// USERD size and address shift (out-params); GM107+ binding.  Disabled stub
// is a no-op (void return).
void kfifoGetUserdSizeAlign_GM107(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pAddrShift);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoGetUserdSizeAlign(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pAddrShift) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUserdSizeAlign(pKernelFifo, pSize, pAddrShift) kfifoGetUserdSizeAlign_GM107(pKernelFifo, pSize, pAddrShift)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUserdSizeAlign_HAL(pKernelFifo, pSize, pAddrShift) kfifoGetUserdSizeAlign(pKernelFifo, pSize, pAddrShift)

// USERD aperture/attribute location (out-params); GM107+ binding.
// Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoGetUserdLocation_GM107(struct KernelFifo *pKernelFifo, NvU32 *pUserdAperture, NvU32 *pUserdAttribute);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetUserdLocation(struct KernelFifo *pKernelFifo, NvU32 *pUserdAperture, NvU32 *pUserdAttribute) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUserdLocation(pKernelFifo, pUserdAperture, pUserdAttribute) kfifoGetUserdLocation_GM107(pKernelFifo, pUserdAperture, pUserdAttribute)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUserdLocation_HAL(pKernelFifo, pUserdAperture, pUserdAttribute) kfifoGetUserdLocation(pKernelFifo, pUserdAperture, pUserdAttribute)

// Total size of fault-method buffers (bCalcForFbRsvd selects the FB-reserved
// accounting variant — per the name; confirm in the GV100 source).
// Disabled stub: 0.
NvU32 kfifoCalcTotalSizeOfFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bCalcForFbRsvd);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoCalcTotalSizeOfFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bCalcForFbRsvd) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoCalcTotalSizeOfFaultMethodBuffers(pGpu, pKernelFifo, bCalcForFbRsvd) kfifoCalcTotalSizeOfFaultMethodBuffers_GV100(pGpu, pKernelFifo, bCalcForFbRsvd)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoCalcTotalSizeOfFaultMethodBuffers_HAL(pGpu, pKernelFifo, bCalcForFbRsvd) kfifoCalcTotalSizeOfFaultMethodBuffers(pGpu, pKernelFifo, bCalcForFbRsvd)

// Engine presence check (NvBool out-param pPresent); GM107+ binding.
// Disabled stub: NV_ERR_NOT_SUPPORTED.
NV_STATUS kfifoCheckEngine_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvBool *pPresent);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoCheckEngine(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvBool *pPresent) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoCheckEngine(pGpu, pKernelFifo, engDesc, pPresent) kfifoCheckEngine_GM107(pGpu, pKernelFifo, engDesc, pPresent)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoCheckEngine_HAL(pGpu, pKernelFifo, engDesc, pPresent) kfifoCheckEngine(pGpu, pKernelFifo, engDesc, pPresent)
1206 
1207 static inline NV_STATUS kfifoRestoreSchedPolicy_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1208     return NV_OK;
1209 }
1210 
1211 
1212 #ifdef __nvoc_kernel_fifo_h_disabled
1213 static inline NV_STATUS kfifoRestoreSchedPolicy(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1214     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1215     return NV_ERR_NOT_SUPPORTED;
1216 }
1217 #else //__nvoc_kernel_fifo_h_disabled
1218 #define kfifoRestoreSchedPolicy(pGpu, pKernelFifo) kfifoRestoreSchedPolicy_56cd7a(pGpu, pKernelFifo)
1219 #endif //__nvoc_kernel_fifo_h_disabled
1220 
1221 #define kfifoRestoreSchedPolicy_HAL(pGpu, pKernelFifo) kfifoRestoreSchedPolicy(pGpu, pKernelFifo)
1222 
1223 NV_STATUS kfifoGetMaxSecureChannels_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1224 
1225 
1226 #ifdef __nvoc_kernel_fifo_h_disabled
1227 static inline NV_STATUS kfifoGetMaxSecureChannels(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1228     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1229     return NV_ERR_NOT_SUPPORTED;
1230 }
1231 #else //__nvoc_kernel_fifo_h_disabled
1232 #define kfifoGetMaxSecureChannels(pGpu, pKernelFifo) kfifoGetMaxSecureChannels_KERNEL(pGpu, pKernelFifo)
1233 #endif //__nvoc_kernel_fifo_h_disabled
1234 
1235 #define kfifoGetMaxSecureChannels_HAL(pGpu, pKernelFifo) kfifoGetMaxSecureChannels(pGpu, pKernelFifo)
1236 
// Assign an explicit runlist ID to a channel; GM107 binding only.
NV_STATUS kfifoRunlistSetId_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 runlistId);


#ifdef __nvoc_kernel_fifo_h_disabled
// Compiled-out stub: asserts and reports NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoRunlistSetId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistSetId(pGpu, pKernelFifo, arg0, runlistId) kfifoRunlistSetId_GM107(pGpu, pKernelFifo, arg0, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistSetId_HAL(pGpu, pKernelFifo, arg0, runlistId) kfifoRunlistSetId(pGpu, pKernelFifo, arg0, runlistId)

// Derive and set a channel's runlist ID from an engine descriptor.
NV_STATUS kfifoRunlistSetIdByEngine_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 engDesc);


#ifdef __nvoc_kernel_fifo_h_disabled
// Compiled-out stub: asserts and reports NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoRunlistSetIdByEngine(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 engDesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistSetIdByEngine(pGpu, pKernelFifo, arg0, engDesc) kfifoRunlistSetIdByEngine_GM107(pGpu, pKernelFifo, arg0, engDesc)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistSetIdByEngine_HAL(pGpu, pKernelFifo, arg0, engDesc) kfifoRunlistSetIdByEngine(pGpu, pKernelFifo, arg0, engDesc)

// Initialize the USERD region described by pMemDesc (GM107 binding).
void kfifoSetupUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc);


#ifdef __nvoc_kernel_fifo_h_disabled
// Compiled-out stub: asserts; void return, so nothing is reported.
static inline void kfifoSetupUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoSetupUserD(pGpu, pKernelFifo, pMemDesc) kfifoSetupUserD_GM107(pGpu, pKernelFifo, pMemDesc)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoSetupUserD_HAL(pGpu, pKernelFifo, pMemDesc) kfifoSetupUserD_GM107(pGpu, pKernelFifo, pMemDesc)

// Map a fault access-type code to a human-readable string (GV100 binding).
const char *kfifoGetFaultAccessTypeString_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);


#ifdef __nvoc_kernel_fifo_h_disabled
// Compiled-out stub: asserts and returns NULL.
static inline const char *kfifoGetFaultAccessTypeString(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetFaultAccessTypeString(pGpu, pKernelFifo, arg0) kfifoGetFaultAccessTypeString_GV100(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetFaultAccessTypeString_HAL(pGpu, pKernelFifo, arg0) kfifoGetFaultAccessTypeString(pGpu, pKernelFifo, arg0)
1291 
// OBJENGSTATE virtual overrides.  Each _DISPATCH thunk indirects through the
// per-object function pointer (__kfifoXxx__) installed by the NVOC
// constructor, so the GPU-specific implementation is selected at runtime.
NV_STATUS kfifoConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGDESCRIPTOR engDesc);

static inline NV_STATUS kfifoConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGDESCRIPTOR engDesc) {
    return pKernelFifo->__kfifoConstructEngine__(pGpu, pKernelFifo, engDesc);
}

NV_STATUS kfifoStateLoad_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags);

// Generated no-op variant: state-load does nothing for configurations bound
// to this body; always returns NV_OK.
static inline NV_STATUS kfifoStateLoad_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
    return NV_OK;
}

static inline NV_STATUS kfifoStateLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
    return pKernelFifo->__kfifoStateLoad__(pGpu, pKernelFifo, flags);
}

NV_STATUS kfifoStateUnload_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags);

// Generated no-op variant: always returns NV_OK.
static inline NV_STATUS kfifoStateUnload_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
    return NV_OK;
}

static inline NV_STATUS kfifoStateUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
    return pKernelFifo->__kfifoStateUnload__(pGpu, pKernelFifo, flags);
}

NV_STATUS kfifoStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

static inline NV_STATUS kfifoStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return pKernelFifo->__kfifoStateInitLocked__(pGpu, pKernelFifo);
}

void kfifoStateDestroy_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

static inline void kfifoStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    pKernelFifo->__kfifoStateDestroy__(pGpu, pKernelFifo);
}

NV_STATUS kfifoStatePostLoad_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags);

static inline NV_STATUS kfifoStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
    return pKernelFifo->__kfifoStatePostLoad__(pGpu, pKernelFifo, flags);
}

NV_STATUS kfifoStatePreUnload_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags);

static inline NV_STATUS kfifoStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
    return pKernelFifo->__kfifoStatePreUnload__(pGpu, pKernelFifo, flags);
}
1341 
// Per-GPU HAL virtuals dispatched through the object's vtable pointers.
// GH100 binding validates channel-allocation address spaces; the _56cd7a
// variant is a generated pass (always NV_OK) for chips with no restriction.
NV_STATUS kfifoCheckChannelAllocAddrSpaces_GH100(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace);

static inline NV_STATUS kfifoCheckChannelAllocAddrSpaces_56cd7a(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace) {
    return NV_OK;
}

static inline NV_STATUS kfifoCheckChannelAllocAddrSpaces_DISPATCH(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace) {
    return pKernelFifo->__kfifoCheckChannelAllocAddrSpaces__(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace);
}

// Construct memory descriptors for the usermode (VF/doorbell) region;
// GH100 and GV100 bindings, selected via the vtable at runtime.
NV_STATUS kfifoConstructUsermodeMemdescs_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

NV_STATUS kfifoConstructUsermodeMemdescs_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

static inline NV_STATUS kfifoConstructUsermodeMemdescs_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return pKernelFifo->__kfifoConstructUsermodeMemdescs__(pGpu, pKernelFifo);
}

// Per-channel-group local max-subcontext count (GM107 / GA100 bindings).
NvU32 kfifoChannelGroupGetLocalMaxSubcontext_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1);

NvU32 kfifoChannelGroupGetLocalMaxSubcontext_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1);

static inline NvU32 kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1) {
    return pKernelFifo->__kfifoChannelGroupGetLocalMaxSubcontext__(pGpu, pKernelFifo, arg0, arg1);
}

// Context-buffer mapping flags.  NOTE: the _b3696a no-op variant leaves
// *pFlags untouched — callers must pre-initialize it.
void kfifoGetCtxBufferMapFlags_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engine, NvU32 *pFlags);

static inline void kfifoGetCtxBufferMapFlags_b3696a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engine, NvU32 *pFlags) {
    return;
}

static inline void kfifoGetCtxBufferMapFlags_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engine, NvU32 *pFlags) {
    pKernelFifo->__kfifoGetCtxBufferMapFlags__(pGpu, pKernelFifo, engine, pFlags);
}

// Translate one ENGINE_INFO_TYPE value into another for a given engine
// (e.g. engine type -> fault ID); result written to *pOutVal.
NV_STATUS kfifoEngineInfoXlate_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal);

NV_STATUS kfifoEngineInfoXlate_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal);

static inline NV_STATUS kfifoEngineInfoXlate_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal) {
    return pKernelFifo->__kfifoEngineInfoXlate__(pGpu, pKernelFifo, inType, inVal, outType, pOutVal);
}

// Produce a work-submit (doorbell) token for a channel; returned through
// pGeneratedToken.
NV_STATUS kfifoGenerateWorkSubmitToken_TU102(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1, NvU32 *pGeneratedToken, NvBool bUsedForHost);

NV_STATUS kfifoGenerateWorkSubmitToken_GA100(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1, NvU32 *pGeneratedToken, NvBool bUsedForHost);

static inline NV_STATUS kfifoGenerateWorkSubmitToken_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1, NvU32 *pGeneratedToken, NvBool bUsedForHost) {
    return arg0->__kfifoGenerateWorkSubmitToken__(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost);
}
1393 
1394 NV_STATUS kfifoUpdateUsermodeDoorbell_TU102(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId);
1395 
1396 NV_STATUS kfifoUpdateUsermodeDoorbell_GA100(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId);
1397 
1398 static inline NV_STATUS kfifoUpdateUsermodeDoorbell_DISPATCH(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
1399     return arg1->__kfifoUpdateUsermodeDoorbell__(arg0, arg1, workSubmitToken, runlisId);
1400 }
1401 
// Runlist base-address shift (alignment exponent); per-chip bindings for
// GM107, GA100 and GA102, selected via the vtable.
NvU32 kfifoRunlistGetBaseShift_GM107(struct KernelFifo *pKernelFifo);

NvU32 kfifoRunlistGetBaseShift_GA100(struct KernelFifo *pKernelFifo);

NvU32 kfifoRunlistGetBaseShift_GA102(struct KernelFifo *pKernelFifo);

static inline NvU32 kfifoRunlistGetBaseShift_DISPATCH(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->__kfifoRunlistGetBaseShift__(pKernelFifo);
}

// Start offset of the USERD BAR1 mapping.  The _VF binding applies to
// virtual functions; the _4a4dee variant is a generated constant-zero body.
NvU64 kfifoGetUserdBar1MapStartOffset_VF(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

static inline NvU64 kfifoGetUserdBar1MapStartOffset_4a4dee(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return 0;
}

static inline NvU64 kfifoGetUserdBar1MapStartOffset_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return pKernelFifo->__kfifoGetUserdBar1MapStartOffset__(pGpu, pKernelFifo);
}

// Maximum number of copy-engine channel groups (GV100 / GA100 bindings).
NvU32 kfifoGetMaxCeChannelGroups_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

NvU32 kfifoGetMaxCeChannelGroups_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

static inline NvU32 kfifoGetMaxCeChannelGroups_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return pKernelFifo->__kfifoGetMaxCeChannelGroups__(pGpu, pKernelFifo);
}
1429 
// Virtual-to-system channel-ID translation.  The _c04480 variant is the
// generated "unsupported" body: it asserts and returns NV_ERR_NOT_SUPPORTED;
// the _FWCLIENT binding is used when RM runs as a GSP firmware client.
static inline NV_STATUS kfifoGetVChIdForSChId_c04480(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 chId, NvU32 gfid, NvU32 engineId, NvU32 *pVChid) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

NV_STATUS kfifoGetVChIdForSChId_FWCLIENT(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 chId, NvU32 gfid, NvU32 engineId, NvU32 *pVChid);

static inline NV_STATUS kfifoGetVChIdForSChId_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 chId, NvU32 gfid, NvU32 engineId, NvU32 *pVChid) {
    return pKernelFifo->__kfifoGetVChIdForSChId__(pGpu, pKernelFifo, chId, gfid, engineId, pVChid);
}

// Program the channel-ID table for a GFID.  _c04480 = unsupported (assert +
// NV_ERR_NOT_SUPPORTED); _56cd7a = generated no-op (NV_OK).
static inline NV_STATUS kfifoProgramChIdTable_c04480(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS kfifoProgramChIdTable_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    return NV_OK;
}

static inline NV_STATUS kfifoProgramChIdTable_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    return pKernelFifo->__kfifoProgramChIdTable__(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pMigDevice, engineFifoListNumEntries, pEngineFifoList);
}

// Recover all channels for a GFID.  _56cd7a = generated no-op (NV_OK);
// _92bfc3 = unsupported (assert, then NV_ERR_NOT_SUPPORTED).
static inline NV_STATUS kfifoRecoverAllChannels_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 gfid) {
    return NV_OK;
}

static inline NV_STATUS kfifoRecoverAllChannels_92bfc3(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 gfid) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

static inline NV_STATUS kfifoRecoverAllChannels_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 gfid) {
    return pKernelFifo->__kfifoRecoverAllChannels__(pGpu, pKernelFifo, gfid);
}

// PBDMA fault-ID lookup for an engine (GA100 binding); _5baef9 is the
// generated unsupported body.
NV_STATUS kfifoGetEnginePbdmaFaultIds_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE arg0, NvU32 arg1, NvU32 **arg2, NvU32 *arg3);

static inline NV_STATUS kfifoGetEnginePbdmaFaultIds_5baef9(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE arg0, NvU32 arg1, NvU32 **arg2, NvU32 *arg3) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

static inline NV_STATUS kfifoGetEnginePbdmaFaultIds_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE arg0, NvU32 arg1, NvU32 **arg2, NvU32 *arg3) {
    return pKernelFifo->__kfifoGetEnginePbdmaFaultIds__(pGpu, pKernelFifo, arg0, arg1, arg2, arg3);
}
1474 
// Number of PBDMA units on this GPU (GM200 / GA100 bindings).
NvU32 kfifoGetNumPBDMAs_GM200(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

NvU32 kfifoGetNumPBDMAs_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

static inline NvU32 kfifoGetNumPBDMAs_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return pKernelFifo->__kfifoGetNumPBDMAs__(pGpu, pKernelFifo);
}

// Debug-string helpers: map numeric IDs to printable names, dispatched
// through the vtable.
const char *kfifoPrintPbdmaId_TU102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 pbdmaId);

const char *kfifoPrintPbdmaId_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 pbdmaId);

static inline const char *kfifoPrintPbdmaId_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 pbdmaId) {
    return pKernelFifo->__kfifoPrintPbdmaId__(pGpu, pKernelFifo, pbdmaId);
}

const char *kfifoPrintInternalEngine_TU102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);

const char *kfifoPrintInternalEngine_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);

const char *kfifoPrintInternalEngine_AD102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);

const char *kfifoPrintInternalEngine_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);

static inline const char *kfifoPrintInternalEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    return pKernelFifo->__kfifoPrintInternalEngine__(pGpu, pKernelFifo, arg0);
}

// _fa6e19 is a generated constant-NULL body (the ((void *)0) spelling is
// emitted by the generator).
const char *kfifoPrintInternalEngineCheck_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);

static inline const char *kfifoPrintInternalEngineCheck_fa6e19(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    return ((void *)0);
}

static inline const char *kfifoPrintInternalEngineCheck_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    return pKernelFifo->__kfifoPrintInternalEngineCheck__(pGpu, pKernelFifo, arg0);
}

// Client-ID string lookup for MMU fault reporting.  _95626c asserts and
// falls back to the literal "UNKNOWN" via NV_ASSERT_OR_RETURN_PRECOMP.
const char *kfifoGetClientIdStringCommon_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0);

static inline const char *kfifoGetClientIdStringCommon_95626c(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, "UNKNOWN");
}

static inline const char *kfifoGetClientIdStringCommon_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0) {
    return pKernelFifo->__kfifoGetClientIdStringCommon__(pGpu, pKernelFifo, arg0);
}

const char *kfifoGetClientIdString_TU102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0);

const char *kfifoGetClientIdString_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0);

const char *kfifoGetClientIdString_AD102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0);

const char *kfifoGetClientIdString_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0);

static inline const char *kfifoGetClientIdString_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0) {
    return pKernelFifo->__kfifoGetClientIdString__(pGpu, pKernelFifo, arg0);
}

// _da47da is a generated constant body returning the literal "UNKNOWN".
const char *kfifoGetClientIdStringCheck_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);

static inline const char *kfifoGetClientIdStringCheck_da47da(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    return "UNKNOWN";
}

static inline const char *kfifoGetClientIdStringCheck_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    return pKernelFifo->__kfifoGetClientIdStringCheck__(pGpu, pKernelFifo, arg0);
}
1544 
// Inherited OBJENGSTATE virtuals: thin pass-throughs to the base-class
// vtable slots installed on the KernelFifo object (no kfifo-specific logic).
static inline NV_STATUS kfifoStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) {
    return pEngstate->__kfifoStatePreLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kfifoStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) {
    return pEngstate->__kfifoStatePostUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kfifoStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    return pEngstate->__kfifoStateInitUnlocked__(pGpu, pEngstate);
}

static inline void kfifoInitMissing_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    pEngstate->__kfifoInitMissing__(pGpu, pEngstate);
}

static inline NV_STATUS kfifoStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    return pEngstate->__kfifoStatePreInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS kfifoStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    return pEngstate->__kfifoStatePreInitUnlocked__(pGpu, pEngstate);
}

static inline NvBool kfifoIsPresent_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    return pEngstate->__kfifoIsPresent__(pGpu, pEngstate);
}
1572 
1573 static inline const ENGINE_INFO *kfifoGetEngineInfo(struct KernelFifo *pKernelFifo) {
1574     if (pKernelFifo->engineInfo.engineInfoList == ((void *)0))
1575         return ((void *)0);
1576     return &pKernelFifo->engineInfo;
1577 }
1578 
// Accessor for the preallocated-USERD bookkeeping structure.
static inline const PREALLOCATED_USERD_INFO *kfifoGetPreallocatedUserdInfo(struct KernelFifo *pKernelFifo) {
    return &pKernelFifo->userdInfo;
}

// Whether per-runlist channel RAM is currently in use.
static inline NvBool kfifoIsPerRunlistChramEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUsePerRunlistChram;
}

// Whether the hardware supports per-runlist channel RAM at all.
static inline NvBool kfifoIsPerRunlistChramSupportedInHw(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bIsPerRunlistChramSupportedInHw;
}

// Whether channel-ID allocation uses the heap allocator.
static inline NvBool kfifoIsChidHeapEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUseChidHeap;
}

// Whether host-engine expansion is present.
static inline NvBool kfifoIsHostEngineExpansionSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bHostEngineExpansion;
}

// Whether subcontexts (TSG-local contexts) are supported.
static inline NvBool kfifoIsSubcontextSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bSubcontextSupported;
}

// Whether the host unit has the load-balancer overflow behavior.
static inline NvBool kfifoHostHasLbOverflow(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bHostHasLbOverflow;
}

// Whether USERD is placed in system memory (vs. FB).
static inline NvBool kfifoIsUserdInSystemMemory(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUserdInSystemMemory;
}

// Whether USERD supports map-DMA.
static inline NvBool kfifoIsUserdMapDmaSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUserdMapDmaSupported;
}

// Whether mixed instance-memory aperture defaults are allowed.
static inline NvBool kfifoIsMixedInstmemApertureDefAllowed(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bMixedInstmemApertureDefAllowed;
}

// Whether the zombie-subcontext workaround is active.
static inline NvBool kfifoIsZombieSubctxWarEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bIsZombieSubctxWarEnabled;
}

// Whether the WDDM interleaving policy is enabled.
static inline NvBool kfifoIsWddmInterleavingPolicyEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bWddmInterleavingPolicyEnabled;
}

// Whether scheduling (runlist management) is supported on this config.
static inline NvBool kfifoIsSchedSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bIsSchedSupported;
}

// Accessor for the scheduling-manager child object (may be NULL if never
// created — caller should check).
static inline struct KernelSchedMgr *kfifoGetKernelSchedMgr(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->pKernelSchedMgr;
}

// Accessor for the dummy-page memory descriptor.
static inline MEMORY_DESCRIPTOR *kfifoGetDummyPageMemDesc(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->pDummyPageMemDesc;
}
1638 
// NVOC destructor hook: __nvoc_kfifoDestruct is invoked by the generated
// object-teardown code and forwards to the _IMPL.
void kfifoDestruct_IMPL(struct KernelFifo *pKernelFifo);

#define __nvoc_kfifoDestruct(pKernelFifo) kfifoDestruct_IMPL(pKernelFifo)
// Channel-ID manager (CHID_MGR) lifetime and channel-ID allocation wrappers.
// Each has a single _IMPL binding plus a compiled-out stub.
NV_STATUS kfifoChidMgrConstruct_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// Compiled-out stub: asserts and reports NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoChidMgrConstruct(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrConstruct(pGpu, pKernelFifo) kfifoChidMgrConstruct_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

void kfifoChidMgrDestruct_IMPL(struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// Compiled-out stub (void return): asserts only.
static inline void kfifoChidMgrDestruct(struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrDestruct(pKernelFifo) kfifoChidMgrDestruct_IMPL(pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

// Allocate a channel HW ID from pChidMgr for hClient; the alloc mode and the
// bForce*/idx parameters let callers pin a specific internal index, USERD
// page, or explicit ChID.
NV_STATUS kfifoChidMgrAllocChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvHandle hClient, CHANNEL_HW_ID_ALLOC_MODE arg0, NvBool bForceInternalIdx, NvU32 internalIdx, NvBool bForceUserdPage, NvU32 userdPageIdx, NvU32 ChID, struct KernelChannel *arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrAllocChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvHandle hClient, CHANNEL_HW_ID_ALLOC_MODE arg0, NvBool bForceInternalIdx, NvU32 internalIdx, NvBool bForceUserdPage, NvU32 userdPageIdx, NvU32 ChID, struct KernelChannel *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrAllocChid(pGpu, pKernelFifo, pChidMgr, hClient, arg0, bForceInternalIdx, internalIdx, bForceUserdPage, userdPageIdx, ChID, arg1) kfifoChidMgrAllocChid_IMPL(pGpu, pKernelFifo, pChidMgr, hClient, arg0, bForceInternalIdx, internalIdx, bForceUserdPage, userdPageIdx, ChID, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

// Retain / release / free form the ChID reference-management triplet.
NV_STATUS kfifoChidMgrRetainChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrRetainChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrRetainChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrRetainChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled

NV_STATUS kfifoChidMgrReleaseChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrReleaseChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrReleaseChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrReleaseChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled

NV_STATUS kfifoChidMgrFreeChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrFreeChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrFreeChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrFreeChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled
1706 
1707 NV_STATUS kfifoChidMgrReserveSystemChids_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 numChannels, NvU32 flags, NvU32 gfid, NvU32 *pChidOffset, NvU64 offset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList);
1708 
1709 #ifdef __nvoc_kernel_fifo_h_disabled
1710 static inline NV_STATUS kfifoChidMgrReserveSystemChids(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 numChannels, NvU32 flags, NvU32 gfid, NvU32 *pChidOffset, NvU64 offset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
1711     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1712     return NV_ERR_NOT_SUPPORTED;
1713 }
1714 #else //__nvoc_kernel_fifo_h_disabled
1715 #define kfifoChidMgrReserveSystemChids(pGpu, pKernelFifo, pChidMgr, numChannels, flags, gfid, pChidOffset, offset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList) kfifoChidMgrReserveSystemChids_IMPL(pGpu, pKernelFifo, pChidMgr, numChannels, flags, gfid, pChidOffset, offset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList)
1716 #endif //__nvoc_kernel_fifo_h_disabled
1717 
1718 NV_STATUS kfifoChidMgrFreeSystemChids_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList);
1719 
1720 #ifdef __nvoc_kernel_fifo_h_disabled
1721 static inline NV_STATUS kfifoChidMgrFreeSystemChids(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
1722     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1723     return NV_ERR_NOT_SUPPORTED;
1724 }
1725 #else //__nvoc_kernel_fifo_h_disabled
1726 #define kfifoChidMgrFreeSystemChids(pGpu, pKernelFifo, pChidMgr, gfid, pChidOffset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList) kfifoChidMgrFreeSystemChids_IMPL(pGpu, pKernelFifo, pChidMgr, gfid, pChidOffset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList)
1727 #endif //__nvoc_kernel_fifo_h_disabled
1728 
// Configure the channel-ID offset/count for a window of numChannels channels
// starting at 'offset' in pChidMgr, on behalf of the given gfid / MIG device.
// NOTE(review): the exact bookkeeping written through pChidOffset/pChannelCount
// lives in the _IMPL definition (kernel_fifo.c), not visible in this header.
NV_STATUS kfifoSetChidOffset_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: KernelFifo is compiled out, so assert and report failure.
static inline NV_STATUS kfifoSetChidOffset(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoSetChidOffset(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pChidOffset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList) kfifoSetChidOffset_IMPL(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pChidOffset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList)
#endif //__nvoc_kernel_fifo_h_disabled

// Number of channels owned by the given CHID_MGR (0 in disabled builds).
NvU32 kfifoChidMgrGetNumChannels_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoChidMgrGetNumChannels(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr) kfifoChidMgrGetNumChannels_IMPL(pGpu, pKernelFifo, pChidMgr)
#endif //__nvoc_kernel_fifo_h_disabled

// Allocate a hardware channel-group (TSG) ID from pChidMgr; the new ID is
// returned through pGrpId on success.
NV_STATUS kfifoChidMgrAllocChannelGroupHwID_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 *pGrpId);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrAllocChannelGroupHwID(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 *pGrpId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrAllocChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, pGrpId) kfifoChidMgrAllocChannelGroupHwID_IMPL(pGpu, pKernelFifo, pChidMgr, pGrpId)
#endif //__nvoc_kernel_fifo_h_disabled
1761 
// Release a hardware channel-group ID previously handed out by
// kfifoChidMgrAllocChannelGroupHwID back to pChidMgr.
NV_STATUS kfifoChidMgrFreeChannelGroupHwID_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpId);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: asserts and reports NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoChidMgrFreeChannelGroupHwID(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrFreeChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, grpId) kfifoChidMgrFreeChannelGroupHwID_IMPL(pGpu, pKernelFifo, pChidMgr, grpId)
#endif //__nvoc_kernel_fifo_h_disabled

// Look up the KernelChannelGroup registered under grpID in pChidMgr;
// NULL when KernelFifo is disabled (and, presumably, when grpID is unknown —
// confirm against the _IMPL definition).
struct KernelChannelGroup *kfifoChidMgrGetKernelChannelGroup_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline struct KernelChannelGroup *kfifoChidMgrGetKernelChannelGroup(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrGetKernelChannelGroup(pGpu, pKernelFifo, pChidMgr, grpID) kfifoChidMgrGetKernelChannelGroup_IMPL(pGpu, pKernelFifo, pChidMgr, grpID)
#endif //__nvoc_kernel_fifo_h_disabled

// Look up the KernelChannel with hardware channel ID 'ChID' in pChidMgr.
struct KernelChannel *kfifoChidMgrGetKernelChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline struct KernelChannel *kfifoChidMgrGetKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrGetKernelChannel_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled

// Fetch the CHID_MGR associated with a runlist ID (NULL in disabled builds).
CHID_MGR *kfifoGetChidMgr_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline CHID_MGR *kfifoGetChidMgr(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChidMgr(pGpu, pKernelFifo, runlistId) kfifoGetChidMgr_IMPL(pGpu, pKernelFifo, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled
1805 
// Resolve a CHID_MGR from an (engineInfoType, value) pair rather than a
// runlist ID; the result is returned through arg0.
NV_STATUS kfifoGetChidMgrFromType_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineInfoType, NvU32 value, CHID_MGR **arg0);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: asserts and reports NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoGetChidMgrFromType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineInfoType, NvU32 value, CHID_MGR **arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChidMgrFromType(pGpu, pKernelFifo, engineInfoType, value, arg0) kfifoGetChidMgrFromType_IMPL(pGpu, pKernelFifo, engineInfoType, value, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

// Convenience lookup of a channel group by (grpID, runlistID); NULL in
// disabled builds.
struct KernelChannelGroup *kfifoGetChannelGroup_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 grpID, NvU32 runlistID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline struct KernelChannelGroup *kfifoGetChannelGroup(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 grpID, NvU32 runlistID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChannelGroup(pGpu, pKernelFifo, grpID, runlistID) kfifoGetChannelGroup_IMPL(pGpu, pKernelFifo, grpID, runlistID)
#endif //__nvoc_kernel_fifo_h_disabled

// Count of channel groups currently in use GPU-wide (0 in disabled builds).
NvU32 kfifoGetChannelGroupsInUse_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetChannelGroupsInUse(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChannelGroupsInUse(pGpu, pKernelFifo) kfifoGetChannelGroupsInUse_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

// Count of channel groups in use on one specific runlist.
NvU32 kfifoGetRunlistChannelGroupsInUse_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetRunlistChannelGroupsInUse(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetRunlistChannelGroupsInUse(pGpu, pKernelFifo, runlistId) kfifoGetRunlistChannelGroupsInUse_IMPL(pGpu, pKernelFifo, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled
1849 
// Initialize pIt for iterating all kernel channels; pair with
// kfifoGetNextKernelChannel() below to walk them.
void kfifoGetChannelIterator_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: asserts; pIt is left untouched.
static inline void kfifoGetChannelIterator(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChannelIterator(pGpu, pKernelFifo, pIt) kfifoGetChannelIterator_IMPL(pGpu, pKernelFifo, pIt)
#endif //__nvoc_kernel_fifo_h_disabled

// Advance pIt and return the next KernelChannel through ppKernelChannel.
// NOTE(review): the status returned at end-of-iteration is defined by the
// _IMPL (not visible here) — check before relying on a specific error code.
NV_STATUS kfifoGetNextKernelChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, struct KernelChannel **ppKernelChannel);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetNextKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, struct KernelChannel **ppKernelChannel) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetNextKernelChannel(pGpu, pKernelFifo, pIt, ppKernelChannel) kfifoGetNextKernelChannel_IMPL(pGpu, pKernelFifo, pIt, ppKernelChannel)
#endif //__nvoc_kernel_fifo_h_disabled

// Populate an NV2080 control-call memory-info struct from a memory descriptor.
// Note: takes no OBJGPU — it operates purely on pMemDesc.
void kfifoFillMemInfo_IMPL(struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc, NV2080_CTRL_FIFO_MEM_INFO *pMemory);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoFillMemInfo(struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc, NV2080_CTRL_FIFO_MEM_INFO *pMemory) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoFillMemInfo(pKernelFifo, pMemDesc, pMemory) kfifoFillMemInfo_IMPL(pKernelFifo, pMemDesc, pMemory)
#endif //__nvoc_kernel_fifo_h_disabled

// Fill pBitMask (bitMaskSize bytes) with the allocated-channel bitmap for a
// runlist. NOTE(review): the meaning of the NvU32 return value is defined by
// the _IMPL — it is not obviously a status code; confirm before using it.
NvU32 kfifoGetAllocatedChannelMask_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, NvU32 *pBitMask, NvLength bitMaskSize);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetAllocatedChannelMask(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, NvU32 *pBitMask, NvLength bitMaskSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetAllocatedChannelMask(pGpu, pKernelFifo, runlistId, pBitMask, bitMaskSize) kfifoGetAllocatedChannelMask_IMPL(pGpu, pKernelFifo, runlistId, pBitMask, bitMaskSize)
#endif //__nvoc_kernel_fifo_h_disabled
1891 
// CHANNEL_LIST lifecycle: create/destroy a list object, append/remove a
// KernelChannel. Ownership: the list returned through arg0 by Create must be
// released with kfifoChannelListDestroy.
NV_STATUS kfifoChannelListCreate_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST **arg0);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: asserts and reports NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoChannelListCreate(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST **arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListCreate(pGpu, pKernelFifo, arg0) kfifoChannelListCreate_IMPL(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

// Destroy a CHANNEL_LIST previously created by kfifoChannelListCreate.
NV_STATUS kfifoChannelListDestroy_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST *arg0);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListDestroy(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListDestroy(pGpu, pKernelFifo, arg0) kfifoChannelListDestroy_IMPL(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

// Append channel arg0 to list arg1.
NV_STATUS kfifoChannelListAppend_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListAppend(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListAppend(pGpu, pKernelFifo, arg0, arg1) kfifoChannelListAppend_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

// Remove channel arg0 from list arg1.
NV_STATUS kfifoChannelListRemove_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListRemove(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListRemove(pGpu, pKernelFifo, arg0, arg1) kfifoChannelListRemove_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled
1935 
// NV_TRUE if any channel exists on the engines listed in arg0 (arg1 entries).
NvBool kfifoEngineListHasChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE *arg0, NvU32 arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: asserts and reports "no channels".
static inline NvBool kfifoEngineListHasChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE *arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoEngineListHasChannel(pGpu, pKernelFifo, arg0, arg1) kfifoEngineListHasChannel_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

// Context-buffer pool used for runlist buffers of the given engine type;
// NULL in disabled builds.
CTX_BUF_POOL_INFO *kfifoGetRunlistBufPool_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline CTX_BUF_POOL_INFO *kfifoGetRunlistBufPool(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetRunlistBufPool(pGpu, pKernelFifo, rmEngineType) kfifoGetRunlistBufPool_IMPL(pGpu, pKernelFifo, rmEngineType)
#endif //__nvoc_kernel_fifo_h_disabled

// Query runlist buffer parameters; results come back through arg3/arg4.
// NOTE(review): parameter roles (arg0..arg4) are generator-erased — consult
// the _IMPL definition for their meaning before calling.
NV_STATUS kfifoGetRunlistBufInfo_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvBool arg1, NvU32 arg2, NvU64 *arg3, NvU64 *arg4);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetRunlistBufInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvBool arg1, NvU32 arg2, NvU64 *arg3, NvU64 *arg4) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetRunlistBufInfo(pGpu, pKernelFifo, arg0, arg1, arg2, arg3, arg4) kfifoGetRunlistBufInfo_IMPL(pGpu, pKernelFifo, arg0, arg1, arg2, arg3, arg4)
#endif //__nvoc_kernel_fifo_h_disabled
1968 
// Register a pair of scheduling callbacks: one invoked after scheduling is
// enabled, one before it is disabled, each with its opaque data pointer.
// Unregister with kfifoRemoveSchedulingHandler using the same four arguments.
NV_STATUS kfifoAddSchedulingHandler_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: asserts and reports NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoAddSchedulingHandler(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoAddSchedulingHandler(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData) kfifoAddSchedulingHandler_IMPL(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData)
#endif //__nvoc_kernel_fifo_h_disabled

// Unregister a handler pair added by kfifoAddSchedulingHandler.
void kfifoRemoveSchedulingHandler_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoRemoveSchedulingHandler(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRemoveSchedulingHandler(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData) kfifoRemoveSchedulingHandler_IMPL(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData)
#endif //__nvoc_kernel_fifo_h_disabled

// Invoke all registered post-scheduling-enable callbacks.
NV_STATUS kfifoTriggerPostSchedulingEnableCallback_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoTriggerPostSchedulingEnableCallback(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoTriggerPostSchedulingEnableCallback(pGpu, pKernelFifo) kfifoTriggerPostSchedulingEnableCallback_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

// Invoke all registered pre-scheduling-disable callbacks.
NV_STATUS kfifoTriggerPreSchedulingDisableCallback_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoTriggerPreSchedulingDisableCallback(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoTriggerPreSchedulingDisableCallback(pGpu, pKernelFifo) kfifoTriggerPreSchedulingDisableCallback_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2011 
// System-wide maximum number of channels (0 in disabled builds).
NvU32 kfifoGetMaxChannelsInSystem_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: asserts and reports 0.
static inline NvU32 kfifoGetMaxChannelsInSystem(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxChannelsInSystem(pGpu, pKernelFifo) kfifoGetMaxChannelsInSystem_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

// System-wide maximum number of channel groups (TSGs).
NvU32 kfifoGetMaxChannelGroupsInSystem_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxChannelGroupsInSystem(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxChannelGroupsInSystem(pGpu, pKernelFifo) kfifoGetMaxChannelGroupsInSystem_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

// Fill the FIFO capability bytes into pKfifoCaps; bCapsInitialized indicates
// whether the buffer already holds caps to be merged rather than overwritten
// (NOTE(review): merge-vs-overwrite behavior is defined by the _IMPL).
void kfifoGetDeviceCaps_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU8 *pKfifoCaps, NvBool bCapsInitialized);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoGetDeviceCaps(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU8 *pKfifoCaps, NvBool bCapsInitialized) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetDeviceCaps(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized) kfifoGetDeviceCaps_IMPL(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized)
#endif //__nvoc_kernel_fifo_h_disabled

// Pushbuffer capability flags for this GPU (0 in disabled builds).
NvU32 kfifoReturnPushbufferCaps_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoReturnPushbufferCaps(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoReturnPushbufferCaps(pGpu, pKernelFifo) kfifoReturnPushbufferCaps_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2054 
// Report the aperture/attributes/allocation flags to use for runlist buffers.
// Takes only OBJGPU (no KernelFifo), hence no disabled-build stub variant.
void kfifoRunlistGetBufAllocParams_IMPL(struct OBJGPU *pGpu, NV_ADDRESS_SPACE *pAperture, NvU32 *pAttr, NvU64 *pAllocFlags);

#define kfifoRunlistGetBufAllocParams(pGpu, pAperture, pAttr, pAllocFlags) kfifoRunlistGetBufAllocParams_IMPL(pGpu, pAperture, pAttr, pAllocFlags)
// Allocate the runlist buffer memory descriptor(s) into ppMemDesc using the
// parameters obtained from kfifoRunlistGetBufAllocParams.
NV_STATUS kfifoRunlistAllocBuffers_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bSupportTsg, NV_ADDRESS_SPACE aperture, NvU32 runlistId, NvU32 attr, NvU64 allocFlags, NvU64 maxRunlistEntries, NvBool bHWRL, PMEMORY_DESCRIPTOR *ppMemDesc);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: asserts and reports NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoRunlistAllocBuffers(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bSupportTsg, NV_ADDRESS_SPACE aperture, NvU32 runlistId, NvU32 attr, NvU64 allocFlags, NvU64 maxRunlistEntries, NvBool bHWRL, PMEMORY_DESCRIPTOR *ppMemDesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistAllocBuffers(pGpu, pKernelFifo, bSupportTsg, aperture, runlistId, attr, allocFlags, maxRunlistEntries, bHWRL, ppMemDesc) kfifoRunlistAllocBuffers_IMPL(pGpu, pKernelFifo, bSupportTsg, aperture, runlistId, attr, allocFlags, maxRunlistEntries, bHWRL, ppMemDesc)
#endif //__nvoc_kernel_fifo_h_disabled

// Enumerate the engines served by a runlist: writes up to *pNumEngines
// RM_ENGINE_TYPE values into pOutEngineIds and updates *pNumEngines.
// NOTE(review): whether pNumEngines is in/out capacity is set by the _IMPL.
NV_STATUS kfifoGetEngineListForRunlist_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, RM_ENGINE_TYPE *pOutEngineIds, NvU32 *pNumEngines);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetEngineListForRunlist(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, RM_ENGINE_TYPE *pOutEngineIds, NvU32 *pNumEngines) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEngineListForRunlist(pGpu, pKernelFifo, runlistId, pOutEngineIds, pNumEngines) kfifoGetEngineListForRunlist_IMPL(pGpu, pKernelFifo, runlistId, pOutEngineIds, pNumEngines)
#endif //__nvoc_kernel_fifo_h_disabled

// RM class ID used when allocating channels on this GPU (0 in disabled builds).
NvU32 kfifoGetChannelClassId_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetChannelClassId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChannelClassId(pGpu, pKernelFifo) kfifoGetChannelClassId_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2090 
// NV_TRUE if MMU-fault engine ID arg0 identifies a PBDMA unit.
NvBool kfifoIsMmuFaultEngineIdPbdma_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: asserts and reports NV_FALSE.
static inline NvBool kfifoIsMmuFaultEngineIdPbdma(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoIsMmuFaultEngineIdPbdma(pGpu, pKernelFifo, arg0) kfifoIsMmuFaultEngineIdPbdma_IMPL(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

// Translate MMU-fault ID arg0 to a PBDMA ID, returned through arg1.
NV_STATUS kfifoGetPbdmaIdFromMmuFaultId_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvU32 *arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetPbdmaIdFromMmuFaultId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetPbdmaIdFromMmuFaultId(pGpu, pKernelFifo, arg0, arg1) kfifoGetPbdmaIdFromMmuFaultId_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

// Translate PBDMA fault ID arg0 to the RM engine type, returned through arg1.
NV_STATUS kfifoGetEngineTypeFromPbdmaFaultId_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, RM_ENGINE_TYPE *arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetEngineTypeFromPbdmaFaultId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, RM_ENGINE_TYPE *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEngineTypeFromPbdmaFaultId(pGpu, pKernelFifo, arg0, arg1) kfifoGetEngineTypeFromPbdmaFaultId_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

// Set the scheduling timeslice (microseconds) for a channel group;
// bSkipSubmit suppresses the follow-up runlist submit when NV_TRUE
// (NOTE(review): submit behavior is defined by the _IMPL — confirm).
NV_STATUS kfifoChannelGroupSetTimeslice_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelGroupSetTimeslice(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelGroupSetTimeslice(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimeslice_IMPL(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit)
#endif //__nvoc_kernel_fifo_h_disabled
2134 
// Static lookup table of guest-visible engines; the entry count is returned
// through pTableSize. No GPU/KernelFifo argument, hence no disabled stub.
const FIFO_GUEST_ENGINE_TABLE *kfifoGetGuestEngineLookupTable_IMPL(NvU32 *pTableSize);

#define kfifoGetGuestEngineLookupTable(pTableSize) kfifoGetGuestEngineLookupTable_IMPL(pTableSize)
// Number of engines driven by an engine scheduler (esched); 0 in disabled builds.
NvU32 kfifoGetNumEschedDrivenEngines_IMPL(struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// Disabled-build stub: asserts and reports 0.
static inline NvU32 kfifoGetNumEschedDrivenEngines(struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetNumEschedDrivenEngines(pKernelFifo) kfifoGetNumEschedDrivenEngines_IMPL(pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2148 
2149 #undef PRIVATE_FIELD
2150 
2151 
// Idle a set of channels identified either by the single
// (hClient, hDevice, hChannel) triple or by the parallel arrays
// clients/devices/channels of length numChannels. 'flags' and 'timeout'
// control the idle policy; bUserModeArgs indicates the NvP64 arrays come from
// user space (NOTE(review): copy-in behavior is defined by the implementation).
NV_STATUS RmIdleChannels(NvHandle hClient,
                         NvHandle hDevice,
                         NvHandle hChannel,
                         NvU32    numChannels,
                         NvP64    clients,
                         NvP64    devices,
                         NvP64    channels,
                         NvU32    flags,
                         NvU32    timeout,
                         NvBool   bUserModeArgs);
2162 
2163 #endif // _KERNELFIFO_H_
2164 
2165 #ifdef __cplusplus
2166 } // extern "C"
2167 #endif
2168 
2169 #endif // _G_KERNEL_FIFO_NVOC_H_
2170