1 #ifndef _G_KERNEL_FIFO_NVOC_H_
2 #define _G_KERNEL_FIFO_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_kernel_fifo_nvoc.h"
33 
34 #ifndef _KERNELFIFO_H_
35 #define _KERNELFIFO_H_
36 
/**************** Resource Manager Defines and Structures ******************\
*                                                                           *
* Module: KernelFifo.h                                                      *
*       Defines and structures used for the KernelFifo Object.              *
\***************************************************************************/
42 
43 #include "kernel/gpu/eng_state.h"
44 #include "kernel/gpu/gpu_halspec.h"
45 #include "kernel/gpu/fifo/channel_descendant.h"
46 #include "kernel/gpu/gpu_engine_type.h"
47 
48 #include "containers/eheap_old.h"
49 #include "containers/map.h"
50 #include "utils/nvbitvector.h"
51 #include "gpu/mem_mgr/mem_desc.h"
52 #include "nvoc/utility.h"
53 
54 #include "ctrl/ctrl2080/ctrl2080gpu.h"  // NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS
55 #include "ctrl/ctrl2080/ctrl2080fifo.h" // NV2080_CTRL_FIFO_MEM_INFO
56 #include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_*
57 #include "ctrl/ctrl906f.h"
58 
59 #include "class/clc369.h" // MMU_FAULT_BUFFER
60 
//
// NVOC forward declarations: each class referenced by this header gets an
// opaque struct tag, a reinclusion-guarded typedef, and a guarded class ID,
// so the type can be used by pointer without including its full definition.
//
struct KernelChannel;

#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__
#define __NVOC_CLASS_KernelChannel_TYPEDEF__
typedef struct KernelChannel KernelChannel;
#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelChannel
#define __nvoc_class_id_KernelChannel 0x5d8d70
#endif /* __nvoc_class_id_KernelChannel */


struct KernelChannelGroup;

#ifndef __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
#define __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
typedef struct KernelChannelGroup KernelChannelGroup;
#endif /* __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelChannelGroup
#define __nvoc_class_id_KernelChannelGroup 0xec6de1
#endif /* __nvoc_class_id_KernelChannelGroup */


struct KernelSchedMgr;

#ifndef __NVOC_CLASS_KernelSchedMgr_TYPEDEF__
#define __NVOC_CLASS_KernelSchedMgr_TYPEDEF__
typedef struct KernelSchedMgr KernelSchedMgr;
#endif /* __NVOC_CLASS_KernelSchedMgr_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelSchedMgr
#define __nvoc_class_id_KernelSchedMgr 0xea0970
#endif /* __nvoc_class_id_KernelSchedMgr */



// Opaque handle to a host-side vGPU device; full definition lives elsewhere.
struct HOST_VGPU_DEVICE;
99 
// Pre-Ampere runlist ID to pass to kfifoGetChidMgr
#define CHIDMGR_RUNLIST_ID_LEGACY  0

// Sentinel value: no valid channel ID
#define INVALID_CHID               0xFFFFFFFF

// Sentinel value: no valid runlist ID
#define INVALID_RUNLIST_ID         0xFFFFFFFFU

/*! We use 32-bit process ID for now */
#define KERNEL_PID (0xFFFFFFFFULL)

/*! cap at 64 for now, can extend when needed */
#define MAX_NUM_RUNLISTS           NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_ID
#define NUM_BUFFERS_PER_RUNLIST   (NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS)
// One valid bit per possible CHID_MGR / runlist ID (see KernelFifo::chidMgrValid)
MAKE_BITVECTOR(CHID_MGR_VALID_BIT_VECTOR, MAX_NUM_RUNLISTS);

//
// Matches GET_PUSHBUFFER_CAPABILITIES bit positions
//
#define VID_PB_ALLOWED                      0x1
#define PCI_PB_ALLOWED                      0x2

// Exclusive upper bound on PBDMA fault IDs, from the width of the HW engine-ID field
#define PBDMA_FAULT_MAX_ID  (0x1 << DRF_SIZE_MW(NVC369_BUF_ENTRY_ENGINE_ID))
MAKE_BITVECTOR(PBDMA_ID_BITVECTOR, PBDMA_FAULT_MAX_ID);
123 
124 /*!
125  * USERD isolation domain
126  *
127  * USERD allocated by different domains should not be put into the same physical page.
128  * This provides the basic security isolation because a physical page is the unit of
129  * granularity at which OS can provide isolation between processes.
130  *
131  *    GUEST_USER:     USERD allocated by guest user process
132  *    GUEST_KERNEL:   USERD allocated by guest kernel process
133  *    GUEST_INSECURE: USERD allocated by guest/kernel process,
134  *                    INSECURE means there is no isolation between guest user and guest kernel
135  *    HOST_USER:      USERD allocated by host user process
136  *    HOST_KERNEL:    USERD allocated by host kernel process
137  *
138  * Please refer to RM_USERD_Isolation wiki for more details
139  */
typedef enum _def_fifo_isolation_domain
{
    GUEST_USER = 0x0,   // USERD allocated by a guest user process
    GUEST_KERNEL,       // USERD allocated by a guest kernel process
    GUEST_INSECURE,     // guest allocation with no user/kernel isolation
    HOST_USER,          // USERD allocated by a host user process
    HOST_KERNEL         // USERD allocated by a host kernel process
} FIFO_ISOLATION_DOMAIN;
148 
149 /*!
150  * USERD isolation ID
151  *
152  * In vGPU environment, sub process means the guest user/kernel process running within a single VM.
153  * It also refers to any sub process (or sub-sub process) within a parent process.
154  *
155  * Please refer to Resource Server for more details about sub process concept
156  */
typedef struct _def_fifo_isolation_id
{
    FIFO_ISOLATION_DOMAIN domain;        // who allocated the USERD (see enum above)
    NvU64                 processID;     // owning process ID
    NvU64                 subProcessID;  // sub-process ID within the parent (see comment above)
} FIFO_ISOLATIONID, *PFIFO_ISOLATIONID;
163 
/*! Used for calls to kfifoChannelGetFifoContextMemDesc */
typedef enum
{
    FIFO_CTX_RAMFC = 0,       // select the RAMFC memory descriptor
    FIFO_CTX_INST_BLOCK = 1,  // select the instance block memory descriptor
} FIFO_CTX;
170 
/*! Decoded data describing an MMU fault (exception) event. */
typedef struct
{
    NvU32  addrLo;         // faulting address, low 32 bits
    NvU32  addrHi;         // faulting address, high 32 bits
    NvU32  faultType;      // fault type code (HW-defined; exact enum not visible here)
    NvU32  clientId;       // faulting client ID
    NvBool bGpc;           // NV_TRUE when gpcId below is meaningful — TODO confirm
    NvU32  gpcId;          // GPC of the faulting client (see bGpc)
    NvU32  accessType;     // access type code (read/write/etc. presumably; confirm)
    NvU32  faultEngineId;  // engine ID reported with the fault
    NvU64  faultedShaderProgramVA[NV906F_CTRL_MMU_FAULT_SHADER_TYPES];
} FIFO_MMU_EXCEPTION_DATA;
183 
/*! Used for calls to kchannelAllocHwID */
typedef enum
{
    CHANNEL_HW_ID_ALLOC_MODE_GROW_DOWN,  // allocate from the top of the ID space downward
    CHANNEL_HW_ID_ALLOC_MODE_GROW_UP,    // allocate from the bottom of the ID space upward
    CHANNEL_HW_ID_ALLOC_MODE_PROVIDED,   // caller supplies the exact ID to reserve
} CHANNEL_HW_ID_ALLOC_MODE;
191 
/*! Bitfield-based allocator state for HW IDs (e.g. channel-group IDs). */
typedef struct _fifo_hw_id
{
    /*!
     * Bitfield of HW IDs. 1 = reserved, 0 = available.
     * A reserved ID may not be allocated but it can't be used for any
     * future allocations.
     */
    NvU32 *pHwIdInUse;

    /*!
     * Number of elements in pHwIdInUse
     */
    NvU32 hwIdInUseSz;
} FIFO_HW_ID;
206 
// Intrusive map of KernelChannelGroup nodes (see containers/map.h)
DECLARE_INTRUSIVE_MAP(KernelChannelGroupMap);

/*! Channel ID manager for a single runlist (see runlistId below). */
typedef struct
{
    /*!
     * Runlist managed by this CHID_MGR.
     */
    NvU32 runlistId;

    /*!
     * Heap to manage pFifoData for all channels.
     */
    OBJEHEAP *pFifoDataHeap;

    /*!
     * Global ChID heap - manages channel IDs and isolation IDs. In non-SRIOV
     * systems, allocations/frees in this heap mirror those in pFifoDataHeap.
     * When SRIOV is enabled, we reserve/free channel IDs for the guest in
     * chunks from this heap when the VM starts/shuts down. ChID allocations
     * during channel construction from the guest ChID space are from the
     * virtual ChID heap for that guest.
     */
    OBJEHEAP *pGlobalChIDHeap;

    /*!
     * Until FIFO code for SR-IOV moves to guest RM, this virtual ChID heap
     * manages channel IDs allocated to a guest.
     */
    OBJEHEAP **ppVirtualChIDHeap;

    /*!
     * Number of channels managed by this CHID_MGR
     */
    NvU32 numChannels;

    /*! HW ID allocator for channel groups on this runlist. */
    FIFO_HW_ID  channelGrpMgr;

    /*!
     * Channel group pointers
     */
    KernelChannelGroupMap *pChanGrpTree;

} CHID_MGR;
250 
/*! Typedef for the @ref channel_iterator structure */
typedef struct channel_iterator CHANNEL_ITERATOR;
typedef struct channel_iterator *PCHANNEL_ITERATOR;

/*!
 * Generic Linked-list of Channel pointers to be used where ever multiple channels
 * are managed.
 * TODO: Remove as part of Jira CORERM-2658
 */
typedef struct _channel_node
{
    struct KernelChannel *pKernelChannel;  // channel carried by this node
    struct _channel_node *pNext;           // next node, or NULL at the tail
} CHANNEL_NODE, *PCHANNEL_NODE;

/*!
 * This structure represents an iterator for all channels.
 * It is created by function @ref kfifoGetChannelIterator.
 */
struct channel_iterator
{
    NvU32 numChannels;          // total channels covered by this iteration
    NvU32 numRunlists;          // number of runlists covered by this iteration
    NvU32 physicalChannelID;    // current channel ID position
    NvU32 runlistId;            // current runlist position
    EMEMBLOCK *pFifoDataBlock;  // current block in the fifo data heap
    CHANNEL_NODE channelNode;   // scratch node used while walking channel lists
};
279 
/*!
 * Keys for the per-engine info table (see FIFO_ENGINE_LIST::engineData),
 * used with kfifoEngineInfoXlate to translate between engine ID spaces.
 */
typedef enum
{
    /* *************************************************************************
     * Bug 3820969
     * THINK BEFORE CHANGING ENUM ORDER HERE.
     * VGPU-guest uses this same ordering. Because this enum is not versioned,
     * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
     * ************************************************************************/

    // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
    ENGINE_INFO_TYPE_ENG_DESC = 0,

    // HW engine ID
    ENGINE_INFO_TYPE_FIFO_TAG,

    // RM_ENGINE_TYPE_*
    ENGINE_INFO_TYPE_RM_ENGINE_TYPE,

    //
    // runlist id (meaning varies by GPU)
    // Valid only for Esched-driven engines
    //
    ENGINE_INFO_TYPE_RUNLIST,

    // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
    ENGINE_INFO_TYPE_MMU_FAULT_ID,

    // ROBUST_CHANNEL_*
    ENGINE_INFO_TYPE_RC_MASK,

    // Reset Bit Position. On Ampere, only valid if not _INVALID
    ENGINE_INFO_TYPE_RESET,

    // Interrupt Bit Position
    ENGINE_INFO_TYPE_INTR,

    // log2(MC_ENGINE_*)
    ENGINE_INFO_TYPE_MC,

    // The DEV_TYPE_ENUM for this engine
    ENGINE_INFO_TYPE_DEV_TYPE_ENUM,

    // The particular instance of this engine type
    ENGINE_INFO_TYPE_INSTANCE_ID,

    //
    // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
    // Valid only for Esched-driven engines
    //
    ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,

    //
    // If this entry is a host-driven engine.
    // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
    //
    ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,

    //
    // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
    // Valid only for Esched-driven engines
    //
    ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,

    //
    // The base address for this engine's NV_CHRAM registers. Valid only on
    // Ampere+
    //
    // Valid only for Esched-driven engines
    //
    ENGINE_INFO_TYPE_CHRAM_PRI_BASE,

    // Used for iterating the engine info table by the index passed.
    ENGINE_INFO_TYPE_INVALID,

    // Input-only parameter for kfifoEngineInfoXlate.
    ENGINE_INFO_TYPE_PBDMA_ID

    /* *************************************************************************
     * Bug 3820969
     * THINK BEFORE CHANGING ENUM ORDER HERE.
     * VGPU-guest uses this same ordering. Because this enum is not versioned,
     * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
     * ************************************************************************/
} ENGINE_INFO_TYPE;
364 
// Maximum number of pbdma IDs for a given engine
#define FIFO_ENGINE_MAX_NUM_PBDMA       2

// Maximum size (including null terminator) for an engine name
#define FIFO_ENGINE_NAME_MAX_SIZE       16

/*! One row of the engine info table, indexed by ENGINE_INFO_TYPE keys. */
typedef struct _def_fifo_engine_list
{
    NvU32 engineData[ENGINE_INFO_TYPE_INVALID];      // one value per ENGINE_INFO_TYPE key
    NvU32 pbdmaIds[FIFO_ENGINE_MAX_NUM_PBDMA];       // PBDMA IDs serving this engine
    NvU32 pbdmaFaultIds[FIFO_ENGINE_MAX_NUM_PBDMA];  // fault IDs of those PBDMAs
    NvU32 numPbdmas;                                 // valid entries in the two arrays above
    char engineName[FIFO_ENGINE_NAME_MAX_SIZE];      // NUL-terminated engine name
} FIFO_ENGINE_LIST, *PFIFO_ENGINE_LIST;
379 
/*! Maps a guest-visible NV2080 engine type to its MC index. */
typedef struct
{
    NvU32 nv2080EngineType;  // NV2080_CTRL-space engine type
    NvU32 mcIdx;             // corresponding MC engine index — TODO confirm exact MC_ENGINE_IDX semantics
} FIFO_GUEST_ENGINE_TABLE;
385 
/*! Aggregated engine/runlist topology information owned by KernelFifo. */
typedef struct _def_engine_info
{
    NvU32 maxNumPbdmas;                           // upper bound on PBDMA count — TODO confirm scope (per engine vs total)
    PBDMA_ID_BITVECTOR  validEngineIdsForPbdmas;  // set bit => engine ID is a valid PBDMA fault ID
    //
    // The highest runlist ID. Valid runlist IDs are < maxNumRunlists
    // However, the entire [0, maxNumRunlists) range is not valid. There are
    // missing runlist IDs in this range.
    //
    NvU32 maxNumRunlists;
    //
    // Multiple engines may have the same runlist ID. This is the total number
    // of engines with a runlist which is equal to the number of Esched driven
    // engines and does not include the SW engine.
    //
    NvU32 numRunlists;
    NvU32 engineInfoListSize;           // number of entries in engineInfoList
    FIFO_ENGINE_LIST *engineInfoList;   // engine info table, one row per engine
} ENGINE_INFO;
405 
// Fully qualified instance block address
typedef struct
{
    NvU64   address;        // Physical address or IOVA (unshifted)
    NvU32   aperture;       // INST_BLOCK_APERTURE
    NvU32   gfid;           // Valid in PF when SR-IOV is enabled
} INST_BLOCK_DESC;

/*! Head/tail pointers for a singly-linked list of CHANNEL_NODEs. */
typedef struct _channel_list
{
    CHANNEL_NODE *pHead;
    CHANNEL_NODE *pTail;
} CHANNEL_LIST, *PCHANNEL_LIST;
419 
/*! Bookkeeping for the preallocated USERD region and its BAR1 mapping. */
typedef struct _def_preallocated_userd_info
{
    NvU32      userdAperture;            // default aperture for USERD
    NvU32      userdAttr;                // default attr for USERD
    MEMORY_DESCRIPTOR *userdPhysDesc[NV_MAX_SUBDEVICES];    // <a> base phys addr of contiguous USERD
    NvU64      userdBar1MapStartOffset;  // <b> base offset of <a>'s BAR1 map
    NvU32      userdBar1MapSize;         // <c> sizeof <b>'s map
    NvU8      *userdBar1CpuPtr;          // <d> cpu map of <b>
    NvU32      userdBar1RefMask;         // mask of GPUs referencing userD
} PREALLOCATED_USERD_INFO;
430 
431 
// Scheduling enable/disable handlers
typedef NV_STATUS (*PFifoSchedulingHandler)(OBJGPU *pGpu, void *pData);

/*! A registered callback + argument run around scheduling enable/disable. */
typedef struct FifoSchedulingHandlerEntry
{
    PFifoSchedulingHandler pCallback;  // handler to invoke
    void *pCallbackParam;              // opaque argument passed as pData
    NvBool bHandled;                   // presumably marks entries already processed — confirm against caller
} FifoSchedulingHandlerEntry;

MAKE_LIST(FifoSchedulingHandlerEntryList, FifoSchedulingHandlerEntry);
442 
443 //
444 // This define indicates legacy pdb in instance block.
445 //
446 #define FIFO_PDB_IDX_BASE             (0xFFFFFFFF)
447 
448 //
449 // Aperture defines must match NV_MMU_PTE_APERTURE HW defines
450 // We do not support instance memory in peer (1).
451 //
452 #define INST_BLOCK_APERTURE_VIDEO_MEMORY                     0x00000000
453 #define INST_BLOCK_APERTURE_RESERVED                         0x00000001
454 #define INST_BLOCK_APERTURE_SYSTEM_COHERENT_MEMORY           0x00000002
455 #define INST_BLOCK_APERTURE_SYSTEM_NON_COHERENT_MEMORY       0x00000003
456 
457 // Macro to verify HW and class defines are compatible
458 #define VERIFY_INST_BLOCK_APERTURE(vid, coh, ncoh) \
459     ct_assert((vid) == INST_BLOCK_APERTURE_VIDEO_MEMORY); \
460     ct_assert((coh) == INST_BLOCK_APERTURE_SYSTEM_COHERENT_MEMORY); \
461     ct_assert((ncoh) == INST_BLOCK_APERTURE_SYSTEM_NON_COHERENT_MEMORY)
462 
463 //
464 // The actual GPU object definition
465 //
466 #ifdef NVOC_KERNEL_FIFO_H_PRIVATE_ACCESS_ALLOWED
467 #define PRIVATE_FIELD(x) x
468 #else
469 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
470 #endif
471 struct KernelFifo {
472     const struct NVOC_RTTI *__nvoc_rtti;
473     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
474     struct Object *__nvoc_pbase_Object;
475     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
476     struct KernelFifo *__nvoc_pbase_KernelFifo;
477     NV_STATUS (*__kfifoConstructEngine__)(struct OBJGPU *, struct KernelFifo *, ENGDESCRIPTOR);
478     NV_STATUS (*__kfifoStateInitLocked__)(struct OBJGPU *, struct KernelFifo *);
479     void (*__kfifoStateDestroy__)(struct OBJGPU *, struct KernelFifo *);
480     NV_STATUS (*__kfifoStatePostLoad__)(struct OBJGPU *, struct KernelFifo *, NvU32);
481     NV_STATUS (*__kfifoStatePreUnload__)(struct OBJGPU *, struct KernelFifo *, NvU32);
482     NV_STATUS (*__kfifoCheckChannelAllocAddrSpaces__)(struct KernelFifo *, NV_ADDRESS_SPACE, NV_ADDRESS_SPACE, NV_ADDRESS_SPACE);
483     NvU64 (*__kfifoGetMmioUsermodeOffset__)(struct OBJGPU *, struct KernelFifo *, NvBool);
484     NvU64 (*__kfifoGetMmioUsermodeSize__)(struct OBJGPU *, struct KernelFifo *, NvBool);
485     NvU32 (*__kfifoChannelGroupGetLocalMaxSubcontext__)(struct OBJGPU *, struct KernelFifo *, struct KernelChannelGroup *, NvBool);
486     void (*__kfifoGetCtxBufferMapFlags__)(struct OBJGPU *, struct KernelFifo *, NvU32, NvU32 *);
487     NV_STATUS (*__kfifoEngineInfoXlate__)(struct OBJGPU *, struct KernelFifo *, ENGINE_INFO_TYPE, NvU32, ENGINE_INFO_TYPE, NvU32 *);
488     NV_STATUS (*__kfifoGenerateWorkSubmitToken__)(struct OBJGPU *, struct KernelFifo *, struct KernelChannel *, NvU32 *, NvBool);
489     NV_STATUS (*__kfifoUpdateUsermodeDoorbell__)(struct OBJGPU *, struct KernelFifo *, NvU32, NvU32);
490     NvU32 (*__kfifoRunlistGetBaseShift__)(struct KernelFifo *);
491     NvU32 (*__kfifoGetMaxCeChannelGroups__)(struct OBJGPU *, struct KernelFifo *);
492     NV_STATUS (*__kfifoStateLoad__)(POBJGPU, struct KernelFifo *, NvU32);
493     NV_STATUS (*__kfifoStateUnload__)(POBJGPU, struct KernelFifo *, NvU32);
494     NV_STATUS (*__kfifoStatePreLoad__)(POBJGPU, struct KernelFifo *, NvU32);
495     NV_STATUS (*__kfifoStatePostUnload__)(POBJGPU, struct KernelFifo *, NvU32);
496     NV_STATUS (*__kfifoStateInitUnlocked__)(POBJGPU, struct KernelFifo *);
497     void (*__kfifoInitMissing__)(POBJGPU, struct KernelFifo *);
498     NV_STATUS (*__kfifoStatePreInitLocked__)(POBJGPU, struct KernelFifo *);
499     NV_STATUS (*__kfifoStatePreInitUnlocked__)(POBJGPU, struct KernelFifo *);
500     NvBool (*__kfifoIsPresent__)(POBJGPU, struct KernelFifo *);
501     struct KernelSchedMgr *pKernelSchedMgr;
502     CHID_MGR **ppChidMgr;
503     NvU32 numChidMgrs;
504     union CHID_MGR_VALID_BIT_VECTOR chidMgrValid;
505     ENGINE_INFO engineInfo;
506     PREALLOCATED_USERD_INFO userdInfo;
507     NvU32 maxSubcontextCount;
508     FifoSchedulingHandlerEntryList postSchedulingEnableHandlerList;
509     FifoSchedulingHandlerEntryList preSchedulingDisableHandlerList;
510     NvBool bUseChidHeap;
511     NvBool bUsePerRunlistChram;
512     NvBool bDisableChidIsolation;
513     NvBool bIsPerRunlistChramSupportedInHw;
514     NvBool bHostEngineExpansion;
515     NvBool bHostHasLbOverflow;
516     NvBool bSubcontextSupported;
517     NvBool bMixedInstmemApertureDefAllowed;
518     NvBool bIsZombieSubctxWarEnabled;
519     NvBool bIsSchedSupported;
520     NvBool bWddmInterleavingPolicyEnabled;
521     NvBool bUserdInSystemMemory;
522     NvBool bUserdMapDmaSupported;
523     NvBool bPerRunlistChramOverride;
524     NvBool bNumChannelsOverride;
525     NvU32 numChannelsOverride;
526     NvBool bInstProtectedMem;
527     NvU32 InstAttr;
528     const NV_ADDRESS_SPACE *pInstAllocList;
529     MEMORY_DESCRIPTOR *pDummyPageMemDesc;
530     CTX_BUF_POOL_INFO *pRunlistBufPool[62];
531     MEMORY_DESCRIPTOR ***pppRunlistBufMemDesc;
532 };
533 
#ifndef __NVOC_CLASS_KernelFifo_TYPEDEF__
#define __NVOC_CLASS_KernelFifo_TYPEDEF__
typedef struct KernelFifo KernelFifo;
#endif /* __NVOC_CLASS_KernelFifo_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelFifo
#define __nvoc_class_id_KernelFifo 0xf3e155
#endif /* __nvoc_class_id_KernelFifo */

// NVOC class descriptor for KernelFifo (defined in the generated .c file)
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFifo;

// Upcast: reach the KernelFifo view of an object that embeds one
#define __staticCast_KernelFifo(pThis) \
    ((pThis)->__nvoc_pbase_KernelFifo)

#ifdef __nvoc_kernel_fifo_h_disabled
#define __dynamicCast_KernelFifo(pThis) ((KernelFifo*)NULL)
#else //__nvoc_kernel_fifo_h_disabled
// Downcast with runtime RTTI check; evaluates to NULL on type mismatch
#define __dynamicCast_KernelFifo(pThis) \
    ((KernelFifo*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelFifo)))
#endif //__nvoc_kernel_fifo_h_disabled

// PDB property forwarding to the OBJENGSTATE base class
#define PDB_PROP_KFIFO_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_KFIFO_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING

// Object construction entry points (generated)
NV_STATUS __nvoc_objCreateDynamic_KernelFifo(KernelFifo**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelFifo(KernelFifo**, Dynamic*, NvU32);
#define __objCreate_KernelFifo(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelFifo((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
563 
//
// DISPATCH wrappers: route each kfifo* virtual call through the object's
// NVOC method table; the _HAL aliases resolve to the same dispatch entry.
// Fix: macro parameter 'runlisId' was misspelled in the
// kfifoUpdateUsermodeDoorbell wrappers; renamed to 'runlistId' (pure
// pass-through parameter, so expansion behavior is unchanged).
//
#define kfifoConstructEngine(pGpu, pKernelFifo, engDesc) kfifoConstructEngine_DISPATCH(pGpu, pKernelFifo, engDesc)
#define kfifoStateInitLocked(pGpu, pKernelFifo) kfifoStateInitLocked_DISPATCH(pGpu, pKernelFifo)
#define kfifoStateDestroy(pGpu, pKernelFifo) kfifoStateDestroy_DISPATCH(pGpu, pKernelFifo)
#define kfifoStatePostLoad(pGpu, pKernelFifo, flags) kfifoStatePostLoad_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStatePostLoad_HAL(pGpu, pKernelFifo, flags) kfifoStatePostLoad_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStatePreUnload(pGpu, pKernelFifo, flags) kfifoStatePreUnload_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStatePreUnload_HAL(pGpu, pKernelFifo, flags) kfifoStatePreUnload_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoCheckChannelAllocAddrSpaces(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace) kfifoCheckChannelAllocAddrSpaces_DISPATCH(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace)
#define kfifoCheckChannelAllocAddrSpaces_HAL(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace) kfifoCheckChannelAllocAddrSpaces_DISPATCH(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace)
#define kfifoGetMmioUsermodeOffset(pGpu, pKernelFifo, arg0) kfifoGetMmioUsermodeOffset_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetMmioUsermodeOffset_HAL(pGpu, pKernelFifo, arg0) kfifoGetMmioUsermodeOffset_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetMmioUsermodeSize(pGpu, pKernelFifo, arg0) kfifoGetMmioUsermodeSize_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetMmioUsermodeSize_HAL(pGpu, pKernelFifo, arg0) kfifoGetMmioUsermodeSize_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoChannelGroupGetLocalMaxSubcontext(pGpu, pKernelFifo, arg0, arg1) kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(pGpu, pKernelFifo, arg0, arg1)
#define kfifoChannelGroupGetLocalMaxSubcontext_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(pGpu, pKernelFifo, arg0, arg1)
#define kfifoGetCtxBufferMapFlags(pGpu, pKernelFifo, engine, pFlags) kfifoGetCtxBufferMapFlags_DISPATCH(pGpu, pKernelFifo, engine, pFlags)
#define kfifoGetCtxBufferMapFlags_HAL(pGpu, pKernelFifo, engine, pFlags) kfifoGetCtxBufferMapFlags_DISPATCH(pGpu, pKernelFifo, engine, pFlags)
#define kfifoEngineInfoXlate(pGpu, pKernelFifo, inType, inVal, outType, pOutVal) kfifoEngineInfoXlate_DISPATCH(pGpu, pKernelFifo, inType, inVal, outType, pOutVal)
#define kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, inType, inVal, outType, pOutVal) kfifoEngineInfoXlate_DISPATCH(pGpu, pKernelFifo, inType, inVal, outType, pOutVal)
#define kfifoGenerateWorkSubmitToken(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost) kfifoGenerateWorkSubmitToken_DISPATCH(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost)
#define kfifoGenerateWorkSubmitToken_HAL(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost) kfifoGenerateWorkSubmitToken_DISPATCH(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost)
#define kfifoUpdateUsermodeDoorbell(arg0, arg1, workSubmitToken, runlistId) kfifoUpdateUsermodeDoorbell_DISPATCH(arg0, arg1, workSubmitToken, runlistId)
#define kfifoUpdateUsermodeDoorbell_HAL(arg0, arg1, workSubmitToken, runlistId) kfifoUpdateUsermodeDoorbell_DISPATCH(arg0, arg1, workSubmitToken, runlistId)
#define kfifoRunlistGetBaseShift(pKernelFifo) kfifoRunlistGetBaseShift_DISPATCH(pKernelFifo)
#define kfifoRunlistGetBaseShift_HAL(pKernelFifo) kfifoRunlistGetBaseShift_DISPATCH(pKernelFifo)
#define kfifoGetMaxCeChannelGroups(pGpu, pKernelFifo) kfifoGetMaxCeChannelGroups_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetMaxCeChannelGroups_HAL(pGpu, pKernelFifo) kfifoGetMaxCeChannelGroups_DISPATCH(pGpu, pKernelFifo)
#define kfifoStateLoad(pGpu, pEngstate, arg0) kfifoStateLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kfifoStateUnload(pGpu, pEngstate, arg0) kfifoStateUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kfifoStatePreLoad(pGpu, pEngstate, arg0) kfifoStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kfifoStatePostUnload(pGpu, pEngstate, arg0) kfifoStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kfifoStateInitUnlocked(pGpu, pEngstate) kfifoStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kfifoInitMissing(pGpu, pEngstate) kfifoInitMissing_DISPATCH(pGpu, pEngstate)
#define kfifoStatePreInitLocked(pGpu, pEngstate) kfifoStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define kfifoStatePreInitUnlocked(pGpu, pEngstate) kfifoStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kfifoIsPresent(pGpu, pEngstate) kfifoIsPresent_DISPATCH(pGpu, pEngstate)
// GM107+ HAL implementation; see the generated .c for the body.
NV_STATUS kfifoConstructHal_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when KernelFifo is compiled out: asserts and fails.
static inline NV_STATUS kfifoConstructHal(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoConstructHal(pGpu, pKernelFifo) kfifoConstructHal_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoConstructHal_HAL(pGpu, pKernelFifo) kfifoConstructHal(pGpu, pKernelFifo)

// '_56cd7a' HAL variant: setting the timeslice is a no-op that returns NV_OK.
static inline NV_STATUS kfifoChannelGroupSetTimesliceSched_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when KernelFifo is compiled out: asserts and fails.
static inline NV_STATUS kfifoChannelGroupSetTimesliceSched(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelGroupSetTimesliceSched(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimesliceSched_56cd7a(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoChannelGroupSetTimesliceSched_HAL(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimesliceSched(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit)
629 
// Kernel-RM HAL variant; see the implementation .c for the body.
NvU32 kfifoRunlistQueryNumChannels_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when KernelFifo is compiled out: asserts and returns 0.
static inline NvU32 kfifoRunlistQueryNumChannels(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistQueryNumChannels(pGpu, pKernelFifo, runlistId) kfifoRunlistQueryNumChannels_KERNEL(pGpu, pKernelFifo, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistQueryNumChannels_HAL(pGpu, pKernelFifo, runlistId) kfifoRunlistQueryNumChannels(pGpu, pKernelFifo, runlistId)

// Kernel-RM HAL variant; see the implementation .c for the body.
NV_STATUS kfifoIdleChannelsPerDevice_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvHandle *phClients, NvHandle *phDevices, NvHandle *phChannels, NvU32 numChannels, NvU32 flags, NvU32 timeout);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when KernelFifo is compiled out: asserts and fails.
static inline NV_STATUS kfifoIdleChannelsPerDevice(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvHandle *phClients, NvHandle *phDevices, NvHandle *phChannels, NvU32 numChannels, NvU32 flags, NvU32 timeout) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoIdleChannelsPerDevice(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout) kfifoIdleChannelsPerDevice_KERNEL(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoIdleChannelsPerDevice_HAL(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout) kfifoIdleChannelsPerDevice(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout)
657 
// GV100+ HAL variant; see the implementation .c for the body.
NvU64 kfifoChannelGroupGetDefaultTimeslice_GV100(struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when KernelFifo is compiled out: asserts and returns 0.
static inline NvU64 kfifoChannelGroupGetDefaultTimeslice(struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelGroupGetDefaultTimeslice(pKernelFifo) kfifoChannelGroupGetDefaultTimeslice_GV100(pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoChannelGroupGetDefaultTimeslice_HAL(pKernelFifo) kfifoChannelGroupGetDefaultTimeslice(pKernelFifo)

// '_4a4dee' HAL variant: no minimum runlist timeslice; always returns 0.
static inline NvU64 kfifoRunlistGetMinTimeSlice_4a4dee(struct KernelFifo *pKernelFifo) {
    return 0;
}


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when KernelFifo is compiled out: asserts and returns 0.
static inline NvU64 kfifoRunlistGetMinTimeSlice(struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistGetMinTimeSlice(pKernelFifo) kfifoRunlistGetMinTimeSlice_4a4dee(pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistGetMinTimeSlice_HAL(pKernelFifo) kfifoRunlistGetMinTimeSlice(pKernelFifo)
687 
// GM107+ HAL variant; all five out-parameters are filled by the implementation .c.
NV_STATUS kfifoGetInstMemInfo_GM107(struct KernelFifo *pKernelFifo, NvU64 *pSize, NvU64 *pAlignment, NvBool *pbInstProtectedMem, NvU32 *pInstAttr, const NV_ADDRESS_SPACE **ppInstAllocList);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when KernelFifo is compiled out: asserts and fails.
static inline NV_STATUS kfifoGetInstMemInfo(struct KernelFifo *pKernelFifo, NvU64 *pSize, NvU64 *pAlignment, NvBool *pbInstProtectedMem, NvU32 *pInstAttr, const NV_ADDRESS_SPACE **ppInstAllocList) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetInstMemInfo(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList) kfifoGetInstMemInfo_GM107(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetInstMemInfo_HAL(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList) kfifoGetInstMemInfo(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList)

// GM107+ HAL variant; see the implementation .c for the body.
void kfifoGetInstBlkSizeAlign_GM107(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pShift);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when KernelFifo is compiled out: asserts; out-params untouched.
static inline void kfifoGetInstBlkSizeAlign(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pShift) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetInstBlkSizeAlign(pKernelFifo, pSize, pShift) kfifoGetInstBlkSizeAlign_GM107(pKernelFifo, pSize, pShift)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetInstBlkSizeAlign_HAL(pKernelFifo, pSize, pShift) kfifoGetInstBlkSizeAlign(pKernelFifo, pSize, pShift)
714 
715 NvU32 kfifoGetDefaultRunlist_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType);
716 
717 
718 #ifdef __nvoc_kernel_fifo_h_disabled
719 static inline NvU32 kfifoGetDefaultRunlist(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType) {
720     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
721     return 0;
722 }
723 #else //__nvoc_kernel_fifo_h_disabled
724 #define kfifoGetDefaultRunlist(pGpu, pKernelFifo, rmEngineType) kfifoGetDefaultRunlist_GM107(pGpu, pKernelFifo, rmEngineType)
725 #endif //__nvoc_kernel_fifo_h_disabled
726 
727 #define kfifoGetDefaultRunlist_HAL(pGpu, pKernelFifo, rmEngineType) kfifoGetDefaultRunlist(pGpu, pKernelFifo, rmEngineType)
728 
// kfifoValidateSCGTypeAndRunqueue: check that an SCG type / runqueue pair is
// legal (GP102 HAL; stub returns NV_FALSE when KernelFifo is compiled out).
NvBool kfifoValidateSCGTypeAndRunqueue_GP102(struct KernelFifo *pKernelFifo, NvU32 scgType, NvU32 runqueue);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoValidateSCGTypeAndRunqueue(struct KernelFifo *pKernelFifo, NvU32 scgType, NvU32 runqueue) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoValidateSCGTypeAndRunqueue(pKernelFifo, scgType, runqueue) kfifoValidateSCGTypeAndRunqueue_GP102(pKernelFifo, scgType, runqueue)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoValidateSCGTypeAndRunqueue_HAL(pKernelFifo, scgType, runqueue) kfifoValidateSCGTypeAndRunqueue(pKernelFifo, scgType, runqueue)

// kfifoValidateEngineAndRunqueue: check that an engine descriptor / runqueue
// pair is legal (GP102 HAL; NV_FALSE stub when disabled).
NvBool kfifoValidateEngineAndRunqueue_GP102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 runqueue);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoValidateEngineAndRunqueue(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 runqueue) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoValidateEngineAndRunqueue(pGpu, pKernelFifo, engDesc, runqueue) kfifoValidateEngineAndRunqueue_GP102(pGpu, pKernelFifo, engDesc, runqueue)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoValidateEngineAndRunqueue_HAL(pGpu, pKernelFifo, engDesc, runqueue) kfifoValidateEngineAndRunqueue(pGpu, pKernelFifo, engDesc, runqueue)

// kfifoValidateEngineAndSubctxType: check that an engine descriptor /
// subcontext type pair is legal (GP102 HAL; NV_FALSE stub when disabled).
NvBool kfifoValidateEngineAndSubctxType_GP102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 subctxType);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoValidateEngineAndSubctxType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 subctxType) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoValidateEngineAndSubctxType(pGpu, pKernelFifo, engDesc, subctxType) kfifoValidateEngineAndSubctxType_GP102(pGpu, pKernelFifo, engDesc, subctxType)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoValidateEngineAndSubctxType_HAL(pGpu, pKernelFifo, engDesc, subctxType) kfifoValidateEngineAndSubctxType(pGpu, pKernelFifo, engDesc, subctxType)
770 
// kfifoRmctrlGetWorkSubmitToken: obtain the work-submit token for a channel
// identified by client/channel handles (GV100 HAL; error stub when disabled).
NV_STATUS kfifoRmctrlGetWorkSubmitToken_GV100(struct KernelFifo *pKernelFifo, NvHandle hClient, NvHandle hChannel, NvU32 *pWorkSubmitToken);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoRmctrlGetWorkSubmitToken(struct KernelFifo *pKernelFifo, NvHandle hClient, NvHandle hChannel, NvU32 *pWorkSubmitToken) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRmctrlGetWorkSubmitToken(pKernelFifo, hClient, hChannel, pWorkSubmitToken) kfifoRmctrlGetWorkSubmitToken_GV100(pKernelFifo, hClient, hChannel, pWorkSubmitToken)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRmctrlGetWorkSubmitToken_HAL(pKernelFifo, hClient, hChannel, pWorkSubmitToken) kfifoRmctrlGetWorkSubmitToken(pKernelFifo, hClient, hChannel, pWorkSubmitToken)

// kfifoChannelGetFifoContextMemDesc: fetch the memory descriptor backing the
// given FIFO context state of a channel (GM107 HAL; error stub when disabled).
NV_STATUS kfifoChannelGetFifoContextMemDesc_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, FIFO_CTX engState, MEMORY_DESCRIPTOR **ppMemdesc);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelGetFifoContextMemDesc(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, FIFO_CTX engState, MEMORY_DESCRIPTOR **ppMemdesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelGetFifoContextMemDesc(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc) kfifoChannelGetFifoContextMemDesc_GM107(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoChannelGetFifoContextMemDesc_HAL(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc) kfifoChannelGetFifoContextMemDesc(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc)

// kfifoConvertInstToKernelChannel: resolve an instance-block descriptor to
// its KernelChannel (GM107 HAL; error stub when disabled).
NV_STATUS kfifoConvertInstToKernelChannel_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, INST_BLOCK_DESC *arg0, struct KernelChannel **arg1);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoConvertInstToKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, INST_BLOCK_DESC *arg0, struct KernelChannel **arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoConvertInstToKernelChannel(pGpu, pKernelFifo, arg0, arg1) kfifoConvertInstToKernelChannel_GM107(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoConvertInstToKernelChannel_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoConvertInstToKernelChannel(pGpu, pKernelFifo, arg0, arg1)

// kfifoGetUsermodeMapInfo: return usermode mapping offset/size info
// (GV100 HAL; error stub when disabled).
NV_STATUS kfifoGetUsermodeMapInfo_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *arg0, NvU32 *arg1);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetUsermodeMapInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUsermodeMapInfo(pGpu, pKernelFifo, arg0, arg1) kfifoGetUsermodeMapInfo_GV100(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoGetUsermodeMapInfo(pGpu, pKernelFifo, arg0, arg1)
826 
// kfifoGetMaxSubcontext: maximum subcontext count (GV100 HAL; stub returns 0
// when KernelFifo is compiled out).
NvU32 kfifoGetMaxSubcontext_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxSubcontext(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxSubcontext(pGpu, pKernelFifo, arg0) kfifoGetMaxSubcontext_GV100(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, arg0) kfifoGetMaxSubcontext(pGpu, pKernelFifo, arg0)

// kfifoGetMaxSubcontextFromGr: maximum subcontext count as reported via GR
// (KERNEL implementation; stub returns 0 when disabled).
NvU32 kfifoGetMaxSubcontextFromGr_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernel);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxSubcontextFromGr(struct OBJGPU *pGpu, struct KernelFifo *pKernel) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxSubcontextFromGr(pGpu, pKernel) kfifoGetMaxSubcontextFromGr_KERNEL(pGpu, pKernel)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetMaxSubcontextFromGr_HAL(pGpu, pKernel) kfifoGetMaxSubcontextFromGr(pGpu, pKernel)

// Basis '_adde13': this configuration has exactly two runqueues.
static inline NvU32 kfifoGetNumRunqueues_adde13(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return 2;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetNumRunqueues(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetNumRunqueues(pGpu, pKernelFifo) kfifoGetNumRunqueues_adde13(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo) kfifoGetNumRunqueues(pGpu, pKernelFifo)
870 
// kfifoGetMaxChannelGroupSize: maximum channels per channel group
// (GV100 HAL; stub returns 0 when KernelFifo is compiled out).
NvU32 kfifoGetMaxChannelGroupSize_GV100(struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxChannelGroupSize(struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxChannelGroupSize(pKernelFifo) kfifoGetMaxChannelGroupSize_GV100(pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetMaxChannelGroupSize_HAL(pKernelFifo) kfifoGetMaxChannelGroupSize(pKernelFifo)

// Basis '_56cd7a': no-op that reports success — object tracking is not
// needed in this configuration.
static inline NV_STATUS kfifoAddObject_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoAddObject(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoAddObject(pGpu, pKernelFifo, pObject) kfifoAddObject_56cd7a(pGpu, pKernelFifo, pObject)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoAddObject_HAL(pGpu, pKernelFifo, pObject) kfifoAddObject(pGpu, pKernelFifo, pObject)

// Basis '_56cd7a': no-op success counterpart of kfifoAddObject.
static inline NV_STATUS kfifoDeleteObject_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoDeleteObject(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoDeleteObject(pGpu, pKernelFifo, pObject) kfifoDeleteObject_56cd7a(pGpu, pKernelFifo, pObject)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoDeleteObject_HAL(pGpu, pKernelFifo, pObject) kfifoDeleteObject(pGpu, pKernelFifo, pObject)
916 
// kfifoConstructEngineList: build the FIFO engine list (KERNEL
// implementation; error stub when KernelFifo is compiled out).
NV_STATUS kfifoConstructEngineList_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoConstructEngineList(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoConstructEngineList(pGpu, pKernelFifo) kfifoConstructEngineList_KERNEL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoConstructEngineList_HAL(pGpu, pKernelFifo) kfifoConstructEngineList(pGpu, pKernelFifo)

// kfifoGetHostDeviceInfoTable: populate the host device-info table, scoped by
// the MIG client handle (KERNEL implementation; error stub when disabled).
NV_STATUS kfifoGetHostDeviceInfoTable_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO *pEngineInfo, NvHandle hMigClient);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetHostDeviceInfoTable(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO *pEngineInfo, NvHandle hMigClient) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetHostDeviceInfoTable(pGpu, pKernelFifo, pEngineInfo, hMigClient) kfifoGetHostDeviceInfoTable_KERNEL(pGpu, pKernelFifo, pEngineInfo, hMigClient)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetHostDeviceInfoTable_HAL(pGpu, pKernelFifo, pEngineInfo, hMigClient) kfifoGetHostDeviceInfoTable(pGpu, pKernelFifo, pEngineInfo, hMigClient)

// kfifoGetSubctxType: return a channel's subcontext type via the out
// parameter (GV100 HAL; void stub when disabled).
void kfifoGetSubctxType_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 *arg1);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoGetSubctxType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetSubctxType(pGpu, pKernelFifo, arg0, arg1) kfifoGetSubctxType_GV100(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetSubctxType_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoGetSubctxType(pGpu, pKernelFifo, arg0, arg1)
957 
// Basis '_c04480': unconditional failure — asserts and returns
// NV_ERR_NOT_SUPPORTED (the assert condition is always false).
static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken_c04480(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

// GA100 HAL implementation (defined in the corresponding source file).
NV_STATUS kfifoGenerateInternalWorkSubmitToken_GA100(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1);

// Basis '_5baef9': alternate unconditional-failure stub, same behavior as
// '_c04480'.
static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken_5baef9(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}


// In this header configuration the enabled path binds to the failing
// '_c04480' basis (feature not supported here).
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGenerateInternalWorkSubmitToken(pGpu, arg0, arg1) kfifoGenerateInternalWorkSubmitToken_c04480(pGpu, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGenerateInternalWorkSubmitToken_HAL(pGpu, arg0, arg1) kfifoGenerateInternalWorkSubmitToken(pGpu, arg0, arg1)
979 
980 static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode_c04480(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
981     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
982 }
983 
984 NV_STATUS kfifoUpdateInternalDoorbellForUsermode_GA100(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId);
985 
986 static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode_5baef9(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
987     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
988 }
989 
990 
991 #ifdef __nvoc_kernel_fifo_h_disabled
992 static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
993     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
994     return NV_ERR_NOT_SUPPORTED;
995 }
996 #else //__nvoc_kernel_fifo_h_disabled
997 #define kfifoUpdateInternalDoorbellForUsermode(arg0, arg1, workSubmitToken, runlisId) kfifoUpdateInternalDoorbellForUsermode_c04480(arg0, arg1, workSubmitToken, runlisId)
998 #endif //__nvoc_kernel_fifo_h_disabled
999 
1000 #define kfifoUpdateInternalDoorbellForUsermode_HAL(arg0, arg1, workSubmitToken, runlisId) kfifoUpdateInternalDoorbellForUsermode(arg0, arg1, workSubmitToken, runlisId)
1001 
// Basis '_491d52': constant NV_FALSE — lite mode is never enabled in this
// configuration.
static inline NvBool kfifoIsLiteModeEnabled_491d52(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return ((NvBool)(0 != 0));
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoIsLiteModeEnabled(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoIsLiteModeEnabled(pGpu, pKernelFifo) kfifoIsLiteModeEnabled_491d52(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoIsLiteModeEnabled_HAL(pGpu, pKernelFifo) kfifoIsLiteModeEnabled(pGpu, pKernelFifo)

// kfifoGetNumEngines: number of engines known to FIFO (GM107 HAL; stub
// returns 0 when disabled).
NvU32 kfifoGetNumEngines_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetNumEngines(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetNumEngines(pGpu, pKernelFifo) kfifoGetNumEngines_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetNumEngines_HAL(pGpu, pKernelFifo) kfifoGetNumEngines(pGpu, pKernelFifo)

// kfifoGetEngineName: human-readable engine name for an engine-info
// type/value pair (GM107 HAL; stub returns NULL when disabled).
const char *kfifoGetEngineName_GM107(struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline const char *kfifoGetEngineName(struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEngineName(pKernelFifo, inType, inVal) kfifoGetEngineName_GM107(pKernelFifo, inType, inVal)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetEngineName_HAL(pKernelFifo, inType, inVal) kfifoGetEngineName(pKernelFifo, inType, inVal)
1045 
// kfifoGetMaxNumRunlists: maximum number of runlists (GM107 HAL; stub
// returns 0 when KernelFifo is compiled out).
NvU32 kfifoGetMaxNumRunlists_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxNumRunlists(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxNumRunlists(pGpu, pKernelFifo) kfifoGetMaxNumRunlists_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetMaxNumRunlists_HAL(pGpu, pKernelFifo) kfifoGetMaxNumRunlists(pGpu, pKernelFifo)

// kfifoGetEnginePbdmaIds: return the PBDMA id array for an engine identified
// by an engine-info type/value pair (GM107 HAL; error stub when disabled).
NV_STATUS kfifoGetEnginePbdmaIds_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE type, NvU32 val, NvU32 **ppPbdmaIds, NvU32 *pNumPbdmas);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetEnginePbdmaIds(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE type, NvU32 val, NvU32 **ppPbdmaIds, NvU32 *pNumPbdmas) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEnginePbdmaIds(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas) kfifoGetEnginePbdmaIds_GM107(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetEnginePbdmaIds_HAL(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas) kfifoGetEnginePbdmaIds(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas)

// Basis '_56cd7a': no-op success — no PBDMA fault-id reservation needed in
// this configuration.
static inline NV_STATUS kfifoReservePbdmaFaultIds_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_ENGINE_LIST *arg0, NvU32 arg1) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoReservePbdmaFaultIds(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_ENGINE_LIST *arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoReservePbdmaFaultIds(pGpu, pKernelFifo, arg0, arg1) kfifoReservePbdmaFaultIds_56cd7a(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoReservePbdmaFaultIds_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoReservePbdmaFaultIds(pGpu, pKernelFifo, arg0, arg1)
1089 
// kfifoGetEnginePartnerList: fill the engine partner-list control params
// (GM107 HAL; error stub when KernelFifo is compiled out).
NV_STATUS kfifoGetEnginePartnerList_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pParams);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetEnginePartnerList(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pParams) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEnginePartnerList(pGpu, pKernelFifo, pParams) kfifoGetEnginePartnerList_GM107(pGpu, pKernelFifo, pParams)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetEnginePartnerList_HAL(pGpu, pKernelFifo, pParams) kfifoGetEnginePartnerList(pGpu, pKernelFifo, pParams)

// Basis '_cbe027': constant NV_TRUE — runlist TSG headers are always
// supported in this configuration.
static inline NvBool kfifoRunlistIsTsgHeaderSupported_cbe027(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    return ((NvBool)(0 == 0));
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoRunlistIsTsgHeaderSupported(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistIsTsgHeaderSupported(pGpu, pKernelFifo, arg0) kfifoRunlistIsTsgHeaderSupported_cbe027(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistIsTsgHeaderSupported_HAL(pGpu, pKernelFifo, arg0) kfifoRunlistIsTsgHeaderSupported(pGpu, pKernelFifo, arg0)

// kfifoRunlistGetEntrySize: size in bytes of one runlist entry (GV100 HAL;
// stub returns 0 when disabled).
NvU32 kfifoRunlistGetEntrySize_GV100(struct KernelFifo *arg0);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoRunlistGetEntrySize(struct KernelFifo *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistGetEntrySize(arg0) kfifoRunlistGetEntrySize_GV100(arg0)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistGetEntrySize_HAL(arg0) kfifoRunlistGetEntrySize(arg0)
1133 
// Basis '_b3696a': no-op — BAR1 USERD snooping requires no setup in this
// configuration.
static inline void kfifoSetupBar1UserdSnoop_b3696a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bEnable, NvU64 offset) {
    return;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoSetupBar1UserdSnoop(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bEnable, NvU64 offset) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoSetupBar1UserdSnoop(pGpu, pKernelFifo, bEnable, offset) kfifoSetupBar1UserdSnoop_b3696a(pGpu, pKernelFifo, bEnable, offset)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoSetupBar1UserdSnoop_HAL(pGpu, pKernelFifo, bEnable, offset) kfifoSetupBar1UserdSnoop(pGpu, pKernelFifo, bEnable, offset)

// kfifoPreAllocUserD: pre-allocate USERD backing (GM107 HAL; error stub when
// disabled). Paired with kfifoFreePreAllocUserD below.
NV_STATUS kfifoPreAllocUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoPreAllocUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoPreAllocUserD(pGpu, pKernelFifo) kfifoPreAllocUserD_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoPreAllocUserD_HAL(pGpu, pKernelFifo) kfifoPreAllocUserD(pGpu, pKernelFifo)

// kfifoFreePreAllocUserD: release the USERD pre-allocation made by
// kfifoPreAllocUserD (GM107 HAL; void stub when disabled).
void kfifoFreePreAllocUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoFreePreAllocUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoFreePreAllocUserD(pGpu, pKernelFifo) kfifoFreePreAllocUserD_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoFreePreAllocUserD_HAL(pGpu, pKernelFifo) kfifoFreePreAllocUserD(pGpu, pKernelFifo)
1175 
// Basis '_4a4dee': USERD BAR1 mapping starts at offset 0 in this
// configuration.
static inline NvU64 kfifoGetUserdBar1MapStartOffset_4a4dee(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return 0;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU64 kfifoGetUserdBar1MapStartOffset(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUserdBar1MapStartOffset(pGpu, pKernelFifo) kfifoGetUserdBar1MapStartOffset_4a4dee(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUserdBar1MapStartOffset_HAL(pGpu, pKernelFifo) kfifoGetUserdBar1MapStartOffset(pGpu, pKernelFifo)

// kfifoGetUserdBar1MapInfo: return USERD BAR1 mapping offset and size
// (GM107 HAL; error stub when disabled).
NV_STATUS kfifoGetUserdBar1MapInfo_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *bar1Offset, NvU32 *bar1MapSize);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetUserdBar1MapInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *bar1Offset, NvU32 *bar1MapSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUserdBar1MapInfo(pGpu, pKernelFifo, bar1Offset, bar1MapSize) kfifoGetUserdBar1MapInfo_GM107(pGpu, pKernelFifo, bar1Offset, bar1MapSize)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUserdBar1MapInfo_HAL(pGpu, pKernelFifo, bar1Offset, bar1MapSize) kfifoGetUserdBar1MapInfo(pGpu, pKernelFifo, bar1Offset, bar1MapSize)

// kfifoGetUserdSizeAlign: USERD size and address alignment shift
// (GM107 HAL; void stub when disabled).
void kfifoGetUserdSizeAlign_GM107(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pAddrShift);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoGetUserdSizeAlign(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pAddrShift) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUserdSizeAlign(pKernelFifo, pSize, pAddrShift) kfifoGetUserdSizeAlign_GM107(pKernelFifo, pSize, pAddrShift)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUserdSizeAlign_HAL(pKernelFifo, pSize, pAddrShift) kfifoGetUserdSizeAlign(pKernelFifo, pSize, pAddrShift)

// kfifoGetUserdLocation: USERD aperture and attribute (GM107 HAL; error stub
// when disabled).
NV_STATUS kfifoGetUserdLocation_GM107(struct KernelFifo *pKernelFifo, NvU32 *pUserdAperture, NvU32 *pUserdAttribute);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetUserdLocation(struct KernelFifo *pKernelFifo, NvU32 *pUserdAperture, NvU32 *pUserdAttribute) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUserdLocation(pKernelFifo, pUserdAperture, pUserdAttribute) kfifoGetUserdLocation_GM107(pKernelFifo, pUserdAperture, pUserdAttribute)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUserdLocation_HAL(pKernelFifo, pUserdAperture, pUserdAttribute) kfifoGetUserdLocation(pKernelFifo, pUserdAperture, pUserdAttribute)
1232 
// kfifoCalcTotalSizeOfFaultMethodBuffers: total fault-method-buffer size,
// optionally for the FB-reserved calculation (GV100 HAL; stub returns 0 when
// disabled).
NvU32 kfifoCalcTotalSizeOfFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bCalcForFbRsvd);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoCalcTotalSizeOfFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bCalcForFbRsvd) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoCalcTotalSizeOfFaultMethodBuffers(pGpu, pKernelFifo, bCalcForFbRsvd) kfifoCalcTotalSizeOfFaultMethodBuffers_GV100(pGpu, pKernelFifo, bCalcForFbRsvd)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoCalcTotalSizeOfFaultMethodBuffers_HAL(pGpu, pKernelFifo, bCalcForFbRsvd) kfifoCalcTotalSizeOfFaultMethodBuffers(pGpu, pKernelFifo, bCalcForFbRsvd)

// kfifoCheckEngine: report via *pPresent whether the engine descriptor is
// present (GM107 HAL; error stub when disabled).
NV_STATUS kfifoCheckEngine_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvBool *pPresent);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoCheckEngine(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvBool *pPresent) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoCheckEngine(pGpu, pKernelFifo, engDesc, pPresent) kfifoCheckEngine_GM107(pGpu, pKernelFifo, engDesc, pPresent)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoCheckEngine_HAL(pGpu, pKernelFifo, engDesc, pPresent) kfifoCheckEngine(pGpu, pKernelFifo, engDesc, pPresent)

// kfifoGetVChIdForSChId: translate a system channel id to a virtual channel
// id for the given GFID/engine (FWCLIENT implementation; error stub when
// disabled).
NV_STATUS kfifoGetVChIdForSChId_FWCLIENT(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 chId, NvU32 gfid, NvU32 engineId, NvU32 *pVChid);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetVChIdForSChId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 chId, NvU32 gfid, NvU32 engineId, NvU32 *pVChid) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetVChIdForSChId(pGpu, pKernelFifo, chId, gfid, engineId, pVChid) kfifoGetVChIdForSChId_FWCLIENT(pGpu, pKernelFifo, chId, gfid, engineId, pVChid)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetVChIdForSChId_HAL(pGpu, pKernelFifo, chId, gfid, engineId, pVChid) kfifoGetVChIdForSChId(pGpu, pKernelFifo, chId, gfid, engineId, pVChid)
1274 
// Basis '_56cd7a': no-op success — no channel-id table programming needed in
// this configuration.
static inline NV_STATUS kfifoProgramChIdTable_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, NvHandle hMigClient, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoProgramChIdTable(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, NvHandle hMigClient, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoProgramChIdTable(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, hMigClient, engineFifoListNumEntries, pEngineFifoList) kfifoProgramChIdTable_56cd7a(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, hMigClient, engineFifoListNumEntries, pEngineFifoList)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoProgramChIdTable_HAL(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, hMigClient, engineFifoListNumEntries, pEngineFifoList) kfifoProgramChIdTable(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, hMigClient, engineFifoListNumEntries, pEngineFifoList)

// Basis '_56cd7a': no-op success — no scheduling-policy restore needed in
// this configuration.
static inline NV_STATUS kfifoRestoreSchedPolicy_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoRestoreSchedPolicy(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRestoreSchedPolicy(pGpu, pKernelFifo) kfifoRestoreSchedPolicy_56cd7a(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRestoreSchedPolicy_HAL(pGpu, pKernelFifo) kfifoRestoreSchedPolicy(pGpu, pKernelFifo)

// kfifoRunlistSetId: assign a channel to the given runlist id (GM107 HAL;
// error stub when disabled).
NV_STATUS kfifoRunlistSetId_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 runlistId);


#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoRunlistSetId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistSetId(pGpu, pKernelFifo, arg0, runlistId) kfifoRunlistSetId_GM107(pGpu, pKernelFifo, arg0, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistSetId_HAL(pGpu, pKernelFifo, arg0, runlistId) kfifoRunlistSetId(pGpu, pKernelFifo, arg0, runlistId)
1320 
1321 NV_STATUS kfifoRunlistSetIdByEngine_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 engDesc);
1322 
1323 
1324 #ifdef __nvoc_kernel_fifo_h_disabled
1325 static inline NV_STATUS kfifoRunlistSetIdByEngine(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 engDesc) {
1326     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1327     return NV_ERR_NOT_SUPPORTED;
1328 }
1329 #else //__nvoc_kernel_fifo_h_disabled
1330 #define kfifoRunlistSetIdByEngine(pGpu, pKernelFifo, arg0, engDesc) kfifoRunlistSetIdByEngine_GM107(pGpu, pKernelFifo, arg0, engDesc)
1331 #endif //__nvoc_kernel_fifo_h_disabled
1332 
1333 #define kfifoRunlistSetIdByEngine_HAL(pGpu, pKernelFifo, arg0, engDesc) kfifoRunlistSetIdByEngine(pGpu, pKernelFifo, arg0, engDesc)
1334 
1335 void kfifoSetupUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc);
1336 
1337 
1338 #ifdef __nvoc_kernel_fifo_h_disabled
1339 static inline void kfifoSetupUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc) {
1340     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1341 }
1342 #else //__nvoc_kernel_fifo_h_disabled
1343 #define kfifoSetupUserD(pGpu, pKernelFifo, pMemDesc) kfifoSetupUserD_GM107(pGpu, pKernelFifo, pMemDesc)
1344 #endif //__nvoc_kernel_fifo_h_disabled
1345 
1346 #define kfifoSetupUserD_HAL(pGpu, pKernelFifo, pMemDesc) kfifoSetupUserD(pGpu, pKernelFifo, pMemDesc)
1347 
// ---------------------------------------------------------------------------
// Virtual-method dispatch thunks.
// Each *_DISPATCH wrapper forwards through the per-object NVOC vtable slot
// (e.g. pKernelFifo->__kfifoConstructEngine__), which is bound to one of the
// chip-specific implementations (_IMPL / _GM107 / _GA100 / _GH100 / hash
// variants) declared alongside it.
// ---------------------------------------------------------------------------

NV_STATUS kfifoConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGDESCRIPTOR engDesc);

static inline NV_STATUS kfifoConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGDESCRIPTOR engDesc) {
    return pKernelFifo->__kfifoConstructEngine__(pGpu, pKernelFifo, engDesc);
}

NV_STATUS kfifoStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

static inline NV_STATUS kfifoStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return pKernelFifo->__kfifoStateInitLocked__(pGpu, pKernelFifo);
}

void kfifoStateDestroy_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

static inline void kfifoStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    pKernelFifo->__kfifoStateDestroy__(pGpu, pKernelFifo);
}

NV_STATUS kfifoStatePostLoad_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags);

static inline NV_STATUS kfifoStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
    return pKernelFifo->__kfifoStatePostLoad__(pGpu, pKernelFifo, flags);
}

NV_STATUS kfifoStatePreUnload_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags);

static inline NV_STATUS kfifoStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
    return pKernelFifo->__kfifoStatePreUnload__(pGpu, pKernelFifo, flags);
}

// Validate the address spaces chosen for USERD, pushbuffer and GPFIFO at
// channel allocation (GH100 checks; _56cd7a variant accepts everything).
NV_STATUS kfifoCheckChannelAllocAddrSpaces_GH100(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace);

static inline NV_STATUS kfifoCheckChannelAllocAddrSpaces_56cd7a(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace) {
    return NV_OK;
}

static inline NV_STATUS kfifoCheckChannelAllocAddrSpaces_DISPATCH(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace) {
    return pKernelFifo->__kfifoCheckChannelAllocAddrSpaces__(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace);
}

NvU64 kfifoGetMmioUsermodeOffset_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0);

// _474d46 variant: invalid on this configuration — asserts and returns 0.
static inline NvU64 kfifoGetMmioUsermodeOffset_474d46(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, 0);
}

static inline NvU64 kfifoGetMmioUsermodeOffset_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) {
    return pKernelFifo->__kfifoGetMmioUsermodeOffset__(pGpu, pKernelFifo, arg0);
}

NvU64 kfifoGetMmioUsermodeSize_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0);

// _474d46 variant: invalid on this configuration — asserts and returns 0.
static inline NvU64 kfifoGetMmioUsermodeSize_474d46(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, 0);
}

static inline NvU64 kfifoGetMmioUsermodeSize_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) {
    return pKernelFifo->__kfifoGetMmioUsermodeSize__(pGpu, pKernelFifo, arg0);
}

NvU32 kfifoChannelGroupGetLocalMaxSubcontext_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1);

NvU32 kfifoChannelGroupGetLocalMaxSubcontext_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1);

static inline NvU32 kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1) {
    return pKernelFifo->__kfifoChannelGroupGetLocalMaxSubcontext__(pGpu, pKernelFifo, arg0, arg1);
}

NvU32 kfifoRunlistGetBaseShift_GM107(struct KernelFifo *pKernelFifo);
1441 
1442 NV_STATUS kfifoUpdateUsermodeDoorbell_TU102(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId);
1443 
1444 NV_STATUS kfifoUpdateUsermodeDoorbell_GA100(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId);
1445 
1446 static inline NV_STATUS kfifoUpdateUsermodeDoorbell_DISPATCH(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
1447     return arg1->__kfifoUpdateUsermodeDoorbell__(arg0, arg1, workSubmitToken, runlisId);
1448 }
1449 
// Per-chip runlist base-shift implementations; dispatch picks the bound one.
NvU32 kfifoRunlistGetBaseShift_GM107(struct KernelFifo *pKernelFifo);

NvU32 kfifoRunlistGetBaseShift_GA100(struct KernelFifo *pKernelFifo);

NvU32 kfifoRunlistGetBaseShift_GA102(struct KernelFifo *pKernelFifo);

static inline NvU32 kfifoRunlistGetBaseShift_DISPATCH(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->__kfifoRunlistGetBaseShift__(pKernelFifo);
}

// Maximum number of CE (copy engine) channel groups for this chip.
NvU32 kfifoGetMaxCeChannelGroups_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

NvU32 kfifoGetMaxCeChannelGroups_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

static inline NvU32 kfifoGetMaxCeChannelGroups_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return pKernelFifo->__kfifoGetMaxCeChannelGroups__(pGpu, pKernelFifo);
}

// ---------------------------------------------------------------------------
// Inherited OBJENGSTATE lifecycle hooks, dispatched through the same
// per-object vtable (no KernelFifo-specific declarations needed here).
// ---------------------------------------------------------------------------

static inline NV_STATUS kfifoStateLoad_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) {
    return pEngstate->__kfifoStateLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kfifoStateUnload_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) {
    return pEngstate->__kfifoStateUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kfifoStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) {
    return pEngstate->__kfifoStatePreLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kfifoStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) {
    return pEngstate->__kfifoStatePostUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kfifoStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    return pEngstate->__kfifoStateInitUnlocked__(pGpu, pEngstate);
}

static inline void kfifoInitMissing_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    pEngstate->__kfifoInitMissing__(pGpu, pEngstate);
}

static inline NV_STATUS kfifoStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    return pEngstate->__kfifoStatePreInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS kfifoStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    return pEngstate->__kfifoStatePreInitUnlocked__(pGpu, pEngstate);
}

static inline NvBool kfifoIsPresent_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    return pEngstate->__kfifoIsPresent__(pGpu, pEngstate);
}
1503 
1504 static inline const ENGINE_INFO *kfifoGetEngineInfo(struct KernelFifo *pKernelFifo) {
1505     if (pKernelFifo->engineInfo.engineInfoList == ((void *)0))
1506         return ((void *)0);
1507     return &pKernelFifo->engineInfo;
1508 }
1509 
// ---------------------------------------------------------------------------
// Simple accessors over KernelFifo state; each returns a stored field.
// ---------------------------------------------------------------------------

// Pre-allocated USERD bookkeeping for this FIFO.
static inline const PREALLOCATED_USERD_INFO *kfifoGetPreallocatedUserdInfo(struct KernelFifo *pKernelFifo) {
    return &pKernelFifo->userdInfo;
}

// NV_TRUE when per-runlist channel RAM is in use.
static inline NvBool kfifoIsPerRunlistChramEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUsePerRunlistChram;
}

// NV_TRUE when the hardware supports per-runlist channel RAM at all.
static inline NvBool kfifoIsPerRunlistChramSupportedInHw(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bIsPerRunlistChramSupportedInHw;
}

// NV_TRUE when channel IDs are allocated from a heap.
static inline NvBool kfifoIsChidHeapEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUseChidHeap;
}

static inline NvBool kfifoIsHostEngineExpansionSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bHostEngineExpansion;
}

static inline NvBool kfifoIsSubcontextSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bSubcontextSupported;
}

static inline NvBool kfifoHostHasLbOverflow(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bHostHasLbOverflow;
}

// NV_TRUE when USERD lives in system memory rather than vidmem.
static inline NvBool kfifoIsUserdInSystemMemory(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUserdInSystemMemory;
}

static inline NvBool kfifoIsUserdMapDmaSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUserdMapDmaSupported;
}

static inline NvBool kfifoIsMixedInstmemApertureDefAllowed(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bMixedInstmemApertureDefAllowed;
}

static inline NvBool kfifoIsZombieSubctxWarEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bIsZombieSubctxWarEnabled;
}

static inline NvBool kfifoIsWddmInterleavingPolicyEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bWddmInterleavingPolicyEnabled;
}

static inline NvBool kfifoIsSchedSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bIsSchedSupported;
}

// Owning pointer semantics not established here — lifetime managed elsewhere.
static inline struct KernelSchedMgr *kfifoGetKernelSchedMgr(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->pKernelSchedMgr;
}

static inline MEMORY_DESCRIPTOR *kfifoGetDummyPageMemDesc(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->pDummyPageMemDesc;
}
1569 
// ---------------------------------------------------------------------------
// Channel-ID manager (CHID_MGR) API.
// Pattern: <fn>_IMPL is the real implementation; the plain name is either an
// assert-and-fail stub (engine compiled out) or a macro alias of _IMPL.
// ---------------------------------------------------------------------------

void kfifoDestruct_IMPL(struct KernelFifo *pKernelFifo);

// NVOC hook: destructor entry point used by the generated object machinery.
#define __nvoc_kfifoDestruct(pKernelFifo) kfifoDestruct_IMPL(pKernelFifo)
NV_STATUS kfifoChidMgrConstruct_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrConstruct(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrConstruct(pGpu, pKernelFifo) kfifoChidMgrConstruct_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

void kfifoChidMgrDestruct_IMPL(struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoChidMgrDestruct(struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrDestruct(pKernelFifo) kfifoChidMgrDestruct_IMPL(pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

// Allocate a channel HW ID from pChidMgr for hClient; the alloc mode and the
// bForce*/idx parameters constrain which internal index / USERD page is used.
NV_STATUS kfifoChidMgrAllocChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvHandle hClient, CHANNEL_HW_ID_ALLOC_MODE arg0, NvBool bForceInternalIdx, NvU32 internalIdx, NvBool bForceUserdPage, NvU32 userdPageIdx, NvU32 ChID, struct KernelChannel *arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrAllocChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvHandle hClient, CHANNEL_HW_ID_ALLOC_MODE arg0, NvBool bForceInternalIdx, NvU32 internalIdx, NvBool bForceUserdPage, NvU32 userdPageIdx, NvU32 ChID, struct KernelChannel *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrAllocChid(pGpu, pKernelFifo, pChidMgr, hClient, arg0, bForceInternalIdx, internalIdx, bForceUserdPage, userdPageIdx, ChID, arg1) kfifoChidMgrAllocChid_IMPL(pGpu, pKernelFifo, pChidMgr, hClient, arg0, bForceInternalIdx, internalIdx, bForceUserdPage, userdPageIdx, ChID, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

NV_STATUS kfifoChidMgrRetainChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrRetainChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrRetainChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrRetainChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled

NV_STATUS kfifoChidMgrReleaseChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrReleaseChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrReleaseChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrReleaseChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled

NV_STATUS kfifoChidMgrFreeChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrFreeChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrFreeChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrFreeChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled

// Reserve a contiguous range of system channel IDs for the given GFID;
// outputs the chosen offset/count through pChidOffset/pChannelCount.
NV_STATUS kfifoChidMgrReserveSystemChids_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 numChannels, NvU32 flags, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, NvHandle hMigClient, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrReserveSystemChids(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 numChannels, NvU32 flags, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, NvHandle hMigClient, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrReserveSystemChids(pGpu, pKernelFifo, pChidMgr, numChannels, flags, gfid, pChidOffset, pChannelCount, hMigClient, engineFifoListNumEntries, pEngineFifoList) kfifoChidMgrReserveSystemChids_IMPL(pGpu, pKernelFifo, pChidMgr, numChannels, flags, gfid, pChidOffset, pChannelCount, hMigClient, engineFifoListNumEntries, pEngineFifoList)
#endif //__nvoc_kernel_fifo_h_disabled

// Release system channel IDs previously reserved for the given GFID.
NV_STATUS kfifoChidMgrFreeSystemChids_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, NvHandle hMigClient, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrFreeSystemChids(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, NvHandle hMigClient, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrFreeSystemChids(pGpu, pKernelFifo, pChidMgr, gfid, pChidOffset, pChannelCount, hMigClient, engineFifoListNumEntries, pEngineFifoList) kfifoChidMgrFreeSystemChids_IMPL(pGpu, pKernelFifo, pChidMgr, gfid, pChidOffset, pChannelCount, hMigClient, engineFifoListNumEntries, pEngineFifoList)
#endif //__nvoc_kernel_fifo_h_disabled
1659 
// ---------------------------------------------------------------------------
// Channel-ID offset, channel-group HW ID, and CHID_MGR lookup helpers.
// Same disabled-stub / macro-alias pattern as above.
// ---------------------------------------------------------------------------

NV_STATUS kfifoSetChidOffset_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, NvHandle hMigClient, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoSetChidOffset(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, NvHandle hMigClient, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoSetChidOffset(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pChidOffset, pChannelCount, hMigClient, engineFifoListNumEntries, pEngineFifoList) kfifoSetChidOffset_IMPL(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pChidOffset, pChannelCount, hMigClient, engineFifoListNumEntries, pEngineFifoList)
#endif //__nvoc_kernel_fifo_h_disabled

// Number of channels managed by pChidMgr (disabled stub returns 0).
NvU32 kfifoChidMgrGetNumChannels_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoChidMgrGetNumChannels(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr) kfifoChidMgrGetNumChannels_IMPL(pGpu, pKernelFifo, pChidMgr)
#endif //__nvoc_kernel_fifo_h_disabled

// Allocate / free a channel-group hardware ID within pChidMgr.
NV_STATUS kfifoChidMgrAllocChannelGroupHwID_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 *pGrpId);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrAllocChannelGroupHwID(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 *pGrpId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrAllocChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, pGrpId) kfifoChidMgrAllocChannelGroupHwID_IMPL(pGpu, pKernelFifo, pChidMgr, pGrpId)
#endif //__nvoc_kernel_fifo_h_disabled

NV_STATUS kfifoChidMgrFreeChannelGroupHwID_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpId);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrFreeChannelGroupHwID(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrFreeChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, grpId) kfifoChidMgrFreeChannelGroupHwID_IMPL(pGpu, pKernelFifo, pChidMgr, grpId)
#endif //__nvoc_kernel_fifo_h_disabled

// Lookups (disabled stubs return NULL).
struct KernelChannelGroup *kfifoChidMgrGetKernelChannelGroup_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline struct KernelChannelGroup *kfifoChidMgrGetKernelChannelGroup(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrGetKernelChannelGroup(pGpu, pKernelFifo, pChidMgr, grpID) kfifoChidMgrGetKernelChannelGroup_IMPL(pGpu, pKernelFifo, pChidMgr, grpID)
#endif //__nvoc_kernel_fifo_h_disabled

struct KernelChannel *kfifoChidMgrGetKernelChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline struct KernelChannel *kfifoChidMgrGetKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrGetKernelChannel_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled

// Fetch the CHID_MGR for a runlist ID, or resolve one from an
// (engineInfoType, value) pair.
CHID_MGR *kfifoGetChidMgr_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline CHID_MGR *kfifoGetChidMgr(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChidMgr(pGpu, pKernelFifo, runlistId) kfifoGetChidMgr_IMPL(pGpu, pKernelFifo, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled

NV_STATUS kfifoGetChidMgrFromType_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineInfoType, NvU32 value, CHID_MGR **arg0);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetChidMgrFromType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineInfoType, NvU32 value, CHID_MGR **arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChidMgrFromType(pGpu, pKernelFifo, engineInfoType, value, arg0) kfifoGetChidMgrFromType_IMPL(pGpu, pKernelFifo, engineInfoType, value, arg0)
#endif //__nvoc_kernel_fifo_h_disabled
1747 
1748 struct KernelChannelGroup *kfifoGetChannelGroup_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 grpID, NvU32 runlistID);
1749 
1750 #ifdef __nvoc_kernel_fifo_h_disabled
1751 static inline struct KernelChannelGroup *kfifoGetChannelGroup(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 grpID, NvU32 runlistID) {
1752     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1753     return NULL;
1754 }
1755 #else //__nvoc_kernel_fifo_h_disabled
1756 #define kfifoGetChannelGroup(pGpu, pKernelFifo, grpID, runlistID) kfifoGetChannelGroup_IMPL(pGpu, pKernelFifo, grpID, runlistID)
1757 #endif //__nvoc_kernel_fifo_h_disabled
1758 
1759 NvU32 kfifoGetChannelGroupsInUse_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1760 
1761 #ifdef __nvoc_kernel_fifo_h_disabled
1762 static inline NvU32 kfifoGetChannelGroupsInUse(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1763     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1764     return 0;
1765 }
1766 #else //__nvoc_kernel_fifo_h_disabled
1767 #define kfifoGetChannelGroupsInUse(pGpu, pKernelFifo) kfifoGetChannelGroupsInUse_IMPL(pGpu, pKernelFifo)
1768 #endif //__nvoc_kernel_fifo_h_disabled
1769 
1770 NvU32 kfifoGetRunlistChannelGroupsInUse_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId);
1771 
1772 #ifdef __nvoc_kernel_fifo_h_disabled
1773 static inline NvU32 kfifoGetRunlistChannelGroupsInUse(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) {
1774     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1775     return 0;
1776 }
1777 #else //__nvoc_kernel_fifo_h_disabled
1778 #define kfifoGetRunlistChannelGroupsInUse(pGpu, pKernelFifo, runlistId) kfifoGetRunlistChannelGroupsInUse_IMPL(pGpu, pKernelFifo, runlistId)
1779 #endif //__nvoc_kernel_fifo_h_disabled
1780 
1781 void kfifoGetChannelIterator_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt);
1782 
1783 #ifdef __nvoc_kernel_fifo_h_disabled
1784 static inline void kfifoGetChannelIterator(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt) {
1785     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1786 }
1787 #else //__nvoc_kernel_fifo_h_disabled
1788 #define kfifoGetChannelIterator(pGpu, pKernelFifo, pIt) kfifoGetChannelIterator_IMPL(pGpu, pKernelFifo, pIt)
1789 #endif //__nvoc_kernel_fifo_h_disabled
1790 
1791 NV_STATUS kfifoGetNextKernelChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, struct KernelChannel **ppKernelChannel);
1792 
1793 #ifdef __nvoc_kernel_fifo_h_disabled
1794 static inline NV_STATUS kfifoGetNextKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, struct KernelChannel **ppKernelChannel) {
1795     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1796     return NV_ERR_NOT_SUPPORTED;
1797 }
1798 #else //__nvoc_kernel_fifo_h_disabled
1799 #define kfifoGetNextKernelChannel(pGpu, pKernelFifo, pIt, ppKernelChannel) kfifoGetNextKernelChannel_IMPL(pGpu, pKernelFifo, pIt, ppKernelChannel)
1800 #endif //__nvoc_kernel_fifo_h_disabled
1801 
1802 void kfifoFillMemInfo_IMPL(struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc, NV2080_CTRL_FIFO_MEM_INFO *pMemory);
1803 
1804 #ifdef __nvoc_kernel_fifo_h_disabled
1805 static inline void kfifoFillMemInfo(struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc, NV2080_CTRL_FIFO_MEM_INFO *pMemory) {
1806     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1807 }
1808 #else //__nvoc_kernel_fifo_h_disabled
1809 #define kfifoFillMemInfo(pKernelFifo, pMemDesc, pMemory) kfifoFillMemInfo_IMPL(pKernelFifo, pMemDesc, pMemory)
1810 #endif //__nvoc_kernel_fifo_h_disabled
1811 
// Query the allocated-channel bitmask for a runlist; disabled stub returns 0.
NvU32 kfifoGetAllocatedChannelMask_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, NvU32 *pBitMask, NvLength bitMaskSize);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetAllocatedChannelMask(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, NvU32 *pBitMask, NvLength bitMaskSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetAllocatedChannelMask(pGpu, pKernelFifo, runlistId, pBitMask, bitMaskSize) kfifoGetAllocatedChannelMask_IMPL(pGpu, pKernelFifo, runlistId, pBitMask, bitMaskSize)
#endif //__nvoc_kernel_fifo_h_disabled

// Create a CHANNEL_LIST (returned through arg0).
NV_STATUS kfifoChannelListCreate_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST **arg0);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListCreate(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST **arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListCreate(pGpu, pKernelFifo, arg0) kfifoChannelListCreate_IMPL(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

// Destroy a CHANNEL_LIST previously created by kfifoChannelListCreate.
NV_STATUS kfifoChannelListDestroy_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST *arg0);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListDestroy(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListDestroy(pGpu, pKernelFifo, arg0) kfifoChannelListDestroy_IMPL(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled
1844 
// Append a KernelChannel (arg0) to a CHANNEL_LIST (arg1).
NV_STATUS kfifoChannelListAppend_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListAppend(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListAppend(pGpu, pKernelFifo, arg0, arg1) kfifoChannelListAppend_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

// Remove a KernelChannel (arg0) from a CHANNEL_LIST (arg1).
NV_STATUS kfifoChannelListRemove_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListRemove(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListRemove(pGpu, pKernelFifo, arg0, arg1) kfifoChannelListRemove_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled

// Check for a channel on the engine list; disabled stub returns NV_FALSE.
// (arg0/arg1 semantics are defined by the _IMPL — presumably an engine-type
// array and its length; confirm against the implementation.)
NvBool kfifoEngineListHasChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE *arg0, NvU32 arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoEngineListHasChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE *arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoEngineListHasChannel(pGpu, pKernelFifo, arg0, arg1) kfifoEngineListHasChannel_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled
1877 
// Look up the runlist buffer pool for an engine; disabled stub returns NULL.
CTX_BUF_POOL_INFO *kfifoGetRunlistBufPool_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline CTX_BUF_POOL_INFO *kfifoGetRunlistBufPool(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetRunlistBufPool(pGpu, pKernelFifo, rmEngineType) kfifoGetRunlistBufPool_IMPL(pGpu, pKernelFifo, rmEngineType)
#endif //__nvoc_kernel_fifo_h_disabled

// Query runlist buffer info through the two NvU64 out-parameters
// (arg0..arg4 semantics are defined by the _IMPL; confirm there).
NV_STATUS kfifoGetRunlistBufInfo_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvBool arg1, NvU32 arg2, NvU64 *arg3, NvU64 *arg4);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetRunlistBufInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvBool arg1, NvU32 arg2, NvU64 *arg3, NvU64 *arg4) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetRunlistBufInfo(pGpu, pKernelFifo, arg0, arg1, arg2, arg3, arg4) kfifoGetRunlistBufInfo_IMPL(pGpu, pKernelFifo, arg0, arg1, arg2, arg3, arg4)
#endif //__nvoc_kernel_fifo_h_disabled

// Register a pair of scheduling callbacks: one invoked after scheduling is
// enabled and one before it is disabled, each with its own context pointer.
NV_STATUS kfifoAddSchedulingHandler_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoAddSchedulingHandler(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoAddSchedulingHandler(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData) kfifoAddSchedulingHandler_IMPL(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData)
#endif //__nvoc_kernel_fifo_h_disabled
1910 
// Unregister scheduling callbacks previously added with
// kfifoAddSchedulingHandler (same handler/data pairs identify the entry).
void kfifoRemoveSchedulingHandler_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoRemoveSchedulingHandler(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRemoveSchedulingHandler(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData) kfifoRemoveSchedulingHandler_IMPL(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData)
#endif //__nvoc_kernel_fifo_h_disabled

// Invoke the registered post-scheduling-enable callbacks.
NV_STATUS kfifoTriggerPostSchedulingEnableCallback_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoTriggerPostSchedulingEnableCallback(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoTriggerPostSchedulingEnableCallback(pGpu, pKernelFifo) kfifoTriggerPostSchedulingEnableCallback_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

// Invoke the registered pre-scheduling-disable callbacks.
NV_STATUS kfifoTriggerPreSchedulingDisableCallback_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoTriggerPreSchedulingDisableCallback(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoTriggerPreSchedulingDisableCallback(pGpu, pKernelFifo) kfifoTriggerPreSchedulingDisableCallback_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
1942 
// System-wide maximum channel count; disabled stub returns 0.
NvU32 kfifoGetMaxChannelsInSystem_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxChannelsInSystem(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxChannelsInSystem(pGpu, pKernelFifo) kfifoGetMaxChannelsInSystem_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

// System-wide maximum channel-group (TSG) count; disabled stub returns 0.
NvU32 kfifoGetMaxChannelGroupsInSystem_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetMaxChannelGroupsInSystem(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxChannelGroupsInSystem(pGpu, pKernelFifo) kfifoGetMaxChannelGroupsInSystem_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

// Fill the FIFO capability bytes into pKfifoCaps; bCapsInitialized tells the
// _IMPL whether the caps table was already initialized by a prior call.
void kfifoGetDeviceCaps_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU8 *pKfifoCaps, NvBool bCapsInitialized);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoGetDeviceCaps(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU8 *pKfifoCaps, NvBool bCapsInitialized) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetDeviceCaps(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized) kfifoGetDeviceCaps_IMPL(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized)
#endif //__nvoc_kernel_fifo_h_disabled

// Pushbuffer capability flags; disabled stub returns 0.
NvU32 kfifoReturnPushbufferCaps_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoReturnPushbufferCaps(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoReturnPushbufferCaps(pGpu, pKernelFifo) kfifoReturnPushbufferCaps_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
1985 
// Return runlist buffer allocation parameters (aperture, attributes, alloc
// flags) through the out-pointers. No disabled stub is generated for this
// wrapper because it takes no KernelFifo instance.
void kfifoRunlistGetBufAllocParams_IMPL(struct OBJGPU *pGpu, NV_ADDRESS_SPACE *pAperture, NvU32 *pAttr, NvU64 *pAllocFlags);

#define kfifoRunlistGetBufAllocParams(pGpu, pAperture, pAttr, pAllocFlags) kfifoRunlistGetBufAllocParams_IMPL(pGpu, pAperture, pAttr, pAllocFlags)
// Allocate runlist buffer memory descriptors (returned via ppMemDesc).
NV_STATUS kfifoRunlistAllocBuffers_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bSupportTsg, NV_ADDRESS_SPACE aperture, NvU32 runlistId, NvU32 attr, NvU64 allocFlags, NvU64 maxRunlistEntries, NvBool bHWRL, PMEMORY_DESCRIPTOR *ppMemDesc);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoRunlistAllocBuffers(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bSupportTsg, NV_ADDRESS_SPACE aperture, NvU32 runlistId, NvU32 attr, NvU64 allocFlags, NvU64 maxRunlistEntries, NvBool bHWRL, PMEMORY_DESCRIPTOR *ppMemDesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistAllocBuffers(pGpu, pKernelFifo, bSupportTsg, aperture, runlistId, attr, allocFlags, maxRunlistEntries, bHWRL, ppMemDesc) kfifoRunlistAllocBuffers_IMPL(pGpu, pKernelFifo, bSupportTsg, aperture, runlistId, attr, allocFlags, maxRunlistEntries, bHWRL, ppMemDesc)
#endif //__nvoc_kernel_fifo_h_disabled

// List the engines served by a runlist: pOutEngineIds receives the engine
// IDs and *pNumEngines the count (buffer sizing is the caller's concern —
// confirm capacity contract against the _IMPL).
NV_STATUS kfifoGetEngineListForRunlist_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, RM_ENGINE_TYPE *pOutEngineIds, NvU32 *pNumEngines);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetEngineListForRunlist(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, RM_ENGINE_TYPE *pOutEngineIds, NvU32 *pNumEngines) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEngineListForRunlist(pGpu, pKernelFifo, runlistId, pOutEngineIds, pNumEngines) kfifoGetEngineListForRunlist_IMPL(pGpu, pKernelFifo, runlistId, pOutEngineIds, pNumEngines)
#endif //__nvoc_kernel_fifo_h_disabled

// Channel class ID for this GPU; disabled stub returns 0.
NvU32 kfifoGetChannelClassId_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetChannelClassId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChannelClassId(pGpu, pKernelFifo) kfifoGetChannelClassId_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2021 
// Set a channel group's timeslice in microseconds; bSkipSubmit presumably
// defers pushing the change to hardware — confirm against the _IMPL.
NV_STATUS kfifoChannelGroupSetTimeslice_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelGroupSetTimeslice(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelGroupSetTimeslice(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimeslice_IMPL(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit)
#endif //__nvoc_kernel_fifo_h_disabled

// Return the (const) guest engine lookup table and write its size to
// *pTableSize. No disabled stub: takes no KernelFifo instance.
const FIFO_GUEST_ENGINE_TABLE *kfifoGetGuestEngineLookupTable_IMPL(NvU32 *pTableSize);

#define kfifoGetGuestEngineLookupTable(pTableSize) kfifoGetGuestEngineLookupTable_IMPL(pTableSize)
// Number of ESCHED-driven engines; disabled stub returns 0.
NvU32 kfifoGetNumEschedDrivenEngines_IMPL(struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetNumEschedDrivenEngines(struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetNumEschedDrivenEngines(pKernelFifo) kfifoGetNumEschedDrivenEngines_IMPL(pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2046 
2047 #undef PRIVATE_FIELD
2048 
2049 
// Idle a set of channels identified by the (clients, devices, channels)
// handle arrays of length numChannels, bounded by the given timeout.
// clients/devices/channels are NvP64 user-or-kernel pointers; bUserModeArgs
// indicates whether they originate from user space. flags/timeout semantics
// are defined by the implementation — confirm there before relying on them.
NV_STATUS RmIdleChannels(NvHandle hClient,
                         NvHandle hDevice,
                         NvHandle hChannel,
                         NvU32    numChannels,
                         NvP64    clients,
                         NvP64    devices,
                         NvP64    channels,
                         NvU32    flags,
                         NvU32    timeout,
                         NvBool   bUserModeArgs);
2060 
2061 #endif // _KERNELFIFO_H_
2062 
2063 #ifdef __cplusplus
2064 } // extern "C"
2065 #endif
2066 #endif // _G_KERNEL_FIFO_NVOC_H_
2067