1 #ifndef _G_KERNEL_FIFO_NVOC_H_
2 #define _G_KERNEL_FIFO_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_kernel_fifo_nvoc.h"
33 
34 #ifndef _KERNELFIFO_H_
35 #define _KERNELFIFO_H_
36 
37 /**************** Resource Manager Defines and Structures ******************\
38 *                                                                           *
* Module: KernelFifo.h                                                      *
*       Defines and structures used for the KernelFifo Object.              *
41 \***************************************************************************/
42 
43 #include "kernel/gpu/eng_state.h"
44 #include "kernel/gpu/gpu_timeout.h"
45 #include "kernel/gpu/gpu_halspec.h"
46 #include "kernel/gpu/fifo/channel_descendant.h"
47 #include "kernel/gpu/fifo/engine_info.h"
48 #include "kernel/gpu/gpu_engine_type.h"
49 
50 #include "containers/eheap_old.h"
51 #include "containers/map.h"
52 #include "utils/nvbitvector.h"
53 #include "gpu/mem_mgr/mem_desc.h"
54 #include "nvoc/utility.h"
55 
56 #include "ctrl/ctrl2080/ctrl2080gpu.h"  // NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS
57 #include "ctrl/ctrl2080/ctrl2080fifo.h" // NV2080_CTRL_FIFO_MEM_INFO
58 #include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_*
59 #include "ctrl/ctrl906f.h"
60 
61 #include "class/clc369.h" // MMU_FAULT_BUFFER
62 
63 struct KernelChannel;
64 
65 #ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__
66 #define __NVOC_CLASS_KernelChannel_TYPEDEF__
67 typedef struct KernelChannel KernelChannel;
68 #endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */
69 
70 #ifndef __nvoc_class_id_KernelChannel
71 #define __nvoc_class_id_KernelChannel 0x5d8d70
72 #endif /* __nvoc_class_id_KernelChannel */
73 
74 
75 struct KernelChannelGroup;
76 
77 #ifndef __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
78 #define __NVOC_CLASS_KernelChannelGroup_TYPEDEF__
79 typedef struct KernelChannelGroup KernelChannelGroup;
80 #endif /* __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ */
81 
82 #ifndef __nvoc_class_id_KernelChannelGroup
83 #define __nvoc_class_id_KernelChannelGroup 0xec6de1
84 #endif /* __nvoc_class_id_KernelChannelGroup */
85 
86 
87 struct KernelSchedMgr;
88 
89 #ifndef __NVOC_CLASS_KernelSchedMgr_TYPEDEF__
90 #define __NVOC_CLASS_KernelSchedMgr_TYPEDEF__
91 typedef struct KernelSchedMgr KernelSchedMgr;
92 #endif /* __NVOC_CLASS_KernelSchedMgr_TYPEDEF__ */
93 
94 #ifndef __nvoc_class_id_KernelSchedMgr
95 #define __nvoc_class_id_KernelSchedMgr 0xea0970
96 #endif /* __nvoc_class_id_KernelSchedMgr */
97 
98 
99 
100 struct HOST_VGPU_DEVICE;
101 
// Pre-Ampere runlist ID to pass to kfifoGetChidMgr
#define CHIDMGR_RUNLIST_ID_LEGACY  0

// Sentinel value meaning "no valid channel ID"
#define INVALID_CHID               0xFFFFFFFF

// Sentinel value meaning "no valid runlist ID"
#define INVALID_RUNLIST_ID         0xFFFFFFFFU

/*! We use 32-bit process ID for now */
#define KERNEL_PID (0xFFFFFFFFULL)

/*! cap at 64 for now, can extend when needed */
#define MAX_NUM_RUNLISTS           NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_ID
#define NUM_BUFFERS_PER_RUNLIST   (NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS)
// One validity bit per possible runlist-level CHID manager (see KernelFifo::chidMgrValid)
MAKE_BITVECTOR(CHID_MGR_VALID_BIT_VECTOR, MAX_NUM_RUNLISTS);
116 
117 //
118 // Matches GET_PUSHBUFFER_CAPABILITIES bit positions
119 //
120 #define VID_PB_ALLOWED                      0x1
121 #define PCI_PB_ALLOWED                      0x2
122 
123 #define PBDMA_FAULT_MAX_ID  (0x1 << DRF_SIZE_MW(NVC369_BUF_ENTRY_ENGINE_ID))
124 MAKE_BITVECTOR(PBDMA_ID_BITVECTOR, PBDMA_FAULT_MAX_ID);
125 
126 /*!
127  * USERD isolation domain
128  *
129  * USERD allocated by different domains should not be put into the same physical page.
130  * This provides the basic security isolation because a physical page is the unit of
131  * granularity at which OS can provide isolation between processes.
132  *
133  *    GUEST_USER:     USERD allocated by guest user process
134  *    GUEST_KERNEL:   USERD allocated by guest kernel process
135  *    GUEST_INSECURE: USERD allocated by guest/kernel process,
136  *                    INSECURE means there is no isolation between guest user and guest kernel
137  *    HOST_USER:      USERD allocated by host user process
138  *    HOST_KERNEL:    USERD allocated by host kernel process
139  *
140  * Please refer to RM_USERD_Isolation wiki for more details
141  */
typedef enum _def_fifo_isolation_domain
{
    GUEST_USER = 0x0,   // USERD allocated by a guest user process
    GUEST_KERNEL,       // USERD allocated by a guest kernel process
    GUEST_INSECURE,     // guest user or kernel; no isolation between the two
    HOST_USER,          // USERD allocated by a host user process
    HOST_KERNEL         // USERD allocated by a host kernel process
} FIFO_ISOLATION_DOMAIN;
150 
151 /*!
152  * USERD isolation ID
153  *
154  * In vGPU environment, sub process means the guest user/kernel process running within a single VM.
155  * It also refers to any sub process (or sub-sub process) within a parent process.
156  *
157  * Please refer to Resource Server for more details about sub process concept
158  */
typedef struct _def_fifo_isolation_id
{
    FIFO_ISOLATION_DOMAIN domain;       // Which guest/host, user/kernel domain owns the USERD
    NvU64                 processID;    // Owning process ID (KERNEL_PID for kernel allocations)
    NvU64                 subProcessID; // Sub-process ID within the parent process (vGPU case)
} FIFO_ISOLATIONID, *PFIFO_ISOLATIONID;
165 
/*! Used for calls to kfifoChannelGetFifoContextMemDesc */
typedef enum
{
    FIFO_CTX_RAMFC = 0,       // Select the channel's RAMFC context memory
    FIFO_CTX_INST_BLOCK = 1,  // Select the channel's instance block
} FIFO_CTX;
172 
/*! Fault details captured when an MMU exception is reported to FIFO. */
typedef struct _fifo_mmu_exception_data
{
    NvU32  addrLo;         // Faulting address, low 32 bits
    NvU32  addrHi;         // Faulting address, high 32 bits
    NvU32  faultType;
    NvU32  clientId;
    NvBool bGpc;           // NOTE(review): presumably set when the client is a GPC client — confirm
    NvU32  gpcId;          // GPC that faulted; likely only meaningful when bGpc is true
    NvU32  accessType;
    NvU32  faultEngineId;
    NvU64  faultedShaderProgramVA[NV906F_CTRL_MMU_FAULT_SHADER_TYPES];
} FIFO_MMU_EXCEPTION_DATA;
185 
/*! Used for calls to kchannelAllocHwID */
typedef enum
{
    CHANNEL_HW_ID_ALLOC_MODE_GROW_DOWN, // Search for a free ID from the top of the range (name-implied; see kchannelAllocHwID)
    CHANNEL_HW_ID_ALLOC_MODE_GROW_UP,   // Search for a free ID from the bottom of the range
    CHANNEL_HW_ID_ALLOC_MODE_PROVIDED,  // Caller supplies the exact HW ID to use
} CHANNEL_HW_ID_ALLOC_MODE;
193 
/*! Bitmap-based allocator state for hardware channel(-group) IDs. */
typedef struct _fifo_hw_id
{
    /*!
     * Bitfield of HW IDs. 1 = reserved, 0 = available.
     * A reserved ID may not be allocated but it can't be used for any
     * future allocations.
     */
    NvU32 *pHwIdInUse;

    /*!
     * Number of elements in pHwIdInUse
     */
    NvU32 hwIdInUseSz;
} FIFO_HW_ID;
208 
// Intrusive map of channel groups, used for pChanGrpTree below
DECLARE_INTRUSIVE_MAP(KernelChannelGroupMap);

/*!
 * Per-runlist channel-ID manager: owns the ChID heaps, channel count and
 * channel-group bookkeeping for one runlist.
 */
typedef struct _chid_mgr
{
    /*!
     * Runlist managed by this CHID_MGR.
     */
    NvU32 runlistId;

    /*!
     * Heap to manage pFifoData for all channels.
     */
    OBJEHEAP *pFifoDataHeap;

    /*!
     * Global ChID heap - manages channel IDs and isolation IDs. In non-SRIOV
     * systems, allocations/frees in this heap mirror those in pFifoDataHeap.
     * When SRIOV is enabled, we reserve/free channel IDs for the guest in
     * chunks from this heap when the VM starts/shuts down. ChID allocations
     * during channel construction from the guest ChID space are from the
     * virtual ChID heap for that guest.
     */
    OBJEHEAP *pGlobalChIDHeap;

    /*!
     * Until FIFO code for SR-IOV moves to guest RM, this virtual ChID heap
     * manages channel IDs allocated to a guest.
     */
    OBJEHEAP **ppVirtualChIDHeap;

    /*!
     * Number of channels managed by this CHID_MGR
     */
    NvU32 numChannels;

    /*!
     * HW ID allocator state for channel groups on this runlist.
     */
    FIFO_HW_ID  channelGrpMgr;

    /*!
     * Channel group pointers
     */
    KernelChannelGroupMap *pChanGrpTree;

} CHID_MGR;
252 
253 /*! Typedef for the @ref channel_iterator structure */
254 typedef struct channel_iterator CHANNEL_ITERATOR;
255 typedef struct channel_iterator *PCHANNEL_ITERATOR;
256 
257 /*!
258  * Generic Linked-list of Channel pointers to be used where ever multiple channels
259  * are managed.
260  * TODO: Remove as part of Jira CORERM-2658
261  */
typedef struct _channel_node
{
    struct KernelChannel *pKernelChannel; // Channel carried by this list node
    struct _channel_node *pNext;          // Next node in the list
} CHANNEL_NODE, *PCHANNEL_NODE;
267 
268 /*!
269  * This structure represents an iterator for all channels.
270  * It is created by function @ref kfifoGetChannelIterator.
271  */
struct channel_iterator
{
    NvU32 numChannels;          // Channel count for the iteration scope
    NvU32 numRunlists;          // Number of runlists to walk
    NvU32 physicalChannelID;    // Cursor: current physical channel ID
    NvU32 runlistId;            // Cursor: current runlist
    EMEMBLOCK *pFifoDataBlock;  // Cursor: current block in the runlist's pFifoDataHeap
    CHANNEL_NODE channelNode;   // Scratch node used while walking channel lists
};
281 
// Maximum number of pbdma IDs for a given engine
#define FIFO_ENGINE_MAX_NUM_PBDMA       2

// Maximum size (including null terminator) for an engine name
#define FIFO_ENGINE_NAME_MAX_SIZE       16

/*! One entry of the FIFO engine-info table (see ENGINE_INFO::engineInfoList). */
typedef struct _def_fifo_engine_list
{
    NvU32 engineData[ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE]; // Indexed by ENGINE_INFO_TYPE_* values
    NvU32 pbdmaIds[FIFO_ENGINE_MAX_NUM_PBDMA];                 // PBDMA IDs serving this engine
    NvU32 pbdmaFaultIds[FIFO_ENGINE_MAX_NUM_PBDMA];            // Fault IDs for those PBDMAs
    NvU32 numPbdmas;                                           // Valid entries in the two arrays above
    char engineName[FIFO_ENGINE_NAME_MAX_SIZE];                // NUL-terminated engine name
} FIFO_ENGINE_LIST;
296 
/*! Guest-visible engine table entry. */
typedef struct
{
    NvU32 nv2080EngineType; // NV2080 engine type value exposed to the guest
    NvU32 mcIdx;            // Engine index — presumably an MC_ENGINE_IDX_* value; confirm at use sites
} FIFO_GUEST_ENGINE_TABLE;
302 
/*! Aggregated FIFO engine/runlist topology information. */
typedef struct _def_engine_info
{
    NvU32 maxNumPbdmas;                      // Upper bound on PBDMA IDs — confirm exact semantics at use sites
    PBDMA_ID_BITVECTOR  validEngineIdsForPbdmas; // Bit set per valid PBDMA fault/engine ID
    //
    // The highest runlist ID. Valid runlist IDs are < maxNumRunlists
    // However, the entire [0, maxNumRunlists) range is not valid. There are
    // missing runlist IDs in this range.
    //
    NvU32 maxNumRunlists;
    //
    // Multiple engines may have the same runlist ID. This is the total number
    // of engines with a runlist which is equal to the number of Esched driven
    // engines and does not include the SW engine.
    //
    NvU32 numRunlists;
    NvU32 engineInfoListSize;                // Number of entries in engineInfoList
    FIFO_ENGINE_LIST *engineInfoList;        // Engine table, one entry per engine
} ENGINE_INFO;
322 
// Fully qualified instance block address
typedef struct _inst_block_desc
{
    NvU64   address;        // Physical address or IOVA (unshifted)
    NvU32   aperture;       // INST_BLOCK_APERTURE (defines further below in this header)
    NvU32   gfid;           // Valid in PF when SR-IOV is enabled
} INST_BLOCK_DESC;
330 
/*! Head/tail pair for a linked list of CHANNEL_NODEs. */
typedef struct _channel_list
{
    CHANNEL_NODE *pHead; // First node, or NULL when the list is empty
    CHANNEL_NODE *pTail; // Last node
} CHANNEL_LIST, *PCHANNEL_LIST;
336 
/*! Bookkeeping for the preallocated USERD region and its BAR1 mapping. */
typedef struct _def_preallocated_userd_info
{
    NvU32      userdAperture;            // default aperture for USERD
    NvU32      userdAttr;                // default attr for USERD
    MEMORY_DESCRIPTOR *userdPhysDesc[NV_MAX_SUBDEVICES];    // <a> base phys addr of contiguous USERD
    NvU64      userdBar1MapStartOffset;  // <b> base offset of <a>'s BAR1 map
    NvU32      userdBar1MapSize;         // <c> sizeof <b>'s map
    NvU8      *userdBar1CpuPtr;          // <d> cpu map of <b>
    NvU32      userdBar1RefMask;         // mask of GPUs referencing userD
} PREALLOCATED_USERD_INFO;
347 
348 
// Scheduling enable/disable handlers
// Callback signature invoked around scheduling enable/disable transitions.
typedef NV_STATUS (*PFifoSchedulingHandler)(OBJGPU *pGpu, void *pData);
/*! One registered scheduling callback plus its user data. */
typedef struct FifoSchedulingHandlerEntry
{
    PFifoSchedulingHandler pCallback;  // Function to invoke
    void *pCallbackParam;              // Opaque argument forwarded to pCallback
    NvBool bHandled;                   // Per-pass bookkeeping flag — confirm exact meaning at use sites
} FifoSchedulingHandlerEntry;

MAKE_LIST(FifoSchedulingHandlerEntryList, FifoSchedulingHandlerEntry);
359 
360 //
361 // This define indicates legacy pdb in instance block.
362 //
363 #define FIFO_PDB_IDX_BASE             (0xFFFFFFFF)
364 
365 //
366 // Aperture defines must match NV_MMU_PTE_APERTURE HW defines
367 // We do not support instance memory in peer (1).
368 //
369 #define INST_BLOCK_APERTURE_VIDEO_MEMORY                     0x00000000
370 #define INST_BLOCK_APERTURE_RESERVED                         0x00000001
371 #define INST_BLOCK_APERTURE_SYSTEM_COHERENT_MEMORY           0x00000002
372 #define INST_BLOCK_APERTURE_SYSTEM_NON_COHERENT_MEMORY       0x00000003
373 
374 // Macro to verify HW and class defines are compatible
375 #define VERIFY_INST_BLOCK_APERTURE(vid, coh, ncoh)                  \
376     ct_assert((vid) == INST_BLOCK_APERTURE_VIDEO_MEMORY);           \
377     ct_assert((coh) == INST_BLOCK_APERTURE_SYSTEM_COHERENT_MEMORY); \
378     ct_assert((ncoh) == INST_BLOCK_APERTURE_SYSTEM_NON_COHERENT_MEMORY)
379 
380 //
381 // The actual GPU object definition
382 //
383 
384 // Private field names are wrapped in PRIVATE_FIELD, which does nothing for
385 // the matching C source file, but causes diagnostics to be issued if another
386 // source file references the field.
387 #ifdef NVOC_KERNEL_FIFO_H_PRIVATE_ACCESS_ALLOWED
388 #define PRIVATE_FIELD(x) x
389 #else
390 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
391 #endif
392 
/*!
 * KernelFifo — kernel-RM engine object that owns channel, runlist and USERD
 * bookkeeping. NVOC-generated class layout: RTTI and base-class pointers
 * first, then the virtual method table (__kfifo*__ slots dispatched via the
 * kfifo*_DISPATCH macros below), then per-instance data members.
 * Field order is part of the generated ABI — do not reorder by hand.
 */
struct KernelFifo {
    // --- NVOC run-time type info and base-class cast cache ---
    const struct NVOC_RTTI *__nvoc_rtti;
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct KernelFifo *__nvoc_pbase_KernelFifo;
    // --- Virtual method table (see kfifo*_DISPATCH macros) ---
    NV_STATUS (*__kfifoConstructEngine__)(struct OBJGPU *, struct KernelFifo *, ENGDESCRIPTOR);
    NV_STATUS (*__kfifoStateLoad__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStateUnload__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStateInitLocked__)(struct OBJGPU *, struct KernelFifo *);
    void (*__kfifoStateDestroy__)(struct OBJGPU *, struct KernelFifo *);
    NV_STATUS (*__kfifoStatePostLoad__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStatePreUnload__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoCheckChannelAllocAddrSpaces__)(struct KernelFifo *, NV_ADDRESS_SPACE, NV_ADDRESS_SPACE, NV_ADDRESS_SPACE);
    NV_STATUS (*__kfifoConstructUsermodeMemdescs__)(struct OBJGPU *, struct KernelFifo *);
    NvU32 (*__kfifoChannelGroupGetLocalMaxSubcontext__)(struct OBJGPU *, struct KernelFifo *, struct KernelChannelGroup *, NvBool);
    void (*__kfifoGetCtxBufferMapFlags__)(struct OBJGPU *, struct KernelFifo *, NvU32, NvU32 *);
    NV_STATUS (*__kfifoEngineInfoXlate__)(struct OBJGPU *, struct KernelFifo *, ENGINE_INFO_TYPE, NvU32, ENGINE_INFO_TYPE, NvU32 *);
    NV_STATUS (*__kfifoGenerateWorkSubmitToken__)(struct OBJGPU *, struct KernelFifo *, struct KernelChannel *, NvU32 *, NvBool);
    NV_STATUS (*__kfifoUpdateUsermodeDoorbell__)(struct OBJGPU *, struct KernelFifo *, NvU32, NvU32);
    NvU32 (*__kfifoRunlistGetBaseShift__)(struct KernelFifo *);
    NvU64 (*__kfifoGetUserdBar1MapStartOffset__)(struct OBJGPU *, struct KernelFifo *);
    NvU32 (*__kfifoGetMaxCeChannelGroups__)(struct OBJGPU *, struct KernelFifo *);
    NV_STATUS (*__kfifoGetVChIdForSChId__)(struct OBJGPU *, struct KernelFifo *, NvU32, NvU32, NvU32, NvU32 *);
    NV_STATUS (*__kfifoProgramChIdTable__)(struct OBJGPU *, struct KernelFifo *, CHID_MGR *, NvU32, NvU32, NvU32, struct Device *, NvU32, FIFO_ENGINE_LIST *);
    NV_STATUS (*__kfifoRecoverAllChannels__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    void (*__kfifoStartChannelHalt__)(struct OBJGPU *, struct KernelFifo *, struct KernelChannel *);
    void (*__kfifoCompleteChannelHalt__)(struct OBJGPU *, struct KernelFifo *, struct KernelChannel *, RMTIMEOUT *);
    NV_STATUS (*__kfifoGetEnginePbdmaFaultIds__)(struct OBJGPU *, struct KernelFifo *, ENGINE_INFO_TYPE, NvU32, NvU32 **, NvU32 *);
    NvU32 (*__kfifoGetNumPBDMAs__)(struct OBJGPU *, struct KernelFifo *);
    const char *(*__kfifoPrintPbdmaId__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    const char *(*__kfifoPrintInternalEngine__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    const char *(*__kfifoPrintInternalEngineCheck__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    const char *(*__kfifoGetClientIdStringCommon__)(struct OBJGPU *, struct KernelFifo *, FIFO_MMU_EXCEPTION_DATA *);
    const char *(*__kfifoGetClientIdString__)(struct OBJGPU *, struct KernelFifo *, FIFO_MMU_EXCEPTION_DATA *);
    const char *(*__kfifoGetClientIdStringCheck__)(struct OBJGPU *, struct KernelFifo *, NvU32);
    // --- OBJENGSTATE virtual overrides ---
    NV_STATUS (*__kfifoStatePreLoad__)(POBJGPU, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStatePostUnload__)(POBJGPU, struct KernelFifo *, NvU32);
    NV_STATUS (*__kfifoStateInitUnlocked__)(POBJGPU, struct KernelFifo *);
    void (*__kfifoInitMissing__)(POBJGPU, struct KernelFifo *);
    NV_STATUS (*__kfifoStatePreInitLocked__)(POBJGPU, struct KernelFifo *);
    NV_STATUS (*__kfifoStatePreInitUnlocked__)(POBJGPU, struct KernelFifo *);
    NvBool (*__kfifoIsPresent__)(POBJGPU, struct KernelFifo *);
    // --- Instance data ---
    struct KernelSchedMgr *pKernelSchedMgr;
    CHID_MGR **ppChidMgr;                          // Per-runlist channel-ID managers
    NvU32 numChidMgrs;                             // Number of entries in ppChidMgr
    union CHID_MGR_VALID_BIT_VECTOR chidMgrValid;  // Which ppChidMgr slots are valid
    ENGINE_INFO engineInfo;                        // Engine/runlist topology
    PREALLOCATED_USERD_INFO userdInfo;             // Preallocated USERD bookkeeping
    NvU32 maxSubcontextCount;
    FifoSchedulingHandlerEntryList postSchedulingEnableHandlerList;
    FifoSchedulingHandlerEntryList preSchedulingDisableHandlerList;
    NvU32 maxSec2SecureChannels;
    NvU32 maxCeSecureChannels;
    // Feature/WAR flags (named for intent; consult CPU-RM sources for exact semantics)
    NvBool bUseChidHeap;
    NvBool bUsePerRunlistChram;
    NvBool bDisableChidIsolation;
    NvBool bIsPerRunlistChramSupportedInHw;
    NvBool bHostEngineExpansion;
    NvBool bHostHasLbOverflow;
    NvBool bSubcontextSupported;
    NvBool bMixedInstmemApertureDefAllowed;
    NvBool bIsZombieSubctxWarEnabled;
    NvBool bIsSchedSupported;
    NvBool bGuestGenenratesWorkSubmitToken;
    NvBool bWddmInterleavingPolicyEnabled;
    NvBool bUserdInSystemMemory;
    NvBool bUserdMapDmaSupported;
    NvBool bPerRunlistChramOverride;
    NvBool bNumChannelsOverride;                   // When set, numChannelsOverride applies
    NvU32 numChannelsOverride;
    NvBool bInstProtectedMem;
    NvU32 InstAttr;
    const NV_ADDRESS_SPACE *pInstAllocList;        // Preferred apertures for instance memory
    MEMORY_DESCRIPTOR *pDummyPageMemDesc;
    MEMORY_DESCRIPTOR *pBar1VF;
    MEMORY_DESCRIPTOR *pBar1PrivVF;
    MEMORY_DESCRIPTOR *pRegVF;
    CTX_BUF_POOL_INFO *pRunlistBufPool[64];        // Indexed by runlist ID (cap matches MAX_NUM_RUNLISTS comment above)
    MEMORY_DESCRIPTOR ***pppRunlistBufMemDesc;     // [runlist][buffer] runlist buffer memdescs
};
474 
475 #ifndef __NVOC_CLASS_KernelFifo_TYPEDEF__
476 #define __NVOC_CLASS_KernelFifo_TYPEDEF__
477 typedef struct KernelFifo KernelFifo;
478 #endif /* __NVOC_CLASS_KernelFifo_TYPEDEF__ */
479 
480 #ifndef __nvoc_class_id_KernelFifo
481 #define __nvoc_class_id_KernelFifo 0xf3e155
482 #endif /* __nvoc_class_id_KernelFifo */
483 
484 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFifo;
485 
// Static downcast: returns the KernelFifo base pointer cached by NVOC.
#define __staticCast_KernelFifo(pThis) \
    ((pThis)->__nvoc_pbase_KernelFifo)

// Dynamic cast: NULL when the class is compiled out, RTTI-checked otherwise.
#ifdef __nvoc_kernel_fifo_h_disabled
#define __dynamicCast_KernelFifo(pThis) ((KernelFifo*)NULL)
#else //__nvoc_kernel_fifo_h_disabled
#define __dynamicCast_KernelFifo(pThis) \
    ((KernelFifo*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelFifo)))
#endif //__nvoc_kernel_fifo_h_disabled

// PDB property indirection: IS_MISSING lives on the OBJENGSTATE base class.
#define PDB_PROP_KFIFO_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_KFIFO_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING

NV_STATUS __nvoc_objCreateDynamic_KernelFifo(KernelFifo**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelFifo(KernelFifo**, Dynamic*, NvU32);
// Convenience wrapper: adapts pParent to Dynamic before object creation.
#define __objCreate_KernelFifo(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelFifo((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
504 
// Dispatch wrappers: each kfifo* call resolves through the object's NVOC
// vtable slot; *_HAL variants alias the same dispatch.
#define kfifoConstructEngine(pGpu, pKernelFifo, engDesc) kfifoConstructEngine_DISPATCH(pGpu, pKernelFifo, engDesc)
#define kfifoStateLoad(pGpu, pKernelFifo, flags) kfifoStateLoad_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStateLoad_HAL(pGpu, pKernelFifo, flags) kfifoStateLoad_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStateUnload(pGpu, pKernelFifo, flags) kfifoStateUnload_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStateUnload_HAL(pGpu, pKernelFifo, flags) kfifoStateUnload_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStateInitLocked(pGpu, pKernelFifo) kfifoStateInitLocked_DISPATCH(pGpu, pKernelFifo)
#define kfifoStateDestroy(pGpu, pKernelFifo) kfifoStateDestroy_DISPATCH(pGpu, pKernelFifo)
#define kfifoStatePostLoad(pGpu, pKernelFifo, flags) kfifoStatePostLoad_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStatePostLoad_HAL(pGpu, pKernelFifo, flags) kfifoStatePostLoad_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStatePreUnload(pGpu, pKernelFifo, flags) kfifoStatePreUnload_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoStatePreUnload_HAL(pGpu, pKernelFifo, flags) kfifoStatePreUnload_DISPATCH(pGpu, pKernelFifo, flags)
#define kfifoCheckChannelAllocAddrSpaces(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace) kfifoCheckChannelAllocAddrSpaces_DISPATCH(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace)
#define kfifoCheckChannelAllocAddrSpaces_HAL(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace) kfifoCheckChannelAllocAddrSpaces_DISPATCH(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace)
#define kfifoConstructUsermodeMemdescs(pGpu, pKernelFifo) kfifoConstructUsermodeMemdescs_DISPATCH(pGpu, pKernelFifo)
#define kfifoConstructUsermodeMemdescs_HAL(pGpu, pKernelFifo) kfifoConstructUsermodeMemdescs_DISPATCH(pGpu, pKernelFifo)
#define kfifoChannelGroupGetLocalMaxSubcontext(pGpu, pKernelFifo, arg0, arg1) kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(pGpu, pKernelFifo, arg0, arg1)
#define kfifoChannelGroupGetLocalMaxSubcontext_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(pGpu, pKernelFifo, arg0, arg1)
#define kfifoGetCtxBufferMapFlags(pGpu, pKernelFifo, engine, pFlags) kfifoGetCtxBufferMapFlags_DISPATCH(pGpu, pKernelFifo, engine, pFlags)
#define kfifoGetCtxBufferMapFlags_HAL(pGpu, pKernelFifo, engine, pFlags) kfifoGetCtxBufferMapFlags_DISPATCH(pGpu, pKernelFifo, engine, pFlags)
#define kfifoEngineInfoXlate(pGpu, pKernelFifo, inType, inVal, outType, pOutVal) kfifoEngineInfoXlate_DISPATCH(pGpu, pKernelFifo, inType, inVal, outType, pOutVal)
#define kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, inType, inVal, outType, pOutVal) kfifoEngineInfoXlate_DISPATCH(pGpu, pKernelFifo, inType, inVal, outType, pOutVal)
#define kfifoGenerateWorkSubmitToken(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost) kfifoGenerateWorkSubmitToken_DISPATCH(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost)
#define kfifoGenerateWorkSubmitToken_HAL(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost) kfifoGenerateWorkSubmitToken_DISPATCH(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost)
// Dispatch wrapper for usermode doorbell updates.
// Fixed misspelled macro parameter "runlisId" -> "runlistId"; macro
// parameters are local to the definition, so callers are unaffected.
#define kfifoUpdateUsermodeDoorbell(arg0, arg1, workSubmitToken, runlistId) kfifoUpdateUsermodeDoorbell_DISPATCH(arg0, arg1, workSubmitToken, runlistId)
#define kfifoUpdateUsermodeDoorbell_HAL(arg0, arg1, workSubmitToken, runlistId) kfifoUpdateUsermodeDoorbell_DISPATCH(arg0, arg1, workSubmitToken, runlistId)
// Dispatch wrappers (continued), ending with the OBJENGSTATE overrides
// (kfifoStatePreLoad .. kfifoIsPresent) which take POBJGPU.
#define kfifoRunlistGetBaseShift(pKernelFifo) kfifoRunlistGetBaseShift_DISPATCH(pKernelFifo)
#define kfifoRunlistGetBaseShift_HAL(pKernelFifo) kfifoRunlistGetBaseShift_DISPATCH(pKernelFifo)
#define kfifoGetUserdBar1MapStartOffset(pGpu, pKernelFifo) kfifoGetUserdBar1MapStartOffset_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetUserdBar1MapStartOffset_HAL(pGpu, pKernelFifo) kfifoGetUserdBar1MapStartOffset_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetMaxCeChannelGroups(pGpu, pKernelFifo) kfifoGetMaxCeChannelGroups_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetMaxCeChannelGroups_HAL(pGpu, pKernelFifo) kfifoGetMaxCeChannelGroups_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetVChIdForSChId(pGpu, pKernelFifo, chId, gfid, engineId, pVChid) kfifoGetVChIdForSChId_DISPATCH(pGpu, pKernelFifo, chId, gfid, engineId, pVChid)
#define kfifoGetVChIdForSChId_HAL(pGpu, pKernelFifo, chId, gfid, engineId, pVChid) kfifoGetVChIdForSChId_DISPATCH(pGpu, pKernelFifo, chId, gfid, engineId, pVChid)
#define kfifoProgramChIdTable(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pMigDevice, engineFifoListNumEntries, pEngineFifoList) kfifoProgramChIdTable_DISPATCH(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pMigDevice, engineFifoListNumEntries, pEngineFifoList)
#define kfifoProgramChIdTable_HAL(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pMigDevice, engineFifoListNumEntries, pEngineFifoList) kfifoProgramChIdTable_DISPATCH(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pMigDevice, engineFifoListNumEntries, pEngineFifoList)
#define kfifoRecoverAllChannels(pGpu, pKernelFifo, gfid) kfifoRecoverAllChannels_DISPATCH(pGpu, pKernelFifo, gfid)
#define kfifoRecoverAllChannels_HAL(pGpu, pKernelFifo, gfid) kfifoRecoverAllChannels_DISPATCH(pGpu, pKernelFifo, gfid)
#define kfifoStartChannelHalt(pGpu, pKernelFifo, pKernelChannel) kfifoStartChannelHalt_DISPATCH(pGpu, pKernelFifo, pKernelChannel)
#define kfifoStartChannelHalt_HAL(pGpu, pKernelFifo, pKernelChannel) kfifoStartChannelHalt_DISPATCH(pGpu, pKernelFifo, pKernelChannel)
#define kfifoCompleteChannelHalt(pGpu, pKernelFifo, pKernelChannel, pTimeout) kfifoCompleteChannelHalt_DISPATCH(pGpu, pKernelFifo, pKernelChannel, pTimeout)
#define kfifoCompleteChannelHalt_HAL(pGpu, pKernelFifo, pKernelChannel, pTimeout) kfifoCompleteChannelHalt_DISPATCH(pGpu, pKernelFifo, pKernelChannel, pTimeout)
#define kfifoGetEnginePbdmaFaultIds(pGpu, pKernelFifo, arg0, arg1, arg2, arg3) kfifoGetEnginePbdmaFaultIds_DISPATCH(pGpu, pKernelFifo, arg0, arg1, arg2, arg3)
#define kfifoGetEnginePbdmaFaultIds_HAL(pGpu, pKernelFifo, arg0, arg1, arg2, arg3) kfifoGetEnginePbdmaFaultIds_DISPATCH(pGpu, pKernelFifo, arg0, arg1, arg2, arg3)
#define kfifoGetNumPBDMAs(pGpu, pKernelFifo) kfifoGetNumPBDMAs_DISPATCH(pGpu, pKernelFifo)
#define kfifoGetNumPBDMAs_HAL(pGpu, pKernelFifo) kfifoGetNumPBDMAs_DISPATCH(pGpu, pKernelFifo)
#define kfifoPrintPbdmaId(pGpu, pKernelFifo, pbdmaId) kfifoPrintPbdmaId_DISPATCH(pGpu, pKernelFifo, pbdmaId)
#define kfifoPrintPbdmaId_HAL(pGpu, pKernelFifo, pbdmaId) kfifoPrintPbdmaId_DISPATCH(pGpu, pKernelFifo, pbdmaId)
#define kfifoPrintInternalEngine(pGpu, pKernelFifo, arg0) kfifoPrintInternalEngine_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoPrintInternalEngine_HAL(pGpu, pKernelFifo, arg0) kfifoPrintInternalEngine_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoPrintInternalEngineCheck(pGpu, pKernelFifo, arg0) kfifoPrintInternalEngineCheck_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoPrintInternalEngineCheck_HAL(pGpu, pKernelFifo, arg0) kfifoPrintInternalEngineCheck_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdStringCommon(pGpu, pKernelFifo, arg0) kfifoGetClientIdStringCommon_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdStringCommon_HAL(pGpu, pKernelFifo, arg0) kfifoGetClientIdStringCommon_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdString(pGpu, pKernelFifo, arg0) kfifoGetClientIdString_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdString_HAL(pGpu, pKernelFifo, arg0) kfifoGetClientIdString_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdStringCheck(pGpu, pKernelFifo, arg0) kfifoGetClientIdStringCheck_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoGetClientIdStringCheck_HAL(pGpu, pKernelFifo, arg0) kfifoGetClientIdStringCheck_DISPATCH(pGpu, pKernelFifo, arg0)
#define kfifoStatePreLoad(pGpu, pEngstate, arg0) kfifoStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kfifoStatePostUnload(pGpu, pEngstate, arg0) kfifoStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kfifoStateInitUnlocked(pGpu, pEngstate) kfifoStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kfifoInitMissing(pGpu, pEngstate) kfifoInitMissing_DISPATCH(pGpu, pEngstate)
#define kfifoStatePreInitLocked(pGpu, pEngstate) kfifoStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define kfifoStatePreInitUnlocked(pGpu, pEngstate) kfifoStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kfifoIsPresent(pGpu, pEngstate) kfifoIsPresent_DISPATCH(pGpu, pEngstate)
569 NV_STATUS kfifoConstructHal_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
570 
571 
#ifdef __nvoc_kernel_fifo_h_disabled
/*!
 * Stub used when the KernelFifo engine is compiled out:
 * asserts at precompile level and reports NV_ERR_NOT_SUPPORTED.
 * (Removed extraction artifact that prepended a duplicated signature
 * before "static inline", which broke compilation.)
 */
static inline NV_STATUS kfifoConstructHal(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoConstructHal(pGpu, pKernelFifo) kfifoConstructHal_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoConstructHal_HAL(pGpu, pKernelFifo) kfifoConstructHal(pGpu, pKernelFifo)
582 
kfifoChannelGroupSetTimesliceSched_56cd7a(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct KernelChannelGroup * pKernelChannelGroup,NvU64 timesliceUs,NvBool bSkipSubmit)583 static inline NV_STATUS kfifoChannelGroupSetTimesliceSched_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) {
584     return NV_OK;
585 }
586 
587 
588 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoChannelGroupSetTimesliceSched(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct KernelChannelGroup * pKernelChannelGroup,NvU64 timesliceUs,NvBool bSkipSubmit)589 static inline NV_STATUS kfifoChannelGroupSetTimesliceSched(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) {
590     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
591     return NV_ERR_NOT_SUPPORTED;
592 }
593 #else //__nvoc_kernel_fifo_h_disabled
594 #define kfifoChannelGroupSetTimesliceSched(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimesliceSched_56cd7a(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit)
595 #endif //__nvoc_kernel_fifo_h_disabled
596 
597 #define kfifoChannelGroupSetTimesliceSched_HAL(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimesliceSched(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit)
598 
599 NvU32 kfifoRunlistQueryNumChannels_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId);
600 
601 
602 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoRunlistQueryNumChannels(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 runlistId)603 static inline NvU32 kfifoRunlistQueryNumChannels(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) {
604     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
605     return 0;
606 }
607 #else //__nvoc_kernel_fifo_h_disabled
608 #define kfifoRunlistQueryNumChannels(pGpu, pKernelFifo, runlistId) kfifoRunlistQueryNumChannels_KERNEL(pGpu, pKernelFifo, runlistId)
609 #endif //__nvoc_kernel_fifo_h_disabled
610 
611 #define kfifoRunlistQueryNumChannels_HAL(pGpu, pKernelFifo, runlistId) kfifoRunlistQueryNumChannels(pGpu, pKernelFifo, runlistId)
612 
613 NV_STATUS kfifoIdleChannelsPerDevice_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvHandle *phClients, NvHandle *phDevices, NvHandle *phChannels, NvU32 numChannels, NvU32 flags, NvU32 timeout);
614 
615 
616 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoIdleChannelsPerDevice(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvHandle * phClients,NvHandle * phDevices,NvHandle * phChannels,NvU32 numChannels,NvU32 flags,NvU32 timeout)617 static inline NV_STATUS kfifoIdleChannelsPerDevice(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvHandle *phClients, NvHandle *phDevices, NvHandle *phChannels, NvU32 numChannels, NvU32 flags, NvU32 timeout) {
618     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
619     return NV_ERR_NOT_SUPPORTED;
620 }
621 #else //__nvoc_kernel_fifo_h_disabled
622 #define kfifoIdleChannelsPerDevice(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout) kfifoIdleChannelsPerDevice_KERNEL(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout)
623 #endif //__nvoc_kernel_fifo_h_disabled
624 
625 #define kfifoIdleChannelsPerDevice_HAL(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout) kfifoIdleChannelsPerDevice(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout)
626 
627 NvU64 kfifoChannelGroupGetDefaultTimeslice_GV100(struct KernelFifo *pKernelFifo);
628 
629 
630 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoChannelGroupGetDefaultTimeslice(struct KernelFifo * pKernelFifo)631 static inline NvU64 kfifoChannelGroupGetDefaultTimeslice(struct KernelFifo *pKernelFifo) {
632     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
633     return 0;
634 }
635 #else //__nvoc_kernel_fifo_h_disabled
636 #define kfifoChannelGroupGetDefaultTimeslice(pKernelFifo) kfifoChannelGroupGetDefaultTimeslice_GV100(pKernelFifo)
637 #endif //__nvoc_kernel_fifo_h_disabled
638 
639 #define kfifoChannelGroupGetDefaultTimeslice_HAL(pKernelFifo) kfifoChannelGroupGetDefaultTimeslice(pKernelFifo)
640 
kfifoRunlistGetMinTimeSlice_4a4dee(struct KernelFifo * pKernelFifo)641 static inline NvU64 kfifoRunlistGetMinTimeSlice_4a4dee(struct KernelFifo *pKernelFifo) {
642     return 0;
643 }
644 
645 
646 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoRunlistGetMinTimeSlice(struct KernelFifo * pKernelFifo)647 static inline NvU64 kfifoRunlistGetMinTimeSlice(struct KernelFifo *pKernelFifo) {
648     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
649     return 0;
650 }
651 #else //__nvoc_kernel_fifo_h_disabled
652 #define kfifoRunlistGetMinTimeSlice(pKernelFifo) kfifoRunlistGetMinTimeSlice_4a4dee(pKernelFifo)
653 #endif //__nvoc_kernel_fifo_h_disabled
654 
655 #define kfifoRunlistGetMinTimeSlice_HAL(pKernelFifo) kfifoRunlistGetMinTimeSlice(pKernelFifo)
656 
657 NV_STATUS kfifoGetInstMemInfo_GM107(struct KernelFifo *pKernelFifo, NvU64 *pSize, NvU64 *pAlignment, NvBool *pbInstProtectedMem, NvU32 *pInstAttr, const NV_ADDRESS_SPACE **ppInstAllocList);
658 
659 
660 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetInstMemInfo(struct KernelFifo * pKernelFifo,NvU64 * pSize,NvU64 * pAlignment,NvBool * pbInstProtectedMem,NvU32 * pInstAttr,const NV_ADDRESS_SPACE ** ppInstAllocList)661 static inline NV_STATUS kfifoGetInstMemInfo(struct KernelFifo *pKernelFifo, NvU64 *pSize, NvU64 *pAlignment, NvBool *pbInstProtectedMem, NvU32 *pInstAttr, const NV_ADDRESS_SPACE **ppInstAllocList) {
662     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
663     return NV_ERR_NOT_SUPPORTED;
664 }
665 #else //__nvoc_kernel_fifo_h_disabled
666 #define kfifoGetInstMemInfo(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList) kfifoGetInstMemInfo_GM107(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList)
667 #endif //__nvoc_kernel_fifo_h_disabled
668 
669 #define kfifoGetInstMemInfo_HAL(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList) kfifoGetInstMemInfo(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList)
670 
671 void kfifoGetInstBlkSizeAlign_GM107(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pShift);
672 
673 
674 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetInstBlkSizeAlign(struct KernelFifo * pKernelFifo,NvU32 * pSize,NvU32 * pShift)675 static inline void kfifoGetInstBlkSizeAlign(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pShift) {
676     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
677 }
678 #else //__nvoc_kernel_fifo_h_disabled
679 #define kfifoGetInstBlkSizeAlign(pKernelFifo, pSize, pShift) kfifoGetInstBlkSizeAlign_GM107(pKernelFifo, pSize, pShift)
680 #endif //__nvoc_kernel_fifo_h_disabled
681 
682 #define kfifoGetInstBlkSizeAlign_HAL(pKernelFifo, pSize, pShift) kfifoGetInstBlkSizeAlign(pKernelFifo, pSize, pShift)
683 
684 NvU32 kfifoGetDefaultRunlist_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType);
685 
686 
687 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetDefaultRunlist(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,RM_ENGINE_TYPE rmEngineType)688 static inline NvU32 kfifoGetDefaultRunlist(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType) {
689     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
690     return 0;
691 }
692 #else //__nvoc_kernel_fifo_h_disabled
693 #define kfifoGetDefaultRunlist(pGpu, pKernelFifo, rmEngineType) kfifoGetDefaultRunlist_GM107(pGpu, pKernelFifo, rmEngineType)
694 #endif //__nvoc_kernel_fifo_h_disabled
695 
696 #define kfifoGetDefaultRunlist_HAL(pGpu, pKernelFifo, rmEngineType) kfifoGetDefaultRunlist(pGpu, pKernelFifo, rmEngineType)
697 
698 NvBool kfifoValidateSCGTypeAndRunqueue_GP102(struct KernelFifo *pKernelFifo, NvU32 scgType, NvU32 runqueue);
699 
700 
701 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoValidateSCGTypeAndRunqueue(struct KernelFifo * pKernelFifo,NvU32 scgType,NvU32 runqueue)702 static inline NvBool kfifoValidateSCGTypeAndRunqueue(struct KernelFifo *pKernelFifo, NvU32 scgType, NvU32 runqueue) {
703     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
704     return NV_FALSE;
705 }
706 #else //__nvoc_kernel_fifo_h_disabled
707 #define kfifoValidateSCGTypeAndRunqueue(pKernelFifo, scgType, runqueue) kfifoValidateSCGTypeAndRunqueue_GP102(pKernelFifo, scgType, runqueue)
708 #endif //__nvoc_kernel_fifo_h_disabled
709 
710 #define kfifoValidateSCGTypeAndRunqueue_HAL(pKernelFifo, scgType, runqueue) kfifoValidateSCGTypeAndRunqueue(pKernelFifo, scgType, runqueue)
711 
712 NvBool kfifoValidateEngineAndRunqueue_GP102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 runqueue);
713 
714 
715 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoValidateEngineAndRunqueue(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 engDesc,NvU32 runqueue)716 static inline NvBool kfifoValidateEngineAndRunqueue(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 runqueue) {
717     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
718     return NV_FALSE;
719 }
720 #else //__nvoc_kernel_fifo_h_disabled
721 #define kfifoValidateEngineAndRunqueue(pGpu, pKernelFifo, engDesc, runqueue) kfifoValidateEngineAndRunqueue_GP102(pGpu, pKernelFifo, engDesc, runqueue)
722 #endif //__nvoc_kernel_fifo_h_disabled
723 
724 #define kfifoValidateEngineAndRunqueue_HAL(pGpu, pKernelFifo, engDesc, runqueue) kfifoValidateEngineAndRunqueue(pGpu, pKernelFifo, engDesc, runqueue)
725 
726 NvBool kfifoValidateEngineAndSubctxType_GP102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 subctxType);
727 
728 
729 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoValidateEngineAndSubctxType(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 engDesc,NvU32 subctxType)730 static inline NvBool kfifoValidateEngineAndSubctxType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 subctxType) {
731     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
732     return NV_FALSE;
733 }
734 #else //__nvoc_kernel_fifo_h_disabled
735 #define kfifoValidateEngineAndSubctxType(pGpu, pKernelFifo, engDesc, subctxType) kfifoValidateEngineAndSubctxType_GP102(pGpu, pKernelFifo, engDesc, subctxType)
736 #endif //__nvoc_kernel_fifo_h_disabled
737 
738 #define kfifoValidateEngineAndSubctxType_HAL(pGpu, pKernelFifo, engDesc, subctxType) kfifoValidateEngineAndSubctxType(pGpu, pKernelFifo, engDesc, subctxType)
739 
740 NV_STATUS kfifoRmctrlGetWorkSubmitToken_GV100(struct KernelFifo *pKernelFifo, NvHandle hClient, NvHandle hChannel, NvU32 *pWorkSubmitToken);
741 
742 
743 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoRmctrlGetWorkSubmitToken(struct KernelFifo * pKernelFifo,NvHandle hClient,NvHandle hChannel,NvU32 * pWorkSubmitToken)744 static inline NV_STATUS kfifoRmctrlGetWorkSubmitToken(struct KernelFifo *pKernelFifo, NvHandle hClient, NvHandle hChannel, NvU32 *pWorkSubmitToken) {
745     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
746     return NV_ERR_NOT_SUPPORTED;
747 }
748 #else //__nvoc_kernel_fifo_h_disabled
749 #define kfifoRmctrlGetWorkSubmitToken(pKernelFifo, hClient, hChannel, pWorkSubmitToken) kfifoRmctrlGetWorkSubmitToken_GV100(pKernelFifo, hClient, hChannel, pWorkSubmitToken)
750 #endif //__nvoc_kernel_fifo_h_disabled
751 
752 #define kfifoRmctrlGetWorkSubmitToken_HAL(pKernelFifo, hClient, hChannel, pWorkSubmitToken) kfifoRmctrlGetWorkSubmitToken(pKernelFifo, hClient, hChannel, pWorkSubmitToken)
753 
754 NV_STATUS kfifoChannelGetFifoContextMemDesc_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, FIFO_CTX engState, MEMORY_DESCRIPTOR **ppMemdesc);
755 
756 
757 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoChannelGetFifoContextMemDesc(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct KernelChannel * pKernelChannel,FIFO_CTX engState,MEMORY_DESCRIPTOR ** ppMemdesc)758 static inline NV_STATUS kfifoChannelGetFifoContextMemDesc(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, FIFO_CTX engState, MEMORY_DESCRIPTOR **ppMemdesc) {
759     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
760     return NV_ERR_NOT_SUPPORTED;
761 }
762 #else //__nvoc_kernel_fifo_h_disabled
763 #define kfifoChannelGetFifoContextMemDesc(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc) kfifoChannelGetFifoContextMemDesc_GM107(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc)
764 #endif //__nvoc_kernel_fifo_h_disabled
765 
766 #define kfifoChannelGetFifoContextMemDesc_HAL(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc) kfifoChannelGetFifoContextMemDesc(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc)
767 
768 NV_STATUS kfifoConvertInstToKernelChannel_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, INST_BLOCK_DESC *arg0, struct KernelChannel **arg1);
769 
770 
771 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoConvertInstToKernelChannel(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,INST_BLOCK_DESC * arg0,struct KernelChannel ** arg1)772 static inline NV_STATUS kfifoConvertInstToKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, INST_BLOCK_DESC *arg0, struct KernelChannel **arg1) {
773     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
774     return NV_ERR_NOT_SUPPORTED;
775 }
776 #else //__nvoc_kernel_fifo_h_disabled
777 #define kfifoConvertInstToKernelChannel(pGpu, pKernelFifo, arg0, arg1) kfifoConvertInstToKernelChannel_GM107(pGpu, pKernelFifo, arg0, arg1)
778 #endif //__nvoc_kernel_fifo_h_disabled
779 
780 #define kfifoConvertInstToKernelChannel_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoConvertInstToKernelChannel(pGpu, pKernelFifo, arg0, arg1)
781 
782 NV_STATUS kfifoGetUsermodeMapInfo_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *arg0, NvU32 *arg1);
783 
784 
785 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetUsermodeMapInfo(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU64 * arg0,NvU32 * arg1)786 static inline NV_STATUS kfifoGetUsermodeMapInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *arg0, NvU32 *arg1) {
787     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
788     return NV_ERR_NOT_SUPPORTED;
789 }
790 #else //__nvoc_kernel_fifo_h_disabled
791 #define kfifoGetUsermodeMapInfo(pGpu, pKernelFifo, arg0, arg1) kfifoGetUsermodeMapInfo_GV100(pGpu, pKernelFifo, arg0, arg1)
792 #endif //__nvoc_kernel_fifo_h_disabled
793 
794 #define kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoGetUsermodeMapInfo(pGpu, pKernelFifo, arg0, arg1)
795 
796 NvU32 kfifoGetMaxSubcontext_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0);
797 
798 
799 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetMaxSubcontext(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvBool arg0)800 static inline NvU32 kfifoGetMaxSubcontext(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) {
801     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
802     return 0;
803 }
804 #else //__nvoc_kernel_fifo_h_disabled
805 #define kfifoGetMaxSubcontext(pGpu, pKernelFifo, arg0) kfifoGetMaxSubcontext_GV100(pGpu, pKernelFifo, arg0)
806 #endif //__nvoc_kernel_fifo_h_disabled
807 
808 #define kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, arg0) kfifoGetMaxSubcontext(pGpu, pKernelFifo, arg0)
809 
810 NvU32 kfifoGetMaxSubcontextFromGr_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernel);
811 
812 
813 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetMaxSubcontextFromGr(struct OBJGPU * pGpu,struct KernelFifo * pKernel)814 static inline NvU32 kfifoGetMaxSubcontextFromGr(struct OBJGPU *pGpu, struct KernelFifo *pKernel) {
815     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
816     return 0;
817 }
818 #else //__nvoc_kernel_fifo_h_disabled
819 #define kfifoGetMaxSubcontextFromGr(pGpu, pKernel) kfifoGetMaxSubcontextFromGr_KERNEL(pGpu, pKernel)
820 #endif //__nvoc_kernel_fifo_h_disabled
821 
822 #define kfifoGetMaxSubcontextFromGr_HAL(pGpu, pKernel) kfifoGetMaxSubcontextFromGr(pGpu, pKernel)
823 
kfifoGetNumRunqueues_adde13(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)824 static inline NvU32 kfifoGetNumRunqueues_adde13(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
825     return 2;
826 }
827 
828 
829 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetNumRunqueues(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)830 static inline NvU32 kfifoGetNumRunqueues(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
831     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
832     return 0;
833 }
834 #else //__nvoc_kernel_fifo_h_disabled
835 #define kfifoGetNumRunqueues(pGpu, pKernelFifo) kfifoGetNumRunqueues_adde13(pGpu, pKernelFifo)
836 #endif //__nvoc_kernel_fifo_h_disabled
837 
838 #define kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo) kfifoGetNumRunqueues(pGpu, pKernelFifo)
839 
840 NvU32 kfifoGetMaxChannelGroupSize_GV100(struct KernelFifo *pKernelFifo);
841 
842 
843 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetMaxChannelGroupSize(struct KernelFifo * pKernelFifo)844 static inline NvU32 kfifoGetMaxChannelGroupSize(struct KernelFifo *pKernelFifo) {
845     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
846     return 0;
847 }
848 #else //__nvoc_kernel_fifo_h_disabled
849 #define kfifoGetMaxChannelGroupSize(pKernelFifo) kfifoGetMaxChannelGroupSize_GV100(pKernelFifo)
850 #endif //__nvoc_kernel_fifo_h_disabled
851 
852 #define kfifoGetMaxChannelGroupSize_HAL(pKernelFifo) kfifoGetMaxChannelGroupSize(pKernelFifo)
853 
kfifoAddObject_56cd7a(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct ChannelDescendant * pObject)854 static inline NV_STATUS kfifoAddObject_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
855     return NV_OK;
856 }
857 
858 
859 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoAddObject(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct ChannelDescendant * pObject)860 static inline NV_STATUS kfifoAddObject(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
861     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
862     return NV_ERR_NOT_SUPPORTED;
863 }
864 #else //__nvoc_kernel_fifo_h_disabled
865 #define kfifoAddObject(pGpu, pKernelFifo, pObject) kfifoAddObject_56cd7a(pGpu, pKernelFifo, pObject)
866 #endif //__nvoc_kernel_fifo_h_disabled
867 
868 #define kfifoAddObject_HAL(pGpu, pKernelFifo, pObject) kfifoAddObject(pGpu, pKernelFifo, pObject)
869 
kfifoDeleteObject_56cd7a(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct ChannelDescendant * pObject)870 static inline NV_STATUS kfifoDeleteObject_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
871     return NV_OK;
872 }
873 
874 
875 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoDeleteObject(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct ChannelDescendant * pObject)876 static inline NV_STATUS kfifoDeleteObject(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) {
877     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
878     return NV_ERR_NOT_SUPPORTED;
879 }
880 #else //__nvoc_kernel_fifo_h_disabled
881 #define kfifoDeleteObject(pGpu, pKernelFifo, pObject) kfifoDeleteObject_56cd7a(pGpu, pKernelFifo, pObject)
882 #endif //__nvoc_kernel_fifo_h_disabled
883 
884 #define kfifoDeleteObject_HAL(pGpu, pKernelFifo, pObject) kfifoDeleteObject(pGpu, pKernelFifo, pObject)
885 
886 NV_STATUS kfifoConstructEngineList_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
887 
888 
889 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoConstructEngineList(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)890 static inline NV_STATUS kfifoConstructEngineList(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
891     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
892     return NV_ERR_NOT_SUPPORTED;
893 }
894 #else //__nvoc_kernel_fifo_h_disabled
895 #define kfifoConstructEngineList(pGpu, pKernelFifo) kfifoConstructEngineList_KERNEL(pGpu, pKernelFifo)
896 #endif //__nvoc_kernel_fifo_h_disabled
897 
898 #define kfifoConstructEngineList_HAL(pGpu, pKernelFifo) kfifoConstructEngineList(pGpu, pKernelFifo)
899 
900 NV_STATUS kfifoGetHostDeviceInfoTable_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO *pEngineInfo, struct Device *pMigDevice);
901 
902 
903 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetHostDeviceInfoTable(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,ENGINE_INFO * pEngineInfo,struct Device * pMigDevice)904 static inline NV_STATUS kfifoGetHostDeviceInfoTable(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO *pEngineInfo, struct Device *pMigDevice) {
905     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
906     return NV_ERR_NOT_SUPPORTED;
907 }
908 #else //__nvoc_kernel_fifo_h_disabled
909 #define kfifoGetHostDeviceInfoTable(pGpu, pKernelFifo, pEngineInfo, pMigDevice) kfifoGetHostDeviceInfoTable_KERNEL(pGpu, pKernelFifo, pEngineInfo, pMigDevice)
910 #endif //__nvoc_kernel_fifo_h_disabled
911 
912 #define kfifoGetHostDeviceInfoTable_HAL(pGpu, pKernelFifo, pEngineInfo, pMigDevice) kfifoGetHostDeviceInfoTable(pGpu, pKernelFifo, pEngineInfo, pMigDevice)
913 
914 void kfifoGetSubctxType_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 *arg1);
915 
916 
917 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetSubctxType(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct KernelChannel * arg0,NvU32 * arg1)918 static inline void kfifoGetSubctxType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 *arg1) {
919     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
920 }
921 #else //__nvoc_kernel_fifo_h_disabled
922 #define kfifoGetSubctxType(pGpu, pKernelFifo, arg0, arg1) kfifoGetSubctxType_GV100(pGpu, pKernelFifo, arg0, arg1)
923 #endif //__nvoc_kernel_fifo_h_disabled
924 
925 #define kfifoGetSubctxType_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoGetSubctxType(pGpu, pKernelFifo, arg0, arg1)
926 
kfifoGenerateInternalWorkSubmitToken_c04480(struct OBJGPU * pGpu,struct KernelFifo * arg0,struct KernelChannel * arg1)927 static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken_c04480(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) {
928     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
929 }
930 
931 NV_STATUS kfifoGenerateInternalWorkSubmitToken_GA100(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1);
932 
kfifoGenerateInternalWorkSubmitToken_5baef9(struct OBJGPU * pGpu,struct KernelFifo * arg0,struct KernelChannel * arg1)933 static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken_5baef9(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) {
934     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
935 }
936 
937 
938 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGenerateInternalWorkSubmitToken(struct OBJGPU * pGpu,struct KernelFifo * arg0,struct KernelChannel * arg1)939 static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) {
940     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
941     return NV_ERR_NOT_SUPPORTED;
942 }
943 #else //__nvoc_kernel_fifo_h_disabled
944 #define kfifoGenerateInternalWorkSubmitToken(pGpu, arg0, arg1) kfifoGenerateInternalWorkSubmitToken_c04480(pGpu, arg0, arg1)
945 #endif //__nvoc_kernel_fifo_h_disabled
946 
947 #define kfifoGenerateInternalWorkSubmitToken_HAL(pGpu, arg0, arg1) kfifoGenerateInternalWorkSubmitToken(pGpu, arg0, arg1)
948 
kfifoUpdateInternalDoorbellForUsermode_c04480(struct OBJGPU * arg0,struct KernelFifo * arg1,NvU32 workSubmitToken,NvU32 runlisId)949 static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode_c04480(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
950     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
951 }
952 
953 NV_STATUS kfifoUpdateInternalDoorbellForUsermode_GA100(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId);
954 
kfifoUpdateInternalDoorbellForUsermode_5baef9(struct OBJGPU * arg0,struct KernelFifo * arg1,NvU32 workSubmitToken,NvU32 runlisId)955 static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode_5baef9(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
956     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
957 }
958 
959 
960 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoUpdateInternalDoorbellForUsermode(struct OBJGPU * arg0,struct KernelFifo * arg1,NvU32 workSubmitToken,NvU32 runlisId)961 static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
962     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
963     return NV_ERR_NOT_SUPPORTED;
964 }
965 #else //__nvoc_kernel_fifo_h_disabled
966 #define kfifoUpdateInternalDoorbellForUsermode(arg0, arg1, workSubmitToken, runlisId) kfifoUpdateInternalDoorbellForUsermode_c04480(arg0, arg1, workSubmitToken, runlisId)
967 #endif //__nvoc_kernel_fifo_h_disabled
968 
969 #define kfifoUpdateInternalDoorbellForUsermode_HAL(arg0, arg1, workSubmitToken, runlisId) kfifoUpdateInternalDoorbellForUsermode(arg0, arg1, workSubmitToken, runlisId)
970 
kfifoIsLiteModeEnabled_491d52(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)971 static inline NvBool kfifoIsLiteModeEnabled_491d52(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
972     return ((NvBool)(0 != 0));
973 }
974 
975 
976 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoIsLiteModeEnabled(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)977 static inline NvBool kfifoIsLiteModeEnabled(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
978     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
979     return NV_FALSE;
980 }
981 #else //__nvoc_kernel_fifo_h_disabled
982 #define kfifoIsLiteModeEnabled(pGpu, pKernelFifo) kfifoIsLiteModeEnabled_491d52(pGpu, pKernelFifo)
983 #endif //__nvoc_kernel_fifo_h_disabled
984 
985 #define kfifoIsLiteModeEnabled_HAL(pGpu, pKernelFifo) kfifoIsLiteModeEnabled(pGpu, pKernelFifo)
986 
987 NvU32 kfifoGetNumEngines_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
988 
989 
990 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetNumEngines(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)991 static inline NvU32 kfifoGetNumEngines(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
992     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
993     return 0;
994 }
995 #else //__nvoc_kernel_fifo_h_disabled
996 #define kfifoGetNumEngines(pGpu, pKernelFifo) kfifoGetNumEngines_GM107(pGpu, pKernelFifo)
997 #endif //__nvoc_kernel_fifo_h_disabled
998 
999 #define kfifoGetNumEngines_HAL(pGpu, pKernelFifo) kfifoGetNumEngines(pGpu, pKernelFifo)
1000 
1001 const char *kfifoGetEngineName_GM107(struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal);
1002 
1003 
1004 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetEngineName(struct KernelFifo * pKernelFifo,ENGINE_INFO_TYPE inType,NvU32 inVal)1005 static inline const char *kfifoGetEngineName(struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal) {
1006     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1007     return NULL;
1008 }
1009 #else //__nvoc_kernel_fifo_h_disabled
1010 #define kfifoGetEngineName(pKernelFifo, inType, inVal) kfifoGetEngineName_GM107(pKernelFifo, inType, inVal)
1011 #endif //__nvoc_kernel_fifo_h_disabled
1012 
1013 #define kfifoGetEngineName_HAL(pKernelFifo, inType, inVal) kfifoGetEngineName(pKernelFifo, inType, inVal)
1014 
1015 NvU32 kfifoGetMaxNumRunlists_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1016 
1017 
1018 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetMaxNumRunlists(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)1019 static inline NvU32 kfifoGetMaxNumRunlists(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1020     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1021     return 0;
1022 }
1023 #else //__nvoc_kernel_fifo_h_disabled
1024 #define kfifoGetMaxNumRunlists(pGpu, pKernelFifo) kfifoGetMaxNumRunlists_GM107(pGpu, pKernelFifo)
1025 #endif //__nvoc_kernel_fifo_h_disabled
1026 
1027 #define kfifoGetMaxNumRunlists_HAL(pGpu, pKernelFifo) kfifoGetMaxNumRunlists(pGpu, pKernelFifo)
1028 
1029 NV_STATUS kfifoGetEnginePbdmaIds_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE type, NvU32 val, NvU32 **ppPbdmaIds, NvU32 *pNumPbdmas);
1030 
1031 
1032 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetEnginePbdmaIds(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,ENGINE_INFO_TYPE type,NvU32 val,NvU32 ** ppPbdmaIds,NvU32 * pNumPbdmas)1033 static inline NV_STATUS kfifoGetEnginePbdmaIds(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE type, NvU32 val, NvU32 **ppPbdmaIds, NvU32 *pNumPbdmas) {
1034     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1035     return NV_ERR_NOT_SUPPORTED;
1036 }
1037 #else //__nvoc_kernel_fifo_h_disabled
1038 #define kfifoGetEnginePbdmaIds(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas) kfifoGetEnginePbdmaIds_GM107(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas)
1039 #endif //__nvoc_kernel_fifo_h_disabled
1040 
1041 #define kfifoGetEnginePbdmaIds_HAL(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas) kfifoGetEnginePbdmaIds(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas)
1042 
kfifoReservePbdmaFaultIds_56cd7a(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,FIFO_ENGINE_LIST * arg0,NvU32 arg1)1043 static inline NV_STATUS kfifoReservePbdmaFaultIds_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_ENGINE_LIST *arg0, NvU32 arg1) {
1044     return NV_OK;
1045 }
1046 
1047 
1048 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoReservePbdmaFaultIds(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,FIFO_ENGINE_LIST * arg0,NvU32 arg1)1049 static inline NV_STATUS kfifoReservePbdmaFaultIds(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_ENGINE_LIST *arg0, NvU32 arg1) {
1050     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1051     return NV_ERR_NOT_SUPPORTED;
1052 }
1053 #else //__nvoc_kernel_fifo_h_disabled
1054 #define kfifoReservePbdmaFaultIds(pGpu, pKernelFifo, arg0, arg1) kfifoReservePbdmaFaultIds_56cd7a(pGpu, pKernelFifo, arg0, arg1)
1055 #endif //__nvoc_kernel_fifo_h_disabled
1056 
1057 #define kfifoReservePbdmaFaultIds_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoReservePbdmaFaultIds(pGpu, pKernelFifo, arg0, arg1)
1058 
1059 NV_STATUS kfifoGetEnginePartnerList_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pParams);
1060 
1061 
1062 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoGetEnginePartnerList(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS * pParams)1063 static inline NV_STATUS kfifoGetEnginePartnerList(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pParams) {
1064     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1065     return NV_ERR_NOT_SUPPORTED;
1066 }
1067 #else //__nvoc_kernel_fifo_h_disabled
1068 #define kfifoGetEnginePartnerList(pGpu, pKernelFifo, pParams) kfifoGetEnginePartnerList_GM107(pGpu, pKernelFifo, pParams)
1069 #endif //__nvoc_kernel_fifo_h_disabled
1070 
1071 #define kfifoGetEnginePartnerList_HAL(pGpu, pKernelFifo, pParams) kfifoGetEnginePartnerList(pGpu, pKernelFifo, pParams)
1072 
kfifoRunlistIsTsgHeaderSupported_cbe027(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 arg0)1073 static inline NvBool kfifoRunlistIsTsgHeaderSupported_cbe027(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
1074     return ((NvBool)(0 == 0));
1075 }
1076 
1077 
1078 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoRunlistIsTsgHeaderSupported(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 arg0)1079 static inline NvBool kfifoRunlistIsTsgHeaderSupported(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
1080     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1081     return NV_FALSE;
1082 }
1083 #else //__nvoc_kernel_fifo_h_disabled
1084 #define kfifoRunlistIsTsgHeaderSupported(pGpu, pKernelFifo, arg0) kfifoRunlistIsTsgHeaderSupported_cbe027(pGpu, pKernelFifo, arg0)
1085 #endif //__nvoc_kernel_fifo_h_disabled
1086 
1087 #define kfifoRunlistIsTsgHeaderSupported_HAL(pGpu, pKernelFifo, arg0) kfifoRunlistIsTsgHeaderSupported(pGpu, pKernelFifo, arg0)
1088 
1089 NvU32 kfifoRunlistGetEntrySize_GV100(struct KernelFifo *arg0);
1090 
1091 
1092 #ifdef __nvoc_kernel_fifo_h_disabled
kfifoRunlistGetEntrySize(struct KernelFifo * arg0)1093 static inline NvU32 kfifoRunlistGetEntrySize(struct KernelFifo *arg0) {
1094     NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
1095     return 0;
1096 }
1097 #else //__nvoc_kernel_fifo_h_disabled
1098 #define kfifoRunlistGetEntrySize(arg0) kfifoRunlistGetEntrySize_GV100(arg0)
1099 #endif //__nvoc_kernel_fifo_h_disabled
1100 
1101 #define kfifoRunlistGetEntrySize_HAL(arg0) kfifoRunlistGetEntrySize(arg0)
1102 
// '_b3696a' per-chip implementation: intentional no-op.
static inline void kfifoSetupBar1UserdSnoop_b3696a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bEnable, NvU64 offset) {
    return;
}


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out: asserts only.
static inline void kfifoSetupBar1UserdSnoop(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bEnable, NvU64 offset) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoSetupBar1UserdSnoop(pGpu, pKernelFifo, bEnable, offset) kfifoSetupBar1UserdSnoop_b3696a(pGpu, pKernelFifo, bEnable, offset)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoSetupBar1UserdSnoop_HAL(pGpu, pKernelFifo, bEnable, offset) kfifoSetupBar1UserdSnoop(pGpu, pKernelFifo, bEnable, offset)

// GM107 HAL implementation (defined in the per-chip .c file).
NV_STATUS kfifoPreAllocUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out.
static inline NV_STATUS kfifoPreAllocUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoPreAllocUserD(pGpu, pKernelFifo) kfifoPreAllocUserD_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoPreAllocUserD_HAL(pGpu, pKernelFifo) kfifoPreAllocUserD(pGpu, pKernelFifo)

// GM107 HAL implementation; counterpart to kfifoPreAllocUserD_GM107.
void kfifoFreePreAllocUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out: asserts only.
static inline void kfifoFreePreAllocUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoFreePreAllocUserD(pGpu, pKernelFifo) kfifoFreePreAllocUserD_GM107(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoFreePreAllocUserD_HAL(pGpu, pKernelFifo) kfifoFreePreAllocUserD(pGpu, pKernelFifo)
1144 
// GM107 HAL implementation: reports the BAR1 USERD mapping offset and size
// through the two out-parameters.
NV_STATUS kfifoGetUserdBar1MapInfo_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *bar1Offset, NvU32 *bar1MapSize);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out.
static inline NV_STATUS kfifoGetUserdBar1MapInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *bar1Offset, NvU32 *bar1MapSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUserdBar1MapInfo(pGpu, pKernelFifo, bar1Offset, bar1MapSize) kfifoGetUserdBar1MapInfo_GM107(pGpu, pKernelFifo, bar1Offset, bar1MapSize)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUserdBar1MapInfo_HAL(pGpu, pKernelFifo, bar1Offset, bar1MapSize) kfifoGetUserdBar1MapInfo(pGpu, pKernelFifo, bar1Offset, bar1MapSize)

// GM107 HAL implementation: reports USERD size and address alignment shift
// through the out-parameters.
void kfifoGetUserdSizeAlign_GM107(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pAddrShift);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out: asserts only;
// out-parameters are left untouched.
static inline void kfifoGetUserdSizeAlign(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pAddrShift) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUserdSizeAlign(pKernelFifo, pSize, pAddrShift) kfifoGetUserdSizeAlign_GM107(pKernelFifo, pSize, pAddrShift)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUserdSizeAlign_HAL(pKernelFifo, pSize, pAddrShift) kfifoGetUserdSizeAlign(pKernelFifo, pSize, pAddrShift)

// GM107 HAL implementation: reports USERD aperture/attribute selections.
NV_STATUS kfifoGetUserdLocation_GM107(struct KernelFifo *pKernelFifo, NvU32 *pUserdAperture, NvU32 *pUserdAttribute);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out.
static inline NV_STATUS kfifoGetUserdLocation(struct KernelFifo *pKernelFifo, NvU32 *pUserdAperture, NvU32 *pUserdAttribute) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetUserdLocation(pKernelFifo, pUserdAperture, pUserdAttribute) kfifoGetUserdLocation_GM107(pKernelFifo, pUserdAperture, pUserdAttribute)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetUserdLocation_HAL(pKernelFifo, pUserdAperture, pUserdAttribute) kfifoGetUserdLocation(pKernelFifo, pUserdAperture, pUserdAttribute)
1185 
// GV100 HAL implementation (defined in the per-chip .c file).
NvU32 kfifoCalcTotalSizeOfFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bCalcForFbRsvd);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out: asserts, returns 0.
static inline NvU32 kfifoCalcTotalSizeOfFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bCalcForFbRsvd) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoCalcTotalSizeOfFaultMethodBuffers(pGpu, pKernelFifo, bCalcForFbRsvd) kfifoCalcTotalSizeOfFaultMethodBuffers_GV100(pGpu, pKernelFifo, bCalcForFbRsvd)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoCalcTotalSizeOfFaultMethodBuffers_HAL(pGpu, pKernelFifo, bCalcForFbRsvd) kfifoCalcTotalSizeOfFaultMethodBuffers(pGpu, pKernelFifo, bCalcForFbRsvd)

// GM107 HAL implementation: presence result is written to *pPresent.
NV_STATUS kfifoCheckEngine_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvBool *pPresent);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out.
static inline NV_STATUS kfifoCheckEngine(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvBool *pPresent) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoCheckEngine(pGpu, pKernelFifo, engDesc, pPresent) kfifoCheckEngine_GM107(pGpu, pKernelFifo, engDesc, pPresent)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoCheckEngine_HAL(pGpu, pKernelFifo, engDesc, pPresent) kfifoCheckEngine(pGpu, pKernelFifo, engDesc, pPresent)

// '_56cd7a' per-chip implementation: no-op, always succeeds.
static inline NV_STATUS kfifoRestoreSchedPolicy_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    return NV_OK;
}


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out.
static inline NV_STATUS kfifoRestoreSchedPolicy(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRestoreSchedPolicy(pGpu, pKernelFifo) kfifoRestoreSchedPolicy_56cd7a(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRestoreSchedPolicy_HAL(pGpu, pKernelFifo) kfifoRestoreSchedPolicy(pGpu, pKernelFifo)
1229 
// KERNEL-side implementation (defined in the kernel-RM .c file).
NV_STATUS kfifoGetMaxSecureChannels_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out.
static inline NV_STATUS kfifoGetMaxSecureChannels(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxSecureChannels(pGpu, pKernelFifo) kfifoGetMaxSecureChannels_KERNEL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetMaxSecureChannels_HAL(pGpu, pKernelFifo) kfifoGetMaxSecureChannels(pGpu, pKernelFifo)

// GM107 HAL implementation: assigns runlistId to the given channel.
NV_STATUS kfifoRunlistSetId_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 runlistId);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out.
static inline NV_STATUS kfifoRunlistSetId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistSetId(pGpu, pKernelFifo, arg0, runlistId) kfifoRunlistSetId_GM107(pGpu, pKernelFifo, arg0, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistSetId_HAL(pGpu, pKernelFifo, arg0, runlistId) kfifoRunlistSetId(pGpu, pKernelFifo, arg0, runlistId)

// GM107 HAL implementation: like kfifoRunlistSetId but keyed by engDesc.
NV_STATUS kfifoRunlistSetIdByEngine_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 engDesc);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out.
static inline NV_STATUS kfifoRunlistSetIdByEngine(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 engDesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistSetIdByEngine(pGpu, pKernelFifo, arg0, engDesc) kfifoRunlistSetIdByEngine_GM107(pGpu, pKernelFifo, arg0, engDesc)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoRunlistSetIdByEngine_HAL(pGpu, pKernelFifo, arg0, engDesc) kfifoRunlistSetIdByEngine(pGpu, pKernelFifo, arg0, engDesc)
1271 
// GM107 HAL implementation: initializes USERD backed by pMemDesc.
void kfifoSetupUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out: asserts only.
static inline void kfifoSetupUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoSetupUserD(pGpu, pKernelFifo, pMemDesc) kfifoSetupUserD_GM107(pGpu, pKernelFifo, pMemDesc)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoSetupUserD_HAL(pGpu, pKernelFifo, pMemDesc) kfifoSetupUserD(pGpu, pKernelFifo, pMemDesc)

// GV100 HAL implementation: maps a fault access-type code to a string.
const char *kfifoGetFaultAccessTypeString_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);


#ifdef __nvoc_kernel_fifo_h_disabled
// Stub used when the KernelFifo engine is compiled out: asserts, returns NULL.
static inline const char *kfifoGetFaultAccessTypeString(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetFaultAccessTypeString(pGpu, pKernelFifo, arg0) kfifoGetFaultAccessTypeString_GV100(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled

#define kfifoGetFaultAccessTypeString_HAL(pGpu, pKernelFifo, arg0) kfifoGetFaultAccessTypeString(pGpu, pKernelFifo, arg0)
1298 
1299 NV_STATUS kfifoConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGDESCRIPTOR engDesc);
1300 
kfifoConstructEngine_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,ENGDESCRIPTOR engDesc)1301 static inline NV_STATUS kfifoConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGDESCRIPTOR engDesc) {
1302     return pKernelFifo->__kfifoConstructEngine__(pGpu, pKernelFifo, engDesc);
1303 }
1304 
1305 NV_STATUS kfifoStateLoad_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags);
1306 
kfifoStateLoad_56cd7a(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 flags)1307 static inline NV_STATUS kfifoStateLoad_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
1308     return NV_OK;
1309 }
1310 
kfifoStateLoad_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 flags)1311 static inline NV_STATUS kfifoStateLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
1312     return pKernelFifo->__kfifoStateLoad__(pGpu, pKernelFifo, flags);
1313 }
1314 
1315 NV_STATUS kfifoStateUnload_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags);
1316 
kfifoStateUnload_56cd7a(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 flags)1317 static inline NV_STATUS kfifoStateUnload_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
1318     return NV_OK;
1319 }
1320 
kfifoStateUnload_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 flags)1321 static inline NV_STATUS kfifoStateUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
1322     return pKernelFifo->__kfifoStateUnload__(pGpu, pKernelFifo, flags);
1323 }
1324 
1325 NV_STATUS kfifoStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1326 
kfifoStateInitLocked_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)1327 static inline NV_STATUS kfifoStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1328     return pKernelFifo->__kfifoStateInitLocked__(pGpu, pKernelFifo);
1329 }
1330 
1331 void kfifoStateDestroy_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1332 
kfifoStateDestroy_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)1333 static inline void kfifoStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1334     pKernelFifo->__kfifoStateDestroy__(pGpu, pKernelFifo);
1335 }
1336 
1337 NV_STATUS kfifoStatePostLoad_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags);
1338 
kfifoStatePostLoad_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 flags)1339 static inline NV_STATUS kfifoStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
1340     return pKernelFifo->__kfifoStatePostLoad__(pGpu, pKernelFifo, flags);
1341 }
1342 
1343 NV_STATUS kfifoStatePreUnload_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags);
1344 
kfifoStatePreUnload_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 flags)1345 static inline NV_STATUS kfifoStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) {
1346     return pKernelFifo->__kfifoStatePreUnload__(pGpu, pKernelFifo, flags);
1347 }
1348 
1349 NV_STATUS kfifoCheckChannelAllocAddrSpaces_GH100(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace);
1350 
kfifoCheckChannelAllocAddrSpaces_56cd7a(struct KernelFifo * pKernelFifo,NV_ADDRESS_SPACE userdAddrSpace,NV_ADDRESS_SPACE pushBuffAddrSpace,NV_ADDRESS_SPACE gpFifoAddrSpace)1351 static inline NV_STATUS kfifoCheckChannelAllocAddrSpaces_56cd7a(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace) {
1352     return NV_OK;
1353 }
1354 
kfifoCheckChannelAllocAddrSpaces_DISPATCH(struct KernelFifo * pKernelFifo,NV_ADDRESS_SPACE userdAddrSpace,NV_ADDRESS_SPACE pushBuffAddrSpace,NV_ADDRESS_SPACE gpFifoAddrSpace)1355 static inline NV_STATUS kfifoCheckChannelAllocAddrSpaces_DISPATCH(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace) {
1356     return pKernelFifo->__kfifoCheckChannelAllocAddrSpaces__(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace);
1357 }
1358 
1359 NV_STATUS kfifoConstructUsermodeMemdescs_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1360 
1361 NV_STATUS kfifoConstructUsermodeMemdescs_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1362 
kfifoConstructUsermodeMemdescs_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)1363 static inline NV_STATUS kfifoConstructUsermodeMemdescs_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1364     return pKernelFifo->__kfifoConstructUsermodeMemdescs__(pGpu, pKernelFifo);
1365 }
1366 
1367 NvU32 kfifoChannelGroupGetLocalMaxSubcontext_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1);
1368 
1369 NvU32 kfifoChannelGroupGetLocalMaxSubcontext_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1);
1370 
kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct KernelChannelGroup * arg0,NvBool arg1)1371 static inline NvU32 kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1) {
1372     return pKernelFifo->__kfifoChannelGroupGetLocalMaxSubcontext__(pGpu, pKernelFifo, arg0, arg1);
1373 }
1374 
1375 void kfifoGetCtxBufferMapFlags_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engine, NvU32 *pFlags);
1376 
kfifoGetCtxBufferMapFlags_b3696a(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 engine,NvU32 * pFlags)1377 static inline void kfifoGetCtxBufferMapFlags_b3696a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engine, NvU32 *pFlags) {
1378     return;
1379 }
1380 
kfifoGetCtxBufferMapFlags_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 engine,NvU32 * pFlags)1381 static inline void kfifoGetCtxBufferMapFlags_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engine, NvU32 *pFlags) {
1382     pKernelFifo->__kfifoGetCtxBufferMapFlags__(pGpu, pKernelFifo, engine, pFlags);
1383 }
1384 
1385 NV_STATUS kfifoEngineInfoXlate_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal);
1386 
1387 NV_STATUS kfifoEngineInfoXlate_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal);
1388 
kfifoEngineInfoXlate_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,ENGINE_INFO_TYPE inType,NvU32 inVal,ENGINE_INFO_TYPE outType,NvU32 * pOutVal)1389 static inline NV_STATUS kfifoEngineInfoXlate_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal) {
1390     return pKernelFifo->__kfifoEngineInfoXlate__(pGpu, pKernelFifo, inType, inVal, outType, pOutVal);
1391 }
1392 
1393 NV_STATUS kfifoGenerateWorkSubmitToken_TU102(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1, NvU32 *pGeneratedToken, NvBool bUsedForHost);
1394 
1395 NV_STATUS kfifoGenerateWorkSubmitToken_GA100(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1, NvU32 *pGeneratedToken, NvBool bUsedForHost);
1396 
kfifoGenerateWorkSubmitToken_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * arg0,struct KernelChannel * arg1,NvU32 * pGeneratedToken,NvBool bUsedForHost)1397 static inline NV_STATUS kfifoGenerateWorkSubmitToken_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1, NvU32 *pGeneratedToken, NvBool bUsedForHost) {
1398     return arg0->__kfifoGenerateWorkSubmitToken__(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost);
1399 }
1400 
1401 NV_STATUS kfifoUpdateUsermodeDoorbell_TU102(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId);
1402 
1403 NV_STATUS kfifoUpdateUsermodeDoorbell_GA100(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId);
1404 
kfifoUpdateUsermodeDoorbell_DISPATCH(struct OBJGPU * arg0,struct KernelFifo * arg1,NvU32 workSubmitToken,NvU32 runlisId)1405 static inline NV_STATUS kfifoUpdateUsermodeDoorbell_DISPATCH(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) {
1406     return arg1->__kfifoUpdateUsermodeDoorbell__(arg0, arg1, workSubmitToken, runlisId);
1407 }
1408 
1409 NvU32 kfifoRunlistGetBaseShift_GM107(struct KernelFifo *pKernelFifo);
1410 
1411 NvU32 kfifoRunlistGetBaseShift_GA100(struct KernelFifo *pKernelFifo);
1412 
1413 NvU32 kfifoRunlistGetBaseShift_GA102(struct KernelFifo *pKernelFifo);
1414 
kfifoRunlistGetBaseShift_DISPATCH(struct KernelFifo * pKernelFifo)1415 static inline NvU32 kfifoRunlistGetBaseShift_DISPATCH(struct KernelFifo *pKernelFifo) {
1416     return pKernelFifo->__kfifoRunlistGetBaseShift__(pKernelFifo);
1417 }
1418 
1419 NvU64 kfifoGetUserdBar1MapStartOffset_VF(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1420 
kfifoGetUserdBar1MapStartOffset_4a4dee(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)1421 static inline NvU64 kfifoGetUserdBar1MapStartOffset_4a4dee(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1422     return 0;
1423 }
1424 
kfifoGetUserdBar1MapStartOffset_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)1425 static inline NvU64 kfifoGetUserdBar1MapStartOffset_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1426     return pKernelFifo->__kfifoGetUserdBar1MapStartOffset__(pGpu, pKernelFifo);
1427 }
1428 
1429 NvU32 kfifoGetMaxCeChannelGroups_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1430 
1431 NvU32 kfifoGetMaxCeChannelGroups_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1432 
kfifoGetMaxCeChannelGroups_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)1433 static inline NvU32 kfifoGetMaxCeChannelGroups_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1434     return pKernelFifo->__kfifoGetMaxCeChannelGroups__(pGpu, pKernelFifo);
1435 }
1436 
kfifoGetVChIdForSChId_c04480(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 chId,NvU32 gfid,NvU32 engineId,NvU32 * pVChid)1437 static inline NV_STATUS kfifoGetVChIdForSChId_c04480(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 chId, NvU32 gfid, NvU32 engineId, NvU32 *pVChid) {
1438     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
1439 }
1440 
1441 NV_STATUS kfifoGetVChIdForSChId_FWCLIENT(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 chId, NvU32 gfid, NvU32 engineId, NvU32 *pVChid);
1442 
kfifoGetVChIdForSChId_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 chId,NvU32 gfid,NvU32 engineId,NvU32 * pVChid)1443 static inline NV_STATUS kfifoGetVChIdForSChId_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 chId, NvU32 gfid, NvU32 engineId, NvU32 *pVChid) {
1444     return pKernelFifo->__kfifoGetVChIdForSChId__(pGpu, pKernelFifo, chId, gfid, engineId, pVChid);
1445 }
1446 
// '_c04480' stub: unsupported — asserts and returns NV_ERR_NOT_SUPPORTED
// (the macro performs the return).
static inline NV_STATUS kfifoProgramChIdTable_c04480(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

// '_56cd7a' stub: no-op, always succeeds.
static inline NV_STATUS kfifoProgramChIdTable_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    return NV_OK;
}

// Forward through the per-object vtable slot.
static inline NV_STATUS kfifoProgramChIdTable_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    return pKernelFifo->__kfifoProgramChIdTable__(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pMigDevice, engineFifoListNumEntries, pEngineFifoList);
}

// '_56cd7a' stub: no-op, always succeeds.
static inline NV_STATUS kfifoRecoverAllChannels_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 gfid) {
    return NV_OK;
}

// '_92bfc3' stub: unsupported — asserts, then returns NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoRecoverAllChannels_92bfc3(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 gfid) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

// Forward through the per-object vtable slot.
static inline NV_STATUS kfifoRecoverAllChannels_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 gfid) {
    return pKernelFifo->__kfifoRecoverAllChannels__(pGpu, pKernelFifo, gfid);
}
1471 
1472 void kfifoStartChannelHalt_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel);
1473 
kfifoStartChannelHalt_b3696a(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct KernelChannel * pKernelChannel)1474 static inline void kfifoStartChannelHalt_b3696a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel) {
1475     return;
1476 }
1477 
kfifoStartChannelHalt_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct KernelChannel * pKernelChannel)1478 static inline void kfifoStartChannelHalt_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel) {
1479     pKernelFifo->__kfifoStartChannelHalt__(pGpu, pKernelFifo, pKernelChannel);
1480 }
1481 
1482 void kfifoCompleteChannelHalt_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, RMTIMEOUT *pTimeout);
1483 
kfifoCompleteChannelHalt_b3696a(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct KernelChannel * pKernelChannel,RMTIMEOUT * pTimeout)1484 static inline void kfifoCompleteChannelHalt_b3696a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, RMTIMEOUT *pTimeout) {
1485     return;
1486 }
1487 
kfifoCompleteChannelHalt_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,struct KernelChannel * pKernelChannel,RMTIMEOUT * pTimeout)1488 static inline void kfifoCompleteChannelHalt_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, RMTIMEOUT *pTimeout) {
1489     pKernelFifo->__kfifoCompleteChannelHalt__(pGpu, pKernelFifo, pKernelChannel, pTimeout);
1490 }
1491 
1492 NV_STATUS kfifoGetEnginePbdmaFaultIds_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE arg0, NvU32 arg1, NvU32 **arg2, NvU32 *arg3);
1493 
kfifoGetEnginePbdmaFaultIds_5baef9(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,ENGINE_INFO_TYPE arg0,NvU32 arg1,NvU32 ** arg2,NvU32 * arg3)1494 static inline NV_STATUS kfifoGetEnginePbdmaFaultIds_5baef9(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE arg0, NvU32 arg1, NvU32 **arg2, NvU32 *arg3) {
1495     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
1496 }
1497 
kfifoGetEnginePbdmaFaultIds_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,ENGINE_INFO_TYPE arg0,NvU32 arg1,NvU32 ** arg2,NvU32 * arg3)1498 static inline NV_STATUS kfifoGetEnginePbdmaFaultIds_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE arg0, NvU32 arg1, NvU32 **arg2, NvU32 *arg3) {
1499     return pKernelFifo->__kfifoGetEnginePbdmaFaultIds__(pGpu, pKernelFifo, arg0, arg1, arg2, arg3);
1500 }
1501 
1502 NvU32 kfifoGetNumPBDMAs_GM200(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1503 
1504 NvU32 kfifoGetNumPBDMAs_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);
1505 
kfifoGetNumPBDMAs_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo)1506 static inline NvU32 kfifoGetNumPBDMAs_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
1507     return pKernelFifo->__kfifoGetNumPBDMAs__(pGpu, pKernelFifo);
1508 }
1509 
1510 const char *kfifoPrintPbdmaId_TU102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 pbdmaId);
1511 
1512 const char *kfifoPrintPbdmaId_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 pbdmaId);
1513 
kfifoPrintPbdmaId_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 pbdmaId)1514 static inline const char *kfifoPrintPbdmaId_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 pbdmaId) {
1515     return pKernelFifo->__kfifoPrintPbdmaId__(pGpu, pKernelFifo, pbdmaId);
1516 }
1517 
1518 const char *kfifoPrintInternalEngine_TU102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);
1519 
1520 const char *kfifoPrintInternalEngine_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);
1521 
1522 const char *kfifoPrintInternalEngine_AD102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);
1523 
1524 const char *kfifoPrintInternalEngine_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);
1525 
kfifoPrintInternalEngine_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 arg0)1526 static inline const char *kfifoPrintInternalEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
1527     return pKernelFifo->__kfifoPrintInternalEngine__(pGpu, pKernelFifo, arg0);
1528 }
1529 
1530 const char *kfifoPrintInternalEngineCheck_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);
1531 
kfifoPrintInternalEngineCheck_fa6e19(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 arg0)1532 static inline const char *kfifoPrintInternalEngineCheck_fa6e19(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
1533     return ((void *)0);
1534 }
1535 
kfifoPrintInternalEngineCheck_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,NvU32 arg0)1536 static inline const char *kfifoPrintInternalEngineCheck_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
1537     return pKernelFifo->__kfifoPrintInternalEngineCheck__(pGpu, pKernelFifo, arg0);
1538 }
1539 
1540 const char *kfifoGetClientIdStringCommon_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0);
1541 
kfifoGetClientIdStringCommon_95626c(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,FIFO_MMU_EXCEPTION_DATA * arg0)1542 static inline const char *kfifoGetClientIdStringCommon_95626c(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0) {
1543     NV_ASSERT_OR_RETURN_PRECOMP(0, "UNKNOWN");
1544 }
1545 
kfifoGetClientIdStringCommon_DISPATCH(struct OBJGPU * pGpu,struct KernelFifo * pKernelFifo,FIFO_MMU_EXCEPTION_DATA * arg0)1546 static inline const char *kfifoGetClientIdStringCommon_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0) {
1547     return pKernelFifo->__kfifoGetClientIdStringCommon__(pGpu, pKernelFifo, arg0);
1548 }
1549 
1550 const char *kfifoGetClientIdString_TU102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0);
1551 
1552 const char *kfifoGetClientIdString_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0);
1553 
1554 const char *kfifoGetClientIdString_AD102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0);
1555 
1556 const char *kfifoGetClientIdString_GH100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0);
1557 
static inline const char *kfifoGetClientIdString_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, FIFO_MMU_EXCEPTION_DATA *arg0) {
    // Call through the function pointer stored on the object.
    const char *(*pSlot)(struct OBJGPU *, struct KernelFifo *, FIFO_MMU_EXCEPTION_DATA *) =
        pKernelFifo->__kfifoGetClientIdString__;
    return pSlot(pGpu, pKernelFifo, arg0);
}
1561 
1562 const char *kfifoGetClientIdStringCheck_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);
1563 
static inline const char *kfifoGetClientIdStringCheck_da47da(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    // "da47da" variant: no lookup is performed; every query yields the
    // fixed fallback string. Arguments are intentionally unused.
    (void)pGpu;
    (void)pKernelFifo;
    (void)arg0;
    const char *pFallback = "UNKNOWN";
    return pFallback;
}
1567 
static inline const char *kfifoGetClientIdStringCheck_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    // Call through the function pointer stored on the object.
    const char *(*pSlot)(struct OBJGPU *, struct KernelFifo *, NvU32) =
        pKernelFifo->__kfifoGetClientIdStringCheck__;
    return pSlot(pGpu, pKernelFifo, arg0);
}
1571 
kfifoStatePreLoad_DISPATCH(POBJGPU pGpu,struct KernelFifo * pEngstate,NvU32 arg0)1572 static inline NV_STATUS kfifoStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) {
1573     return pEngstate->__kfifoStatePreLoad__(pGpu, pEngstate, arg0);
1574 }
1575 
kfifoStatePostUnload_DISPATCH(POBJGPU pGpu,struct KernelFifo * pEngstate,NvU32 arg0)1576 static inline NV_STATUS kfifoStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) {
1577     return pEngstate->__kfifoStatePostUnload__(pGpu, pEngstate, arg0);
1578 }
1579 
kfifoStateInitUnlocked_DISPATCH(POBJGPU pGpu,struct KernelFifo * pEngstate)1580 static inline NV_STATUS kfifoStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
1581     return pEngstate->__kfifoStateInitUnlocked__(pGpu, pEngstate);
1582 }
1583 
static inline void kfifoInitMissing_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
    // Call through the function pointer stored on the engstate object
    // (no result to propagate).
    void (*pSlot)(POBJGPU, struct KernelFifo *) = pEngstate->__kfifoInitMissing__;
    pSlot(pGpu, pEngstate);
}
1587 
kfifoStatePreInitLocked_DISPATCH(POBJGPU pGpu,struct KernelFifo * pEngstate)1588 static inline NV_STATUS kfifoStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
1589     return pEngstate->__kfifoStatePreInitLocked__(pGpu, pEngstate);
1590 }
1591 
kfifoStatePreInitUnlocked_DISPATCH(POBJGPU pGpu,struct KernelFifo * pEngstate)1592 static inline NV_STATUS kfifoStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
1593     return pEngstate->__kfifoStatePreInitUnlocked__(pGpu, pEngstate);
1594 }
1595 
kfifoIsPresent_DISPATCH(POBJGPU pGpu,struct KernelFifo * pEngstate)1596 static inline NvBool kfifoIsPresent_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) {
1597     return pEngstate->__kfifoIsPresent__(pGpu, pEngstate);
1598 }
1599 
kfifoGetEngineInfo(struct KernelFifo * pKernelFifo)1600 static inline const ENGINE_INFO *kfifoGetEngineInfo(struct KernelFifo *pKernelFifo) {
1601     if (pKernelFifo->engineInfo.engineInfoList == ((void *)0))
1602         return ((void *)0);
1603     return &pKernelFifo->engineInfo;
1604 }
1605 
// Read-only accessor for the preallocated USERD bookkeeping embedded in KernelFifo.
static inline const PREALLOCATED_USERD_INFO *kfifoGetPreallocatedUserdInfo(struct KernelFifo *pKernelFifo) {
    return &pKernelFifo->userdInfo;
}
1609 
// Accessor: cached flag — per-runlist channel RAM is in use.
static inline NvBool kfifoIsPerRunlistChramEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUsePerRunlistChram;
}
1613 
// Accessor: cached flag — hardware supports per-runlist channel RAM.
static inline NvBool kfifoIsPerRunlistChramSupportedInHw(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bIsPerRunlistChramSupportedInHw;
}
1617 
// Accessor: cached flag — channel-ID heap allocation is enabled.
static inline NvBool kfifoIsChidHeapEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUseChidHeap;
}
1621 
// Accessor: cached flag — host engine expansion is supported.
static inline NvBool kfifoIsHostEngineExpansionSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bHostEngineExpansion;
}
1625 
// Accessor: cached flag — subcontexts are supported.
static inline NvBool kfifoIsSubcontextSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bSubcontextSupported;
}
1629 
// Accessor: cached flag — host has load-balancing overflow (bHostHasLbOverflow).
static inline NvBool kfifoHostHasLbOverflow(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bHostHasLbOverflow;
}
1633 
// Accessor: cached flag — USERD is placed in system memory.
static inline NvBool kfifoIsUserdInSystemMemory(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUserdInSystemMemory;
}
1637 
// Accessor: cached flag — DMA mapping of USERD is supported.
static inline NvBool kfifoIsUserdMapDmaSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bUserdMapDmaSupported;
}
1641 
// Accessor: cached flag — mixed instance-memory aperture defaults are allowed.
static inline NvBool kfifoIsMixedInstmemApertureDefAllowed(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bMixedInstmemApertureDefAllowed;
}
1645 
// Accessor: cached flag — the zombie-subcontext workaround is enabled.
static inline NvBool kfifoIsZombieSubctxWarEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bIsZombieSubctxWarEnabled;
}
1649 
// Accessor: cached flag — the WDDM interleaving policy is enabled.
static inline NvBool kfifoIsWddmInterleavingPolicyEnabled(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bWddmInterleavingPolicyEnabled;
}
1653 
// Accessor: cached flag — scheduling is supported.
static inline NvBool kfifoIsSchedSupported(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->bIsSchedSupported;
}
1657 
// Accessor for the KernelSchedMgr object owned by this KernelFifo (may be NULL).
static inline struct KernelSchedMgr *kfifoGetKernelSchedMgr(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->pKernelSchedMgr;
}
1661 
// Accessor for the dummy-page memory descriptor held by this KernelFifo.
static inline MEMORY_DESCRIPTOR *kfifoGetDummyPageMemDesc(struct KernelFifo *pKernelFifo) {
    return pKernelFifo->pDummyPageMemDesc;
}
1665 
// NVOC destructor hook: __nvoc_kfifoDestruct() is an alias for kfifoDestruct_IMPL().
void kfifoDestruct_IMPL(struct KernelFifo *pKernelFifo);

#define __nvoc_kfifoDestruct(pKernelFifo) kfifoDestruct_IMPL(pKernelFifo)
NV_STATUS kfifoChidMgrConstruct_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

// Normal builds route kfifoChidMgrConstruct() to kfifoChidMgrConstruct_IMPL();
// disabled builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrConstruct(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrConstruct(pGpu, pKernelFifo) kfifoChidMgrConstruct_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
1679 
void kfifoChidMgrDestruct_IMPL(struct KernelFifo *pKernelFifo);

// Normal builds route kfifoChidMgrDestruct() to kfifoChidMgrDestruct_IMPL();
// the disabled-build stub only asserts.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoChidMgrDestruct(struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrDestruct(pKernelFifo) kfifoChidMgrDestruct_IMPL(pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
1689 
NV_STATUS kfifoChidMgrAllocChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvHandle hClient, CHANNEL_HW_ID_ALLOC_MODE arg0, NvBool bForceInternalIdx, NvU32 internalIdx, NvBool bForceUserdPage, NvU32 userdPageIdx, NvU32 ChID, struct KernelChannel *arg1);

// Normal builds route kfifoChidMgrAllocChid() to kfifoChidMgrAllocChid_IMPL();
// disabled builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrAllocChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvHandle hClient, CHANNEL_HW_ID_ALLOC_MODE arg0, NvBool bForceInternalIdx, NvU32 internalIdx, NvBool bForceUserdPage, NvU32 userdPageIdx, NvU32 ChID, struct KernelChannel *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrAllocChid(pGpu, pKernelFifo, pChidMgr, hClient, arg0, bForceInternalIdx, internalIdx, bForceUserdPage, userdPageIdx, ChID, arg1) kfifoChidMgrAllocChid_IMPL(pGpu, pKernelFifo, pChidMgr, hClient, arg0, bForceInternalIdx, internalIdx, bForceUserdPage, userdPageIdx, ChID, arg1)
#endif //__nvoc_kernel_fifo_h_disabled
1700 
NV_STATUS kfifoChidMgrRetainChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

// Normal builds route kfifoChidMgrRetainChid() to kfifoChidMgrRetainChid_IMPL();
// disabled builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrRetainChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrRetainChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrRetainChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled
1711 
NV_STATUS kfifoChidMgrReleaseChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

// Normal builds route kfifoChidMgrReleaseChid() to kfifoChidMgrReleaseChid_IMPL();
// disabled builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrReleaseChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrReleaseChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrReleaseChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled
1722 
NV_STATUS kfifoChidMgrFreeChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

// Normal builds route kfifoChidMgrFreeChid() to kfifoChidMgrFreeChid_IMPL();
// disabled builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrFreeChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrFreeChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrFreeChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled
1733 
NV_STATUS kfifoChidMgrReserveSystemChids_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 numChannels, NvU32 flags, NvU32 gfid, NvU32 *pChidOffset, NvU64 offset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList);

// Normal builds route kfifoChidMgrReserveSystemChids() to the _IMPL; disabled
// builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrReserveSystemChids(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 numChannels, NvU32 flags, NvU32 gfid, NvU32 *pChidOffset, NvU64 offset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrReserveSystemChids(pGpu, pKernelFifo, pChidMgr, numChannels, flags, gfid, pChidOffset, offset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList) kfifoChidMgrReserveSystemChids_IMPL(pGpu, pKernelFifo, pChidMgr, numChannels, flags, gfid, pChidOffset, offset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList)
#endif //__nvoc_kernel_fifo_h_disabled
1744 
NV_STATUS kfifoChidMgrFreeSystemChids_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList);

// Normal builds route kfifoChidMgrFreeSystemChids() to the _IMPL; disabled
// builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrFreeSystemChids(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrFreeSystemChids(pGpu, pKernelFifo, pChidMgr, gfid, pChidOffset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList) kfifoChidMgrFreeSystemChids_IMPL(pGpu, pKernelFifo, pChidMgr, gfid, pChidOffset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList)
#endif //__nvoc_kernel_fifo_h_disabled
1755 
NV_STATUS kfifoSetChidOffset_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList);

// Normal builds route kfifoSetChidOffset() to kfifoSetChidOffset_IMPL();
// disabled builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoSetChidOffset(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, NvU32 gfid, NvU32 *pChidOffset, NvU32 *pChannelCount, struct Device *pMigDevice, NvU32 engineFifoListNumEntries, FIFO_ENGINE_LIST *pEngineFifoList) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoSetChidOffset(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pChidOffset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList) kfifoSetChidOffset_IMPL(pGpu, pKernelFifo, pChidMgr, offset, numChannels, gfid, pChidOffset, pChannelCount, pMigDevice, engineFifoListNumEntries, pEngineFifoList)
#endif //__nvoc_kernel_fifo_h_disabled
1766 
NvU32 kfifoChidMgrGetNumChannels_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr);

// Normal builds route kfifoChidMgrGetNumChannels() to the _IMPL; the
// disabled-build stub asserts and returns 0.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoChidMgrGetNumChannels(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr) kfifoChidMgrGetNumChannels_IMPL(pGpu, pKernelFifo, pChidMgr)
#endif //__nvoc_kernel_fifo_h_disabled
1777 
NV_STATUS kfifoChidMgrAllocChannelGroupHwID_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 *pGrpId);

// Normal builds route kfifoChidMgrAllocChannelGroupHwID() to the _IMPL;
// disabled builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrAllocChannelGroupHwID(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 *pGrpId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrAllocChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, pGrpId) kfifoChidMgrAllocChannelGroupHwID_IMPL(pGpu, pKernelFifo, pChidMgr, pGrpId)
#endif //__nvoc_kernel_fifo_h_disabled
1788 
NV_STATUS kfifoChidMgrFreeChannelGroupHwID_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpId);

// Normal builds route kfifoChidMgrFreeChannelGroupHwID() to the _IMPL;
// disabled builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChidMgrFreeChannelGroupHwID(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrFreeChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, grpId) kfifoChidMgrFreeChannelGroupHwID_IMPL(pGpu, pKernelFifo, pChidMgr, grpId)
#endif //__nvoc_kernel_fifo_h_disabled
1799 
struct KernelChannelGroup *kfifoChidMgrGetKernelChannelGroup_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpID);

// Normal builds route kfifoChidMgrGetKernelChannelGroup() to the _IMPL; the
// disabled-build stub asserts and returns NULL.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline struct KernelChannelGroup *kfifoChidMgrGetKernelChannelGroup(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrGetKernelChannelGroup(pGpu, pKernelFifo, pChidMgr, grpID) kfifoChidMgrGetKernelChannelGroup_IMPL(pGpu, pKernelFifo, pChidMgr, grpID)
#endif //__nvoc_kernel_fifo_h_disabled
1810 
struct KernelChannel *kfifoChidMgrGetKernelChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID);

// Normal builds route kfifoChidMgrGetKernelChannel() to the _IMPL; the
// disabled-build stub asserts and returns NULL.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline struct KernelChannel *kfifoChidMgrGetKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrGetKernelChannel_IMPL(pGpu, pKernelFifo, pChidMgr, ChID)
#endif //__nvoc_kernel_fifo_h_disabled
1821 
CHID_MGR *kfifoGetChidMgr_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId);

// Normal builds route kfifoGetChidMgr() to kfifoGetChidMgr_IMPL(); the
// disabled-build stub asserts and returns NULL.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline CHID_MGR *kfifoGetChidMgr(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChidMgr(pGpu, pKernelFifo, runlistId) kfifoGetChidMgr_IMPL(pGpu, pKernelFifo, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled
1832 
NV_STATUS kfifoGetChidMgrFromType_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineInfoType, NvU32 value, CHID_MGR **arg0);

// Normal builds route kfifoGetChidMgrFromType() to the _IMPL; disabled
// builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetChidMgrFromType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineInfoType, NvU32 value, CHID_MGR **arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChidMgrFromType(pGpu, pKernelFifo, engineInfoType, value, arg0) kfifoGetChidMgrFromType_IMPL(pGpu, pKernelFifo, engineInfoType, value, arg0)
#endif //__nvoc_kernel_fifo_h_disabled
1843 
struct KernelChannelGroup *kfifoGetChannelGroup_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 grpID, NvU32 runlistID);

// Normal builds route kfifoGetChannelGroup() to the _IMPL; the
// disabled-build stub asserts and returns NULL.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline struct KernelChannelGroup *kfifoGetChannelGroup(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 grpID, NvU32 runlistID) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChannelGroup(pGpu, pKernelFifo, grpID, runlistID) kfifoGetChannelGroup_IMPL(pGpu, pKernelFifo, grpID, runlistID)
#endif //__nvoc_kernel_fifo_h_disabled
1854 
NvU32 kfifoGetChannelGroupsInUse_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

// Normal builds route kfifoGetChannelGroupsInUse() to the _IMPL; the
// disabled-build stub asserts and returns 0.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetChannelGroupsInUse(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChannelGroupsInUse(pGpu, pKernelFifo) kfifoGetChannelGroupsInUse_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
1865 
NvU32 kfifoGetRunlistChannelGroupsInUse_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId);

// Normal builds route kfifoGetRunlistChannelGroupsInUse() to the _IMPL; the
// disabled-build stub asserts and returns 0.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetRunlistChannelGroupsInUse(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetRunlistChannelGroupsInUse(pGpu, pKernelFifo, runlistId) kfifoGetRunlistChannelGroupsInUse_IMPL(pGpu, pKernelFifo, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled
1876 
void kfifoGetChannelIterator_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, NvU32 runlistId);

// Normal builds route kfifoGetChannelIterator() to the _IMPL; the
// disabled-build stub only asserts.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoGetChannelIterator(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChannelIterator(pGpu, pKernelFifo, pIt, runlistId) kfifoGetChannelIterator_IMPL(pGpu, pKernelFifo, pIt, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled
1886 
NV_STATUS kfifoGetNextKernelChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, struct KernelChannel **ppKernelChannel);

// Normal builds route kfifoGetNextKernelChannel() to the _IMPL; disabled
// builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetNextKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, struct KernelChannel **ppKernelChannel) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetNextKernelChannel(pGpu, pKernelFifo, pIt, ppKernelChannel) kfifoGetNextKernelChannel_IMPL(pGpu, pKernelFifo, pIt, ppKernelChannel)
#endif //__nvoc_kernel_fifo_h_disabled
1897 
void kfifoFillMemInfo_IMPL(struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc, NV2080_CTRL_FIFO_MEM_INFO *pMemory);

// Normal builds route kfifoFillMemInfo() to kfifoFillMemInfo_IMPL(); the
// disabled-build stub only asserts.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline void kfifoFillMemInfo(struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc, NV2080_CTRL_FIFO_MEM_INFO *pMemory) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoFillMemInfo(pKernelFifo, pMemDesc, pMemory) kfifoFillMemInfo_IMPL(pKernelFifo, pMemDesc, pMemory)
#endif //__nvoc_kernel_fifo_h_disabled
1907 
NvU32 kfifoGetAllocatedChannelMask_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, NvU32 *pBitMask, NvLength bitMaskSize);

// Normal builds route kfifoGetAllocatedChannelMask() to the _IMPL; the
// disabled-build stub asserts and returns 0.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvU32 kfifoGetAllocatedChannelMask(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, NvU32 *pBitMask, NvLength bitMaskSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetAllocatedChannelMask(pGpu, pKernelFifo, runlistId, pBitMask, bitMaskSize) kfifoGetAllocatedChannelMask_IMPL(pGpu, pKernelFifo, runlistId, pBitMask, bitMaskSize)
#endif //__nvoc_kernel_fifo_h_disabled
1918 
NV_STATUS kfifoChannelListCreate_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST **arg0);

// Normal builds route kfifoChannelListCreate() to the _IMPL; disabled
// builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListCreate(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST **arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListCreate(pGpu, pKernelFifo, arg0) kfifoChannelListCreate_IMPL(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled
1929 
NV_STATUS kfifoChannelListDestroy_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST *arg0);

// Normal builds route kfifoChannelListDestroy() to the _IMPL; disabled
// builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListDestroy(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListDestroy(pGpu, pKernelFifo, arg0) kfifoChannelListDestroy_IMPL(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled
1940 
NV_STATUS kfifoChannelListAppend_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1);

// Normal builds route kfifoChannelListAppend() to the _IMPL; disabled
// builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListAppend(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListAppend(pGpu, pKernelFifo, arg0, arg1) kfifoChannelListAppend_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled
1951 
NV_STATUS kfifoChannelListRemove_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1);

// Normal builds route kfifoChannelListRemove() to the _IMPL; disabled
// builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoChannelListRemove(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelListRemove(pGpu, pKernelFifo, arg0, arg1) kfifoChannelListRemove_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled
1962 
NvBool kfifoEngineListHasChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE *arg0, NvU32 arg1);

// Normal builds route kfifoEngineListHasChannel() to the _IMPL; the
// disabled-build stub asserts and returns NV_FALSE.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NvBool kfifoEngineListHasChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE *arg0, NvU32 arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoEngineListHasChannel(pGpu, pKernelFifo, arg0, arg1) kfifoEngineListHasChannel_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled
1973 
CTX_BUF_POOL_INFO *kfifoGetRunlistBufPool_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType);

// Normal builds route kfifoGetRunlistBufPool() to the _IMPL; the
// disabled-build stub asserts and returns NULL.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline CTX_BUF_POOL_INFO *kfifoGetRunlistBufPool(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, RM_ENGINE_TYPE rmEngineType) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NULL;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetRunlistBufPool(pGpu, pKernelFifo, rmEngineType) kfifoGetRunlistBufPool_IMPL(pGpu, pKernelFifo, rmEngineType)
#endif //__nvoc_kernel_fifo_h_disabled
1984 
NV_STATUS kfifoGetRunlistBufInfo_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvBool arg1, NvU32 arg2, NvU64 *arg3, NvU64 *arg4);

// Normal builds route kfifoGetRunlistBufInfo() to the _IMPL; disabled
// builds get a stub that asserts and reports NV_ERR_NOT_SUPPORTED.
#ifdef __nvoc_kernel_fifo_h_disabled
static inline NV_STATUS kfifoGetRunlistBufInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvBool arg1, NvU32 arg2, NvU64 *arg3, NvU64 *arg4) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetRunlistBufInfo(pGpu, pKernelFifo, arg0, arg1, arg2, arg3, arg4) kfifoGetRunlistBufInfo_IMPL(pGpu, pKernelFifo, arg0, arg1, arg2, arg3, arg4)
#endif //__nvoc_kernel_fifo_h_disabled
1995 
// Registers a pair of scheduling callbacks with KernelFifo: one invoked after
// scheduling is enabled and one before it is disabled, each with its own
// opaque data pointer. Pairs with kfifoRemoveSchedulingHandler.
NV_STATUS kfifoAddSchedulingHandler_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and fail with NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoAddSchedulingHandler(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoAddSchedulingHandler(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData) kfifoAddSchedulingHandler_IMPL(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData)
#endif //__nvoc_kernel_fifo_h_disabled
2006 
// Unregisters scheduling callbacks previously installed via
// kfifoAddSchedulingHandler; the handler/data arguments mirror that call
// (presumably both must match the registered pair — confirm at the _IMPL).
void kfifoRemoveSchedulingHandler_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert; nothing to remove (void return).
static inline void kfifoRemoveSchedulingHandler(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRemoveSchedulingHandler(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData) kfifoRemoveSchedulingHandler_IMPL(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData)
#endif //__nvoc_kernel_fifo_h_disabled
2016 
// Invokes the registered post-scheduling-enable callbacks (see
// kfifoAddSchedulingHandler).
NV_STATUS kfifoTriggerPostSchedulingEnableCallback_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and fail with NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoTriggerPostSchedulingEnableCallback(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoTriggerPostSchedulingEnableCallback(pGpu, pKernelFifo) kfifoTriggerPostSchedulingEnableCallback_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2027 
// Invokes the registered pre-scheduling-disable callbacks (see
// kfifoAddSchedulingHandler).
NV_STATUS kfifoTriggerPreSchedulingDisableCallback_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and fail with NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoTriggerPreSchedulingDisableCallback(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoTriggerPreSchedulingDisableCallback(pGpu, pKernelFifo) kfifoTriggerPreSchedulingDisableCallback_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2038 
// Returns the maximum number of channels in the system (system-wide, per the
// name; exact scope defined by the _IMPL).
NvU32 kfifoGetMaxChannelsInSystem_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and report zero channels.
static inline NvU32 kfifoGetMaxChannelsInSystem(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxChannelsInSystem(pGpu, pKernelFifo) kfifoGetMaxChannelsInSystem_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2049 
// Returns the maximum number of channel groups (TSGs) in the system;
// counterpart of kfifoGetMaxChannelsInSystem.
NvU32 kfifoGetMaxChannelGroupsInSystem_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and report zero groups.
static inline NvU32 kfifoGetMaxChannelGroupsInSystem(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetMaxChannelGroupsInSystem(pGpu, pKernelFifo) kfifoGetMaxChannelGroupsInSystem_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2060 
// Fills in FIFO capability bits. pKfifoCaps: caller-provided caps byte array
// (size contract not visible here — confirm at the _IMPL);
// bCapsInitialized: whether pKfifoCaps already holds caps to merge with.
void kfifoGetDeviceCaps_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU8 *pKfifoCaps, NvBool bCapsInitialized);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert; caps buffer left untouched.
static inline void kfifoGetDeviceCaps(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU8 *pKfifoCaps, NvBool bCapsInitialized) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetDeviceCaps(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized) kfifoGetDeviceCaps_IMPL(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized)
#endif //__nvoc_kernel_fifo_h_disabled
2070 
// Returns the pushbuffer capability flags as an NvU32 (flag encoding defined
// by the _IMPL, not visible in this header).
NvU32 kfifoReturnPushbufferCaps_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and report no capabilities.
static inline NvU32 kfifoReturnPushbufferCaps(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoReturnPushbufferCaps(pGpu, pKernelFifo) kfifoReturnPushbufferCaps_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2081 
// Returns the allocation parameters (aperture, attributes, alloc flags) to use
// for runlist buffers via the three out-parameters. Note: takes only pGpu —
// no KernelFifo instance — so there is no disabled-stub variant; the macro is
// unconditional.
void kfifoRunlistGetBufAllocParams_IMPL(struct OBJGPU *pGpu, NV_ADDRESS_SPACE *pAperture, NvU32 *pAttr, NvU64 *pAllocFlags);

#define kfifoRunlistGetBufAllocParams(pGpu, pAperture, pAttr, pAllocFlags) kfifoRunlistGetBufAllocParams_IMPL(pGpu, pAperture, pAttr, pAllocFlags)
// Allocates runlist buffer memory descriptors for the given runlist, returned
// via ppMemDesc (an array — element count contract not visible here; confirm
// at the _IMPL). aperture/attr/allocFlags typically come from
// kfifoRunlistGetBufAllocParams.
NV_STATUS kfifoRunlistAllocBuffers_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bSupportTsg, NV_ADDRESS_SPACE aperture, NvU32 runlistId, NvU32 attr, NvU64 allocFlags, NvU64 maxRunlistEntries, NvBool bHWRL, PMEMORY_DESCRIPTOR *ppMemDesc);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and fail with NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoRunlistAllocBuffers(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bSupportTsg, NV_ADDRESS_SPACE aperture, NvU32 runlistId, NvU32 attr, NvU64 allocFlags, NvU64 maxRunlistEntries, NvBool bHWRL, PMEMORY_DESCRIPTOR *ppMemDesc) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoRunlistAllocBuffers(pGpu, pKernelFifo, bSupportTsg, aperture, runlistId, attr, allocFlags, maxRunlistEntries, bHWRL, ppMemDesc) kfifoRunlistAllocBuffers_IMPL(pGpu, pKernelFifo, bSupportTsg, aperture, runlistId, attr, allocFlags, maxRunlistEntries, bHWRL, ppMemDesc)
#endif //__nvoc_kernel_fifo_h_disabled
2095 
// Enumerates the engines served by a runlist. pOutEngineIds receives the
// RM_ENGINE_TYPE values and *pNumEngines the count (caller-provided capacity
// contract not visible here — confirm at the _IMPL).
NV_STATUS kfifoGetEngineListForRunlist_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, RM_ENGINE_TYPE *pOutEngineIds, NvU32 *pNumEngines);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and fail with NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoGetEngineListForRunlist(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, RM_ENGINE_TYPE *pOutEngineIds, NvU32 *pNumEngines) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEngineListForRunlist(pGpu, pKernelFifo, runlistId, pOutEngineIds, pNumEngines) kfifoGetEngineListForRunlist_IMPL(pGpu, pKernelFifo, runlistId, pOutEngineIds, pNumEngines)
#endif //__nvoc_kernel_fifo_h_disabled
2106 
// Returns the RM class ID to use for channel allocation on this GPU
// (generation-dependent; value chosen by the _IMPL).
NvU32 kfifoGetChannelClassId_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and return an invalid class ID of 0.
static inline NvU32 kfifoGetChannelClassId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetChannelClassId(pGpu, pKernelFifo) kfifoGetChannelClassId_IMPL(pGpu, pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2117 
// Returns NV_TRUE when the given MMU-fault engine ID (arg0) belongs to a
// PBDMA unit rather than an engine.
NvBool kfifoIsMmuFaultEngineIdPbdma_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and answer "not a PBDMA".
static inline NvBool kfifoIsMmuFaultEngineIdPbdma(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoIsMmuFaultEngineIdPbdma(pGpu, pKernelFifo, arg0) kfifoIsMmuFaultEngineIdPbdma_IMPL(pGpu, pKernelFifo, arg0)
#endif //__nvoc_kernel_fifo_h_disabled
2128 
// Translates an MMU-fault engine ID (arg0) into a PBDMA ID, returned through
// arg1. See kfifoIsMmuFaultEngineIdPbdma for the PBDMA-vs-engine distinction.
NV_STATUS kfifoGetPbdmaIdFromMmuFaultId_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvU32 *arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and fail with NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoGetPbdmaIdFromMmuFaultId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetPbdmaIdFromMmuFaultId(pGpu, pKernelFifo, arg0, arg1) kfifoGetPbdmaIdFromMmuFaultId_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled
2139 
// Maps a PBDMA MMU-fault ID (arg0) to the RM engine type that PBDMA serves,
// returned through arg1.
NV_STATUS kfifoGetEngineTypeFromPbdmaFaultId_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, RM_ENGINE_TYPE *arg1);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and fail with NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoGetEngineTypeFromPbdmaFaultId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, RM_ENGINE_TYPE *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetEngineTypeFromPbdmaFaultId(pGpu, pKernelFifo, arg0, arg1) kfifoGetEngineTypeFromPbdmaFaultId_IMPL(pGpu, pKernelFifo, arg0, arg1)
#endif //__nvoc_kernel_fifo_h_disabled
2150 
// Sets the scheduling timeslice (in microseconds) for a channel group (TSG).
// bSkipSubmit: presumably defers pushing the change to hardware — confirm
// against kfifoChannelGroupSetTimeslice_IMPL.
NV_STATUS kfifoChannelGroupSetTimeslice_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and fail with NV_ERR_NOT_SUPPORTED.
static inline NV_STATUS kfifoChannelGroupSetTimeslice(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoChannelGroupSetTimeslice(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimeslice_IMPL(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit)
#endif //__nvoc_kernel_fifo_h_disabled
2161 
// Returns the static guest-engine lookup table and writes its element count to
// *pTableSize. Takes no GPU/KernelFifo argument, so the macro is unconditional
// (no disabled-stub variant).
const FIFO_GUEST_ENGINE_TABLE *kfifoGetGuestEngineLookupTable_IMPL(NvU32 *pTableSize);

#define kfifoGetGuestEngineLookupTable(pTableSize) kfifoGetGuestEngineLookupTable_IMPL(pTableSize)
// Returns the number of ESCHED-driven engines tracked by this KernelFifo
// instance (note: no OBJGPU parameter, unlike most accessors above).
NvU32 kfifoGetNumEschedDrivenEngines_IMPL(struct KernelFifo *pKernelFifo);

#ifdef __nvoc_kernel_fifo_h_disabled
// KernelFifo compiled out: assert and report zero engines.
static inline NvU32 kfifoGetNumEschedDrivenEngines(struct KernelFifo *pKernelFifo) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
    return 0;
}
#else //__nvoc_kernel_fifo_h_disabled
#define kfifoGetNumEschedDrivenEngines(pKernelFifo) kfifoGetNumEschedDrivenEngines_IMPL(pKernelFifo)
#endif //__nvoc_kernel_fifo_h_disabled
2175 
2176 #undef PRIVATE_FIELD
2177 
2178 
// Idles the specified channel(s). Either a single (hClient, hDevice, hChannel)
// handle triple is used, or the clients/devices/channels NvP64 arrays supply
// numChannels parallel handle sets (NvP64 suggests these may be user-space
// pointers when bUserModeArgs is NV_TRUE — confirm at the definition; the
// semantics of flags and timeout are likewise defined by the implementation).
NV_STATUS RmIdleChannels(NvHandle hClient,
                         NvHandle hDevice,
                         NvHandle hChannel,
                         NvU32    numChannels,
                         NvP64    clients,
                         NvP64    devices,
                         NvP64    channels,
                         NvU32    flags,
                         NvU32    timeout,
                         NvBool   bUserModeArgs);
2189 
2190 #endif // _KERNELFIFO_H_
2191 
2192 #ifdef __cplusplus
2193 } // extern "C"
2194 #endif
2195 
2196 #endif // _G_KERNEL_FIFO_NVOC_H_
2197