1 #ifndef _G_KERNEL_MIG_MANAGER_NVOC_H_
2 #define _G_KERNEL_MIG_MANAGER_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_kernel_mig_manager_nvoc.h"
33 
34 #ifndef KERNEL_MIG_MANAGER_H
35 #define KERNEL_MIG_MANAGER_H
36 
37 #include "core/core.h"
38 #include "gpu/eng_state.h"
39 #include "gpu/gpu.h"
40 #include "gpu_mgr/gpu_mgr.h"
41 #include "kernel/gpu/gr/kernel_graphics_manager.h"
42 #include "kernel/gpu_mgr/gpu_mgr.h"
43 #include "kernel/gpu/mmu/kern_gmmu.h"
44 #include "kernel/gpu/nvbitmask.h"
45 
46 #include "ctrl/ctrlc637.h"
47 
48 typedef struct KERNEL_MIG_GPU_INSTANCE KERNEL_MIG_GPU_INSTANCE;
49 
// Forward declarations of opaque types
51 typedef struct KERNEL_MIG_MANAGER_PRIVATE_DATA KERNEL_MIG_MANAGER_PRIVATE_DATA;
52 typedef struct MIG_GPU_INSTANCE MIG_GPU_INSTANCE;
53 
54 #define  IS_MIG_ENABLED(pGpu) (((pGpu) != NULL) && (GPU_GET_KERNEL_MIG_MANAGER(pGpu) != NULL) && \
55                                kmigmgrIsMIGEnabled((pGpu), GPU_GET_KERNEL_MIG_MANAGER(pGpu)))
56 #define  IS_MIG_IN_USE(pGpu)  (((pGpu) != NULL) && (GPU_GET_KERNEL_MIG_MANAGER(pGpu) != NULL) && \
57                                kmigmgrIsMIGGpuInstancingEnabled((pGpu), GPU_GET_KERNEL_MIG_MANAGER(pGpu)))
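
/*
 * Usage sketch (illustrative only, not part of the generated interface):
 * guard MIG-specific code paths with the helper macros above. pGpu is
 * assumed to be a valid OBJGPU pointer from the caller's context.
 *
 *     if (IS_MIG_IN_USE(pGpu))
 *     {
 *         // GPU instancing is active; take the per-GPU-instance path.
 *     }
 */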
58 
59 #define FOR_EACH_VALID_GPU_INSTANCE(pGpu, pKernelMIGManager, pLocal)                 \
60     {                                                                                \
61         NvU32 i;                                                                     \
62         for (i = 0; i < KMIGMGR_MAX_GPU_INSTANCES; ++i)                              \
63         {                                                                            \
64             (pLocal) = kmigmgrGetMIGGpuInstanceSlot((pGpu), (pKernelMIGManager), i); \
65             if (((pLocal) == NULL) || !(pLocal)->bValid)                             \
66                 continue;
67 
68 #define FOR_EACH_VALID_GPU_INSTANCE_END()                                           \
69         }                                                                           \
70     }
71 
72 #define KMIGMGR_SWIZZID_INVALID             0xFFFFFFFF
73 #define KMIGMGR_MAX_GPU_SWIZZID             15
74 #define KMIGMGR_MAX_GPU_INSTANCES           GPUMGR_MAX_GPU_INSTANCES
75 #define KMIGMGR_MAX_COMPUTE_INSTANCES       GPUMGR_MAX_COMPUTE_INSTANCES
76 #define KMIGMGR_COMPUTE_INSTANCE_ID_INVALID 0xFFFFFFFF
77 #define KMIGMGR_COMPUTE_SIZE_INVALID        NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE__SIZE
78 #define KMIGMGR_MAX_GPU_CTSID               21
79 #define KMIGMGR_CTSID_INVALID               0xFFFFFFFFUL
80 #define KMIGMGR_SPAN_OFFSET_INVALID         KMIGMGR_CTSID_INVALID
81 
82 #define KMIGMGR_INSTANCE_ATTRIBUTION_ID_INVALID            \
83     ((KMIGMGR_MAX_GPU_SWIZZID * KMIGMGR_MAX_GPU_SWIZZID) + \
84      KMIGMGR_MAX_COMPUTE_INSTANCES)
85 
86 MAKE_BITVECTOR(GFID_BIT_VECTOR, VMMU_MAX_GFID);
87 
88 typedef struct KMIGMGR_INSTANCE_HANDLES
89 {
90     /*!
91      * Client handle to make calls into this instance
92      */
93     NvHandle hClient;
94 
95     /*!
96      * Device handle to make calls into this instance
97      */
98     NvHandle hDevice;
99 
100     /*!
101      * Subdevice handle to make calls into this instance
102      */
103     NvHandle hSubdevice;
104 
105     /*!
106      * Subscription handle to make calls into this instance
107      */
108     NvHandle hSubscription;
109 } KMIGMGR_INSTANCE_HANDLES;
110 
111 typedef struct MIG_RESOURCE_ALLOCATION
112 {
113     /*!
     * Logical GPC-IDs associated with this instance.
     * The current assumption is that GPCs within an instance are always
     * physically contiguous, so a start index and count could be stored
     * instead, saving some memory; however, that would enforce a contiguity
     * restriction which may not hold in the future.
119      */
120     NvU32 gpcIds[KGRMGR_MAX_GPC];
121 
122     /*!
123      * Number of GPCs associated with this instance
124      */
125     NvU32 gpcCount;
126 
127     /*!
     * Number of GFX GPCs associated with this instance. This is a subset of the GPCs counted in gpcCount.
129      */
130     NvU32 gfxGpcCount;
131 
132     /*!
133      * VEID start offset for this instance
134      */
135     NvU32 veidOffset;
136 
137     /*!
138      * Number of VEIDs associated with this instance
139      */
140     NvU32 veidCount;
141 
142     /*!
143      * Bitvector of partitionable engines associated with this instance.
144      */
145     ENGTYPE_BIT_VECTOR engines;
146 
147     /*!
148      * Bitvector of local engine IDs associated with this instance.
149      */
150     ENGTYPE_BIT_VECTOR localEngines;
151 
152     /*!
153      * Virtualized GPC Count
154     */
155     NvU32 virtualGpcCount;
156 
157     /*!
158      * Number of SMs
159      */
160     NvU32 smCount;
161 } MIG_RESOURCE_ALLOCATION;
162 
163 typedef struct MIG_COMPUTE_INSTANCE
164 {
165     /*!
166      * Resource allocated for this instance
167      */
168     MIG_RESOURCE_ALLOCATION resourceAllocation;
169 
170     /*!
171      * States that this is a valid compute instance
172      */
173     NvBool bValid;
174 
175     /*!
176      * Flags indicating which engines (if any) are shared across multiple compute
177      * instances. Bit positions in this flag correspond to
178      * NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_*
179      */
180     NvU32 sharedEngFlag;
181 
182     /*!
183      * Compute instance ID
184      */
185     NvU32 id;
186 
187     /*!
188      * Shared object to track instance reference count
189      */
190     struct RsShared *pShare;
191 
192     /*!
193      * Opaque pointer to os-specific capabilities
194      */
195     OS_RM_CAPS *pOsRmCaps;
196 
197     /*!
198      * Compute instance UUID
199      */
200     NvUuid uuid;
201 
202     /*!
     * Handles for RPCs into this instance
204      */
205     KMIGMGR_INSTANCE_HANDLES instanceHandles;
206 
207     /*!
208      * Span start of this compute instance indicating the "position" of the
209      * instance within a GPU instance's view. For non-CTS ID enabled chips,
210      * this corresponds to the start of a VEID segment. For CTS-ID chips, this
211      * corresponds to the offset from the first CTS ID of a given profile size.
212      */
213     NvU32 spanStart;
214 
215     /*!
     * Compute profile size associated with this MIG compute instance.
     * Used to associate the instance with a given compute profile, since a CTS
     * ID may not have been assigned.
219      */
220     NvU32 computeSize;
221 } MIG_COMPUTE_INSTANCE;
222 
223 /*!
224  * @brief Situational params for compute instance creation API
225  *
226  * This structure comes with two specializations:
227  *  TYPE_REQUEST
228  *      Parameter refers to request data passed in via EXEC_PARTITIONS_CREATE ctrl
229  *      call. All resources claimed by new compute instance are chosen via allocator,
230  *      and the API may create multiple compute instances.
231  *  TYPE_RESTORE
232  *      Parameter refers to saved compute instance data. Most resources claimed by new
233  *      compute instance are determined by the save data, and others are claimed via
234  *      allocator.
 *
 *  requestFlags
 *      TYPE_REQUEST_WITH_IDS
 *          Parameter refers to request data passed in via EXEC_PARTITIONS_CREATE ctrl
 *          call. All resources claimed by new instance are chosen via allocator unless
 *          the _AT_SPAN flag is also specified.
 *          RM also tries to allocate the instance with the compute instance ID
 *          requested by the user. This flag is only supported on vGPU enabled RM builds
 *          and will be removed when the vgpu plugin implements virtualized compute
 *          instance ID support. (bug 2938187)
 *      TYPE_REQUEST_AT_SPAN
 *          Parameter refers to request data passed in via EXEC_PARTITIONS_CREATE ctrl
 *          call. All resources claimed by the new instance are attempted to be claimed by
 *          the RM allocator starting at the specified resource span.
248  */
249 typedef struct KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS
250 {
251     enum
252     {
253         KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST,
254         KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_RESTORE
255     } type;
256     union
257     {
258         struct
259         {
260             NvU32 count;
261             NVC637_CTRL_EXEC_PARTITIONS_INFO *pReqComputeInstanceInfo;
262             NvU32 requestFlags;
263         } request;
264         struct
265         {
266             struct GPUMGR_SAVE_COMPUTE_INSTANCE *pComputeInstanceSave;
267         } restore;
268     } inst;
269 } KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS;
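
/*
 * Illustrative sketch (assumptions noted inline) of populating the TYPE_REQUEST
 * specialization; pExecPartInfo and ciCount are hypothetical values taken from
 * an EXEC_PARTITIONS_CREATE control call.
 *
 *     KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS params;
 *     params.type = KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST;
 *     params.inst.request.count = ciCount;                      // number of CIs requested
 *     params.inst.request.pReqComputeInstanceInfo = pExecPartInfo;
 *     params.inst.request.requestFlags = 0;                     // no _WITH_IDS/_AT_SPAN behavior
 */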
270 
271 typedef struct KMIGMGR_CONFIGURE_INSTANCE_PARAMS
272 {
273     NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE profile;
274     NvU32 ctsId;
275     NvU32 veidSpanStart;
276 } KMIGMGR_CONFIGURE_INSTANCE_REQUEST;
277 
278 typedef struct KERNEL_MIG_GPU_INSTANCE
279 {
280     /*! Structure containing GPU instance profile */
281     const NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO *pProfile;
282 
283     /*!
284      * Resource allocated for this instance
285      */
286     MIG_RESOURCE_ALLOCATION resourceAllocation;
287 
288     /*!
289      * Mask of physical engines in this GPU instance which are assigned exclusively
290      * to some compute instance. Indexed via RM_ENGINE_TYPE_*
291      */
292     ENGTYPE_BIT_VECTOR exclusiveEngMask;
293 
294     /*!
295      * Mask of physical engines in this GPU instance which are assigned to at least
296      * one compute instance, but may be assigned to others.
297      * Indexed via RM_ENGINE_TYPE_*
298      */
299     ENGTYPE_BIT_VECTOR sharedEngMask;
300 
301     /*!
     * Compute instance info.
303      */
304     MIG_COMPUTE_INSTANCE MIGComputeInstance[KMIGMGR_MAX_COMPUTE_INSTANCES];
305 
306     /*!
     * Bit vector of GFIDs associated with this instance.
308      */
309     GFID_BIT_VECTOR gfidMap;
310 
311     /*!
312      * GPU instance ID
313      */
314     NvU32 swizzId;
315 
316     /*!
317      * Validated user-provided instance flags - NV2080_CTRL_GPU_PARTITION_FLAG_*
318      */
319     NvU32 partitionFlag;
320 
321     /*!
322      * Memory handle associated with partitioned memory
323      */
324     NvHandle hMemory;
325 
326     /*!
327      * Shared object to track instance reference count
328      */
329     struct RsShared *pShare;
330 
331     /*!
332      * Heap used for managing instance's memory
333      */
334     struct Heap *pMemoryPartitionHeap;
335 
336     /*!
337      * States that this instance is valid
338      */
339     NvBool bValid;
340 
341     /*!
342      * Indicates that the GPU instance scrubber is initialized and should be
343      * accounted for / ignored in the instance refcount when determining
     * whether or not an instance can be destroyed.
345      */
346     NvBool bMemoryPartitionScrubberInitialized;
347 
348     /*!
349      * Physical memory address range for this instance.
350      */
351     NV_RANGE memRange;
352 
353     /*!
354      * Memory pool for client page table allocations
355      */
356     RM_POOL_ALLOC_MEM_RESERVE_INFO *pPageTableMemPool;
357 
358     /*!
359      * Physical MIG GPU Instance info for this instance
360      */
361     MIG_GPU_INSTANCE *pMIGGpuInstance;
362 
363     /*!
364      * Mask of runlistIds for engines that belong to this instance
365      */
366     NvU64 runlistIdMask;
367 
368     /*!
369      * Opaque pointer to os-specific capabilities
370      */
371     OS_RM_CAPS *pOsRmCaps;
372 
373     /*!
     * Handles for RPCs into this instance
375      */
376     KMIGMGR_INSTANCE_HANDLES instanceHandles;
377 
378     /*!
379      * Mask of CTS IDs in use
380      */
381     NvU64 ctsIdsInUseMask;
382 
383     /*!
384      * GR to CTS ID mapping
385      */
386     NvU32 grCtsIdMap[KMIGMGR_MAX_COMPUTE_INSTANCES];
387 
388     /*!
389      * Mask tracking which compute spans are currently in-use
390      */
391     NvU32 spanInUseMask;
392 } KERNEL_MIG_GPU_INSTANCE;
393 
394 /*!
395  * @brief Situational params for GPU instance creation API
396  *
397  * This structure comes with two specializations:
398  *  TYPE_REQUEST
399  *      Parameter refers to request data passed in via SET_PARTITIONS ctrl
400  *      call. All resources claimed by new GPU instance are chosen via allocator.
401  *  TYPE_RESTORE
402  *      Parameter refers to saved GPU instance data. Most resources claimed by new
403  *      GPU instance are determined by the save data, and others are claimed via
404  *      allocator.
405  */
406 typedef struct KMIGMGR_CREATE_GPU_INSTANCE_PARAMS
407 {
408     enum
409     {
410         KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_REQUEST,
411         KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_RESTORE
412     } type;
413     union
414     {
415         struct
416         {
417             NvU32    partitionFlag;
418             NV_RANGE placement;
419             NvBool   bUsePlacement;
420         } request;
421         struct
422         {
423             struct GPUMGR_SAVE_GPU_INSTANCE *pGPUInstanceSave;
424         } restore;
425     } inst;
426 } KMIGMGR_CREATE_GPU_INSTANCE_PARAMS;
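
/*
 * Illustrative sketch (not part of the generated interface) of populating the
 * TYPE_REQUEST specialization; partitionFlag is assumed to be a validated
 * NV2080_CTRL_GPU_PARTITION_FLAG_* value supplied by the caller.
 *
 *     KMIGMGR_CREATE_GPU_INSTANCE_PARAMS params;
 *     params.type = KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_REQUEST;
 *     params.inst.request.partitionFlag = partitionFlag;
 *     params.inst.request.placement     = NV_RANGE_EMPTY;  // let the allocator choose
 *     params.inst.request.bUsePlacement = NV_FALSE;
 */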
427 
428 /*!
429  * @brief Packed pointer to a GPU instance/compute instance combo
430  * @note  Having NULL pKernelMIGGpuInstance and non-NULL pMIGComputeInstance is never expected
431  */
432 struct MIG_INSTANCE_REF
433 {
434     KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance;
435     MIG_COMPUTE_INSTANCE *pMIGComputeInstance;
436 };
437 
438 typedef struct KERNEL_MIG_MANAGER_STATIC_INFO
439 {
440     /*! @ref NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_PROFILES */
441     NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS *pProfiles;
442 
443     /*! Mask of partitionable engines which are present on this GPU. */
444     NvU32 partitionableEngineMask[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
445 
446     /*! Per swizzId FB memory page ranges */
447     NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS *pSwizzIdFbMemPageRanges;
448 
449     /*! Compute instance profiles */
450     NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_COMPUTE_PROFILES_PARAMS *pCIProfiles;
451 
452     /*! Skyline info used to determine GPU and compute instance resources available */
453     NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS *pSkylineInfo;
454 } KERNEL_MIG_MANAGER_STATIC_INFO;
455 
456 /*!
 * KernelMIGManager provides kernel-side services for managing MIG instances.
 * It also maintains state related to GPU partitioning.
459  */
460 #ifdef NVOC_KERNEL_MIG_MANAGER_H_PRIVATE_ACCESS_ALLOWED
461 #define PRIVATE_FIELD(x) x
462 #else
463 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
464 #endif
465 struct KernelMIGManager {
466     const struct NVOC_RTTI *__nvoc_rtti;
467     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
468     struct Object *__nvoc_pbase_Object;
469     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
470     struct KernelMIGManager *__nvoc_pbase_KernelMIGManager;
471     NV_STATUS (*__kmigmgrConstructEngine__)(OBJGPU *, struct KernelMIGManager *, ENGDESCRIPTOR);
472     NV_STATUS (*__kmigmgrStateInitLocked__)(OBJGPU *, struct KernelMIGManager *);
473     NV_STATUS (*__kmigmgrStateUnload__)(OBJGPU *, struct KernelMIGManager *, NvU32);
474     NV_STATUS (*__kmigmgrCreateGPUInstanceCheck__)(OBJGPU *, struct KernelMIGManager *, NvBool);
475     NvBool (*__kmigmgrIsDevinitMIGBitSet__)(OBJGPU *, struct KernelMIGManager *);
476     NvBool (*__kmigmgrIsGPUInstanceCombinationValid__)(OBJGPU *, struct KernelMIGManager *, NvU32);
477     NvBool (*__kmigmgrIsGPUInstanceFlagValid__)(OBJGPU *, struct KernelMIGManager *, NvU32);
478     NvBool (*__kmigmgrIsMemoryPartitioningRequested__)(OBJGPU *, struct KernelMIGManager *, NvU32);
479     NvBool (*__kmigmgrIsMemoryPartitioningNeeded__)(OBJGPU *, struct KernelMIGManager *, NvU32);
480     struct NV_RANGE (*__kmigmgrMemSizeFlagToSwizzIdRange__)(OBJGPU *, struct KernelMIGManager *, NvU32);
481     NV_STATUS (*__kmigmgrStateLoad__)(POBJGPU, struct KernelMIGManager *, NvU32);
482     NV_STATUS (*__kmigmgrStatePreLoad__)(POBJGPU, struct KernelMIGManager *, NvU32);
483     NV_STATUS (*__kmigmgrStatePostUnload__)(POBJGPU, struct KernelMIGManager *, NvU32);
484     void (*__kmigmgrStateDestroy__)(POBJGPU, struct KernelMIGManager *);
485     NV_STATUS (*__kmigmgrStatePreUnload__)(POBJGPU, struct KernelMIGManager *, NvU32);
486     NV_STATUS (*__kmigmgrStateInitUnlocked__)(POBJGPU, struct KernelMIGManager *);
487     void (*__kmigmgrInitMissing__)(POBJGPU, struct KernelMIGManager *);
488     NV_STATUS (*__kmigmgrStatePreInitLocked__)(POBJGPU, struct KernelMIGManager *);
489     NV_STATUS (*__kmigmgrStatePreInitUnlocked__)(POBJGPU, struct KernelMIGManager *);
490     NV_STATUS (*__kmigmgrStatePostLoad__)(POBJGPU, struct KernelMIGManager *, NvU32);
491     NvBool (*__kmigmgrIsPresent__)(POBJGPU, struct KernelMIGManager *);
492     NvBool PRIVATE_FIELD(bIsA100ReducedConfig);
493     KERNEL_MIG_MANAGER_PRIVATE_DATA *PRIVATE_FIELD(pPrivate);
494     KERNEL_MIG_GPU_INSTANCE PRIVATE_FIELD(kernelMIGGpuInstance)[8];
495     NvBool PRIVATE_FIELD(bMIGEnabled);
496     NvU64 PRIVATE_FIELD(swizzIdInUseMask);
497     NvBool PRIVATE_FIELD(bRestoreWatchdog);
498     NvBool PRIVATE_FIELD(bReenableWatchdog);
499     union ENGTYPE_BIT_VECTOR PRIVATE_FIELD(partitionableEnginesInUse);
500     NvBool PRIVATE_FIELD(bDeviceProfilingInUse);
501     NvBool PRIVATE_FIELD(bMIGAutoOnlineEnabled);
502 };
503 struct KernelMIGManager_PRIVATE {
504     const struct NVOC_RTTI *__nvoc_rtti;
505     struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
506     struct Object *__nvoc_pbase_Object;
507     struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
508     struct KernelMIGManager *__nvoc_pbase_KernelMIGManager;
509     NV_STATUS (*__kmigmgrConstructEngine__)(OBJGPU *, struct KernelMIGManager *, ENGDESCRIPTOR);
510     NV_STATUS (*__kmigmgrStateInitLocked__)(OBJGPU *, struct KernelMIGManager *);
511     NV_STATUS (*__kmigmgrStateUnload__)(OBJGPU *, struct KernelMIGManager *, NvU32);
512     NV_STATUS (*__kmigmgrCreateGPUInstanceCheck__)(OBJGPU *, struct KernelMIGManager *, NvBool);
513     NvBool (*__kmigmgrIsDevinitMIGBitSet__)(OBJGPU *, struct KernelMIGManager *);
514     NvBool (*__kmigmgrIsGPUInstanceCombinationValid__)(OBJGPU *, struct KernelMIGManager *, NvU32);
515     NvBool (*__kmigmgrIsGPUInstanceFlagValid__)(OBJGPU *, struct KernelMIGManager *, NvU32);
516     NvBool (*__kmigmgrIsMemoryPartitioningRequested__)(OBJGPU *, struct KernelMIGManager *, NvU32);
517     NvBool (*__kmigmgrIsMemoryPartitioningNeeded__)(OBJGPU *, struct KernelMIGManager *, NvU32);
518     struct NV_RANGE (*__kmigmgrMemSizeFlagToSwizzIdRange__)(OBJGPU *, struct KernelMIGManager *, NvU32);
519     NV_STATUS (*__kmigmgrStateLoad__)(POBJGPU, struct KernelMIGManager *, NvU32);
520     NV_STATUS (*__kmigmgrStatePreLoad__)(POBJGPU, struct KernelMIGManager *, NvU32);
521     NV_STATUS (*__kmigmgrStatePostUnload__)(POBJGPU, struct KernelMIGManager *, NvU32);
522     void (*__kmigmgrStateDestroy__)(POBJGPU, struct KernelMIGManager *);
523     NV_STATUS (*__kmigmgrStatePreUnload__)(POBJGPU, struct KernelMIGManager *, NvU32);
524     NV_STATUS (*__kmigmgrStateInitUnlocked__)(POBJGPU, struct KernelMIGManager *);
525     void (*__kmigmgrInitMissing__)(POBJGPU, struct KernelMIGManager *);
526     NV_STATUS (*__kmigmgrStatePreInitLocked__)(POBJGPU, struct KernelMIGManager *);
527     NV_STATUS (*__kmigmgrStatePreInitUnlocked__)(POBJGPU, struct KernelMIGManager *);
528     NV_STATUS (*__kmigmgrStatePostLoad__)(POBJGPU, struct KernelMIGManager *, NvU32);
529     NvBool (*__kmigmgrIsPresent__)(POBJGPU, struct KernelMIGManager *);
530     NvBool bIsA100ReducedConfig;
531     KERNEL_MIG_MANAGER_PRIVATE_DATA *pPrivate;
532     KERNEL_MIG_GPU_INSTANCE kernelMIGGpuInstance[8];
533     NvBool bMIGEnabled;
534     NvU64 swizzIdInUseMask;
535     NvBool bRestoreWatchdog;
536     NvBool bReenableWatchdog;
537     union ENGTYPE_BIT_VECTOR partitionableEnginesInUse;
538     NvBool bDeviceProfilingInUse;
539     NvBool bMIGAutoOnlineEnabled;
540 };
541 
542 #ifndef __NVOC_CLASS_KernelMIGManager_TYPEDEF__
543 #define __NVOC_CLASS_KernelMIGManager_TYPEDEF__
544 typedef struct KernelMIGManager KernelMIGManager;
545 #endif /* __NVOC_CLASS_KernelMIGManager_TYPEDEF__ */
546 
547 #ifndef __nvoc_class_id_KernelMIGManager
548 #define __nvoc_class_id_KernelMIGManager 0x01c1bf
549 #endif /* __nvoc_class_id_KernelMIGManager */
550 
551 extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMIGManager;
552 
553 #define __staticCast_KernelMIGManager(pThis) \
554     ((pThis)->__nvoc_pbase_KernelMIGManager)
555 
556 #ifdef __nvoc_kernel_mig_manager_h_disabled
557 #define __dynamicCast_KernelMIGManager(pThis) ((KernelMIGManager*)NULL)
558 #else //__nvoc_kernel_mig_manager_h_disabled
559 #define __dynamicCast_KernelMIGManager(pThis) \
560     ((KernelMIGManager*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelMIGManager)))
561 #endif //__nvoc_kernel_mig_manager_h_disabled
562 
563 #define PDB_PROP_KMIGMGR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
564 #define PDB_PROP_KMIGMGR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
565 
566 NV_STATUS __nvoc_objCreateDynamic_KernelMIGManager(KernelMIGManager**, Dynamic*, NvU32, va_list);
567 
568 NV_STATUS __nvoc_objCreate_KernelMIGManager(KernelMIGManager**, Dynamic*, NvU32);
569 #define __objCreate_KernelMIGManager(ppNewObj, pParent, createFlags) \
570     __nvoc_objCreate_KernelMIGManager((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
571 
572 #define kmigmgrConstructEngine(arg0, arg1, arg2) kmigmgrConstructEngine_DISPATCH(arg0, arg1, arg2)
573 #define kmigmgrStateInitLocked(arg0, arg1) kmigmgrStateInitLocked_DISPATCH(arg0, arg1)
574 #define kmigmgrStateUnload(arg0, arg1, flags) kmigmgrStateUnload_DISPATCH(arg0, arg1, flags)
575 #define kmigmgrCreateGPUInstanceCheck(arg0, arg1, bMemoryPartitioningNeeded) kmigmgrCreateGPUInstanceCheck_DISPATCH(arg0, arg1, bMemoryPartitioningNeeded)
576 #define kmigmgrCreateGPUInstanceCheck_HAL(arg0, arg1, bMemoryPartitioningNeeded) kmigmgrCreateGPUInstanceCheck_DISPATCH(arg0, arg1, bMemoryPartitioningNeeded)
577 #define kmigmgrIsDevinitMIGBitSet(arg0, arg1) kmigmgrIsDevinitMIGBitSet_DISPATCH(arg0, arg1)
578 #define kmigmgrIsDevinitMIGBitSet_HAL(arg0, arg1) kmigmgrIsDevinitMIGBitSet_DISPATCH(arg0, arg1)
579 #define kmigmgrIsGPUInstanceCombinationValid(arg0, arg1, gpuInstanceFlag) kmigmgrIsGPUInstanceCombinationValid_DISPATCH(arg0, arg1, gpuInstanceFlag)
580 #define kmigmgrIsGPUInstanceCombinationValid_HAL(arg0, arg1, gpuInstanceFlag) kmigmgrIsGPUInstanceCombinationValid_DISPATCH(arg0, arg1, gpuInstanceFlag)
581 #define kmigmgrIsGPUInstanceFlagValid(arg0, arg1, gpuInstanceFlag) kmigmgrIsGPUInstanceFlagValid_DISPATCH(arg0, arg1, gpuInstanceFlag)
582 #define kmigmgrIsGPUInstanceFlagValid_HAL(arg0, arg1, gpuInstanceFlag) kmigmgrIsGPUInstanceFlagValid_DISPATCH(arg0, arg1, gpuInstanceFlag)
583 #define kmigmgrIsMemoryPartitioningRequested(arg0, arg1, partitionFlags) kmigmgrIsMemoryPartitioningRequested_DISPATCH(arg0, arg1, partitionFlags)
584 #define kmigmgrIsMemoryPartitioningRequested_HAL(arg0, arg1, partitionFlags) kmigmgrIsMemoryPartitioningRequested_DISPATCH(arg0, arg1, partitionFlags)
585 #define kmigmgrIsMemoryPartitioningNeeded(arg0, arg1, swizzId) kmigmgrIsMemoryPartitioningNeeded_DISPATCH(arg0, arg1, swizzId)
586 #define kmigmgrIsMemoryPartitioningNeeded_HAL(arg0, arg1, swizzId) kmigmgrIsMemoryPartitioningNeeded_DISPATCH(arg0, arg1, swizzId)
587 #define kmigmgrMemSizeFlagToSwizzIdRange(arg0, arg1, memSizeFlag) kmigmgrMemSizeFlagToSwizzIdRange_DISPATCH(arg0, arg1, memSizeFlag)
588 #define kmigmgrMemSizeFlagToSwizzIdRange_HAL(arg0, arg1, memSizeFlag) kmigmgrMemSizeFlagToSwizzIdRange_DISPATCH(arg0, arg1, memSizeFlag)
589 #define kmigmgrStateLoad(pGpu, pEngstate, arg0) kmigmgrStateLoad_DISPATCH(pGpu, pEngstate, arg0)
590 #define kmigmgrStatePreLoad(pGpu, pEngstate, arg0) kmigmgrStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
591 #define kmigmgrStatePostUnload(pGpu, pEngstate, arg0) kmigmgrStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
592 #define kmigmgrStateDestroy(pGpu, pEngstate) kmigmgrStateDestroy_DISPATCH(pGpu, pEngstate)
593 #define kmigmgrStatePreUnload(pGpu, pEngstate, arg0) kmigmgrStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
594 #define kmigmgrStateInitUnlocked(pGpu, pEngstate) kmigmgrStateInitUnlocked_DISPATCH(pGpu, pEngstate)
595 #define kmigmgrInitMissing(pGpu, pEngstate) kmigmgrInitMissing_DISPATCH(pGpu, pEngstate)
596 #define kmigmgrStatePreInitLocked(pGpu, pEngstate) kmigmgrStatePreInitLocked_DISPATCH(pGpu, pEngstate)
597 #define kmigmgrStatePreInitUnlocked(pGpu, pEngstate) kmigmgrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
598 #define kmigmgrStatePostLoad(pGpu, pEngstate, arg0) kmigmgrStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
599 #define kmigmgrIsPresent(pGpu, pEngstate) kmigmgrIsPresent_DISPATCH(pGpu, pEngstate)
600 NV_STATUS kmigmgrLoadStaticInfo_KERNEL(OBJGPU *arg0, struct KernelMIGManager *arg1);
601 
602 
603 #ifdef __nvoc_kernel_mig_manager_h_disabled
604 static inline NV_STATUS kmigmgrLoadStaticInfo(OBJGPU *arg0, struct KernelMIGManager *arg1) {
605     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
606     return NV_ERR_NOT_SUPPORTED;
607 }
608 #else //__nvoc_kernel_mig_manager_h_disabled
609 #define kmigmgrLoadStaticInfo(arg0, arg1) kmigmgrLoadStaticInfo_KERNEL(arg0, arg1)
610 #endif //__nvoc_kernel_mig_manager_h_disabled
611 
612 #define kmigmgrLoadStaticInfo_HAL(arg0, arg1) kmigmgrLoadStaticInfo(arg0, arg1)
613 
614 static inline NV_STATUS kmigmgrSetStaticInfo_46f6a7(OBJGPU *arg0, struct KernelMIGManager *arg1) {
615     return NV_ERR_NOT_SUPPORTED;
616 }
617 
618 
619 #ifdef __nvoc_kernel_mig_manager_h_disabled
620 static inline NV_STATUS kmigmgrSetStaticInfo(OBJGPU *arg0, struct KernelMIGManager *arg1) {
621     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
622     return NV_ERR_NOT_SUPPORTED;
623 }
624 #else //__nvoc_kernel_mig_manager_h_disabled
625 #define kmigmgrSetStaticInfo(arg0, arg1) kmigmgrSetStaticInfo_46f6a7(arg0, arg1)
626 #endif //__nvoc_kernel_mig_manager_h_disabled
627 
628 #define kmigmgrSetStaticInfo_HAL(arg0, arg1) kmigmgrSetStaticInfo(arg0, arg1)
629 
630 static inline void kmigmgrClearStaticInfo_b3696a(OBJGPU *arg0, struct KernelMIGManager *arg1) {
631     return;
632 }
633 
634 
635 #ifdef __nvoc_kernel_mig_manager_h_disabled
636 static inline void kmigmgrClearStaticInfo(OBJGPU *arg0, struct KernelMIGManager *arg1) {
637     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
638 }
639 #else //__nvoc_kernel_mig_manager_h_disabled
640 #define kmigmgrClearStaticInfo(arg0, arg1) kmigmgrClearStaticInfo_b3696a(arg0, arg1)
641 #endif //__nvoc_kernel_mig_manager_h_disabled
642 
643 #define kmigmgrClearStaticInfo_HAL(arg0, arg1) kmigmgrClearStaticInfo(arg0, arg1)
644 
645 static inline NV_STATUS kmigmgrSaveToPersistenceFromVgpuStaticInfo_46f6a7(OBJGPU *arg0, struct KernelMIGManager *arg1) {
646     return NV_ERR_NOT_SUPPORTED;
647 }
648 
649 
650 #ifdef __nvoc_kernel_mig_manager_h_disabled
651 static inline NV_STATUS kmigmgrSaveToPersistenceFromVgpuStaticInfo(OBJGPU *arg0, struct KernelMIGManager *arg1) {
652     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
653     return NV_ERR_NOT_SUPPORTED;
654 }
655 #else //__nvoc_kernel_mig_manager_h_disabled
656 #define kmigmgrSaveToPersistenceFromVgpuStaticInfo(arg0, arg1) kmigmgrSaveToPersistenceFromVgpuStaticInfo_46f6a7(arg0, arg1)
657 #endif //__nvoc_kernel_mig_manager_h_disabled
658 
659 #define kmigmgrSaveToPersistenceFromVgpuStaticInfo_HAL(arg0, arg1) kmigmgrSaveToPersistenceFromVgpuStaticInfo(arg0, arg1)
660 
661 NV_STATUS kmigmgrDeleteGPUInstanceRunlists_FWCLIENT(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
662 
663 
664 #ifdef __nvoc_kernel_mig_manager_h_disabled
665 static inline NV_STATUS kmigmgrDeleteGPUInstanceRunlists(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
666     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
667     return NV_ERR_NOT_SUPPORTED;
668 }
669 #else //__nvoc_kernel_mig_manager_h_disabled
670 #define kmigmgrDeleteGPUInstanceRunlists(arg0, arg1, arg2) kmigmgrDeleteGPUInstanceRunlists_FWCLIENT(arg0, arg1, arg2)
671 #endif //__nvoc_kernel_mig_manager_h_disabled
672 
673 #define kmigmgrDeleteGPUInstanceRunlists_HAL(arg0, arg1, arg2) kmigmgrDeleteGPUInstanceRunlists(arg0, arg1, arg2)
674 
675 NV_STATUS kmigmgrCreateGPUInstanceRunlists_FWCLIENT(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
676 
677 
678 #ifdef __nvoc_kernel_mig_manager_h_disabled
679 static inline NV_STATUS kmigmgrCreateGPUInstanceRunlists(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
680     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
681     return NV_ERR_NOT_SUPPORTED;
682 }
683 #else //__nvoc_kernel_mig_manager_h_disabled
684 #define kmigmgrCreateGPUInstanceRunlists(arg0, arg1, arg2) kmigmgrCreateGPUInstanceRunlists_FWCLIENT(arg0, arg1, arg2)
685 #endif //__nvoc_kernel_mig_manager_h_disabled
686 
687 #define kmigmgrCreateGPUInstanceRunlists_HAL(arg0, arg1, arg2) kmigmgrCreateGPUInstanceRunlists(arg0, arg1, arg2)
688 
689 NV_STATUS kmigmgrRestoreFromPersistence_PF(OBJGPU *arg0, struct KernelMIGManager *arg1);
690 
691 
692 #ifdef __nvoc_kernel_mig_manager_h_disabled
693 static inline NV_STATUS kmigmgrRestoreFromPersistence(OBJGPU *arg0, struct KernelMIGManager *arg1) {
694     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
695     return NV_ERR_NOT_SUPPORTED;
696 }
697 #else //__nvoc_kernel_mig_manager_h_disabled
698 #define kmigmgrRestoreFromPersistence(arg0, arg1) kmigmgrRestoreFromPersistence_PF(arg0, arg1)
699 #endif //__nvoc_kernel_mig_manager_h_disabled
700 
701 #define kmigmgrRestoreFromPersistence_HAL(arg0, arg1) kmigmgrRestoreFromPersistence(arg0, arg1)
702 
703 void kmigmgrDetectReducedConfig_KERNEL(OBJGPU *arg0, struct KernelMIGManager *arg1);
704 
705 
706 #ifdef __nvoc_kernel_mig_manager_h_disabled
707 static inline void kmigmgrDetectReducedConfig(OBJGPU *arg0, struct KernelMIGManager *arg1) {
708     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
709 }
710 #else //__nvoc_kernel_mig_manager_h_disabled
711 #define kmigmgrDetectReducedConfig(arg0, arg1) kmigmgrDetectReducedConfig_KERNEL(arg0, arg1)
712 #endif //__nvoc_kernel_mig_manager_h_disabled
713 
714 #define kmigmgrDetectReducedConfig_HAL(arg0, arg1) kmigmgrDetectReducedConfig(arg0, arg1)
715 
716 static inline NV_STATUS kmigmgrGenerateComputeInstanceUuid_5baef9(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvU32 globalGrIdx, NvUuid *pUuid) {
717     NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
718 }
719 
720 
721 #ifdef __nvoc_kernel_mig_manager_h_disabled
722 static inline NV_STATUS kmigmgrGenerateComputeInstanceUuid(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvU32 globalGrIdx, NvUuid *pUuid) {
723     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
724     return NV_ERR_NOT_SUPPORTED;
725 }
726 #else //__nvoc_kernel_mig_manager_h_disabled
727 #define kmigmgrGenerateComputeInstanceUuid(arg0, arg1, swizzId, globalGrIdx, pUuid) kmigmgrGenerateComputeInstanceUuid_5baef9(arg0, arg1, swizzId, globalGrIdx, pUuid)
728 #endif //__nvoc_kernel_mig_manager_h_disabled
729 
730 #define kmigmgrGenerateComputeInstanceUuid_HAL(arg0, arg1, swizzId, globalGrIdx, pUuid) kmigmgrGenerateComputeInstanceUuid(arg0, arg1, swizzId, globalGrIdx, pUuid)
731 
732 NV_STATUS kmigmgrCreateComputeInstances_FWCLIENT(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvBool bQuery, KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS arg3, NvU32 *pCIIds, NvBool bCreateCap);
733 
734 
735 #ifdef __nvoc_kernel_mig_manager_h_disabled
736 static inline NV_STATUS kmigmgrCreateComputeInstances(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvBool bQuery, KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS arg3, NvU32 *pCIIds, NvBool bCreateCap) {
737     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
738     return NV_ERR_NOT_SUPPORTED;
739 }
740 #else //__nvoc_kernel_mig_manager_h_disabled
741 #define kmigmgrCreateComputeInstances(arg0, arg1, arg2, bQuery, arg3, pCIIds, bCreateCap) kmigmgrCreateComputeInstances_FWCLIENT(arg0, arg1, arg2, bQuery, arg3, pCIIds, bCreateCap)
742 #endif //__nvoc_kernel_mig_manager_h_disabled
743 
744 #define kmigmgrCreateComputeInstances_HAL(arg0, arg1, arg2, bQuery, arg3, pCIIds, bCreateCap) kmigmgrCreateComputeInstances(arg0, arg1, arg2, bQuery, arg3, pCIIds, bCreateCap)
745 
746 NV_STATUS kmigmgrSetMIGState_FWCLIENT(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bMemoryPartitioningNeeded, NvBool bEnable, NvBool bUnload);
747 
748 
749 #ifdef __nvoc_kernel_mig_manager_h_disabled
750 static inline NV_STATUS kmigmgrSetMIGState(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bMemoryPartitioningNeeded, NvBool bEnable, NvBool bUnload) {
751     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
752     return NV_ERR_NOT_SUPPORTED;
753 }
754 #else //__nvoc_kernel_mig_manager_h_disabled
755 #define kmigmgrSetMIGState(arg0, arg1, bMemoryPartitioningNeeded, bEnable, bUnload) kmigmgrSetMIGState_FWCLIENT(arg0, arg1, bMemoryPartitioningNeeded, bEnable, bUnload)
756 #endif //__nvoc_kernel_mig_manager_h_disabled
757 
758 #define kmigmgrSetMIGState_HAL(arg0, arg1, bMemoryPartitioningNeeded, bEnable, bUnload) kmigmgrSetMIGState(arg0, arg1, bMemoryPartitioningNeeded, bEnable, bUnload)
759 
760 NvBool kmigmgrIsCTSAlignmentRequired_PF(OBJGPU *arg0, struct KernelMIGManager *arg1);
761 
762 
763 #ifdef __nvoc_kernel_mig_manager_h_disabled
764 static inline NvBool kmigmgrIsCTSAlignmentRequired(OBJGPU *arg0, struct KernelMIGManager *arg1) {
765     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
766     return NV_FALSE;
767 }
768 #else //__nvoc_kernel_mig_manager_h_disabled
769 #define kmigmgrIsCTSAlignmentRequired(arg0, arg1) kmigmgrIsCTSAlignmentRequired_PF(arg0, arg1)
770 #endif //__nvoc_kernel_mig_manager_h_disabled
771 
772 #define kmigmgrIsCTSAlignmentRequired_HAL(arg0, arg1) kmigmgrIsCTSAlignmentRequired(arg0, arg1)
773 
774 NV_STATUS kmigmgrConstructEngine_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, ENGDESCRIPTOR arg2);
775 
776 static inline NV_STATUS kmigmgrConstructEngine_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, ENGDESCRIPTOR arg2) {
777     return arg1->__kmigmgrConstructEngine__(arg0, arg1, arg2);
778 }
779 
780 NV_STATUS kmigmgrStateInitLocked_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
781 
782 static inline NV_STATUS kmigmgrStateInitLocked_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1) {
783     return arg1->__kmigmgrStateInitLocked__(arg0, arg1);
784 }
785 
786 NV_STATUS kmigmgrStateUnload_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 flags);
787 
788 static inline NV_STATUS kmigmgrStateUnload_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 flags) {
789     return arg1->__kmigmgrStateUnload__(arg0, arg1, flags);
790 }
791 
792 NV_STATUS kmigmgrCreateGPUInstanceCheck_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bMemoryPartitioningNeeded);
793 
794 static inline NV_STATUS kmigmgrCreateGPUInstanceCheck_46f6a7(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bMemoryPartitioningNeeded) {
795     return NV_ERR_NOT_SUPPORTED;
796 }
797 
798 static inline NV_STATUS kmigmgrCreateGPUInstanceCheck_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bMemoryPartitioningNeeded) {
799     return arg1->__kmigmgrCreateGPUInstanceCheck__(arg0, arg1, bMemoryPartitioningNeeded);
800 }
801 
802 NvBool kmigmgrIsDevinitMIGBitSet_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1);
803 
804 static inline NvBool kmigmgrIsDevinitMIGBitSet_491d52(OBJGPU *arg0, struct KernelMIGManager *arg1) {
805     return ((NvBool)(0 != 0));
806 }
807 
808 static inline NvBool kmigmgrIsDevinitMIGBitSet_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1) {
809     return arg1->__kmigmgrIsDevinitMIGBitSet__(arg0, arg1);
810 }
811 
812 NvBool kmigmgrIsGPUInstanceCombinationValid_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag);
813 
814 NvBool kmigmgrIsGPUInstanceCombinationValid_GH100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag);
815 
816 static inline NvBool kmigmgrIsGPUInstanceCombinationValid_491d52(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag) {
817     return ((NvBool)(0 != 0));
818 }
819 
820 static inline NvBool kmigmgrIsGPUInstanceCombinationValid_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag) {
821     return arg1->__kmigmgrIsGPUInstanceCombinationValid__(arg0, arg1, gpuInstanceFlag);
822 }
823 
824 NvBool kmigmgrIsGPUInstanceFlagValid_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag);
825 
826 NvBool kmigmgrIsGPUInstanceFlagValid_GH100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag);
827 
828 static inline NvBool kmigmgrIsGPUInstanceFlagValid_491d52(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag) {
829     return ((NvBool)(0 != 0));
830 }
831 
832 static inline NvBool kmigmgrIsGPUInstanceFlagValid_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag) {
833     return arg1->__kmigmgrIsGPUInstanceFlagValid__(arg0, arg1, gpuInstanceFlag);
834 }
835 
836 NvBool kmigmgrIsMemoryPartitioningRequested_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 partitionFlags);
837 
838 static inline NvBool kmigmgrIsMemoryPartitioningRequested_491d52(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 partitionFlags) {
839     return ((NvBool)(0 != 0));
840 }
841 
842 static inline NvBool kmigmgrIsMemoryPartitioningRequested_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 partitionFlags) {
843     return arg1->__kmigmgrIsMemoryPartitioningRequested__(arg0, arg1, partitionFlags);
844 }
845 
846 NvBool kmigmgrIsMemoryPartitioningNeeded_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId);
847 
848 static inline NvBool kmigmgrIsMemoryPartitioningNeeded_491d52(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) {
849     return ((NvBool)(0 != 0));
850 }
851 
852 static inline NvBool kmigmgrIsMemoryPartitioningNeeded_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) {
853     return arg1->__kmigmgrIsMemoryPartitioningNeeded__(arg0, arg1, swizzId);
854 }
855 
856 static inline struct NV_RANGE kmigmgrMemSizeFlagToSwizzIdRange_d64cd6(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 memSizeFlag) {
857     return NV_RANGE_EMPTY;
858 }
859 
860 struct NV_RANGE kmigmgrMemSizeFlagToSwizzIdRange_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 memSizeFlag);
861 
862 static inline struct NV_RANGE kmigmgrMemSizeFlagToSwizzIdRange_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 memSizeFlag) {
863     return arg1->__kmigmgrMemSizeFlagToSwizzIdRange__(arg0, arg1, memSizeFlag);
864 }
865 
866 static inline NV_STATUS kmigmgrStateLoad_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) {
867     return pEngstate->__kmigmgrStateLoad__(pGpu, pEngstate, arg0);
868 }
869 
870 static inline NV_STATUS kmigmgrStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) {
871     return pEngstate->__kmigmgrStatePreLoad__(pGpu, pEngstate, arg0);
872 }
873 
874 static inline NV_STATUS kmigmgrStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) {
875     return pEngstate->__kmigmgrStatePostUnload__(pGpu, pEngstate, arg0);
876 }
877 
878 static inline void kmigmgrStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) {
879     pEngstate->__kmigmgrStateDestroy__(pGpu, pEngstate);
880 }
881 
882 static inline NV_STATUS kmigmgrStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) {
883     return pEngstate->__kmigmgrStatePreUnload__(pGpu, pEngstate, arg0);
884 }
885 
886 static inline NV_STATUS kmigmgrStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) {
887     return pEngstate->__kmigmgrStateInitUnlocked__(pGpu, pEngstate);
888 }
889 
890 static inline void kmigmgrInitMissing_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) {
891     pEngstate->__kmigmgrInitMissing__(pGpu, pEngstate);
892 }
893 
894 static inline NV_STATUS kmigmgrStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) {
895     return pEngstate->__kmigmgrStatePreInitLocked__(pGpu, pEngstate);
896 }
897 
898 static inline NV_STATUS kmigmgrStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) {
899     return pEngstate->__kmigmgrStatePreInitUnlocked__(pGpu, pEngstate);
900 }
901 
902 static inline NV_STATUS kmigmgrStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) {
903     return pEngstate->__kmigmgrStatePostLoad__(pGpu, pEngstate, arg0);
904 }
905 
906 static inline NvBool kmigmgrIsPresent_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) {
907     return pEngstate->__kmigmgrIsPresent__(pGpu, pEngstate);
908 }
909 
910 static inline NvBool kmigmgrUseLegacyVgpuPolicy(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager) {
911     return ((NvBool)(0 != 0));
912 }
913 
914 static inline NvBool kmigmgrIsMIGNvlinkP2PSupportOverridden(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager) {
915     return ((NvBool)(0 != 0));
916 }
917 
918 static inline const union ENGTYPE_BIT_VECTOR *kmigmgrGetPartitionableEnginesInUse(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager) {
919     struct KernelMIGManager_PRIVATE *pKernelMIGManager_PRIVATE = (struct KernelMIGManager_PRIVATE *)pKernelMIGManager;
920     return &pKernelMIGManager_PRIVATE->partitionableEnginesInUse;
921 }
922 
923 static inline NvBool kmigmgrIsA100ReducedConfig(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager) {
924     struct KernelMIGManager_PRIVATE *pKernelMIGManager_PRIVATE = (struct KernelMIGManager_PRIVATE *)pKernelMIGManager;
925     return pKernelMIGManager_PRIVATE->bIsA100ReducedConfig;
926 }
927 
928 NV_STATUS kmigmgrIncRefCount_IMPL(struct RsShared *arg0);
929 
930 #define kmigmgrIncRefCount(arg0) kmigmgrIncRefCount_IMPL(arg0)
931 NV_STATUS kmigmgrDecRefCount_IMPL(struct RsShared *arg0);
932 
933 #define kmigmgrDecRefCount(arg0) kmigmgrDecRefCount_IMPL(arg0)
934 struct MIG_INSTANCE_REF kmigmgrMakeGIReference_IMPL(KERNEL_MIG_GPU_INSTANCE *arg0);
935 
936 #define kmigmgrMakeGIReference(arg0) kmigmgrMakeGIReference_IMPL(arg0)
937 struct MIG_INSTANCE_REF kmigmgrMakeCIReference_IMPL(KERNEL_MIG_GPU_INSTANCE *arg0, MIG_COMPUTE_INSTANCE *arg1);
938 
939 #define kmigmgrMakeCIReference(arg0, arg1) kmigmgrMakeCIReference_IMPL(arg0, arg1)
940 NV_STATUS kmigmgrEngineTypeXlate_IMPL(union ENGTYPE_BIT_VECTOR *pSrc, RM_ENGINE_TYPE srcEngineType, union ENGTYPE_BIT_VECTOR *pDst, RM_ENGINE_TYPE *pDstEngineType);
941 
942 #define kmigmgrEngineTypeXlate(pSrc, srcEngineType, pDst, pDstEngineType) kmigmgrEngineTypeXlate_IMPL(pSrc, srcEngineType, pDst, pDstEngineType)
943 NvBool kmigmgrIsInstanceAttributionIdValid_IMPL(NvU16 id);
944 
945 #define kmigmgrIsInstanceAttributionIdValid(id) kmigmgrIsInstanceAttributionIdValid_IMPL(id)
946 struct MIG_INSTANCE_REF kmigmgrMakeNoMIGReference_IMPL(void);
947 
948 #define kmigmgrMakeNoMIGReference() kmigmgrMakeNoMIGReference_IMPL()
949 NvBool kmigmgrIsMIGReferenceValid_IMPL(struct MIG_INSTANCE_REF *arg0);
950 
951 #define kmigmgrIsMIGReferenceValid(arg0) kmigmgrIsMIGReferenceValid_IMPL(arg0)
952 NvBool kmigmgrAreMIGReferencesSame_IMPL(struct MIG_INSTANCE_REF *arg0, struct MIG_INSTANCE_REF *arg1);
953 
954 #define kmigmgrAreMIGReferencesSame(arg0, arg1) kmigmgrAreMIGReferencesSame_IMPL(arg0, arg1)
955 NvU32 kmigmgrCountEnginesOfType_IMPL(const union ENGTYPE_BIT_VECTOR *arg0, RM_ENGINE_TYPE arg1);
956 
957 #define kmigmgrCountEnginesOfType(arg0, arg1) kmigmgrCountEnginesOfType_IMPL(arg0, arg1)
958 NvU16 kmigmgrGetAttributionIdFromMIGReference_IMPL(struct MIG_INSTANCE_REF arg0);
959 
960 #define kmigmgrGetAttributionIdFromMIGReference(arg0) kmigmgrGetAttributionIdFromMIGReference_IMPL(arg0)
961 NV_STATUS kmigmgrAllocateInstanceEngines_IMPL(union ENGTYPE_BIT_VECTOR *pSourceEngines, NvBool bShared, struct NV_RANGE engTypeRange, NvU32 reqEngCount, union ENGTYPE_BIT_VECTOR *pOutEngines, union ENGTYPE_BIT_VECTOR *pExclusiveEngines, union ENGTYPE_BIT_VECTOR *pSharedEngines, union ENGTYPE_BIT_VECTOR *pAllocatableEngines);
962 
963 #define kmigmgrAllocateInstanceEngines(pSourceEngines, bShared, engTypeRange, reqEngCount, pOutEngines, pExclusiveEngines, pSharedEngines, pAllocatableEngines) kmigmgrAllocateInstanceEngines_IMPL(pSourceEngines, bShared, engTypeRange, reqEngCount, pOutEngines, pExclusiveEngines, pSharedEngines, pAllocatableEngines)
964 void kmigmgrGetLocalEngineMask_IMPL(union ENGTYPE_BIT_VECTOR *pPhysicalEngineMask, union ENGTYPE_BIT_VECTOR *pLocalEngineMask);
965 
966 #define kmigmgrGetLocalEngineMask(pPhysicalEngineMask, pLocalEngineMask) kmigmgrGetLocalEngineMask_IMPL(pPhysicalEngineMask, pLocalEngineMask)
967 NV_STATUS kmigmgrAllocGPUInstanceHandles_IMPL(OBJGPU *arg0, NvU32 swizzId, KERNEL_MIG_GPU_INSTANCE *arg1);
968 
969 #define kmigmgrAllocGPUInstanceHandles(arg0, swizzId, arg1) kmigmgrAllocGPUInstanceHandles_IMPL(arg0, swizzId, arg1)
970 void kmigmgrFreeGPUInstanceHandles_IMPL(KERNEL_MIG_GPU_INSTANCE *arg0);
971 
972 #define kmigmgrFreeGPUInstanceHandles(arg0) kmigmgrFreeGPUInstanceHandles_IMPL(arg0)
973 NvBool kmigmgrIsGPUInstanceReadyToBeDestroyed_IMPL(KERNEL_MIG_GPU_INSTANCE *arg0);
974 
975 #define kmigmgrIsGPUInstanceReadyToBeDestroyed(arg0) kmigmgrIsGPUInstanceReadyToBeDestroyed_IMPL(arg0)
976 void kmigmgrDestruct_IMPL(struct KernelMIGManager *arg0);
977 
978 #define __nvoc_kmigmgrDestruct(arg0) kmigmgrDestruct_IMPL(arg0)
979 void kmigmgrInitRegistryOverrides_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
980 
981 #ifdef __nvoc_kernel_mig_manager_h_disabled
982 static inline void kmigmgrInitRegistryOverrides(OBJGPU *arg0, struct KernelMIGManager *arg1) {
983     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
984 }
985 #else //__nvoc_kernel_mig_manager_h_disabled
986 #define kmigmgrInitRegistryOverrides(arg0, arg1) kmigmgrInitRegistryOverrides_IMPL(arg0, arg1)
987 #endif //__nvoc_kernel_mig_manager_h_disabled
988 
989 KERNEL_MIG_GPU_INSTANCE *kmigmgrGetMIGGpuInstanceSlot_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 i);
990 
991 #ifdef __nvoc_kernel_mig_manager_h_disabled
992 static inline KERNEL_MIG_GPU_INSTANCE *kmigmgrGetMIGGpuInstanceSlot(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 i) {
993     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
994     return NULL;
995 }
996 #else //__nvoc_kernel_mig_manager_h_disabled
997 #define kmigmgrGetMIGGpuInstanceSlot(arg0, arg1, i) kmigmgrGetMIGGpuInstanceSlot_IMPL(arg0, arg1, i)
998 #endif //__nvoc_kernel_mig_manager_h_disabled
999 
1000 NvBool kmigmgrIsMIGSupported_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1001 
1002 #ifdef __nvoc_kernel_mig_manager_h_disabled
1003 static inline NvBool kmigmgrIsMIGSupported(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1004     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1005     return NV_FALSE;
1006 }
1007 #else //__nvoc_kernel_mig_manager_h_disabled
1008 #define kmigmgrIsMIGSupported(arg0, arg1) kmigmgrIsMIGSupported_IMPL(arg0, arg1)
1009 #endif //__nvoc_kernel_mig_manager_h_disabled
1010 
1011 NvBool kmigmgrIsMIGEnabled_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1012 
1013 #ifdef __nvoc_kernel_mig_manager_h_disabled
1014 static inline NvBool kmigmgrIsMIGEnabled(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1015     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1016     return NV_FALSE;
1017 }
1018 #else //__nvoc_kernel_mig_manager_h_disabled
1019 #define kmigmgrIsMIGEnabled(arg0, arg1) kmigmgrIsMIGEnabled_IMPL(arg0, arg1)
1020 #endif //__nvoc_kernel_mig_manager_h_disabled
1021 
1022 NvBool kmigmgrIsMIGGpuInstancingEnabled_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1023 
1024 #ifdef __nvoc_kernel_mig_manager_h_disabled
1025 static inline NvBool kmigmgrIsMIGGpuInstancingEnabled(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1026     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1027     return NV_FALSE;
1028 }
1029 #else //__nvoc_kernel_mig_manager_h_disabled
1030 #define kmigmgrIsMIGGpuInstancingEnabled(arg0, arg1) kmigmgrIsMIGGpuInstancingEnabled_IMPL(arg0, arg1)
1031 #endif //__nvoc_kernel_mig_manager_h_disabled
1032 
1033 NvBool kmigmgrIsMIGMemPartitioningEnabled_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1034 
1035 #ifdef __nvoc_kernel_mig_manager_h_disabled
1036 static inline NvBool kmigmgrIsMIGMemPartitioningEnabled(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1037     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1038     return NV_FALSE;
1039 }
1040 #else //__nvoc_kernel_mig_manager_h_disabled
1041 #define kmigmgrIsMIGMemPartitioningEnabled(arg0, arg1) kmigmgrIsMIGMemPartitioningEnabled_IMPL(arg0, arg1)
1042 #endif //__nvoc_kernel_mig_manager_h_disabled
1043 
1044 const KERNEL_MIG_MANAGER_STATIC_INFO *kmigmgrGetStaticInfo_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1045 
1046 #ifdef __nvoc_kernel_mig_manager_h_disabled
1047 static inline const KERNEL_MIG_MANAGER_STATIC_INFO *kmigmgrGetStaticInfo(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1048     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1049     return NULL;
1050 }
1051 #else //__nvoc_kernel_mig_manager_h_disabled
1052 #define kmigmgrGetStaticInfo(arg0, arg1) kmigmgrGetStaticInfo_IMPL(arg0, arg1)
1053 #endif //__nvoc_kernel_mig_manager_h_disabled
1054 
1055 NV_STATUS kmigmgrSaveToPersistence_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1056 
1057 #ifdef __nvoc_kernel_mig_manager_h_disabled
1058 static inline NV_STATUS kmigmgrSaveToPersistence(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1059     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1060     return NV_ERR_NOT_SUPPORTED;
1061 }
1062 #else //__nvoc_kernel_mig_manager_h_disabled
1063 #define kmigmgrSaveToPersistence(arg0, arg1) kmigmgrSaveToPersistence_IMPL(arg0, arg1)
1064 #endif //__nvoc_kernel_mig_manager_h_disabled
1065 
1066 NV_STATUS kmigmgrDisableWatchdog_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1067 
1068 #ifdef __nvoc_kernel_mig_manager_h_disabled
1069 static inline NV_STATUS kmigmgrDisableWatchdog(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1070     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1071     return NV_ERR_NOT_SUPPORTED;
1072 }
1073 #else //__nvoc_kernel_mig_manager_h_disabled
1074 #define kmigmgrDisableWatchdog(arg0, arg1) kmigmgrDisableWatchdog_IMPL(arg0, arg1)
1075 #endif //__nvoc_kernel_mig_manager_h_disabled
1076 
1077 NV_STATUS kmigmgrRestoreWatchdog_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1078 
1079 #ifdef __nvoc_kernel_mig_manager_h_disabled
1080 static inline NV_STATUS kmigmgrRestoreWatchdog(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1081     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1082     return NV_ERR_NOT_SUPPORTED;
1083 }
1084 #else //__nvoc_kernel_mig_manager_h_disabled
1085 #define kmigmgrRestoreWatchdog(arg0, arg1) kmigmgrRestoreWatchdog_IMPL(arg0, arg1)
1086 #endif //__nvoc_kernel_mig_manager_h_disabled
1087 
1088 NV_STATUS kmigmgrSetSwizzIdInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId);
1089 
1090 #ifdef __nvoc_kernel_mig_manager_h_disabled
1091 static inline NV_STATUS kmigmgrSetSwizzIdInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) {
1092     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1093     return NV_ERR_NOT_SUPPORTED;
1094 }
1095 #else //__nvoc_kernel_mig_manager_h_disabled
1096 #define kmigmgrSetSwizzIdInUse(arg0, arg1, swizzId) kmigmgrSetSwizzIdInUse_IMPL(arg0, arg1, swizzId)
1097 #endif //__nvoc_kernel_mig_manager_h_disabled
1098 
1099 NV_STATUS kmigmgrClearSwizzIdInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId);
1100 
1101 #ifdef __nvoc_kernel_mig_manager_h_disabled
1102 static inline NV_STATUS kmigmgrClearSwizzIdInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) {
1103     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1104     return NV_ERR_NOT_SUPPORTED;
1105 }
1106 #else //__nvoc_kernel_mig_manager_h_disabled
1107 #define kmigmgrClearSwizzIdInUse(arg0, arg1, swizzId) kmigmgrClearSwizzIdInUse_IMPL(arg0, arg1, swizzId)
1108 #endif //__nvoc_kernel_mig_manager_h_disabled
1109 
1110 NvBool kmigmgrIsSwizzIdInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId);
1111 
1112 #ifdef __nvoc_kernel_mig_manager_h_disabled
1113 static inline NvBool kmigmgrIsSwizzIdInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) {
1114     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1115     return NV_FALSE;
1116 }
1117 #else //__nvoc_kernel_mig_manager_h_disabled
1118 #define kmigmgrIsSwizzIdInUse(arg0, arg1, swizzId) kmigmgrIsSwizzIdInUse_IMPL(arg0, arg1, swizzId)
1119 #endif //__nvoc_kernel_mig_manager_h_disabled
1120 
1121 NV_STATUS kmigmgrGetInvalidSwizzIdMask_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvU64 *pUnsupportedSwizzIdMask);
1122 
1123 #ifdef __nvoc_kernel_mig_manager_h_disabled
1124 static inline NV_STATUS kmigmgrGetInvalidSwizzIdMask(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvU64 *pUnsupportedSwizzIdMask) {
1125     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1126     return NV_ERR_NOT_SUPPORTED;
1127 }
1128 #else //__nvoc_kernel_mig_manager_h_disabled
1129 #define kmigmgrGetInvalidSwizzIdMask(arg0, arg1, swizzId, pUnsupportedSwizzIdMask) kmigmgrGetInvalidSwizzIdMask_IMPL(arg0, arg1, swizzId, pUnsupportedSwizzIdMask)
1130 #endif //__nvoc_kernel_mig_manager_h_disabled
1131 
1132 NvBool kmigmgrIsMIGNvlinkP2PSupported_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1133 
1134 #ifdef __nvoc_kernel_mig_manager_h_disabled
1135 static inline NvBool kmigmgrIsMIGNvlinkP2PSupported(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1136     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1137     return NV_FALSE;
1138 }
1139 #else //__nvoc_kernel_mig_manager_h_disabled
1140 #define kmigmgrIsMIGNvlinkP2PSupported(arg0, arg1) kmigmgrIsMIGNvlinkP2PSupported_IMPL(arg0, arg1)
1141 #endif //__nvoc_kernel_mig_manager_h_disabled
1142 
1143 NvU64 kmigmgrGetSwizzIdInUseMask_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1144 
1145 #ifdef __nvoc_kernel_mig_manager_h_disabled
1146 static inline NvU64 kmigmgrGetSwizzIdInUseMask(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1147     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1148     return 0;
1149 }
1150 #else //__nvoc_kernel_mig_manager_h_disabled
1151 #define kmigmgrGetSwizzIdInUseMask(arg0, arg1) kmigmgrGetSwizzIdInUseMask_IMPL(arg0, arg1)
1152 #endif //__nvoc_kernel_mig_manager_h_disabled
1153 
1154 NV_STATUS kmigmgrSetEnginesInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, union ENGTYPE_BIT_VECTOR *pEngines);
1155 
1156 #ifdef __nvoc_kernel_mig_manager_h_disabled
1157 static inline NV_STATUS kmigmgrSetEnginesInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, union ENGTYPE_BIT_VECTOR *pEngines) {
1158     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1159     return NV_ERR_NOT_SUPPORTED;
1160 }
1161 #else //__nvoc_kernel_mig_manager_h_disabled
1162 #define kmigmgrSetEnginesInUse(arg0, arg1, pEngines) kmigmgrSetEnginesInUse_IMPL(arg0, arg1, pEngines)
1163 #endif //__nvoc_kernel_mig_manager_h_disabled
1164 
1165 NV_STATUS kmigmgrClearEnginesInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, union ENGTYPE_BIT_VECTOR *pEngines);
1166 
1167 #ifdef __nvoc_kernel_mig_manager_h_disabled
1168 static inline NV_STATUS kmigmgrClearEnginesInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, union ENGTYPE_BIT_VECTOR *pEngines) {
1169     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1170     return NV_ERR_NOT_SUPPORTED;
1171 }
1172 #else //__nvoc_kernel_mig_manager_h_disabled
1173 #define kmigmgrClearEnginesInUse(arg0, arg1, pEngines) kmigmgrClearEnginesInUse_IMPL(arg0, arg1, pEngines)
1174 #endif //__nvoc_kernel_mig_manager_h_disabled
1175 
1176 NvBool kmigmgrIsEngineInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, RM_ENGINE_TYPE rmEngineType);
1177 
1178 #ifdef __nvoc_kernel_mig_manager_h_disabled
1179 static inline NvBool kmigmgrIsEngineInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, RM_ENGINE_TYPE rmEngineType) {
1180     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1181     return NV_FALSE;
1182 }
1183 #else //__nvoc_kernel_mig_manager_h_disabled
1184 #define kmigmgrIsEngineInUse(arg0, arg1, rmEngineType) kmigmgrIsEngineInUse_IMPL(arg0, arg1, rmEngineType)
1185 #endif //__nvoc_kernel_mig_manager_h_disabled
1186 
1187 NvBool kmigmgrIsEnginePartitionable_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, RM_ENGINE_TYPE rmEngineType);
1188 
1189 #ifdef __nvoc_kernel_mig_manager_h_disabled
1190 static inline NvBool kmigmgrIsEnginePartitionable(OBJGPU *arg0, struct KernelMIGManager *arg1, RM_ENGINE_TYPE rmEngineType) {
1191     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1192     return NV_FALSE;
1193 }
1194 #else //__nvoc_kernel_mig_manager_h_disabled
1195 #define kmigmgrIsEnginePartitionable(arg0, arg1, rmEngineType) kmigmgrIsEnginePartitionable_IMPL(arg0, arg1, rmEngineType)
1196 #endif //__nvoc_kernel_mig_manager_h_disabled
1197 
1198 NvBool kmigmgrIsEngineInInstance_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, RM_ENGINE_TYPE globalRmEngType, struct MIG_INSTANCE_REF arg2);
1199 
1200 #ifdef __nvoc_kernel_mig_manager_h_disabled
1201 static inline NvBool kmigmgrIsEngineInInstance(OBJGPU *arg0, struct KernelMIGManager *arg1, RM_ENGINE_TYPE globalRmEngType, struct MIG_INSTANCE_REF arg2) {
1202     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1203     return NV_FALSE;
1204 }
1205 #else //__nvoc_kernel_mig_manager_h_disabled
1206 #define kmigmgrIsEngineInInstance(arg0, arg1, globalRmEngType, arg2) kmigmgrIsEngineInInstance_IMPL(arg0, arg1, globalRmEngType, arg2)
1207 #endif //__nvoc_kernel_mig_manager_h_disabled
1208 
1209 NV_STATUS kmigmgrCreateGPUInstance_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 *pSwizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2, NvBool bValid, NvBool bCreateCap);
1210 
1211 #ifdef __nvoc_kernel_mig_manager_h_disabled
1212 static inline NV_STATUS kmigmgrCreateGPUInstance(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 *pSwizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2, NvBool bValid, NvBool bCreateCap) {
1213     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1214     return NV_ERR_NOT_SUPPORTED;
1215 }
1216 #else //__nvoc_kernel_mig_manager_h_disabled
1217 #define kmigmgrCreateGPUInstance(arg0, arg1, pSwizzId, arg2, bValid, bCreateCap) kmigmgrCreateGPUInstance_IMPL(arg0, arg1, pSwizzId, arg2, bValid, bCreateCap)
1218 #endif //__nvoc_kernel_mig_manager_h_disabled
1219 
1220 NV_STATUS kmigmgrInvalidateGPUInstance_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvBool bUnload);
1221 
1222 #ifdef __nvoc_kernel_mig_manager_h_disabled
1223 static inline NV_STATUS kmigmgrInvalidateGPUInstance(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvBool bUnload) {
1224     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1225     return NV_ERR_NOT_SUPPORTED;
1226 }
1227 #else //__nvoc_kernel_mig_manager_h_disabled
1228 #define kmigmgrInvalidateGPUInstance(arg0, arg1, swizzId, bUnload) kmigmgrInvalidateGPUInstance_IMPL(arg0, arg1, swizzId, bUnload)
1229 #endif //__nvoc_kernel_mig_manager_h_disabled
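
//
// Example (illustrative sketch only): pairing kmigmgrCreateGPUInstance() with
// kmigmgrInvalidateGPUInstance(). The KMIGMGR_CREATE_GPU_INSTANCE_PARAMS value
// is assumed to have been populated by the caller; only the call ordering is
// illustrated here, and the wrapper name is hypothetical.
//
#if 0 // excluded from compilation; for illustration only
static NV_STATUS
kmigmgrExampleCreateAndDestroy(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager,
                               KMIGMGR_CREATE_GPU_INSTANCE_PARAMS params)
{
    NvU32 swizzId = KMIGMGR_SWIZZID_INVALID;
    NV_STATUS status;

    // Create a valid GPU instance and let the manager report the chosen swizzId.
    status = kmigmgrCreateGPUInstance(pGpu, pKernelMIGManager, &swizzId, params,
                                      NV_TRUE /* bValid */, NV_TRUE /* bCreateCap */);
    if (status != NV_OK)
        return status;

    // ... the instance would be used here ...

    // Tear the instance down again (bUnload = NV_FALSE in this sketch).
    return kmigmgrInvalidateGPUInstance(pGpu, pKernelMIGManager, swizzId, NV_FALSE /* bUnload */);
}
#endif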
1230 
1231 NV_STATUS kmigmgrInitGPUInstanceScrubber_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1232 
1233 #ifdef __nvoc_kernel_mig_manager_h_disabled
1234 static inline NV_STATUS kmigmgrInitGPUInstanceScrubber(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1235     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1236     return NV_ERR_NOT_SUPPORTED;
1237 }
1238 #else //__nvoc_kernel_mig_manager_h_disabled
1239 #define kmigmgrInitGPUInstanceScrubber(arg0, arg1, arg2) kmigmgrInitGPUInstanceScrubber_IMPL(arg0, arg1, arg2)
1240 #endif //__nvoc_kernel_mig_manager_h_disabled
1241 
1242 void kmigmgrDestroyGPUInstanceScrubber_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1243 
1244 #ifdef __nvoc_kernel_mig_manager_h_disabled
1245 static inline void kmigmgrDestroyGPUInstanceScrubber(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1246     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1247 }
1248 #else //__nvoc_kernel_mig_manager_h_disabled
1249 #define kmigmgrDestroyGPUInstanceScrubber(arg0, arg1, arg2) kmigmgrDestroyGPUInstanceScrubber_IMPL(arg0, arg1, arg2)
1250 #endif //__nvoc_kernel_mig_manager_h_disabled
1251 
1252 NV_STATUS kmigmgrInitGPUInstanceBufPools_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1253 
1254 #ifdef __nvoc_kernel_mig_manager_h_disabled
1255 static inline NV_STATUS kmigmgrInitGPUInstanceBufPools(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1256     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1257     return NV_ERR_NOT_SUPPORTED;
1258 }
1259 #else //__nvoc_kernel_mig_manager_h_disabled
1260 #define kmigmgrInitGPUInstanceBufPools(arg0, arg1, arg2) kmigmgrInitGPUInstanceBufPools_IMPL(arg0, arg1, arg2)
1261 #endif //__nvoc_kernel_mig_manager_h_disabled
1262 
1263 NV_STATUS kmigmgrInitGPUInstanceGrBufPools_IMPL(OBJGPU *pGpu, struct KernelMIGManager *arg0, KERNEL_MIG_GPU_INSTANCE *arg1);
1264 
1265 #ifdef __nvoc_kernel_mig_manager_h_disabled
1266 static inline NV_STATUS kmigmgrInitGPUInstanceGrBufPools(OBJGPU *pGpu, struct KernelMIGManager *arg0, KERNEL_MIG_GPU_INSTANCE *arg1) {
1267     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1268     return NV_ERR_NOT_SUPPORTED;
1269 }
1270 #else //__nvoc_kernel_mig_manager_h_disabled
1271 #define kmigmgrInitGPUInstanceGrBufPools(pGpu, arg0, arg1) kmigmgrInitGPUInstanceGrBufPools_IMPL(pGpu, arg0, arg1)
1272 #endif //__nvoc_kernel_mig_manager_h_disabled
1273 
1274 void kmigmgrDestroyGPUInstanceGrBufPools_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1275 
1276 #ifdef __nvoc_kernel_mig_manager_h_disabled
1277 static inline void kmigmgrDestroyGPUInstanceGrBufPools(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1278     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1279 }
1280 #else //__nvoc_kernel_mig_manager_h_disabled
1281 #define kmigmgrDestroyGPUInstanceGrBufPools(arg0, arg1, arg2) kmigmgrDestroyGPUInstanceGrBufPools_IMPL(arg0, arg1, arg2)
1282 #endif //__nvoc_kernel_mig_manager_h_disabled
1283 
1284 NV_STATUS kmigmgrInitGPUInstancePool_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1285 
1286 #ifdef __nvoc_kernel_mig_manager_h_disabled
1287 static inline NV_STATUS kmigmgrInitGPUInstancePool(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1288     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1289     return NV_ERR_NOT_SUPPORTED;
1290 }
1291 #else //__nvoc_kernel_mig_manager_h_disabled
1292 #define kmigmgrInitGPUInstancePool(arg0, arg1, arg2) kmigmgrInitGPUInstancePool_IMPL(arg0, arg1, arg2)
1293 #endif //__nvoc_kernel_mig_manager_h_disabled
1294 
1295 void kmigmgrDestroyGPUInstancePool_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1296 
1297 #ifdef __nvoc_kernel_mig_manager_h_disabled
1298 static inline void kmigmgrDestroyGPUInstancePool(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1299     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1300 }
1301 #else //__nvoc_kernel_mig_manager_h_disabled
1302 #define kmigmgrDestroyGPUInstancePool(arg0, arg1, arg2) kmigmgrDestroyGPUInstancePool_IMPL(arg0, arg1, arg2)
1303 #endif //__nvoc_kernel_mig_manager_h_disabled
1304 
1305 NV_STATUS kmigmgrInitGPUInstanceRunlistBufPools_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1306 
1307 #ifdef __nvoc_kernel_mig_manager_h_disabled
1308 static inline NV_STATUS kmigmgrInitGPUInstanceRunlistBufPools(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1309     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1310     return NV_ERR_NOT_SUPPORTED;
1311 }
1312 #else //__nvoc_kernel_mig_manager_h_disabled
1313 #define kmigmgrInitGPUInstanceRunlistBufPools(arg0, arg1, arg2) kmigmgrInitGPUInstanceRunlistBufPools_IMPL(arg0, arg1, arg2)
1314 #endif //__nvoc_kernel_mig_manager_h_disabled
1315 
1316 void kmigmgrDestroyGPUInstanceRunlistBufPools_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1317 
1318 #ifdef __nvoc_kernel_mig_manager_h_disabled
1319 static inline void kmigmgrDestroyGPUInstanceRunlistBufPools(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1320     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1321 }
1322 #else //__nvoc_kernel_mig_manager_h_disabled
1323 #define kmigmgrDestroyGPUInstanceRunlistBufPools(arg0, arg1, arg2) kmigmgrDestroyGPUInstanceRunlistBufPools_IMPL(arg0, arg1, arg2)
1324 #endif //__nvoc_kernel_mig_manager_h_disabled
1325 
1326 void kmigmgrPrintSubscribingClients_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId);
1327 
1328 #ifdef __nvoc_kernel_mig_manager_h_disabled
1329 static inline void kmigmgrPrintSubscribingClients(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) {
1330     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1331 }
1332 #else //__nvoc_kernel_mig_manager_h_disabled
1333 #define kmigmgrPrintSubscribingClients(arg0, arg1, swizzId) kmigmgrPrintSubscribingClients_IMPL(arg0, arg1, swizzId)
1334 #endif //__nvoc_kernel_mig_manager_h_disabled
1335 
1336 void kmigmgrInitGPUInstanceInfo_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1337 
1338 #ifdef __nvoc_kernel_mig_manager_h_disabled
1339 static inline void kmigmgrInitGPUInstanceInfo(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1340     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1341 }
1342 #else //__nvoc_kernel_mig_manager_h_disabled
1343 #define kmigmgrInitGPUInstanceInfo(arg0, arg1, arg2) kmigmgrInitGPUInstanceInfo_IMPL(arg0, arg1, arg2)
1344 #endif //__nvoc_kernel_mig_manager_h_disabled
1345 
1346 void kmigmgrTrimInstanceRunlistBufPools_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1347 
1348 #ifdef __nvoc_kernel_mig_manager_h_disabled
1349 static inline void kmigmgrTrimInstanceRunlistBufPools(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1350     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1351 }
1352 #else //__nvoc_kernel_mig_manager_h_disabled
1353 #define kmigmgrTrimInstanceRunlistBufPools(arg0, arg1, arg2) kmigmgrTrimInstanceRunlistBufPools_IMPL(arg0, arg1, arg2)
1354 #endif //__nvoc_kernel_mig_manager_h_disabled
1355 
1356 NV_STATUS kmigmgrSetDeviceProfilingInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1357 
1358 #ifdef __nvoc_kernel_mig_manager_h_disabled
1359 static inline NV_STATUS kmigmgrSetDeviceProfilingInUse(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1360     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1361     return NV_ERR_NOT_SUPPORTED;
1362 }
1363 #else //__nvoc_kernel_mig_manager_h_disabled
1364 #define kmigmgrSetDeviceProfilingInUse(arg0, arg1) kmigmgrSetDeviceProfilingInUse_IMPL(arg0, arg1)
1365 #endif //__nvoc_kernel_mig_manager_h_disabled
1366 
1367 void kmigmgrClearDeviceProfilingInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1368 
1369 #ifdef __nvoc_kernel_mig_manager_h_disabled
1370 static inline void kmigmgrClearDeviceProfilingInUse(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1371     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1372 }
1373 #else //__nvoc_kernel_mig_manager_h_disabled
1374 #define kmigmgrClearDeviceProfilingInUse(arg0, arg1) kmigmgrClearDeviceProfilingInUse_IMPL(arg0, arg1)
1375 #endif //__nvoc_kernel_mig_manager_h_disabled
1376 
1377 NvBool kmigmgrIsDeviceProfilingInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1378 
1379 #ifdef __nvoc_kernel_mig_manager_h_disabled
1380 static inline NvBool kmigmgrIsDeviceProfilingInUse(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1381     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1382     return NV_FALSE;
1383 }
1384 #else //__nvoc_kernel_mig_manager_h_disabled
1385 #define kmigmgrIsDeviceProfilingInUse(arg0, arg1) kmigmgrIsDeviceProfilingInUse_IMPL(arg0, arg1)
1386 #endif //__nvoc_kernel_mig_manager_h_disabled
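
//
// Example (illustrative sketch only): treating active device-level profiling
// as a busy condition before proceeding with an operation. The wrapper name is
// hypothetical.
//
#if 0 // excluded from compilation; for illustration only
static NV_STATUS
kmigmgrExampleCheckProfilingIdle(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager)
{
    // In this sketch, bail out while device profiling is reported as in use.
    if (kmigmgrIsDeviceProfilingInUse(pGpu, pKernelMIGManager))
        return NV_ERR_IN_USE;

    return NV_OK;
}
#endif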
1387 
1388 NvBool kmigmgrIsClientUsingDeviceProfiling_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient);
1389 
1390 #ifdef __nvoc_kernel_mig_manager_h_disabled
1391 static inline NvBool kmigmgrIsClientUsingDeviceProfiling(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient) {
1392     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1393     return NV_FALSE;
1394 }
1395 #else //__nvoc_kernel_mig_manager_h_disabled
1396 #define kmigmgrIsClientUsingDeviceProfiling(arg0, arg1, hClient) kmigmgrIsClientUsingDeviceProfiling_IMPL(arg0, arg1, hClient)
1397 #endif //__nvoc_kernel_mig_manager_h_disabled
1398 
1399 NvBool kmigmgrIsDeviceUsingDeviceProfiling_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Device *pDevice);
1400 
1401 #ifdef __nvoc_kernel_mig_manager_h_disabled
1402 static inline NvBool kmigmgrIsDeviceUsingDeviceProfiling(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Device *pDevice) {
1403     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1404     return NV_FALSE;
1405 }
1406 #else //__nvoc_kernel_mig_manager_h_disabled
1407 #define kmigmgrIsDeviceUsingDeviceProfiling(arg0, arg1, pDevice) kmigmgrIsDeviceUsingDeviceProfiling_IMPL(arg0, arg1, pDevice)
1408 #endif //__nvoc_kernel_mig_manager_h_disabled
1409 
1410 NV_STATUS kmigmgrEnableAllLCEs_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bEnableAllLCEs);
1411 
1412 #ifdef __nvoc_kernel_mig_manager_h_disabled
1413 static inline NV_STATUS kmigmgrEnableAllLCEs(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bEnableAllLCEs) {
1414     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1415     return NV_ERR_NOT_SUPPORTED;
1416 }
1417 #else //__nvoc_kernel_mig_manager_h_disabled
1418 #define kmigmgrEnableAllLCEs(arg0, arg1, bEnableAllLCEs) kmigmgrEnableAllLCEs_IMPL(arg0, arg1, bEnableAllLCEs)
1419 #endif //__nvoc_kernel_mig_manager_h_disabled
1420 
1421 NV_STATUS kmigmgrGetInstanceRefFromDevice_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Device *arg2, struct MIG_INSTANCE_REF *arg3);
1422 
1423 #ifdef __nvoc_kernel_mig_manager_h_disabled
1424 static inline NV_STATUS kmigmgrGetInstanceRefFromDevice(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Device *arg2, struct MIG_INSTANCE_REF *arg3) {
1425     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1426     return NV_ERR_NOT_SUPPORTED;
1427 }
1428 #else //__nvoc_kernel_mig_manager_h_disabled
1429 #define kmigmgrGetInstanceRefFromDevice(arg0, arg1, arg2, arg3) kmigmgrGetInstanceRefFromDevice_IMPL(arg0, arg1, arg2, arg3)
1430 #endif //__nvoc_kernel_mig_manager_h_disabled
1431 
1432 NV_STATUS kmigmgrGetInstanceRefFromClient_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient, struct MIG_INSTANCE_REF *arg2);
1433 
1434 #ifdef __nvoc_kernel_mig_manager_h_disabled
1435 static inline NV_STATUS kmigmgrGetInstanceRefFromClient(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient, struct MIG_INSTANCE_REF *arg2) {
1436     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1437     return NV_ERR_NOT_SUPPORTED;
1438 }
1439 #else //__nvoc_kernel_mig_manager_h_disabled
1440 #define kmigmgrGetInstanceRefFromClient(arg0, arg1, hClient, arg2) kmigmgrGetInstanceRefFromClient_IMPL(arg0, arg1, hClient, arg2)
1441 #endif //__nvoc_kernel_mig_manager_h_disabled
1442 
1443 NV_STATUS kmigmgrGetMemoryPartitionHeapFromDevice_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Device *arg2, struct Heap **arg3);
1444 
1445 #ifdef __nvoc_kernel_mig_manager_h_disabled
1446 static inline NV_STATUS kmigmgrGetMemoryPartitionHeapFromDevice(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Device *arg2, struct Heap **arg3) {
1447     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1448     return NV_ERR_NOT_SUPPORTED;
1449 }
1450 #else //__nvoc_kernel_mig_manager_h_disabled
1451 #define kmigmgrGetMemoryPartitionHeapFromDevice(arg0, arg1, arg2, arg3) kmigmgrGetMemoryPartitionHeapFromDevice_IMPL(arg0, arg1, arg2, arg3)
1452 #endif //__nvoc_kernel_mig_manager_h_disabled
1453 
1454 NV_STATUS kmigmgrGetSwizzIdFromDevice_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Device *pDevice, NvU32 *pSwizzId);
1455 
1456 #ifdef __nvoc_kernel_mig_manager_h_disabled
1457 static inline NV_STATUS kmigmgrGetSwizzIdFromDevice(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Device *pDevice, NvU32 *pSwizzId) {
1458     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1459     return NV_ERR_NOT_SUPPORTED;
1460 }
1461 #else //__nvoc_kernel_mig_manager_h_disabled
1462 #define kmigmgrGetSwizzIdFromDevice(arg0, arg1, pDevice, pSwizzId) kmigmgrGetSwizzIdFromDevice_IMPL(arg0, arg1, pDevice, pSwizzId)
1463 #endif //__nvoc_kernel_mig_manager_h_disabled
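
//
// Example (illustrative sketch only): resolving the swizzId a given Device is
// subscribed to, falling back to the invalid id on failure. The wrapper name
// is hypothetical.
//
#if 0 // excluded from compilation; for illustration only
static NvU32
kmigmgrExampleSwizzIdForDevice(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager,
                               struct Device *pDevice)
{
    NvU32 swizzId;

    if (kmigmgrGetSwizzIdFromDevice(pGpu, pKernelMIGManager, pDevice, &swizzId) != NV_OK)
        return KMIGMGR_SWIZZID_INVALID;

    return swizzId;
}
#endif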
1464 
1465 void kmigmgrPrintGPUInstanceInfo_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2);
1466 
1467 #ifdef __nvoc_kernel_mig_manager_h_disabled
1468 static inline void kmigmgrPrintGPUInstanceInfo(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) {
1469     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1470 }
1471 #else //__nvoc_kernel_mig_manager_h_disabled
1472 #define kmigmgrPrintGPUInstanceInfo(arg0, arg1, arg2) kmigmgrPrintGPUInstanceInfo_IMPL(arg0, arg1, arg2)
1473 #endif //__nvoc_kernel_mig_manager_h_disabled
1474 
1475 NV_STATUS kmigmgrSetGPUInstanceInfo_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2);
1476 
1477 #ifdef __nvoc_kernel_mig_manager_h_disabled
1478 static inline NV_STATUS kmigmgrSetGPUInstanceInfo(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2) {
1479     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1480     return NV_ERR_NOT_SUPPORTED;
1481 }
1482 #else //__nvoc_kernel_mig_manager_h_disabled
1483 #define kmigmgrSetGPUInstanceInfo(arg0, arg1, swizzId, arg2) kmigmgrSetGPUInstanceInfo_IMPL(arg0, arg1, swizzId, arg2)
1484 #endif //__nvoc_kernel_mig_manager_h_disabled
1485 
1486 NV_STATUS kmigmgrGetGPUInstanceInfo_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KERNEL_MIG_GPU_INSTANCE **arg2);
1487 
1488 #ifdef __nvoc_kernel_mig_manager_h_disabled
1489 static inline NV_STATUS kmigmgrGetGPUInstanceInfo(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KERNEL_MIG_GPU_INSTANCE **arg2) {
1490     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1491     return NV_ERR_NOT_SUPPORTED;
1492 }
1493 #else //__nvoc_kernel_mig_manager_h_disabled
1494 #define kmigmgrGetGPUInstanceInfo(arg0, arg1, swizzId, arg2) kmigmgrGetGPUInstanceInfo_IMPL(arg0, arg1, swizzId, arg2)
1495 #endif //__nvoc_kernel_mig_manager_h_disabled
1496 
1497 NV_STATUS kmigmgrGetLocalToGlobalEngineType_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct MIG_INSTANCE_REF arg2, RM_ENGINE_TYPE localEngType, RM_ENGINE_TYPE *pGlobalEngType);
1498 
1499 #ifdef __nvoc_kernel_mig_manager_h_disabled
1500 static inline NV_STATUS kmigmgrGetLocalToGlobalEngineType(OBJGPU *arg0, struct KernelMIGManager *arg1, struct MIG_INSTANCE_REF arg2, RM_ENGINE_TYPE localEngType, RM_ENGINE_TYPE *pGlobalEngType) {
1501     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1502     return NV_ERR_NOT_SUPPORTED;
1503 }
1504 #else //__nvoc_kernel_mig_manager_h_disabled
1505 #define kmigmgrGetLocalToGlobalEngineType(arg0, arg1, arg2, localEngType, pGlobalEngType) kmigmgrGetLocalToGlobalEngineType_IMPL(arg0, arg1, arg2, localEngType, pGlobalEngType)
1506 #endif //__nvoc_kernel_mig_manager_h_disabled
1507 
1508 NV_STATUS kmigmgrGetGlobalToLocalEngineType_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct MIG_INSTANCE_REF arg2, RM_ENGINE_TYPE globalEngType, RM_ENGINE_TYPE *pLocalEngType);
1509 
1510 #ifdef __nvoc_kernel_mig_manager_h_disabled
1511 static inline NV_STATUS kmigmgrGetGlobalToLocalEngineType(OBJGPU *arg0, struct KernelMIGManager *arg1, struct MIG_INSTANCE_REF arg2, RM_ENGINE_TYPE globalEngType, RM_ENGINE_TYPE *pLocalEngType) {
1512     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1513     return NV_ERR_NOT_SUPPORTED;
1514 }
1515 #else //__nvoc_kernel_mig_manager_h_disabled
1516 #define kmigmgrGetGlobalToLocalEngineType(arg0, arg1, arg2, globalEngType, pLocalEngType) kmigmgrGetGlobalToLocalEngineType_IMPL(arg0, arg1, arg2, globalEngType, pLocalEngType)
1517 #endif //__nvoc_kernel_mig_manager_h_disabled
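
//
// Example (illustrative sketch only): translating an instance-local engine type
// to its GPU-global equivalent for a device's MIG instance. The local engine
// value is assumed to be supplied by the caller; the wrapper name is
// hypothetical.
//
#if 0 // excluded from compilation; for illustration only
static NV_STATUS
kmigmgrExampleLocalToGlobal(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager,
                            struct Device *pDevice, RM_ENGINE_TYPE localEngType,
                            RM_ENGINE_TYPE *pGlobalEngType)
{
    struct MIG_INSTANCE_REF ref;
    NV_STATUS status;

    // Look up the MIG instance the device is subscribed to.
    status = kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager, pDevice, &ref);
    if (status != NV_OK)
        return status;

    // Map the instance-local engine type onto the GPU-global engine type.
    return kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref,
                                             localEngType, pGlobalEngType);
}
#endif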
1518 
1519 NV_STATUS kmigmgrFilterEngineList_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Subdevice *arg2, RM_ENGINE_TYPE *pEngineTypes, NvU32 *pEngineCount);
1520 
1521 #ifdef __nvoc_kernel_mig_manager_h_disabled
1522 static inline NV_STATUS kmigmgrFilterEngineList(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Subdevice *arg2, RM_ENGINE_TYPE *pEngineTypes, NvU32 *pEngineCount) {
1523     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1524     return NV_ERR_NOT_SUPPORTED;
1525 }
1526 #else //__nvoc_kernel_mig_manager_h_disabled
1527 #define kmigmgrFilterEngineList(arg0, arg1, arg2, pEngineTypes, pEngineCount) kmigmgrFilterEngineList_IMPL(arg0, arg1, arg2, pEngineTypes, pEngineCount)
1528 #endif //__nvoc_kernel_mig_manager_h_disabled
1529 
1530 NV_STATUS kmigmgrFilterEnginePartnerList_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Subdevice *arg2, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *arg3);
1531 
1532 #ifdef __nvoc_kernel_mig_manager_h_disabled
1533 static inline NV_STATUS kmigmgrFilterEnginePartnerList(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Subdevice *arg2, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *arg3) {
1534     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1535     return NV_ERR_NOT_SUPPORTED;
1536 }
1537 #else //__nvoc_kernel_mig_manager_h_disabled
1538 #define kmigmgrFilterEnginePartnerList(arg0, arg1, arg2, arg3) kmigmgrFilterEnginePartnerList_IMPL(arg0, arg1, arg2, arg3)
1539 #endif //__nvoc_kernel_mig_manager_h_disabled
1540 
1541 NV_STATUS kmigmgrGetProfileByPartitionFlag_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 partitionFlag, const NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO **arg2);
1542 
1543 #ifdef __nvoc_kernel_mig_manager_h_disabled
1544 static inline NV_STATUS kmigmgrGetProfileByPartitionFlag(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 partitionFlag, const NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO **arg2) {
1545     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1546     return NV_ERR_NOT_SUPPORTED;
1547 }
1548 #else //__nvoc_kernel_mig_manager_h_disabled
1549 #define kmigmgrGetProfileByPartitionFlag(arg0, arg1, partitionFlag, arg2) kmigmgrGetProfileByPartitionFlag_IMPL(arg0, arg1, partitionFlag, arg2)
1550 #endif //__nvoc_kernel_mig_manager_h_disabled
1551 
1552 NV_STATUS kmigmgrSaveComputeInstances_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, GPUMGR_SAVE_COMPUTE_INSTANCE *arg3);
1553 
1554 #ifdef __nvoc_kernel_mig_manager_h_disabled
1555 static inline NV_STATUS kmigmgrSaveComputeInstances(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, GPUMGR_SAVE_COMPUTE_INSTANCE *arg3) {
1556     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1557     return NV_ERR_NOT_SUPPORTED;
1558 }
1559 #else //__nvoc_kernel_mig_manager_h_disabled
1560 #define kmigmgrSaveComputeInstances(arg0, arg1, arg2, arg3) kmigmgrSaveComputeInstances_IMPL(arg0, arg1, arg2, arg3)
1561 #endif //__nvoc_kernel_mig_manager_h_disabled
1562 
1563 NV_STATUS kmigmgrSetPartitioningMode_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1564 
1565 #ifdef __nvoc_kernel_mig_manager_h_disabled
1566 static inline NV_STATUS kmigmgrSetPartitioningMode(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1567     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1568     return NV_ERR_NOT_SUPPORTED;
1569 }
1570 #else //__nvoc_kernel_mig_manager_h_disabled
1571 #define kmigmgrSetPartitioningMode(arg0, arg1) kmigmgrSetPartitioningMode_IMPL(arg0, arg1)
1572 #endif //__nvoc_kernel_mig_manager_h_disabled
1573 
1574 NV_STATUS kmigmgrGetMIGReferenceFromEngineType_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, RM_ENGINE_TYPE rmEngineType, struct MIG_INSTANCE_REF *arg2);
1575 
1576 #ifdef __nvoc_kernel_mig_manager_h_disabled
1577 static inline NV_STATUS kmigmgrGetMIGReferenceFromEngineType(OBJGPU *arg0, struct KernelMIGManager *arg1, RM_ENGINE_TYPE rmEngineType, struct MIG_INSTANCE_REF *arg2) {
1578     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1579     return NV_ERR_NOT_SUPPORTED;
1580 }
1581 #else //__nvoc_kernel_mig_manager_h_disabled
1582 #define kmigmgrGetMIGReferenceFromEngineType(arg0, arg1, rmEngineType, arg2) kmigmgrGetMIGReferenceFromEngineType_IMPL(arg0, arg1, rmEngineType, arg2)
1583 #endif //__nvoc_kernel_mig_manager_h_disabled
1584 
1585 NV_STATUS kmigmgrGetGPUInstanceScrubberCe_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Device *pDevice, NvU32 *ceInst);
1586 
1587 #ifdef __nvoc_kernel_mig_manager_h_disabled
1588 static inline NV_STATUS kmigmgrGetGPUInstanceScrubberCe(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Device *pDevice, NvU32 *ceInst) {
1589     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1590     return NV_ERR_NOT_SUPPORTED;
1591 }
1592 #else //__nvoc_kernel_mig_manager_h_disabled
1593 #define kmigmgrGetGPUInstanceScrubberCe(arg0, arg1, pDevice, ceInst) kmigmgrGetGPUInstanceScrubberCe_IMPL(arg0, arg1, pDevice, ceInst)
1594 #endif //__nvoc_kernel_mig_manager_h_disabled
1595 
1596 NV_STATUS kmigmgrDescribeGPUInstances_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS *arg2);
1597 
1598 #ifdef __nvoc_kernel_mig_manager_h_disabled
1599 static inline NV_STATUS kmigmgrDescribeGPUInstances(OBJGPU *arg0, struct KernelMIGManager *arg1, NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS *arg2) {
1600     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1601     return NV_ERR_NOT_SUPPORTED;
1602 }
1603 #else //__nvoc_kernel_mig_manager_h_disabled
1604 #define kmigmgrDescribeGPUInstances(arg0, arg1, arg2) kmigmgrDescribeGPUInstances_IMPL(arg0, arg1, arg2)
1605 #endif //__nvoc_kernel_mig_manager_h_disabled
1606 
1607 NV_STATUS kmigmgrSwizzIdToResourceAllocation_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2, KERNEL_MIG_GPU_INSTANCE *arg3, MIG_RESOURCE_ALLOCATION *arg4);
1608 
1609 #ifdef __nvoc_kernel_mig_manager_h_disabled
1610 static inline NV_STATUS kmigmgrSwizzIdToResourceAllocation(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2, KERNEL_MIG_GPU_INSTANCE *arg3, MIG_RESOURCE_ALLOCATION *arg4) {
1611     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1612     return NV_ERR_NOT_SUPPORTED;
1613 }
1614 #else //__nvoc_kernel_mig_manager_h_disabled
1615 #define kmigmgrSwizzIdToResourceAllocation(arg0, arg1, swizzId, arg2, arg3, arg4) kmigmgrSwizzIdToResourceAllocation_IMPL(arg0, arg1, swizzId, arg2, arg3, arg4)
1616 #endif //__nvoc_kernel_mig_manager_h_disabled
1617 
1618 NV_STATUS kmigmgrAllocComputeInstanceHandles_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3);
1619 
1620 #ifdef __nvoc_kernel_mig_manager_h_disabled
1621 static inline NV_STATUS kmigmgrAllocComputeInstanceHandles(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3) {
1622     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1623     return NV_ERR_NOT_SUPPORTED;
1624 }
1625 #else //__nvoc_kernel_mig_manager_h_disabled
1626 #define kmigmgrAllocComputeInstanceHandles(arg0, arg1, arg2, arg3) kmigmgrAllocComputeInstanceHandles_IMPL(arg0, arg1, arg2, arg3)
1627 #endif //__nvoc_kernel_mig_manager_h_disabled
1628 
1629 void kmigmgrFreeComputeInstanceHandles_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3);
1630 
1631 #ifdef __nvoc_kernel_mig_manager_h_disabled
1632 static inline void kmigmgrFreeComputeInstanceHandles(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3) {
1633     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1634 }
1635 #else //__nvoc_kernel_mig_manager_h_disabled
1636 #define kmigmgrFreeComputeInstanceHandles(arg0, arg1, arg2, arg3) kmigmgrFreeComputeInstanceHandles_IMPL(arg0, arg1, arg2, arg3)
1637 #endif //__nvoc_kernel_mig_manager_h_disabled
1638 
1639 void kmigmgrReleaseComputeInstanceEngines_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3);
1640 
1641 #ifdef __nvoc_kernel_mig_manager_h_disabled
1642 static inline void kmigmgrReleaseComputeInstanceEngines(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3) {
1643     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1644 }
1645 #else //__nvoc_kernel_mig_manager_h_disabled
1646 #define kmigmgrReleaseComputeInstanceEngines(arg0, arg1, arg2, arg3) kmigmgrReleaseComputeInstanceEngines_IMPL(arg0, arg1, arg2, arg3)
1647 #endif //__nvoc_kernel_mig_manager_h_disabled
1648 
1649 NV_STATUS kmigmgrDeleteComputeInstance_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 CIId, NvBool bUnload);
1650 
1651 #ifdef __nvoc_kernel_mig_manager_h_disabled
1652 static inline NV_STATUS kmigmgrDeleteComputeInstance(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 CIId, NvBool bUnload) {
1653     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1654     return NV_ERR_NOT_SUPPORTED;
1655 }
1656 #else //__nvoc_kernel_mig_manager_h_disabled
1657 #define kmigmgrDeleteComputeInstance(arg0, arg1, arg2, CIId, bUnload) kmigmgrDeleteComputeInstance_IMPL(arg0, arg1, arg2, CIId, bUnload)
1658 #endif //__nvoc_kernel_mig_manager_h_disabled
1659 
1660 NV_STATUS kmigmgrConfigureGPUInstance_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, const KMIGMGR_CONFIGURE_INSTANCE_REQUEST *pConfigRequestPerCi, NvU32 updateEngMask);
1661 
1662 #ifdef __nvoc_kernel_mig_manager_h_disabled
1663 static inline NV_STATUS kmigmgrConfigureGPUInstance(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, const KMIGMGR_CONFIGURE_INSTANCE_REQUEST *pConfigRequestPerCi, NvU32 updateEngMask) {
1664     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1665     return NV_ERR_NOT_SUPPORTED;
1666 }
1667 #else //__nvoc_kernel_mig_manager_h_disabled
1668 #define kmigmgrConfigureGPUInstance(arg0, arg1, swizzId, pConfigRequestPerCi, updateEngMask) kmigmgrConfigureGPUInstance_IMPL(arg0, arg1, swizzId, pConfigRequestPerCi, updateEngMask)
1669 #endif //__nvoc_kernel_mig_manager_h_disabled
1670 
1671 NV_STATUS kmigmgrInvalidateGrGpcMapping_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 grIdx);
1672 
1673 #ifdef __nvoc_kernel_mig_manager_h_disabled
1674 static inline NV_STATUS kmigmgrInvalidateGrGpcMapping(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 grIdx) {
1675     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1676     return NV_ERR_NOT_SUPPORTED;
1677 }
1678 #else //__nvoc_kernel_mig_manager_h_disabled
1679 #define kmigmgrInvalidateGrGpcMapping(arg0, arg1, arg2, grIdx) kmigmgrInvalidateGrGpcMapping_IMPL(arg0, arg1, arg2, grIdx)
1680 #endif //__nvoc_kernel_mig_manager_h_disabled
1681 
1682 NV_STATUS kmigmgrInvalidateGr_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 grIdx);
1683 
1684 #ifdef __nvoc_kernel_mig_manager_h_disabled
1685 static inline NV_STATUS kmigmgrInvalidateGr(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 grIdx) {
1686     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1687     return NV_ERR_NOT_SUPPORTED;
1688 }
1689 #else //__nvoc_kernel_mig_manager_h_disabled
1690 #define kmigmgrInvalidateGr(arg0, arg1, arg2, grIdx) kmigmgrInvalidateGr_IMPL(arg0, arg1, arg2, grIdx)
1691 #endif //__nvoc_kernel_mig_manager_h_disabled
1692 
1693 NvU32 kmigmgrGetNextComputeSize_IMPL(NvBool bGetNextSmallest, NvU32 computeSize);
1694 
1695 #define kmigmgrGetNextComputeSize(bGetNextSmallest, computeSize) kmigmgrGetNextComputeSize_IMPL(bGetNextSmallest, computeSize)
1696 NV_STATUS kmigmgrGetSkylineFromSize_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 computeSize, const NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO **ppSkyline);
1697 
1698 #ifdef __nvoc_kernel_mig_manager_h_disabled
1699 static inline NV_STATUS kmigmgrGetSkylineFromSize(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 computeSize, const NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO **ppSkyline) {
1700     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1701     return NV_ERR_NOT_SUPPORTED;
1702 }
1703 #else //__nvoc_kernel_mig_manager_h_disabled
1704 #define kmigmgrGetSkylineFromSize(arg0, arg1, computeSize, ppSkyline) kmigmgrGetSkylineFromSize_IMPL(arg0, arg1, computeSize, ppSkyline)
1705 #endif //__nvoc_kernel_mig_manager_h_disabled
1706 
1707 NV_STATUS kmigmgrGetComputeProfileFromSize_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 computeSize, NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE *pProfile);
1708 
1709 #ifdef __nvoc_kernel_mig_manager_h_disabled
1710 static inline NV_STATUS kmigmgrGetComputeProfileFromSize(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 computeSize, NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE *pProfile) {
1711     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1712     return NV_ERR_NOT_SUPPORTED;
1713 }
1714 #else //__nvoc_kernel_mig_manager_h_disabled
1715 #define kmigmgrGetComputeProfileFromSize(arg0, arg1, computeSize, pProfile) kmigmgrGetComputeProfileFromSize_IMPL(arg0, arg1, computeSize, pProfile)
1716 #endif //__nvoc_kernel_mig_manager_h_disabled
1717 
1718 NV_STATUS kmigmgrGetComputeProfileFromSmCount_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 smCount, NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE *pProfile);
1719 
1720 #ifdef __nvoc_kernel_mig_manager_h_disabled
1721 static inline NV_STATUS kmigmgrGetComputeProfileFromSmCount(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 smCount, NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE *pProfile) {
1722     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1723     return NV_ERR_NOT_SUPPORTED;
1724 }
1725 #else //__nvoc_kernel_mig_manager_h_disabled
1726 #define kmigmgrGetComputeProfileFromSmCount(arg0, arg1, smCount, pProfile) kmigmgrGetComputeProfileFromSmCount_IMPL(arg0, arg1, smCount, pProfile)
1727 #endif //__nvoc_kernel_mig_manager_h_disabled
1728 
1729 NV_STATUS kmigmgrGetComputeProfileFromGpcCount_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpcCount, NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE *pProfile);
1730 
1731 #ifdef __nvoc_kernel_mig_manager_h_disabled
1732 static inline NV_STATUS kmigmgrGetComputeProfileFromGpcCount(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpcCount, NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE *pProfile) {
1733     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1734     return NV_ERR_NOT_SUPPORTED;
1735 }
1736 #else //__nvoc_kernel_mig_manager_h_disabled
1737 #define kmigmgrGetComputeProfileFromGpcCount(arg0, arg1, gpcCount, pProfile) kmigmgrGetComputeProfileFromGpcCount_IMPL(arg0, arg1, gpcCount, pProfile)
1738 #endif //__nvoc_kernel_mig_manager_h_disabled
1739 
1740 NV_STATUS kmigmgrGetComputeProfileFromCTSId_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 ctsId, NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE *pProfile);
1741 
1742 #ifdef __nvoc_kernel_mig_manager_h_disabled
1743 static inline NV_STATUS kmigmgrGetComputeProfileFromCTSId(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 ctsId, NV2080_CTRL_INTERNAL_MIGMGR_COMPUTE_PROFILE *pProfile) {
1744     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1745     return NV_ERR_NOT_SUPPORTED;
1746 }
1747 #else //__nvoc_kernel_mig_manager_h_disabled
1748 #define kmigmgrGetComputeProfileFromCTSId(arg0, arg1, ctsId, pProfile) kmigmgrGetComputeProfileFromCTSId_IMPL(arg0, arg1, ctsId, pProfile)
1749 #endif //__nvoc_kernel_mig_manager_h_disabled
1750 
1751 NV_STATUS kmigmgrGetInvalidCTSIdMask_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 ctsId, NvU64 *pInvalidCTSIdMask);
1752 
1753 #ifdef __nvoc_kernel_mig_manager_h_disabled
1754 static inline NV_STATUS kmigmgrGetInvalidCTSIdMask(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 ctsId, NvU64 *pInvalidCTSIdMask) {
1755     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1756     return NV_ERR_NOT_SUPPORTED;
1757 }
1758 #else //__nvoc_kernel_mig_manager_h_disabled
1759 #define kmigmgrGetInvalidCTSIdMask(arg0, arg1, ctsId, pInvalidCTSIdMask) kmigmgrGetInvalidCTSIdMask_IMPL(arg0, arg1, ctsId, pInvalidCTSIdMask)
1760 #endif //__nvoc_kernel_mig_manager_h_disabled
1761 
1762 struct NV_RANGE kmigmgrComputeProfileSizeToCTSIdRange_IMPL(NvU32 computeSize);
1763 
1764 #define kmigmgrComputeProfileSizeToCTSIdRange(computeSize) kmigmgrComputeProfileSizeToCTSIdRange_IMPL(computeSize)
1765 NV_STATUS kmigmgrGetFreeCTSId_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 *pCtsId, NvU64 globalValidCtsMask, NvU64 ctsIdInUseMask, NvU32 profileSize);
1766 
1767 #ifdef __nvoc_kernel_mig_manager_h_disabled
1768 static inline NV_STATUS kmigmgrGetFreeCTSId(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 *pCtsId, NvU64 globalValidCtsMask, NvU64 ctsIdInUseMask, NvU32 profileSize) {
1769     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1770     return NV_ERR_NOT_SUPPORTED;
1771 }
1772 #else //__nvoc_kernel_mig_manager_h_disabled
1773 #define kmigmgrGetFreeCTSId(arg0, arg1, pCtsId, globalValidCtsMask, ctsIdInUseMask, profileSize) kmigmgrGetFreeCTSId_IMPL(arg0, arg1, pCtsId, globalValidCtsMask, ctsIdInUseMask, profileSize)
1774 #endif //__nvoc_kernel_mig_manager_h_disabled
1775 
1776 NvU32 kmigmgrGetComputeSizeFromCTSId_IMPL(NvU32 ctsId);
1777 
1778 #define kmigmgrGetComputeSizeFromCTSId(ctsId) kmigmgrGetComputeSizeFromCTSId_IMPL(ctsId)
1779 NvU32 kmigmgrSmallestComputeProfileSize_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1);
1780 
1781 #ifdef __nvoc_kernel_mig_manager_h_disabled
1782 static inline NvU32 kmigmgrSmallestComputeProfileSize(OBJGPU *arg0, struct KernelMIGManager *arg1) {
1783     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1784     return 0;
1785 }
1786 #else //__nvoc_kernel_mig_manager_h_disabled
1787 #define kmigmgrSmallestComputeProfileSize(arg0, arg1) kmigmgrSmallestComputeProfileSize_IMPL(arg0, arg1)
1788 #endif //__nvoc_kernel_mig_manager_h_disabled
1789 
1790 void kmigmgrSetCTSIdInUse_IMPL(KERNEL_MIG_GPU_INSTANCE *arg0, NvU32 ctsId, NvU32 grId, NvBool bInUse);
1791 
1792 #define kmigmgrSetCTSIdInUse(arg0, ctsId, grId, bInUse) kmigmgrSetCTSIdInUse_IMPL(arg0, ctsId, grId, bInUse)
1793 NV_STATUS kmigmgrXlateSpanStartToCTSId_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 computeSize, NvU32 spanStart, NvU32 *pCtsId);
1794 
1795 #ifdef __nvoc_kernel_mig_manager_h_disabled
1796 static inline NV_STATUS kmigmgrXlateSpanStartToCTSId(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 computeSize, NvU32 spanStart, NvU32 *pCtsId) {
1797     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1798     return NV_ERR_NOT_SUPPORTED;
1799 }
1800 #else //__nvoc_kernel_mig_manager_h_disabled
1801 #define kmigmgrXlateSpanStartToCTSId(arg0, arg1, computeSize, spanStart, pCtsId) kmigmgrXlateSpanStartToCTSId_IMPL(arg0, arg1, computeSize, spanStart, pCtsId)
1802 #endif //__nvoc_kernel_mig_manager_h_disabled
1803 
1804 NV_STATUS kmigmgrGetSlotBasisMask_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU64 *pMask);
1805 
1806 #ifdef __nvoc_kernel_mig_manager_h_disabled
1807 static inline NV_STATUS kmigmgrGetSlotBasisMask(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU64 *pMask) {
1808     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1809     return NV_ERR_NOT_SUPPORTED;
1810 }
1811 #else //__nvoc_kernel_mig_manager_h_disabled
1812 #define kmigmgrGetSlotBasisMask(arg0, arg1, pMask) kmigmgrGetSlotBasisMask_IMPL(arg0, arg1, pMask)
1813 #endif //__nvoc_kernel_mig_manager_h_disabled
1814 
1815 NvU32 kmigmgrGetSpanStartFromCTSId_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 ctsId);
1816 
1817 #ifdef __nvoc_kernel_mig_manager_h_disabled
1818 static inline NvU32 kmigmgrGetSpanStartFromCTSId(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 ctsId) {
1819     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1820     return 0;
1821 }
1822 #else //__nvoc_kernel_mig_manager_h_disabled
1823 #define kmigmgrGetSpanStartFromCTSId(arg0, arg1, ctsId) kmigmgrGetSpanStartFromCTSId_IMPL(arg0, arg1, ctsId)
1824 #endif //__nvoc_kernel_mig_manager_h_disabled
1825 
1826 NvBool kmigmgrIsCTSIdAvailable_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU64 ctsIdValidMask, NvU64 ctsIdInUseMask, NvU32 ctsId);
1827 
1828 #ifdef __nvoc_kernel_mig_manager_h_disabled
1829 static inline NvBool kmigmgrIsCTSIdAvailable(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU64 ctsIdValidMask, NvU64 ctsIdInUseMask, NvU32 ctsId) {
1830     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1831     return NV_FALSE;
1832 }
1833 #else //__nvoc_kernel_mig_manager_h_disabled
1834 #define kmigmgrIsCTSIdAvailable(arg0, arg1, ctsIdValidMask, ctsIdInUseMask, ctsId) kmigmgrIsCTSIdAvailable_IMPL(arg0, arg1, ctsIdValidMask, ctsIdInUseMask, ctsId)
1835 #endif //__nvoc_kernel_mig_manager_h_disabled
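
//
// Example (illustrative sketch only): choosing a CTS id for a compute profile,
// preferring a caller-supplied candidate when it is valid and unused. The valid
// and in-use masks are assumed to come from the caller, and the wrapper name is
// hypothetical.
//
#if 0 // excluded from compilation; for illustration only
static NV_STATUS
kmigmgrExamplePickCtsId(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager,
                        NvU64 validMask, NvU64 inUseMask, NvU32 profileSize,
                        NvU32 ctsIdPreferred, NvU32 *pCtsId)
{
    // Use the preferred CTS id if the MIG manager reports it as available.
    if (kmigmgrIsCTSIdAvailable(pGpu, pKernelMIGManager, validMask, inUseMask, ctsIdPreferred))
    {
        *pCtsId = ctsIdPreferred;
        return NV_OK;
    }

    // Otherwise fall back to whatever free id the MIG manager selects.
    return kmigmgrGetFreeCTSId(pGpu, pKernelMIGManager, pCtsId, validMask, inUseMask, profileSize);
}
#endif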
1836 
1837 NV_STATUS kmigmgrUpdateCiConfigForVgpu_IMPL(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager, NvU32 execPartCount, NvU32 *pExecPartId, NvU32 gfid, NvBool bDelete);
1838 
1839 #ifdef __nvoc_kernel_mig_manager_h_disabled
1840 static inline NV_STATUS kmigmgrUpdateCiConfigForVgpu(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager, NvU32 execPartCount, NvU32 *pExecPartId, NvU32 gfid, NvBool bDelete) {
1841     NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!");
1842     return NV_ERR_NOT_SUPPORTED;
1843 }
1844 #else //__nvoc_kernel_mig_manager_h_disabled
1845 #define kmigmgrUpdateCiConfigForVgpu(pGpu, pKernelMIGManager, execPartCount, pExecPartId, gfid, bDelete) kmigmgrUpdateCiConfigForVgpu_IMPL(pGpu, pKernelMIGManager, execPartCount, pExecPartId, gfid, bDelete)
1846 #endif //__nvoc_kernel_mig_manager_h_disabled
1847 
1848 #undef PRIVATE_FIELD
1849 
1850 
1851 #endif // KERNEL_MIG_MANAGER_H
1852 
1853 
1854 #ifdef __cplusplus
1855 } // extern "C"
1856 #endif
1857 #endif // _G_KERNEL_MIG_MANAGER_NVOC_H_
1858