1 #ifndef _G_HEAP_NVOC_H_
2 #define _G_HEAP_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 
32 #include "g_heap_nvoc.h"
33 
34 #ifndef _OBJHEAP_H_
35 #define _OBJHEAP_H_
36 
37 /**************** Resource Manager Defines and Structures ******************\
38 *                                                                           *
39 *       Defines and structures used for the Heap Object. The heap object    *
40 *       is responsible for allocating memory based on usage and memory      *
41 *       configuration.                                                      *
42 *                                                                           *
43 \***************************************************************************/
44 
45 #include "nvlimits.h" // NV_MAX_SUBDEVICES
46 #include "gpu/mem_mgr/heap_base.h"
47 #include "core/core.h"
48 #include "gpu/mem_mgr/mem_desc.h"
49 #include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h"
50 #include "ctrl/ctrl2080/ctrl2080fb.h" // NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO
51 #include "resserv/resserv.h"
52 #include "resserv/rs_resource.h"
53 #include "containers/eheap_old.h"
54 
55 struct Memory;
56 
57 #ifndef __NVOC_CLASS_Memory_TYPEDEF__
58 #define __NVOC_CLASS_Memory_TYPEDEF__
59 typedef struct Memory Memory;
60 #endif /* __NVOC_CLASS_Memory_TYPEDEF__ */
61 
62 #ifndef __nvoc_class_id_Memory
63 #define __nvoc_class_id_Memory 0x4789f2
64 #endif /* __nvoc_class_id_Memory */
65 
66 
67 
//
// Scratch/placement data handed to heapAlloc (see heapAlloc_IMPL below).
//
typedef struct
{
    NvU64 alignment;            // requested alignment, in bytes
    NvU64 allocLo;              // low bound of the allocation range -- TODO confirm in/out direction
    NvU64 allocAl;              // aligned allocation offset -- TODO confirm in/out direction
    NvU64 allocHi;              // high bound of the allocation range -- TODO confirm in/out direction
    NvU64 allocSize;            // allocation size, in bytes
    NvBool ignoreBankPlacement; // NV_TRUE bypasses the bank-placement heuristics
} OBJHEAP_ALLOC_DATA;
77 
78 // New Stuff for WDDM
//
// Allocation-hint parameters (WDDM support path); consumed by heapAllocHint
// (see heapAllocHint_IMPL below). Pointer members appear to be in/out
// values updated by the hint computation -- TODO confirm per-field direction.
//
typedef struct
{
    NvU32   client;              // client id
    NvU32   owner;               // owner id
    NvU32   type;
    NvU32   flags;
    NvU32  *pHeight;             // surface height
    NvU32  *pWidth;              // surface width
    NvU32  *pPitch;              // surface pitch
    NvU64  *pSize;               // allocation size
    NvU64  *pAlignment;          // allocation alignment
    NvU32  *pAttr;               // surface attributes
    NvU32  *pAttr2;              // additional surface attributes
    NvU32  *pKind;               // memory kind
    NvU32   bankPlacement;       // see BANK_PLACEMENT_* / MEM_GROW_* encodings above
    NvBool  ignoreBankPlacement; // NV_TRUE bypasses the bank-placement heuristics
    NvU64   pad;
    NvU64   alignAdjust;
    NvU32   format;
} HEAP_ALLOC_HINT_PARAMS;
99 
//
// Request/result bundle for HW-resource-backed memory allocation;
// passed to heapHwAlloc (see heapHwAlloc_IMPL below).
//
typedef struct
{
    NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS *pUserParams; // caller-supplied allocation parameters
    NvU32            pad;
    NvU32            hwResId;        // HW resource id -- presumably filled in by the allocation; verify
    void            *bindResultFunc; // opaque (type-erased) callback pointer
    void            *pHandle;        // opaque handle -- semantics not visible here
    HWRESOURCE_INFO  hwResource;     // resulting HW resource description -- TODO confirm
} MEMORY_HW_RESOURCES_ALLOCATION_REQUEST;
109 
110 //
111 // Growth placement and direction modifiers - the grow direction if a bank placement
112 // fails (bit 7) | grow direction within a bank (bit 6) | bank number (bits 0..5)
113 // gives the algorithm the information to try placing in the specified bank with the
114 // specified direction within a bank.  If a bank placement fails, use the grow direction
115 // to search for free space anywhere.  Each bank placement group (image, depth, ..)
116 // gets MEM_NUM_BANKS_TO_TRY such bytes in a NvU32.
117 //
118 
119 // grow direction within a bank
120 #define BANK_MEM_GROW_UP                  0x00
121 #define BANK_MEM_GROW_DOWN                0x40
122 #define BANK_MEM_GROW_MASK                0x40
123 
124 // grow direction if a bank placement fails
125 #define MEM_GROW_UP                       0x00
126 #define MEM_GROW_DOWN                     0x80
127 #define MEM_GROW_MASK                     0x80
128 
129 // other defines
130 #define MEM_BANK_MASK                     0x3F
131 #define MEM_NO_BANK_SELECTION             0xFF
132 #define MEM_NUM_BANKS_TO_TRY              0x1 // check a max of 1 bank
133 #define MEM_BANK_DATA_SIZE                0x8 // store everything in a byte
134 
135 //
136 // defines the number of NvU32's in the bank placement array, which defines
137 // what kinds of allocations go where (see heap.c)
138 // If more types need to be handled separately in terms of bank placement,
139 // increase this number, add another define, and add some code to heapCreate
140 // and heapAlloc
141 //
142 #define BANK_PLACEMENT_IMAGE                0
143 #define BANK_PLACEMENT_DEPTH                1
144 #define BANK_PLACEMENT_TEX_OVERLAY_FONT     2
145 #define BANK_PLACEMENT_OTHER                3
146 #define BANK_PLACEMENT_NUM_GROUPS           0x00000004
147 
//
// Maximum value of the MEM_BLOCK refCount field (declared NvU32 below).
// NOTE(review): an older comment here claimed the field was only 8 bits,
// which no longer matches the current 32-bit definition.
//
151 #define HEAP_MAX_REF_COUNT      0xFFFFFFFF
152 
153 //
154 // any allocations done for internal RM data structures from the heap should be
155 // marked as one of the following values.
156 // this is required so the RM can report back the internal scratch memory allocations
157 // in NVOS32_FUNCTION_INFO_TYPE_ALLOC_BLOCKS::NVOS32_TYPE_RM_SCRATCH
158 //
159 #define HEAP_OWNER_RM_SCRATCH_BEGIN         0xDEAF0000
160 #define HEAP_OWNER_RM_CHANNEL_INSTMEM       (HEAP_OWNER_RM_SCRATCH_BEGIN + 1)
161 #define HEAP_OWNER_RM_CHANNEL_CTX_BUFFER    (HEAP_OWNER_RM_SCRATCH_BEGIN + 2)
162 #define HEAP_OWNER_RM_VIDEO_UCODE           (HEAP_OWNER_RM_SCRATCH_BEGIN + 3)
163 #define HEAP_OWNER_RM_FB_BUG_147656         (HEAP_OWNER_RM_SCRATCH_BEGIN + 4)
164 #define HEAP_OWNER_RM_FB_BUG_177053         (HEAP_OWNER_RM_SCRATCH_BEGIN + 5)
165 #define HEAP_OWNER_RM_DSI_INST_MEM          (HEAP_OWNER_RM_SCRATCH_BEGIN + 6)
166 #define HEAP_OWNER_RM_CTX_SAVE_AREAS        (HEAP_OWNER_RM_SCRATCH_BEGIN + 7)
167 #define HEAP_OWNER_RM_RESERVED_REGION       (HEAP_OWNER_RM_SCRATCH_BEGIN + 8)
168 #define HEAP_OWNER_RM_SCRATCH_END           (HEAP_OWNER_RM_SCRATCH_BEGIN + 9)   // make this the last
169 
170 #define HEAP_OWNER_RM_KERNEL_CLIENT         (HEAP_OWNER_RM_SCRATCH_END + 1)
171 #define HEAP_OWNER_PMA_RESERVED_REGION      (HEAP_OWNER_RM_SCRATCH_END + 2)
172 #define HEAP_OWNER_RM_CLIENT_GENERIC        (HEAP_OWNER_RM_SCRATCH_END + 3)
173 
174 //
175 // size of the texture buffer array, when more than 4 clients detected,
176 // kill one of the clients listed in the client texture buffer
177 //
178 #define MAX_TEXTURE_CLIENT_IDS  4
179 
180 
181 //
182 // HEAP object is being created for multiple usecases now. Initial heap object created during RM init manages the whole FB
183 // and there are usecases such as PhysicalMemorySuballocator which uses HEAP to manage its internal allocations. We need to
184 // differentiate these heaps to allow/block certain features such as scrub/PMA etc.
185 //
// Internal heap flavor; used to allow/block features (scrub/PMA etc.)
// depending on what the heap instance manages.
typedef enum
{
    HEAP_TYPE_RM_GLOBAL                    = 0x0,       // HEAP created by RM to manage entire FB
    HEAP_TYPE_PHYS_MEM_SUBALLOCATOR        = 0x1,       // HEAP created by clients to manage Physical Memory Suballocations
    HEAP_TYPE_PARTITION_LOCAL              = 0x2,       // HEAP created by RM to manage memory assigned to a SMC partition
} HEAP_TYPE_INTERNAL;
192 
193 /*!
194  * Structure to hold references to PhysMemSubAlloc resource
195  */
/*!
 * Structure to hold references to PhysMemSubAlloc resource.
 * Presumably stored in Heap::pHeapTypeSpecificData for
 * HEAP_TYPE_PHYS_MEM_SUBALLOCATOR heaps -- verify against heap.c.
 */
typedef struct _def_physmemsuballoc_data
{
    void               *pObject;       // PMSA object (type-erased)
    MEMORY_DESCRIPTOR *pMemDesc;       // Parent memdesc from which memory managed by PMSA is allocated
} PHYS_MEM_SUBALLOCATOR_DATA;
201 
// Forward typedef so MEM_BLOCK can reference itself in its link pointers.
typedef struct MEM_BLOCK MEM_BLOCK;

// One range of heap memory (allocated or free) tracked by the heap's
// block list/tree. The [begin, end] offsets bound the range; free blocks
// are additionally chained through the u0/u1 free-list pointers.
struct MEM_BLOCK
{
    NvBool allocedMemDesc;  // presumably NV_TRUE when pMemDesc was created by the heap -- verify
    NvU8 reserved0;
    NvU8 reserved1;
    NvU32 owner;            // owner id (RM-internal owners use HEAP_OWNER_* values above)
    NvHandle mhandle;       // memory handle associated with this block
    NvU64 begin;            // start offset of the block
    NvU64 align;            // alignment used for the allocation
    NvU64 alignPad; // padding to beginning of surface from aligned start (hack for NV50 perf work)
    NvU64 end;              // end offset of the block
    NvU32 textureId;        // texture client id (see TEX_INFO)
    NvU32 format;
    NvU32 pitch;     // allocated surface pitch, needed for realloc
    NvU32 height;    // allocated surface height, needed for realloc
    NvU32 width;     // allocated surface width, needed for realloc
    NvU32 refCount;
    NODE node;              // node embedded in the heap's block tree
    MEMORY_DESCRIPTOR *pMemDesc;    // Back pointer to the memory descriptor for this allocation
    HWRESOURCE_INFO hwResource;
    // u0/u1 overlay allocation-type data with free-list links; which member
    // is live presumably depends on whether the block is free -- verify.
    union
    {
        NvU32     type;
        MEM_BLOCK *prevFree;
    } u0;
    union
    {
        MEM_BLOCK *nextFree;
    } u1;
    MEM_BLOCK *prev;        // previous block in the overall block list
    MEM_BLOCK *next;        // next block in the overall block list

    // hooks into noncontig block freelist
    MEM_BLOCK *nextFreeNoncontig;
    MEM_BLOCK *prevFreeNoncontig;
    MEM_BLOCK *noncontigAllocListNext;
};
240 
// Per-texture-client tracking entry; the heap keeps MAX_TEXTURE_CLIENT_IDS
// of these (see Heap::textureData).
typedef struct TEX_INFO
{
    NvU32 clientId;                 // texture client id
    NvU32 refCount;                 // how many textures have been allocated wrt this client
    NvU8 placementFlags;            // how texture is grown
    NvBool mostRecentAllocatedFlag; // most recently allocated client
} TEX_INFO;
248 
249 #define NV_HEAP_PAGE_OFFLINE_TYPE           31:29
250 #define NV_HEAP_PAGE_OFFLINE_PAGE_NUMBER    27:0
251 
// Bookkeeping for a single blacklisted (retired/offlined) FB page.
typedef struct
{
    MEMORY_DESCRIPTOR  *pMemDesc;                // memory descriptor for the blacklisted page
    NvU64               physOffset;              // physical offset of blacklisted FB address
    NvU64               size;                    // size of the blacklisted page
    NvBool              bIsValid;                // If the blacklisted address is still managed by RM
    NvBool              bPendingRetirement;      // if the dynamically blacklisted pages is pending to be retired.
} BLACKLIST_CHUNK;
260 
// Dynamic array of blacklisted-page bookkeeping entries.
typedef struct
{
    BLACKLIST_CHUNK    *pBlacklistChunks;   // array of `count` entries
    NvU32               count;              // number of valid entries
} BLACKLIST;
266 
// Dynamic array of blacklisted page addresses.
typedef struct
{
    NvU32 count;             // number of valid entries in `data`
    BLACKLIST_ADDRESS* data; // array of `count` addresses
} BLACKLIST_ADDRESSES;
272 
273 #define SHUFFLE_STRIDE_MAX 5
274 
275 
276 // Private field names are wrapped in PRIVATE_FIELD, which does nothing for
277 // the matching C source file, but causes diagnostics to be issued if another
278 // source file references the field.
279 #ifdef NVOC_HEAP_H_PRIVATE_ACCESS_ALLOWED
280 #define PRIVATE_FIELD(x) x
281 #else
282 #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
283 #endif
284 
// Heap object: manages a range of FB memory as a list/tree of MEM_BLOCKs.
// Instances come in several flavors (see HEAP_TYPE_INTERNAL above).
struct Heap {
    const struct NVOC_RTTI *__nvoc_rtti;        // NVOC runtime type info
    struct Object __nvoc_base_Object;           // NVOC base-class instance
    struct Object *__nvoc_pbase_Object;         // cached base-class pointer
    struct Heap *__nvoc_pbase_Heap;             // cached this-class pointer
    NvBool PDB_PROP_HEAP_NONCONTIG_ALLOC_BY_DEFAULT;
    NvBool PDB_PROP_HEAP_PAGE_SHUFFLE;
    HEAP_TYPE_INTERNAL heapType;                // which heap flavor this instance is
    void *pHeapTypeSpecificData;                // flavor-specific data, e.g. PHYS_MEM_SUBALLOCATOR_DATA -- TODO confirm
    NvU64 refCount;                             // managed via heapAddRef/heapRemoveRef
    NvBool bHasFbRegions;
    NvU64 base;                                 // start offset of the managed range
    NvU64 total;                                // total managed size, in bytes
    NvU64 free;                                 // bytes currently free
    NvU64 reserved;                             // bytes reserved
    struct MEM_BLOCK *pBlockList;               // list of all blocks
    struct MEM_BLOCK *pFreeBlockList;           // list of free blocks
    NODE *pBlockTree;                           // tree for block lookup (see heapGetBlock)
    NvHandle memHandle;
    NvU32 numBlocks;                            // see heapGetNumBlocks
    TEX_INFO textureData[4];                    // size matches MAX_TEXTURE_CLIENT_IDS
    struct MEM_BLOCK *pNoncontigFreeBlockList;  // freelist for noncontiguous blocks
    BLACKLIST_ADDRESSES blackListAddresses;     // blacklisted page addresses
    BLACKLIST blackList;                        // blacklisted page bookkeeping
    NvU32 dynamicBlacklistSize;
    NvU32 staticBlacklistSize;
    NvU32 placementStrategy[4];                 // size matches BANK_PLACEMENT_NUM_GROUPS
    NvU32 shuffleStrides[5];                    // size matches SHUFFLE_STRIDE_MAX
    NvU32 shuffleStrideIndex;
    PMA pmaObject;                              // embedded physical memory allocator state
    NvU64 peakInternalUsage;                    // peak internal (RM) usage, in bytes -- TODO confirm units
    NvU64 peakExternalUsage;                    // peak external (client) usage -- TODO confirm
    NvU64 currInternalUsage;                    // current internal usage -- TODO confirm
    NvU64 currExternalUsage;                    // current external usage -- TODO confirm
};
320 
321 #ifndef __NVOC_CLASS_Heap_TYPEDEF__
322 #define __NVOC_CLASS_Heap_TYPEDEF__
323 typedef struct Heap Heap;
324 #endif /* __NVOC_CLASS_Heap_TYPEDEF__ */
325 
326 #ifndef __nvoc_class_id_Heap
327 #define __nvoc_class_id_Heap 0x556e9a
328 #endif /* __nvoc_class_id_Heap */
329 
330 extern const struct NVOC_CLASS_DEF __nvoc_class_def_Heap;
331 
332 #define __staticCast_Heap(pThis) \
333     ((pThis)->__nvoc_pbase_Heap)
334 
335 #ifdef __nvoc_heap_h_disabled
336 #define __dynamicCast_Heap(pThis) ((Heap*)NULL)
337 #else //__nvoc_heap_h_disabled
338 #define __dynamicCast_Heap(pThis) \
339     ((Heap*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Heap)))
340 #endif //__nvoc_heap_h_disabled
341 
342 #define PDB_PROP_HEAP_NONCONTIG_ALLOC_BY_DEFAULT_BASE_CAST
343 #define PDB_PROP_HEAP_NONCONTIG_ALLOC_BY_DEFAULT_BASE_NAME PDB_PROP_HEAP_NONCONTIG_ALLOC_BY_DEFAULT
344 #define PDB_PROP_HEAP_PAGE_SHUFFLE_BASE_CAST
345 #define PDB_PROP_HEAP_PAGE_SHUFFLE_BASE_NAME PDB_PROP_HEAP_PAGE_SHUFFLE
346 
347 NV_STATUS __nvoc_objCreateDynamic_Heap(Heap**, Dynamic*, NvU32, va_list);
348 
349 NV_STATUS __nvoc_objCreate_Heap(Heap**, Dynamic*, NvU32);
350 #define __objCreate_Heap(ppNewObj, pParent, createFlags) \
351     __nvoc_objCreate_Heap((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
352 
353 NV_STATUS heapInit_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3, HEAP_TYPE_INTERNAL arg4, NvU32 arg5, void *arg6);
354 
355 #ifdef __nvoc_heap_h_disabled
heapInit(struct OBJGPU * arg0,struct Heap * arg1,NvU64 arg2,NvU64 arg3,HEAP_TYPE_INTERNAL arg4,NvU32 arg5,void * arg6)356 static inline NV_STATUS heapInit(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3, HEAP_TYPE_INTERNAL arg4, NvU32 arg5, void *arg6) {
357     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
358     return NV_ERR_NOT_SUPPORTED;
359 }
360 #else //__nvoc_heap_h_disabled
361 #define heapInit(arg0, arg1, arg2, arg3, arg4, arg5, arg6) heapInit_IMPL(arg0, arg1, arg2, arg3, arg4, arg5, arg6)
362 #endif //__nvoc_heap_h_disabled
363 
364 NV_STATUS heapInitInternal_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3, HEAP_TYPE_INTERNAL arg4, void *arg5);
365 
366 #ifdef __nvoc_heap_h_disabled
heapInitInternal(struct OBJGPU * arg0,struct Heap * arg1,NvU64 arg2,NvU64 arg3,HEAP_TYPE_INTERNAL arg4,void * arg5)367 static inline NV_STATUS heapInitInternal(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3, HEAP_TYPE_INTERNAL arg4, void *arg5) {
368     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
369     return NV_ERR_NOT_SUPPORTED;
370 }
371 #else //__nvoc_heap_h_disabled
372 #define heapInitInternal(arg0, arg1, arg2, arg3, arg4, arg5) heapInitInternal_IMPL(arg0, arg1, arg2, arg3, arg4, arg5)
373 #endif //__nvoc_heap_h_disabled
374 
375 void heapDestruct_IMPL(struct Heap *arg0);
376 
377 #define __nvoc_heapDestruct(arg0) heapDestruct_IMPL(arg0)
378 NV_STATUS heapAlloc_IMPL(struct OBJGPU *arg0, NvHandle arg1, struct Heap *arg2, MEMORY_ALLOCATION_REQUEST *arg3, NvHandle arg4, OBJHEAP_ALLOC_DATA *arg5, FB_ALLOC_INFO *arg6, HWRESOURCE_INFO **arg7, NvBool *arg8, NvBool arg9, NvBool arg10);
379 
380 #ifdef __nvoc_heap_h_disabled
heapAlloc(struct OBJGPU * arg0,NvHandle arg1,struct Heap * arg2,MEMORY_ALLOCATION_REQUEST * arg3,NvHandle arg4,OBJHEAP_ALLOC_DATA * arg5,FB_ALLOC_INFO * arg6,HWRESOURCE_INFO ** arg7,NvBool * arg8,NvBool arg9,NvBool arg10)381 static inline NV_STATUS heapAlloc(struct OBJGPU *arg0, NvHandle arg1, struct Heap *arg2, MEMORY_ALLOCATION_REQUEST *arg3, NvHandle arg4, OBJHEAP_ALLOC_DATA *arg5, FB_ALLOC_INFO *arg6, HWRESOURCE_INFO **arg7, NvBool *arg8, NvBool arg9, NvBool arg10) {
382     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
383     return NV_ERR_NOT_SUPPORTED;
384 }
385 #else //__nvoc_heap_h_disabled
386 #define heapAlloc(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) heapAlloc_IMPL(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10)
387 #endif //__nvoc_heap_h_disabled
388 
389 NV_STATUS heapFree_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvHandle hClient, NvHandle hDevice, NvU32 owner, MEMORY_DESCRIPTOR *pMemDesc);
390 
391 #ifdef __nvoc_heap_h_disabled
heapFree(struct OBJGPU * arg0,struct Heap * arg1,NvHandle hClient,NvHandle hDevice,NvU32 owner,MEMORY_DESCRIPTOR * pMemDesc)392 static inline NV_STATUS heapFree(struct OBJGPU *arg0, struct Heap *arg1, NvHandle hClient, NvHandle hDevice, NvU32 owner, MEMORY_DESCRIPTOR *pMemDesc) {
393     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
394     return NV_ERR_NOT_SUPPORTED;
395 }
396 #else //__nvoc_heap_h_disabled
397 #define heapFree(arg0, arg1, hClient, hDevice, owner, pMemDesc) heapFree_IMPL(arg0, arg1, hClient, hDevice, owner, pMemDesc)
398 #endif //__nvoc_heap_h_disabled
399 
400 NV_STATUS heapReference_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU32 arg2, MEMORY_DESCRIPTOR *arg3);
401 
402 #ifdef __nvoc_heap_h_disabled
heapReference(struct OBJGPU * arg0,struct Heap * arg1,NvU32 arg2,MEMORY_DESCRIPTOR * arg3)403 static inline NV_STATUS heapReference(struct OBJGPU *arg0, struct Heap *arg1, NvU32 arg2, MEMORY_DESCRIPTOR *arg3) {
404     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
405     return NV_ERR_NOT_SUPPORTED;
406 }
407 #else //__nvoc_heap_h_disabled
408 #define heapReference(arg0, arg1, arg2, arg3) heapReference_IMPL(arg0, arg1, arg2, arg3)
409 #endif //__nvoc_heap_h_disabled
410 
411 NV_STATUS heapInfo_IMPL(struct Heap *arg0, NvU64 *arg1, NvU64 *arg2, NvU64 *arg3, NvU64 *arg4, NvU64 *arg5);
412 
413 #ifdef __nvoc_heap_h_disabled
heapInfo(struct Heap * arg0,NvU64 * arg1,NvU64 * arg2,NvU64 * arg3,NvU64 * arg4,NvU64 * arg5)414 static inline NV_STATUS heapInfo(struct Heap *arg0, NvU64 *arg1, NvU64 *arg2, NvU64 *arg3, NvU64 *arg4, NvU64 *arg5) {
415     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
416     return NV_ERR_NOT_SUPPORTED;
417 }
418 #else //__nvoc_heap_h_disabled
419 #define heapInfo(arg0, arg1, arg2, arg3, arg4, arg5) heapInfo_IMPL(arg0, arg1, arg2, arg3, arg4, arg5)
420 #endif //__nvoc_heap_h_disabled
421 
422 NV_STATUS heapInfoTypeAllocBlocks_IMPL(struct Heap *arg0, NvU32 arg1, NvU64 *arg2);
423 
424 #ifdef __nvoc_heap_h_disabled
heapInfoTypeAllocBlocks(struct Heap * arg0,NvU32 arg1,NvU64 * arg2)425 static inline NV_STATUS heapInfoTypeAllocBlocks(struct Heap *arg0, NvU32 arg1, NvU64 *arg2) {
426     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
427     return NV_ERR_NOT_SUPPORTED;
428 }
429 #else //__nvoc_heap_h_disabled
430 #define heapInfoTypeAllocBlocks(arg0, arg1, arg2) heapInfoTypeAllocBlocks_IMPL(arg0, arg1, arg2)
431 #endif //__nvoc_heap_h_disabled
432 
433 NV_STATUS heapGetSize_IMPL(struct Heap *arg0, NvU64 *arg1);
434 
435 #ifdef __nvoc_heap_h_disabled
heapGetSize(struct Heap * arg0,NvU64 * arg1)436 static inline NV_STATUS heapGetSize(struct Heap *arg0, NvU64 *arg1) {
437     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
438     return NV_ERR_NOT_SUPPORTED;
439 }
440 #else //__nvoc_heap_h_disabled
441 #define heapGetSize(arg0, arg1) heapGetSize_IMPL(arg0, arg1)
442 #endif //__nvoc_heap_h_disabled
443 
444 NV_STATUS heapGetFree_IMPL(struct Heap *arg0, NvU64 *arg1);
445 
446 #ifdef __nvoc_heap_h_disabled
heapGetFree(struct Heap * arg0,NvU64 * arg1)447 static inline NV_STATUS heapGetFree(struct Heap *arg0, NvU64 *arg1) {
448     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
449     return NV_ERR_NOT_SUPPORTED;
450 }
451 #else //__nvoc_heap_h_disabled
452 #define heapGetFree(arg0, arg1) heapGetFree_IMPL(arg0, arg1)
453 #endif //__nvoc_heap_h_disabled
454 
455 NV_STATUS heapGetUsableSize_IMPL(struct Heap *arg0, NvU64 *arg1);
456 
457 #ifdef __nvoc_heap_h_disabled
heapGetUsableSize(struct Heap * arg0,NvU64 * arg1)458 static inline NV_STATUS heapGetUsableSize(struct Heap *arg0, NvU64 *arg1) {
459     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
460     return NV_ERR_NOT_SUPPORTED;
461 }
462 #else //__nvoc_heap_h_disabled
463 #define heapGetUsableSize(arg0, arg1) heapGetUsableSize_IMPL(arg0, arg1)
464 #endif //__nvoc_heap_h_disabled
465 
466 NV_STATUS heapGetBase_IMPL(struct Heap *arg0, NvU64 *arg1);
467 
468 #ifdef __nvoc_heap_h_disabled
heapGetBase(struct Heap * arg0,NvU64 * arg1)469 static inline NV_STATUS heapGetBase(struct Heap *arg0, NvU64 *arg1) {
470     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
471     return NV_ERR_NOT_SUPPORTED;
472 }
473 #else //__nvoc_heap_h_disabled
474 #define heapGetBase(arg0, arg1) heapGetBase_IMPL(arg0, arg1)
475 #endif //__nvoc_heap_h_disabled
476 
477 NV_STATUS heapGetBlock_IMPL(struct Heap *arg0, NvU64 arg1, struct MEM_BLOCK **arg2);
478 
479 #ifdef __nvoc_heap_h_disabled
heapGetBlock(struct Heap * arg0,NvU64 arg1,struct MEM_BLOCK ** arg2)480 static inline NV_STATUS heapGetBlock(struct Heap *arg0, NvU64 arg1, struct MEM_BLOCK **arg2) {
481     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
482     return NV_ERR_NOT_SUPPORTED;
483 }
484 #else //__nvoc_heap_h_disabled
485 #define heapGetBlock(arg0, arg1, arg2) heapGetBlock_IMPL(arg0, arg1, arg2)
486 #endif //__nvoc_heap_h_disabled
487 
488 NV_STATUS heapGetBlockHandle_IMPL(struct Heap *arg0, NvU32 arg1, NvU32 arg2, NvU64 arg3, NvBool arg4, NvHandle *arg5);
489 
490 #ifdef __nvoc_heap_h_disabled
heapGetBlockHandle(struct Heap * arg0,NvU32 arg1,NvU32 arg2,NvU64 arg3,NvBool arg4,NvHandle * arg5)491 static inline NV_STATUS heapGetBlockHandle(struct Heap *arg0, NvU32 arg1, NvU32 arg2, NvU64 arg3, NvBool arg4, NvHandle *arg5) {
492     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
493     return NV_ERR_NOT_SUPPORTED;
494 }
495 #else //__nvoc_heap_h_disabled
496 #define heapGetBlockHandle(arg0, arg1, arg2, arg3, arg4, arg5) heapGetBlockHandle_IMPL(arg0, arg1, arg2, arg3, arg4, arg5)
497 #endif //__nvoc_heap_h_disabled
498 
499 NvU32 heapGetNumBlocks_IMPL(struct Heap *arg0);
500 
501 #ifdef __nvoc_heap_h_disabled
heapGetNumBlocks(struct Heap * arg0)502 static inline NvU32 heapGetNumBlocks(struct Heap *arg0) {
503     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
504     return 0;
505 }
506 #else //__nvoc_heap_h_disabled
507 #define heapGetNumBlocks(arg0) heapGetNumBlocks_IMPL(arg0)
508 #endif //__nvoc_heap_h_disabled
509 
510 NV_STATUS heapGetBlockInfo_IMPL(struct Heap *arg0, NvU32 arg1, NVOS32_HEAP_DUMP_BLOCK *arg2);
511 
512 #ifdef __nvoc_heap_h_disabled
heapGetBlockInfo(struct Heap * arg0,NvU32 arg1,NVOS32_HEAP_DUMP_BLOCK * arg2)513 static inline NV_STATUS heapGetBlockInfo(struct Heap *arg0, NvU32 arg1, NVOS32_HEAP_DUMP_BLOCK *arg2) {
514     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
515     return NV_ERR_NOT_SUPPORTED;
516 }
517 #else //__nvoc_heap_h_disabled
518 #define heapGetBlockInfo(arg0, arg1, arg2) heapGetBlockInfo_IMPL(arg0, arg1, arg2)
519 #endif //__nvoc_heap_h_disabled
520 
521 NV_STATUS heapAllocHint_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvHandle arg2, NvHandle arg3, HEAP_ALLOC_HINT_PARAMS *arg4);
522 
523 #ifdef __nvoc_heap_h_disabled
heapAllocHint(struct OBJGPU * arg0,struct Heap * arg1,NvHandle arg2,NvHandle arg3,HEAP_ALLOC_HINT_PARAMS * arg4)524 static inline NV_STATUS heapAllocHint(struct OBJGPU *arg0, struct Heap *arg1, NvHandle arg2, NvHandle arg3, HEAP_ALLOC_HINT_PARAMS *arg4) {
525     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
526     return NV_ERR_NOT_SUPPORTED;
527 }
528 #else //__nvoc_heap_h_disabled
529 #define heapAllocHint(arg0, arg1, arg2, arg3, arg4) heapAllocHint_IMPL(arg0, arg1, arg2, arg3, arg4)
530 #endif //__nvoc_heap_h_disabled
531 
532 NV_STATUS heapHwAlloc_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvHandle arg2, NvHandle arg3, NvHandle arg4, MEMORY_HW_RESOURCES_ALLOCATION_REQUEST *arg5, NvU32 *arg6, NvU32 *arg7);
533 
534 #ifdef __nvoc_heap_h_disabled
heapHwAlloc(struct OBJGPU * arg0,struct Heap * arg1,NvHandle arg2,NvHandle arg3,NvHandle arg4,MEMORY_HW_RESOURCES_ALLOCATION_REQUEST * arg5,NvU32 * arg6,NvU32 * arg7)535 static inline NV_STATUS heapHwAlloc(struct OBJGPU *arg0, struct Heap *arg1, NvHandle arg2, NvHandle arg3, NvHandle arg4, MEMORY_HW_RESOURCES_ALLOCATION_REQUEST *arg5, NvU32 *arg6, NvU32 *arg7) {
536     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
537     return NV_ERR_NOT_SUPPORTED;
538 }
539 #else //__nvoc_heap_h_disabled
540 #define heapHwAlloc(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) heapHwAlloc_IMPL(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
541 #endif //__nvoc_heap_h_disabled
542 
543 void heapHwFree_IMPL(struct OBJGPU *arg0, struct Heap *arg1, struct Memory *arg2, NvU32 arg3);
544 
545 #ifdef __nvoc_heap_h_disabled
heapHwFree(struct OBJGPU * arg0,struct Heap * arg1,struct Memory * arg2,NvU32 arg3)546 static inline void heapHwFree(struct OBJGPU *arg0, struct Heap *arg1, struct Memory *arg2, NvU32 arg3) {
547     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
548 }
549 #else //__nvoc_heap_h_disabled
550 #define heapHwFree(arg0, arg1, arg2, arg3) heapHwFree_IMPL(arg0, arg1, arg2, arg3)
551 #endif //__nvoc_heap_h_disabled
552 
553 NV_STATUS heapFreeBlockCount_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU32 *arg2);
554 
555 #ifdef __nvoc_heap_h_disabled
heapFreeBlockCount(struct OBJGPU * arg0,struct Heap * arg1,NvU32 * arg2)556 static inline NV_STATUS heapFreeBlockCount(struct OBJGPU *arg0, struct Heap *arg1, NvU32 *arg2) {
557     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
558     return NV_ERR_NOT_SUPPORTED;
559 }
560 #else //__nvoc_heap_h_disabled
561 #define heapFreeBlockCount(arg0, arg1, arg2) heapFreeBlockCount_IMPL(arg0, arg1, arg2)
562 #endif //__nvoc_heap_h_disabled
563 
564 NV_STATUS heapFreeBlockInfo_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU32 arg2, void *arg3);
565 
566 #ifdef __nvoc_heap_h_disabled
heapFreeBlockInfo(struct OBJGPU * arg0,struct Heap * arg1,NvU32 arg2,void * arg3)567 static inline NV_STATUS heapFreeBlockInfo(struct OBJGPU *arg0, struct Heap *arg1, NvU32 arg2, void *arg3) {
568     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
569     return NV_ERR_NOT_SUPPORTED;
570 }
571 #else //__nvoc_heap_h_disabled
572 #define heapFreeBlockInfo(arg0, arg1, arg2, arg3) heapFreeBlockInfo_IMPL(arg0, arg1, arg2, arg3)
573 #endif //__nvoc_heap_h_disabled
574 
575 NV_STATUS heapInitRegistryOverrides_IMPL(struct OBJGPU *arg0, struct Heap *arg1);
576 
577 #ifdef __nvoc_heap_h_disabled
heapInitRegistryOverrides(struct OBJGPU * arg0,struct Heap * arg1)578 static inline NV_STATUS heapInitRegistryOverrides(struct OBJGPU *arg0, struct Heap *arg1) {
579     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
580     return NV_ERR_NOT_SUPPORTED;
581 }
582 #else //__nvoc_heap_h_disabled
583 #define heapInitRegistryOverrides(arg0, arg1) heapInitRegistryOverrides_IMPL(arg0, arg1)
584 #endif //__nvoc_heap_h_disabled
585 
586 NV_STATUS heapBlackListPages_IMPL(struct OBJGPU *arg0, struct Heap *arg1);
587 
588 #ifdef __nvoc_heap_h_disabled
heapBlackListPages(struct OBJGPU * arg0,struct Heap * arg1)589 static inline NV_STATUS heapBlackListPages(struct OBJGPU *arg0, struct Heap *arg1) {
590     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
591     return NV_ERR_NOT_SUPPORTED;
592 }
593 #else //__nvoc_heap_h_disabled
594 #define heapBlackListPages(arg0, arg1) heapBlackListPages_IMPL(arg0, arg1)
595 #endif //__nvoc_heap_h_disabled
596 
597 NV_STATUS heapFreeBlackListedPages_IMPL(struct OBJGPU *arg0, struct Heap *arg1);
598 
599 #ifdef __nvoc_heap_h_disabled
heapFreeBlackListedPages(struct OBJGPU * arg0,struct Heap * arg1)600 static inline NV_STATUS heapFreeBlackListedPages(struct OBJGPU *arg0, struct Heap *arg1) {
601     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
602     return NV_ERR_NOT_SUPPORTED;
603 }
604 #else //__nvoc_heap_h_disabled
605 #define heapFreeBlackListedPages(arg0, arg1) heapFreeBlackListedPages_IMPL(arg0, arg1)
606 #endif //__nvoc_heap_h_disabled
607 
608 NV_STATUS heapAddPageToBlackList_IMPL(struct OBJGPU *pGpu, struct Heap *pHeap, NvU64 pageNumber, NvU32 type);
609 
610 #ifdef __nvoc_heap_h_disabled
heapAddPageToBlackList(struct OBJGPU * pGpu,struct Heap * pHeap,NvU64 pageNumber,NvU32 type)611 static inline NV_STATUS heapAddPageToBlackList(struct OBJGPU *pGpu, struct Heap *pHeap, NvU64 pageNumber, NvU32 type) {
612     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
613     return NV_ERR_NOT_SUPPORTED;
614 }
615 #else //__nvoc_heap_h_disabled
616 #define heapAddPageToBlackList(pGpu, pHeap, pageNumber, type) heapAddPageToBlackList_IMPL(pGpu, pHeap, pageNumber, type)
617 #endif //__nvoc_heap_h_disabled
618 
619 NV_STATUS heapStoreBlackList_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU64 *arg2, NvU64 *arg3, NvU32 arg4);
620 
621 #ifdef __nvoc_heap_h_disabled
heapStoreBlackList(struct OBJGPU * arg0,struct Heap * arg1,NvU64 * arg2,NvU64 * arg3,NvU32 arg4)622 static inline NV_STATUS heapStoreBlackList(struct OBJGPU *arg0, struct Heap *arg1, NvU64 *arg2, NvU64 *arg3, NvU32 arg4) {
623     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
624     return NV_ERR_NOT_SUPPORTED;
625 }
626 #else //__nvoc_heap_h_disabled
627 #define heapStoreBlackList(arg0, arg1, arg2, arg3, arg4) heapStoreBlackList_IMPL(arg0, arg1, arg2, arg3, arg4)
628 #endif //__nvoc_heap_h_disabled
629 
630 NvBool heapIsPmaManaged_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3);
631 
632 #ifdef __nvoc_heap_h_disabled
heapIsPmaManaged(struct OBJGPU * arg0,struct Heap * arg1,NvU64 arg2,NvU64 arg3)633 static inline NvBool heapIsPmaManaged(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3) {
634     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
635     return NV_FALSE;
636 }
637 #else //__nvoc_heap_h_disabled
638 #define heapIsPmaManaged(arg0, arg1, arg2, arg3) heapIsPmaManaged_IMPL(arg0, arg1, arg2, arg3)
639 #endif //__nvoc_heap_h_disabled
640 
641 NvU32 heapAddRef_IMPL(struct Heap *arg0);
642 
643 #ifdef __nvoc_heap_h_disabled
heapAddRef(struct Heap * arg0)644 static inline NvU32 heapAddRef(struct Heap *arg0) {
645     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
646     return 0;
647 }
648 #else //__nvoc_heap_h_disabled
649 #define heapAddRef(arg0) heapAddRef_IMPL(arg0)
650 #endif //__nvoc_heap_h_disabled
651 
652 NvU32 heapRemoveRef_IMPL(struct Heap *arg0);
653 
654 #ifdef __nvoc_heap_h_disabled
heapRemoveRef(struct Heap * arg0)655 static inline NvU32 heapRemoveRef(struct Heap *arg0) {
656     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
657     return 0;
658 }
659 #else //__nvoc_heap_h_disabled
660 #define heapRemoveRef(arg0) heapRemoveRef_IMPL(arg0)
661 #endif //__nvoc_heap_h_disabled
662 
663 NV_STATUS heapResize_IMPL(struct Heap *arg0, NvS64 arg1);
664 
665 #ifdef __nvoc_heap_h_disabled
heapResize(struct Heap * arg0,NvS64 arg1)666 static inline NV_STATUS heapResize(struct Heap *arg0, NvS64 arg1) {
667     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
668     return NV_ERR_NOT_SUPPORTED;
669 }
670 #else //__nvoc_heap_h_disabled
671 #define heapResize(arg0, arg1) heapResize_IMPL(arg0, arg1)
672 #endif //__nvoc_heap_h_disabled
673 
674 void heapFilterBlackListPages_IMPL(struct Heap *arg0, NvU64 arg1, NvU64 arg2);
675 
676 #ifdef __nvoc_heap_h_disabled
heapFilterBlackListPages(struct Heap * arg0,NvU64 arg1,NvU64 arg2)677 static inline void heapFilterBlackListPages(struct Heap *arg0, NvU64 arg1, NvU64 arg2) {
678     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
679 }
680 #else //__nvoc_heap_h_disabled
681 #define heapFilterBlackListPages(arg0, arg1, arg2) heapFilterBlackListPages_IMPL(arg0, arg1, arg2)
682 #endif //__nvoc_heap_h_disabled
683 
684 NV_STATUS heapStorePendingBlackList_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3);
685 
686 #ifdef __nvoc_heap_h_disabled
heapStorePendingBlackList(struct OBJGPU * arg0,struct Heap * arg1,NvU64 arg2,NvU64 arg3)687 static inline NV_STATUS heapStorePendingBlackList(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3) {
688     NV_ASSERT_FAILED_PRECOMP("Heap was disabled!");
689     return NV_ERR_NOT_SUPPORTED;
690 }
691 #else //__nvoc_heap_h_disabled
692 #define heapStorePendingBlackList(arg0, arg1, arg2, arg3) heapStorePendingBlackList_IMPL(arg0, arg1, arg2, arg3)
693 #endif //__nvoc_heap_h_disabled
694 
695 #undef PRIVATE_FIELD
696 
697 
698 #endif // _OBJHEAP_H_
699 
700 #ifdef __cplusplus
701 } // extern "C"
702 #endif
703 
704 #endif // _G_HEAP_NVOC_H_
705