/*
 * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "mem_mgr/gpu_vaspace.h"
#include "gpu/mem_mgr/virt_mem_allocator.h"
#include "kernel/gpu/fifo/kernel_ctxshare.h"
#include "kernel/gpu/fifo/kernel_channel_group_api.h"
#include "kernel/gpu/fifo/kernel_channel_group.h"
#include "vgpu/rpc.h"
#include "gpu/device/device.h"
#include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
#include "kernel/gpu/gr/kernel_graphics_manager.h"
#include "core/locks.h"
#include "gpu/mem_mgr/vaspace_api.h"
#include "rmapi/rs_utils.h"

#define SUBCTXID_EHEAP_OWNER NvU32_BUILD('n','v','r','m')

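/**
 * @brief Constructs a context share API object (KernelCtxShareApi).
 *
 * A context share must be allocated as a child of a TSG (channel group).
 * The constructor resolves the VA space to use -- the parent TSG's pVAS for
 * RM-internal legacy context shares, otherwise the VAS named by
 * pAllocParams->hVASpace (resolved through vaspaceGetByHandleOrDeviceDefault,
 * which falls back to the device default VAS when the handle is 0) -- and then
 * allocates and initializes the shared KernelCtxShare tracking object.
 *
 * Illustrative client-side allocation parameters (sketch only; the class and
 * allocation entry point used by a client are outside the scope of this file):
 *
 *     NV_CTXSHARE_ALLOCATION_PARAMETERS params = {0};
 *     params.hVASpace = hVASpace; // 0 selects the device default VA space
 *     params.flags    = NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC;
 *     // Allocate the context share as a child of the TSG handle;
 *     // params.subctxId returns the assigned subcontext ID (veId) on success.
 */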
NV_STATUS
kctxshareapiConstruct_IMPL
(
    KernelCtxShareApi *pKernelCtxShareApi,
    CALL_CONTEXT *pCallContext,
    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
)
{
    NV_STATUS                           rmStatus     = NV_OK;
    OBJVASPACE                         *pVAS;
    OBJGPU                             *pGpu         = GPU_RES_GET_GPU(pKernelCtxShareApi);
    KernelChannelGroupApi              *pKernelChannelGroupApi;
    KernelChannelGroup                 *pKernelChannelGroup;
    Device                             *pDevice      = NULL;
    RsResourceRef                      *pChanGrpRef;
    RsClient                           *pClient;
    NvHandle                            hDevice;
    NvHandle                            hClient      = pParams->hClient;
    NvHandle                            hVASpace     = 0;
    NV_CTXSHARE_ALLOCATION_PARAMETERS  *pUserParams  = pParams->pAllocParams;
    RsShared                           *pShared      = NULL;

    // A context share is a child of a TSG, so the parent TSG must exist.
    if (CliGetChannelGroup(pParams->hClient, pParams->hParent,
            &pChanGrpRef, &hDevice) == NV_OK)
    {
        pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
                                             KernelChannelGroupApi);
        NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL,
                            NV_ERR_INVALID_STATE);
        pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
    }
    else
    {
        return NV_ERR_INVALID_OBJECT_HANDLE;
    }

    // Copy Constructor path
    if (RS_IS_COPY_CTOR(pParams))
    {
        rmStatus = kctxshareapiCopyConstruct_IMPL(pKernelCtxShareApi, pCallContext, pParams);
        return rmStatus;
    }

    rmStatus = serverGetClientUnderLock(&g_resServ, hClient, &pClient);
    if (rmStatus != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "Invalid client handle!\n");
        return NV_ERR_INVALID_ARGUMENT;
    }

    //
    // Depending on the VA mode, allocating a context share may require allocation
    // parameters that carry a VA space handle. If multiple-VA-space mode is
    // enabled (no implicit VA space under the device), a VA space handle is required.
    //
    // OPTIONAL_MULTIVA or SINGLE_VA modes: use the device VA space.
    //

    rmStatus = deviceGetByHandle(pClient, hDevice, &pDevice);
    if (rmStatus != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "Invalid parent/device handle!\n");
        return NV_ERR_INVALID_ARGUMENT;
    }

    hVASpace = pUserParams->hVASpace;
    NV_ASSERT((hVASpace == NV01_NULL_OBJECT) || (pDevice->vaMode != NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE));

    if (pKernelChannelGroup->bLegacyMode)
    {
        //
        // RM is pre-allocating the kctxshares for legacy mode.
        // In this case, we use the parent TSG's pVAS rather than
        // the hVASpace param.
        //
        NV_PRINTF(LEVEL_INFO, "Constructing Legacy Context Share\n");
        NV_ASSERT(hVASpace == NV01_NULL_OBJECT);
        pVAS = pKernelChannelGroup->pVAS;

        pKernelCtxShareApi->hVASpace = pKernelChannelGroupApi->hVASpace;
    }
    else
    {
        NV_PRINTF(LEVEL_INFO, "Constructing Client Allocated Context Share\n");
        rmStatus = vaspaceGetByHandleOrDeviceDefault(pClient, hDevice, hVASpace, &pVAS);

        pKernelCtxShareApi->hVASpace = hVASpace;
    }

    NV_ASSERT_OR_RETURN((rmStatus == NV_OK), rmStatus);
    NV_ASSERT_OR_RETURN((pVAS != NULL), NV_ERR_INVALID_STATE);

    NV_ASSERT_OK_OR_GOTO(rmStatus,
                         serverAllocShareWithHalspecParent(&g_resServ, classInfo(KernelCtxShare), &pShared, staticCast(pGpu, Object)),
                         failed);

    NV_ASSERT_OK_OR_GOTO(rmStatus,
                         kctxshareInitCommon(dynamicCast(pShared, KernelCtxShare),
                                             pKernelCtxShareApi,
                                             pGpu,
                                             pVAS,
                                             pUserParams->flags,
                                             &pUserParams->subctxId,
                                             pKernelChannelGroupApi),
                        failed);

    pKernelCtxShareApi->pShareData = dynamicCast(pShared, KernelCtxShare);

    if (hVASpace != NV01_NULL_OBJECT)
    {
        RsResourceRef *pVASpaceRef;
        rmStatus = clientGetResourceRef(pCallContext->pClient, hVASpace, &pVASpaceRef);
        if (rmStatus != NV_OK)
            goto failed;

        refAddDependant(pVASpaceRef, pCallContext->pResourceRef);
    }

    if (pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT)
    {
        RsResourceRef *pKernelGraphicsContextRef;
        rmStatus = clientGetResourceRef(pCallContext->pClient, pKernelChannelGroupApi->hKernelGraphicsContext, &pKernelGraphicsContextRef);
        if (rmStatus != NV_OK)
            goto failed;

        refAddDependant(pKernelGraphicsContextRef, pCallContext->pResourceRef);
    }

failed:
    if (rmStatus != NV_OK)
    {
        if (pShared)
        {
            serverFreeShare(&g_resServ, pShared);
        }
    }

    return rmStatus;
}

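/**
 * @brief Destructor for KernelCtxShareApi.
 *
 * Drops this API object's reference on the shared KernelCtxShare. If other
 * references remain, only the share refcount is decremented; on the last
 * reference the underlying share is torn down via kctxshareDestroyCommon()
 * before being freed.
 */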
void
kctxshareapiDestruct_IMPL
(
    KernelCtxShareApi *pKernelCtxShareApi
)
{
    CALL_CONTEXT                *pCallContext;
    RS_RES_FREE_PARAMS_INTERNAL *pParams;
    OBJGPU                      *pGpu = GPU_RES_GET_GPU(pKernelCtxShareApi);
    KernelChannelGroupApi       *pKernelChannelGroupApi = NULL;
    KernelChannelGroup          *pKernelChannelGroup    = NULL;
    RsResourceRef               *pChanGrpRef;
    RsShared                    *pShared    = NULL;
    NvS32                        refcnt     = 0;

    resGetFreeParams(staticCast(pKernelCtxShareApi, RsResource), &pCallContext, &pParams);
    pChanGrpRef = pCallContext->pResourceRef->pParentRef;
    if (pChanGrpRef != NULL)
    {
        pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
                                             KernelChannelGroupApi);
        pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
    }

    NV_ASSERT(pKernelChannelGroup);

    if (pKernelCtxShareApi->pShareData != NULL)
    {
        NV_ASSERT(pKernelCtxShareApi->pShareData->pKernelChannelGroup ==
                  pKernelChannelGroup);


        NV_PRINTF(LEVEL_INFO, "KernelCtxShareApi Ptr: %p   ChanGrp: %p  !\n",
                  pKernelCtxShareApi, pKernelCtxShareApi->pShareData->pKernelChannelGroup);

        pShared = staticCast(pKernelCtxShareApi->pShareData, RsShared);
        refcnt  = serverGetShareRefCount(&g_resServ, pShared);

        NV_PRINTF(LEVEL_INFO, "kctxshareapiDestruct_IMPL called on KernelCtxShare %p with refcnt %d\n",
          pShared, refcnt);

        NV_ASSERT_OR_RETURN_VOID(refcnt >= 1);

        if (refcnt > 1)
        {
            //
            // serverFreeShare would delete the object automatically if the count hit 0,
            // but we still need the object around to free its underlying resources.
            // For this reason, we only decrement the refcount here when no full free is needed.
            //
            serverFreeShare(&g_resServ, pShared);

            NV_PRINTF(LEVEL_INFO, "kctxshareapiDestruct_IMPL: KernelCtxShare %p has %d references left\n",
                      pShared, refcnt-1);
        }
        else
        {
            NV_PRINTF(LEVEL_INFO, "kctxshareapiDestruct_IMPL: KernelCtxShare %p has no more references, destroying...\n",
                      pShared);

            pParams->status = kctxshareDestroyCommon(pKernelCtxShareApi->pShareData, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi);
            NV_ASSERT(pParams->status == NV_OK);

            serverFreeShare(&g_resServ, pShared);
        }
    }
}

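/**
 * @brief Copy constructor for KernelCtxShareApi.
 *
 * Points the new API object at the source object's shared KernelCtxShare and
 * takes an additional reference on it, re-establishes the VAS and graphics
 * context dependencies under the destination client, and, for vGPU/GSP
 * clients, duplicates the object on the other side via RPC unless the share
 * belongs to an RM-internal legacy channel group.
 */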
NV_STATUS
kctxshareapiCopyConstruct_IMPL
(
    KernelCtxShareApi *pKernelCtxShareApi,
    CALL_CONTEXT *pCallContext,
    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
)
{
    NV_STATUS      rmStatus     = NV_OK;
    OBJGPU        *pGpu         = GPU_RES_GET_GPU(pKernelCtxShareApi);
    RsClient      *pDstClient   = pCallContext->pClient;
    RsResourceRef *pDstRef      = pCallContext->pResourceRef;
    RsResourceRef *pSrcRef      = pParams->pSrcRef;
    KernelCtxShareApi *pKernelCtxShareSrc = dynamicCast(pSrcRef->pResource, KernelCtxShareApi);
    KernelChannelGroupApi *pKernelChannelGroupApi;
    RS_ITERATOR    iter;
    RsResourceRef *pVaspaceRef  = NULL;
    VaSpaceApi    *pVaspaceApi  = NULL;
    RsResourceRef *pChanGrpRef  = pDstRef->pParentRef;

    pKernelCtxShareApi->pShareData = pKernelCtxShareSrc->pShareData;

    RsShared *pShared = staticCast(pKernelCtxShareApi->pShareData, RsShared);
    serverRefShare(&g_resServ, pShared);

    iter =  serverutilRefIter(pDstClient->hClient, pDstRef->pParentRef->pParentRef->hResource, classId(VaSpaceApi), RS_ITERATE_DESCENDANTS, NV_TRUE);
    while (clientRefIterNext(iter.pClient, &iter))
    {
        pVaspaceRef = iter.pResourceRef;
        pVaspaceApi = dynamicCast(pVaspaceRef->pResource, VaSpaceApi);
        NV_ASSERT_OR_ELSE(pVaspaceApi != NULL, rmStatus = NV_ERR_INVALID_STATE; goto done);

        if (pVaspaceApi->pVASpace == pKernelCtxShareApi->pShareData->pVAS)
        {
            refAddDependant(pVaspaceRef, pDstRef);
            break;
        }
    }

    pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
                                         KernelChannelGroupApi);
    NV_ASSERT_OR_ELSE(pKernelChannelGroupApi != NULL,
                      rmStatus = NV_ERR_INVALID_STATE; goto done);

    if (pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT)
    {
        RsResourceRef *pKernelGraphicsContextRef;
        NV_ASSERT_OK_OR_ELSE(rmStatus,
                             clientGetResourceRef(pCallContext->pClient, pKernelChannelGroupApi->hKernelGraphicsContext, &pKernelGraphicsContextRef),
                             goto done);

        refAddDependant(pKernelGraphicsContextRef, pDstRef);
    }

    //
    // For legacy internal kctxshares, RPC is handled by the channelgroup object's copy ctor,
    // so we skip the automatic RPC here
    //
    if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) && !pKernelCtxShareApi->pShareData->pKernelChannelGroup->bLegacyMode)
    {
        NV_RM_RPC_DUP_OBJECT(pGpu, pDstClient->hClient, pDstRef->pParentRef->hResource, pDstRef->hResource,
                             pParams->pSrcClient->hClient, pSrcRef->hResource, 0,
                             NV_TRUE, // automatically issue RPC_FREE on object free
                             pDstRef, rmStatus);
    }

done:
    if (rmStatus != NV_OK)
    {
        serverFreeShare(&g_resServ, pShared);
    }

    return rmStatus;
}

NvBool
kctxshareapiCanCopy_IMPL
(
    KernelCtxShareApi *pKernelCtxShareApi
)
{
    return NV_TRUE;
}

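//
// KernelCtxShare construction is intentionally a no-op; the shared object is
// populated by kctxshareInitCommon() immediately after allocation.
//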
NV_STATUS
kctxshareConstruct_IMPL
(
    KernelCtxShare *pKernelCtxShare
)
{
    return NV_OK;
}

/**
 * @brief Initializes a new context share tracking structure.
 *
 * To be called immediately after allocation. Initializes a broadcast context
 * share object to what the client specified, then registers the context share
 * in the channel group's subcontext ID heap based on the flag provided.
 *
 * @param pKernelCtxShare
 * @param pKernelCtxShareApi
 * @param pGpu
 * @param[in] pVAS
 * @param[in] Flags
 * @param[in,out] pSubctxId
 * @param[in] pKernelChannelGroupApi
 */
NV_STATUS
kctxshareInitCommon_IMPL
(
    KernelCtxShare        *pKernelCtxShare,
    KernelCtxShareApi     *pKernelCtxShareApi,
    OBJGPU                *pGpu,
    OBJVASPACE            *pVAS,
    NvU32                  Flags,
    NvU32                 *pSubctxId,
    KernelChannelGroupApi *pKernelChannelGroupApi
)
{
    NV_STATUS           status                = NV_OK;
    NvU32               heapFlag              = 0;
    NvU64               offset                = 0;
    NvU64               size                  = 1;
    PEMEMBLOCK          pBlock;
    KernelChannelGroup *pKernelChannelGroup;

    NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE);
    pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
    NV_ASSERT(pKernelChannelGroup != NULL);
    NV_ASSERT(pVAS != NULL);

    // GPU lock must be held before calling this function
    LOCK_ASSERT_AND_RETURN(rmDeviceGpuLockIsOwner(pGpu->gpuInstance));

    //
    // For an externally owned VAS, create the subcontext only after the
    // SetPageDirectory() call has been made. This ensures that the new PDB
    // is updated in every channel's subcontext array.
    // See Bug 1805222 comment #11 for more details.
    //
    if (!IsAMODEL(pGpu) && vaspaceIsExternallyOwned(pVAS))
    {
        SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
        if (vaspaceGetPageDirBase(pVAS, pGpu) == NULL)
        {
            NV_ASSERT(0);
            SLI_LOOP_RETURN(NV_ERR_INVALID_STATE);
        }
        SLI_LOOP_END
    }

    // If flag is equal to SYNC, allocate context share from veId 0.
    if (Flags == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC)
    {
        heapFlag = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE;
        offset   = 0;
    }
    //
    // If the flag is Async, we want to allocate a free block in reverse order.
    // This allocates a block between veId 1 and veId 63.
    // If no blocks are available between veId 1 and veId 63, use veId 0.
    //
    else if (Flags == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC)
    {
        heapFlag = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN;
    }
    else if (Flags == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SPECIFIED)
    {
        heapFlag = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE;
        offset   = *pSubctxId;
    }
    else
    {
        DBG_BREAKPOINT();
        return NV_ERR_INVALID_ARGUMENT;
    }

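    //
    // Reserve a subcontext ID (veId) slot from the TSG's subctx ID heap.
    // A single slot is requested (size 1, alignment 1) under the
    // SUBCTXID_EHEAP_OWNER tag; heapFlag/offset chosen above select either a
    // fixed veId or a grows-down search. On success, offset holds the
    // assigned veId and pBlock tracks the allocation.
    //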
    status = pKernelChannelGroup->pSubctxIdHeap->eheapAlloc(
        pKernelChannelGroup->pSubctxIdHeap,
        SUBCTXID_EHEAP_OWNER,
        &heapFlag,
        &offset,
        &size,
        1,
        1,
        &pBlock,
        NULL,
        NULL);
    if (status != NV_OK)
    {
        return status;
    }

    pKernelCtxShare->pVAS                = pVAS;
    pKernelCtxShare->subctxId            = NvU64_LO32(offset);
    pKernelCtxShare->pKernelChannelGroup = pKernelChannelGroup;
    pKernelCtxShare->flags               = Flags;

    pBlock->pData = (void *)pKernelCtxShare;

    status = kctxshareInit_HAL(pKernelCtxShare, pKernelCtxShareApi, pGpu, pVAS,
                               pKernelChannelGroupApi, offset, pBlock);

    if (status != NV_OK)
    {
        goto done;
    }

// @todo Code under the "done" label below should handle the failure case
done:
    if (status == NV_OK)
    {
       *pSubctxId   = NvU64_LO32(offset);

        NV_PRINTF(LEVEL_INFO,
                  "New Context Share 0x%p allocated with id 0x%x\n",
                  pKernelCtxShare, NvU64_LO32(offset));
    }
    else
    {
        NV_STATUS tmpStatus;

        tmpStatus = pKernelChannelGroup->pSubctxIdHeap->eheapFree(
            pKernelChannelGroup->pSubctxIdHeap,
            offset);
        NV_ASSERT(tmpStatus == NV_OK);

        NV_PRINTF(LEVEL_INFO,
                  "Context Share 0x%p allocation with id 0x%x failed, status is %x\n",
                  pKernelCtxShare, NvU64_LO32(offset), status);
    }

    return status;
}

/**
 * @brief Frees a context share tracking structure if no references remain.
 *
 * This function should be used to free a kctxshare rather than freeing the
 * object directly via serverFreeShare or objDelete.
 * Frees child ENGINE_CTX_DESCRIPTORs but does not free any memory pointed at
 * by pPrivCtxData.  It is the responsibility of the user of that memory to
 * ensure it is freed before this function is called (or that another pointer
 * exists).
 *
 * @param[in] pKernelCtxShare
 * @param[in] pKernelCtxShareApi
 * @param pGpu
 * @param pKernelChannelGroupApi
 */
NV_STATUS
kctxshareDestroyCommon_IMPL
(
    KernelCtxShare *pKernelCtxShare,
    KernelCtxShareApi *pKernelCtxShareApi,
    OBJGPU *pGpu,
    KernelChannelGroupApi *pKernelChannelGroupApi
)
{
    NV_STATUS               status = NV_OK;
    NvU32                   subctxId;
    NvU32                   i;
    KernelChannelGroup     *pKernelChannelGroup;
    NvU64                   numMax = 0;
    NvBool                  bRelease = NV_TRUE;
    RsShared               *pShared = NULL;
    NvS32                   refcnt = 0;

    NV_ASSERT_OR_RETURN(pKernelCtxShare != NULL, NV_ERR_INVALID_STATE);

    // This function should only be called on the last free of the object
    pShared = staticCast(pKernelCtxShare, RsShared);
    refcnt  = serverGetShareRefCount(&g_resServ, pShared);
    NV_ASSERT_OR_RETURN(refcnt == 1, NV_ERR_INVALID_STATE);

    // GPU lock must be held before calling this function
    LOCK_ASSERT_AND_RETURN(rmDeviceGpuLockIsOwner(pGpu->gpuInstance));

    pKernelChannelGroup = pKernelCtxShare->pKernelChannelGroup;
    NV_ASSERT(pKernelChannelGroup == pKernelChannelGroupApi->pKernelChannelGroup);
    subctxId = pKernelCtxShare->subctxId;

    //
    // Handle the case when VAS is shared by subcontexts.
    // Release the shared resources only when the last subcontext using this VAS is freed.
    //
    status = pKernelChannelGroup->pSubctxIdHeap->eheapGetSize(
        pKernelChannelGroup->pSubctxIdHeap,
        &numMax);
    NV_ASSERT(status == NV_OK);

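    //
    // Scan all other allocated subctx IDs in the TSG: if another live
    // subcontext still uses this VAS, keep the per-VAS resources by passing
    // bRelease = NV_FALSE to kctxshareDestroy_HAL below.
    //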
    for (i = 0; i < numMax; i++)
    {
        if (i == pKernelCtxShare->subctxId)
        {
            continue;
        }

        PEMEMBLOCK pBlock = pKernelChannelGroup->pSubctxIdHeap->eheapGetBlock(
            pKernelChannelGroup->pSubctxIdHeap,
            i,
            NV_FALSE);
        if (pBlock)
        {
            OBJVASPACE *pSubctxVAS = ((KernelCtxShare *)pBlock->pData)->pVAS;
            if (pSubctxVAS == pKernelCtxShare->pVAS)
            {
                bRelease = NV_FALSE;
                break;
            }
        }
    }

    status = kctxshareDestroy_HAL(pKernelCtxShare, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi, bRelease);
    if (status != NV_OK)
    {
        goto fail;
    }

    status = pKernelChannelGroup->pSubctxIdHeap->eheapFree(
        pKernelChannelGroup->pSubctxIdHeap,
        subctxId);
    NV_ASSERT_OR_GOTO(status == NV_OK, fail);

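    //
    // NOTE: "fail" is a fall-through label; it is also reached on success,
    // and the final status selects which message is logged below.
    //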
fail:
    if (status == NV_OK)
    {
        NV_PRINTF(LEVEL_INFO, "Freed Context Share 0x%p with id 0x%x\n",
                  pKernelCtxShare, subctxId);
    }
    else
    {
        NV_PRINTF(LEVEL_INFO, "Failed to free Context Share 0x%p with id 0x%x\n",
                  pKernelCtxShare, subctxId);
    }

    return status;
}

void
kctxshareDestruct_IMPL
(
    KernelCtxShare *pKernelCtxShare
)
{
    //
    // Assert that kctxshareDestroyCommon was called to free kctxshare resources
    // before getting here, by checking that the subctxId has been freed from the heap.
    // pKernelChannelGroup may not be set if the kctxshare failed initialization.
    //
    if (pKernelCtxShare->pKernelChannelGroup != NULL)
    {
        PEMEMBLOCK pBlock =
            pKernelCtxShare->pKernelChannelGroup->pSubctxIdHeap->eheapGetBlock(
                pKernelCtxShare->pKernelChannelGroup->pSubctxIdHeap,
                pKernelCtxShare->subctxId,
                NV_FALSE);

        NV_ASSERT(pBlock == NULL);
    }
}