/*
 * SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "mem_mgr/gpu_vaspace.h"
#include "gpu/mem_mgr/virt_mem_allocator.h"
#include "kernel/gpu/fifo/kernel_ctxshare.h"
#include "kernel/gpu/fifo/kernel_channel_group_api.h"
#include "kernel/gpu/fifo/kernel_channel_group.h"
#include "vgpu/rpc.h"
#include "gpu/device/device.h"
#include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
#include "kernel/gpu/gr/kernel_graphics_manager.h"
#include "core/locks.h"
#include "gpu/mem_mgr/vaspace_api.h"
#include "rmapi/rs_utils.h"
#include "platform/sli/sli.h"
#include "containers/eheap_old.h"

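// Owner tag ('nvrm') used when reserving subcontext IDs from a TSG's subctxId eheap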
#define SUBCTXID_EHEAP_OWNER NvU32_BUILD('n','v','r','m')

NV_STATUS
kctxshareapiConstruct_IMPL
(
    KernelCtxShareApi *pKernelCtxShareApi,
    CALL_CONTEXT *pCallContext,
    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
)
{
    NV_STATUS                           rmStatus     = NV_OK;
    OBJVASPACE                         *pVAS;
    OBJGPU                             *pGpu         = GPU_RES_GET_GPU(pKernelCtxShareApi);
    KernelChannelGroupApi              *pKernelChannelGroupApi;
    KernelChannelGroup                 *pKernelChannelGroup;
    Device                             *pDevice      = NULL;
    RsResourceRef                      *pChanGrpRef;
    RsClient                           *pClient;
    NvHandle                            hDevice;
    NvHandle                            hClient      = pParams->hClient;
    NvHandle                            hVASpace     = 0;
    NV_CTXSHARE_ALLOCATION_PARAMETERS  *pUserParams  = pParams->pAllocParams;
    RsShared                           *pShared      = NULL;

    // To make a context share a child of a TSG, the TSG must already exist.
    if (CliGetChannelGroup(pParams->hClient, pParams->hParent,
            &pChanGrpRef, &hDevice) == NV_OK)
    {
        pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
                                             KernelChannelGroupApi);
        NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL,
                            NV_ERR_INVALID_STATE);
        pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
    }
    else
    {
        return NV_ERR_INVALID_OBJECT_HANDLE;
    }

    // Copy Constructor path
    if (RS_IS_COPY_CTOR(pParams))
    {
        rmStatus = kctxshareapiCopyConstruct_IMPL(pKernelCtxShareApi, pCallContext, pParams);
        return rmStatus;
    }

    rmStatus = serverGetClientUnderLock(&g_resServ, hClient, &pClient);
    if (rmStatus != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "Invalid client handle!\n");
        return NV_ERR_INVALID_ARGUMENT;
    }

    //
    // Depending on the VA mode, allocating a context share might require allocation
    // parameters that include a VA space handle. If multiple-VA-space mode is
    // enabled (no VA space under the device), a VA space handle is required.
    //
    // OPTIONAL_MULTIVA or SINGLE_VA modes: use the device VA space.
    //

    rmStatus = deviceGetByHandle(pClient, hDevice, &pDevice);
    if (rmStatus != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "Invalid parent/device handle!\n");
        return NV_ERR_INVALID_ARGUMENT;
    }

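    //
    // A client-supplied VA space handle is only expected when the device is not in
    // SINGLE_VASPACE mode; the assert below checks exactly that.
    //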
    hVASpace = pUserParams->hVASpace;
    NV_ASSERT((hVASpace == NV01_NULL_OBJECT) || (pDevice->vaMode != NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE));

    if (pKernelChannelGroup->bLegacyMode)
    {
        //
        // RM pre-allocates the kctxshares for legacy mode.
        // In this case, we use the parent TSG's pVAS rather than
        // the hVASpace param.
        //
        NV_PRINTF(LEVEL_INFO, "Constructing Legacy Context Share\n");
        NV_ASSERT(hVASpace == NV01_NULL_OBJECT);
        pVAS = pKernelChannelGroup->pVAS;

        pKernelCtxShareApi->hVASpace = pKernelChannelGroupApi->hVASpace;
    }
    else
    {
        NV_PRINTF(LEVEL_INFO, "Constructing Client Allocated Context Share\n");
        rmStatus = vaspaceGetByHandleOrDeviceDefault(pClient, hDevice, hVASpace, &pVAS);

        pKernelCtxShareApi->hVASpace = hVASpace;
    }

    NV_ASSERT_OR_RETURN((rmStatus == NV_OK), rmStatus);
    NV_ASSERT_OR_RETURN((pVAS != NULL), NV_ERR_INVALID_STATE);

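    //
    // Allocate the shared KernelCtxShare object. Its lifetime is reference counted,
    // so copies of this KernelCtxShareApi (see kctxshareapiCopyConstruct_IMPL) share
    // the same underlying data.
    //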
    NV_ASSERT_OK_OR_GOTO(rmStatus,
                         serverAllocShareWithHalspecParent(&g_resServ, classInfo(KernelCtxShare), &pShared, staticCast(pGpu, Object)),
                         failed);

    NV_ASSERT_OK_OR_GOTO(rmStatus,
                         kctxshareInitCommon(dynamicCast(pShared, KernelCtxShare),
                                             pKernelCtxShareApi,
                                             pGpu,
                                             pVAS,
                                             pUserParams->flags,
                                             &pUserParams->subctxId,
                                             pKernelChannelGroupApi),
                         failed);

    pKernelCtxShareApi->pShareData = dynamicCast(pShared, KernelCtxShare);

    if (hVASpace != NV01_NULL_OBJECT)
    {
        RsResourceRef *pVASpaceRef;
        rmStatus = clientGetResourceRef(pCallContext->pClient, hVASpace, &pVASpaceRef);
        if (rmStatus != NV_OK)
            goto failed;

        refAddDependant(pVASpaceRef, pCallContext->pResourceRef);
    }

    if (pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT)
    {
        RsResourceRef *pKernelGraphicsContextRef;
        rmStatus = clientGetResourceRef(pCallContext->pClient, pKernelChannelGroupApi->hKernelGraphicsContext, &pKernelGraphicsContextRef);
        if (rmStatus != NV_OK)
            goto failed;

        refAddDependant(pKernelGraphicsContextRef, pCallContext->pResourceRef);
    }

failed:
    if (rmStatus != NV_OK)
    {
        if (pShared)
        {
            serverFreeShare(&g_resServ, pShared);
        }
    }

    return rmStatus;
}

void
kctxshareapiDestruct_IMPL
(
    KernelCtxShareApi *pKernelCtxShareApi
)
{
    CALL_CONTEXT                *pCallContext;
    RS_RES_FREE_PARAMS_INTERNAL *pParams;
    OBJGPU                      *pGpu = GPU_RES_GET_GPU(pKernelCtxShareApi);
    KernelChannelGroupApi       *pKernelChannelGroupApi = NULL;
    KernelChannelGroup          *pKernelChannelGroup    = NULL;
    RsResourceRef               *pChanGrpRef;
    RsShared                    *pShared    = NULL;
    NvS32                        refcnt     = 0;

    resGetFreeParams(staticCast(pKernelCtxShareApi, RsResource), &pCallContext, &pParams);
    pChanGrpRef = pCallContext->pResourceRef->pParentRef;
    if (pChanGrpRef != NULL)
    {
        pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
                                             KernelChannelGroupApi);
        pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
    }

    NV_ASSERT(pKernelChannelGroup);

    if (pKernelCtxShareApi->pShareData != NULL)
    {
        NV_ASSERT(pKernelCtxShareApi->pShareData->pKernelChannelGroup ==
                  pKernelChannelGroup);

        NV_PRINTF(LEVEL_INFO, "KernelCtxShareApi Ptr: %p   ChanGrp: %p  !\n",
                  pKernelCtxShareApi, pKernelCtxShareApi->pShareData->pKernelChannelGroup);

        pShared = staticCast(pKernelCtxShareApi->pShareData, RsShared);
        refcnt  = serverGetShareRefCount(&g_resServ, pShared);

        NV_PRINTF(LEVEL_INFO, "kctxshareapiDestruct_IMPL called on KernelCtxShare %p with refcnt %d\n",
                  pShared, refcnt);

        NV_ASSERT_OR_RETURN_VOID(refcnt >= 1);

        if (refcnt > 1)
        {
            //
            // serverFreeShare will delete the object automatically if the count hits 0;
            // we'd still need it to free all underlying resources, however.
            // For this reason we only decrement here if no free is needed.
            //
            serverFreeShare(&g_resServ, pShared);

            NV_PRINTF(LEVEL_INFO, "kctxshareapiDestruct_IMPL: KernelCtxShare %p has %d references left\n",
                      pShared, refcnt-1);
        }
        else
        {
            NV_PRINTF(LEVEL_INFO, "kctxshareapiDestruct_IMPL: KernelCtxShare %p has no more references, destroying...\n",
                      pShared);

            pParams->status = kctxshareDestroyCommon(pKernelCtxShareApi->pShareData, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi);
            NV_ASSERT(pParams->status == NV_OK);

            serverFreeShare(&g_resServ, pShared);
        }
    }
}

NV_STATUS
kctxshareapiCopyConstruct_IMPL
(
    KernelCtxShareApi *pKernelCtxShareApi,
    CALL_CONTEXT *pCallContext,
    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
)
{
    NV_STATUS      rmStatus     = NV_OK;
    OBJGPU        *pGpu         = GPU_RES_GET_GPU(pKernelCtxShareApi);
    RsClient      *pDstClient   = pCallContext->pClient;
    RsResourceRef *pDstRef      = pCallContext->pResourceRef;
    RsResourceRef *pSrcRef      = pParams->pSrcRef;
    KernelCtxShareApi *pKernelCtxShareSrc = dynamicCast(pSrcRef->pResource, KernelCtxShareApi);
    KernelChannelGroupApi *pKernelChannelGroupApi;
    RS_ITERATOR    iter;
    RsResourceRef *pVaspaceRef  = NULL;
    VaSpaceApi    *pVaspaceApi  = NULL;
    RsResourceRef *pChanGrpRef  = pDstRef->pParentRef;

    pKernelCtxShareApi->pShareData = pKernelCtxShareSrc->pShareData;

    RsShared *pShared = staticCast(pKernelCtxShareApi->pShareData, RsShared);
    serverRefShare(&g_resServ, pShared);

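    //
    // Find the VaSpaceApi under the destination device that wraps the same OBJVASPACE
    // as the shared data and register the duplicated context share as its dependant.
    //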
    iter = serverutilRefIter(pDstClient->hClient, pDstRef->pParentRef->pParentRef->hResource, classId(VaSpaceApi), RS_ITERATE_DESCENDANTS, NV_TRUE);
    while (clientRefIterNext(iter.pClient, &iter))
    {
        pVaspaceRef = iter.pResourceRef;
        pVaspaceApi = dynamicCast(pVaspaceRef->pResource, VaSpaceApi);
        NV_ASSERT_OR_ELSE(pVaspaceApi != NULL, rmStatus = NV_ERR_INVALID_STATE; goto done);

        if (pVaspaceApi->pVASpace == pKernelCtxShareApi->pShareData->pVAS)
        {
            refAddDependant(pVaspaceRef, pDstRef);
            break;
        }
    }

    pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
                                         KernelChannelGroupApi);
    NV_ASSERT_OR_ELSE(pKernelChannelGroupApi != NULL,
                      rmStatus = NV_ERR_INVALID_STATE; goto done);

    if (pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT)
    {
        RsResourceRef *pKernelGraphicsContextRef;
        NV_ASSERT_OK_OR_ELSE(rmStatus,
                             clientGetResourceRef(pCallContext->pClient, pKernelChannelGroupApi->hKernelGraphicsContext, &pKernelGraphicsContextRef),
                             goto done);

        refAddDependant(pKernelGraphicsContextRef, pDstRef);
    }

    //
    // For legacy internal kctxshares, RPC is handled by the channelgroup object's copy ctor,
    // so we skip the automatic RPC here
    //
    if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) && !pKernelCtxShareApi->pShareData->pKernelChannelGroup->bLegacyMode)
    {
        NV_RM_RPC_DUP_OBJECT(pGpu, pDstClient->hClient, pDstRef->pParentRef->hResource, pDstRef->hResource,
                             pParams->pSrcClient->hClient, pSrcRef->hResource, 0,
                             NV_TRUE, // automatically issue RPC_FREE on object free
                             pDstRef, rmStatus);
    }

done:
    if (rmStatus != NV_OK)
    {
        serverFreeShare(&g_resServ, pShared);
    }

    return rmStatus;
}

NvBool
kctxshareapiCanCopy_IMPL
(
    KernelCtxShareApi *pKernelCtxShareApi
)
{
    return NV_TRUE;
}

NV_STATUS
kctxshareConstruct_IMPL
(
    KernelCtxShare *pKernelCtxShare
)
{
    return NV_OK;
}

/**
 * @brief Initializes a new context share tracking structure.
 *
 * To be called immediately after allocation; initializes a broadcast context share
 * object to what the client specified. Afterwards, the context share object is
 * inserted into the channel group's subcontext ID heap based on the flag provided.
 *
 * @param pKernelCtxShare
 * @param pKernelCtxShareApi
 * @param pGpu
 * @param[in] pVAS
 * @param[in] Flags
 * @param[in,out] pSubctxId
 * @param[in] pKernelChannelGroupApi
 */
NV_STATUS
kctxshareInitCommon_IMPL
(
    KernelCtxShare        *pKernelCtxShare,
    KernelCtxShareApi     *pKernelCtxShareApi,
    OBJGPU                *pGpu,
    OBJVASPACE            *pVAS,
    NvU32                  Flags,
    NvU32                 *pSubctxId,
    KernelChannelGroupApi *pKernelChannelGroupApi
)
{
    NV_STATUS           status                = NV_OK;
    NvU32               heapFlag              = 0;
    NvU64               offset                = 0;
    NvU64               size                  = 1;
    EMEMBLOCK          *pBlock;
    KernelChannelGroup *pKernelChannelGroup;

    NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE);
    pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
    NV_ASSERT(pKernelChannelGroup != NULL);
    NV_ASSERT(pVAS != NULL);

    // GPU lock must be held before calling this function
    LOCK_ASSERT_AND_RETURN(rmDeviceGpuLockIsOwner(pGpu->gpuInstance));

    //
    // For an external VAS, create the subcontext only after the SetPageDirectory() call is made.
    // This ensures that the new PDB is updated in every channel's subcontext array.
    // See Bug 1805222 comment #11 for more details.
    //
    if (!IsAMODEL(pGpu) && vaspaceIsExternallyOwned(pVAS))
    {
        SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
        if (vaspaceGetPageDirBase(pVAS, pGpu) == NULL)
        {
            NV_ASSERT(0);
            SLI_LOOP_RETURN(NV_ERR_INVALID_STATE);
        }
        SLI_LOOP_END
    }

    // If the flag is SYNC, allocate the context share from veId 0.
    if (Flags == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC)
    {
        heapFlag = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE;
        offset   = 0;
    }
    //
    // If the flag is ASYNC, we want to allocate a free block in reverse order.
    // This allocates a block between veId 1 and veId 63.
    // If no blocks are available between veId 1 and veId 63, use veId 0.
    //
    else if (Flags == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC)
    {
        heapFlag = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN;
    }
    else if (Flags == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SPECIFIED)
    {
        heapFlag = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE;
        offset   = *pSubctxId;
    }
    else
    {
        DBG_BREAKPOINT();
        return NV_ERR_INVALID_ARGUMENT;
    }

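    //
    // Reserve a single subcontext ID slot from the TSG's subctxId heap. heapFlag and
    // offset, set above, control whether the slot is fixed or picked top-down.
    //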
    status = pKernelChannelGroup->pSubctxIdHeap->eheapAlloc(
        pKernelChannelGroup->pSubctxIdHeap,
        SUBCTXID_EHEAP_OWNER,
        &heapFlag,
        &offset,
        &size,
        1,
        1,
        &pBlock,
        NULL,
        NULL);
    if (status != NV_OK)
    {
        return status;
    }

    pKernelCtxShare->pVAS                = pVAS;
    pKernelCtxShare->subctxId            = NvU64_LO32(offset);
    pKernelCtxShare->pKernelChannelGroup = pKernelChannelGroup;
    pKernelCtxShare->flags               = Flags;

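    //
    // Stash the KernelCtxShare in the heap block so kctxshareDestroyCommon can look up
    // which VA space each allocated subcontext slot is using.
    //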
    pBlock->pData = (void *)pKernelCtxShare;

    status = kctxshareInit_HAL(pKernelCtxShare, pKernelCtxShareApi, pGpu, pVAS,
                               pKernelChannelGroupApi, offset, pBlock);

    if (status != NV_OK)
    {
        goto done;
    }

// @todo Code under label "fail" should handle failure case
done:
    if (status == NV_OK)
    {
       *pSubctxId   = NvU64_LO32(offset);

        NV_PRINTF(LEVEL_INFO,
                  "New Context Share 0x%p allocated with id 0x%x\n",
                  pKernelCtxShare, NvU64_LO32(offset));
    }
    else
    {
        NV_STATUS tmpStatus;

        tmpStatus = pKernelChannelGroup->pSubctxIdHeap->eheapFree(
            pKernelChannelGroup->pSubctxIdHeap,
            offset);
        NV_ASSERT(tmpStatus == NV_OK);

        NV_PRINTF(LEVEL_INFO,
                  "Context Share 0x%p allocation with id 0x%x failed, status is %x\n",
                  pKernelCtxShare, NvU64_LO32(offset), status);
    }

    return status;
}

/**
 * @brief Frees a context share tracking structure if no references remain.
 *
 * This function should be used to free a kctxshare rather than freeing the object
 * directly using serverFreeShare or objDelete.
 * Frees child ENGINE_CTX_DESCRIPTORs but does not free any memory pointed at
 * by pPrivCtxData.  It is the responsibility of the user of that memory to
 * ensure it is freed before this function is called (or that another pointer
 * exists).
 *
 * @param[in] pKernelCtxShare
 * @param[in] pKernelCtxShareApi
 * @param pGpu
 * @param pKernelChannelGroupApi
 */
NV_STATUS
kctxshareDestroyCommon_IMPL
(
    KernelCtxShare *pKernelCtxShare,
    KernelCtxShareApi *pKernelCtxShareApi,
    OBJGPU *pGpu,
    KernelChannelGroupApi *pKernelChannelGroupApi
)
{
    NV_STATUS               status = NV_OK;
    NvU32                   subctxId;
    NvU32                   i;
    KernelChannelGroup     *pKernelChannelGroup;
    NvU64                   numMax = 0;
    NvBool                  bRelease = NV_TRUE;
    RsShared               *pShared = NULL;
    NvS32                   refcnt = 0;

    NV_ASSERT_OR_RETURN(pKernelCtxShare != NULL, NV_ERR_INVALID_STATE);

    // This function should only be called on the last free of the object
    pShared = staticCast(pKernelCtxShare, RsShared);
    refcnt  = serverGetShareRefCount(&g_resServ, pShared);
    NV_ASSERT_OR_RETURN(refcnt == 1, NV_ERR_INVALID_STATE);

    // GPU lock must be held before calling this function
    LOCK_ASSERT_AND_RETURN(rmDeviceGpuLockIsOwner(pGpu->gpuInstance));

    pKernelChannelGroup = pKernelCtxShare->pKernelChannelGroup;
    NV_ASSERT(pKernelChannelGroup == pKernelChannelGroupApi->pKernelChannelGroup);
    subctxId = pKernelCtxShare->subctxId;

    //
    // Handle the case when VAS is shared by subcontexts.
    // Release the shared resources only when the last subcontext using this VAS is freed.
    //
    status = pKernelChannelGroup->pSubctxIdHeap->eheapGetSize(
        pKernelChannelGroup->pSubctxIdHeap,
        &numMax);
    NV_ASSERT(status == NV_OK);

    for (i = 0; i < numMax; i++)
    {
        if (i == pKernelCtxShare->subctxId)
        {
            continue;
        }

        EMEMBLOCK *pBlock = pKernelChannelGroup->pSubctxIdHeap->eheapGetBlock(
            pKernelChannelGroup->pSubctxIdHeap,
            i,
            NV_FALSE);
        if (pBlock)
        {
            OBJVASPACE *pSubctxVAS = ((KernelCtxShare *)pBlock->pData)->pVAS;
            if (pSubctxVAS == pKernelCtxShare->pVAS)
            {
                bRelease = NV_FALSE;
                break;
            }
        }
    }

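    // bRelease stays NV_TRUE only if no other allocated subcontext in this TSG still uses this VAS.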
    status = kctxshareDestroy_HAL(pKernelCtxShare, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi, bRelease);
    if (status != NV_OK)
    {
        goto fail;
    }

    status = pKernelChannelGroup->pSubctxIdHeap->eheapFree(
        pKernelChannelGroup->pSubctxIdHeap,
        subctxId);
    NV_ASSERT_OR_GOTO(status == NV_OK, fail);

fail:
    if (status == NV_OK)
    {
        NV_PRINTF(LEVEL_INFO, "Freed Context Share 0x%p with id 0x%x\n",
                  pKernelCtxShare, subctxId);
    }
    else
    {
        NV_PRINTF(LEVEL_INFO, "Failed to free Context Share 0x%p with id 0x%x\n",
                  pKernelCtxShare, subctxId);
    }

    return status;
}

void
kctxshareDestruct_IMPL
(
    KernelCtxShare *pKernelCtxShare
)
{
    //
    // Assert that kctxshareDestroyCommon was called to free kctxshare resources before
    // getting here by checking if subctxId has been freed from heap.
    // pKernelChannelGroup may not be set if kctxshare failed initialization.
    //
    if (pKernelCtxShare->pKernelChannelGroup != NULL)
    {
        EMEMBLOCK *pBlock =
            pKernelCtxShare->pKernelChannelGroup->pSubctxIdHeap->eheapGetBlock(
                pKernelCtxShare->pKernelChannelGroup->pSubctxIdHeap,
                pKernelCtxShare->subctxId,
                NV_FALSE);

        NV_ASSERT(pBlock == NULL);
    }
}