1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "kernel/gpu/fifo/kernel_channel_group_api.h"
25 
26 #include "kernel/core/locks.h"
27 #include "kernel/gpu/fifo/kernel_channel_group.h"
28 #include "kernel/gpu/mem_mgr/mem_mgr.h"
29 #include "kernel/gpu/gr/kernel_graphics.h"
30 #include "kernel/gpu/falcon/kernel_falcon.h"
31 
32 #include "class/cl0090.h" // KERNEL_GRAPHICS_CONTEXT
33 #include "class/cl9067.h" // FERMI_CONTEXT_SHARE_A
34 
35 #include "libraries/utils/nvprintf.h"
36 #include "gpu/gpu.h"
37 #include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
38 #include "gpu/mem_mgr/vaspace_api.h"
39 #include "vgpu/rpc.h"
40 #include "rmapi/rs_utils.h"
41 
42 NV_STATUS
43 kchangrpapiConstruct_IMPL
44 (
45     KernelChannelGroupApi        *pKernelChannelGroupApi,
46     CALL_CONTEXT                 *pCallContext,
47     RS_RES_ALLOC_PARAMS_INTERNAL *pParams
48 )
49 {
50     NvBool            bTsgAllocated     = NV_FALSE;
51     RsResourceRef    *pResourceRef      = pCallContext->pResourceRef;
52     NV_STATUS         rmStatus;
53     OBJVASPACE       *pVAS              = NULL;
54     OBJGPU           *pGpu              = GPU_RES_GET_GPU(pKernelChannelGroupApi);
55     KernelMIGManager *pKernelMIGManager = NULL;
56     KernelFifo       *pKernelFifo       = GPU_GET_KERNEL_FIFO(pGpu);
57     NvHandle          hVASpace          = NV01_NULL_OBJECT;
58     Device           *pDevice           = NULL;
59     NvU32             gfid              = GPU_GFID_PF;
60     RsShared         *pShared           = NULL;
61     RsClient         *pClient;
62     NvBool            bLockAcquired           = NV_FALSE;
63     Heap             *pHeap                   = GPU_GET_HEAP(pGpu);
64     NvBool            bMIGInUse               = NV_FALSE;
65     CTX_BUF_INFO     *bufInfoList             = NULL;
66     NvU32             bufCount                = 0;
67     NvBool            bReserveMem             = NV_FALSE;
68     MIG_INSTANCE_REF  ref;
69     RM_API           *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
70     KernelChannelGroup *pKernelChannelGroup = NULL;
71     NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS *pAllocParams = NULL;
72     RM_ENGINE_TYPE    rmEngineType;
73 
74     NV_PRINTF(LEVEL_INFO,
75               "hClient: 0x%x, hParent: 0x%x, hObject:0x%x, hClass: 0x%x\n",
76               pParams->hClient, pParams->hParent, pParams->hResource,
77               pParams->externalClassId);
78 
79     if (RS_IS_COPY_CTOR(pParams))
80     {
81         NV_ASSERT_OK_OR_GOTO(rmStatus,
82                              rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO),
83                              done);
84         bLockAcquired = NV_TRUE;
85         rmStatus = kchangrpapiCopyConstruct_IMPL(pKernelChannelGroupApi,
86                                                  pCallContext, pParams);
87         goto done;
88     }
89 
    //
    // Make sure this GPU is not already locked by this thread.
    // Ideally this thread shouldn't have locked any GPU in the system, but
    // checking this GPU is sufficient, since memory allocation from PMA
    // requires that the current GPU's lock not be held.
    //
96     if (rmDeviceGpuLockIsOwner(pGpu->gpuInstance))
97     {
98         NV_PRINTF(LEVEL_ERROR, "TSG alloc should be called without acquiring GPU lock\n");
99         LOCK_ASSERT_AND_RETURN(0);
100     }
101 
102     bufInfoList = portMemAllocNonPaged(NV_ENUM_SIZE(GR_CTX_BUFFER) * sizeof(*bufInfoList));
103     if (bufInfoList == NULL)
104     {
105         return NV_ERR_NO_MEMORY;
106     }
107 
108     // Acquire the lock *only after* PMA is done allocating.
109     NV_ASSERT_OK_OR_GOTO(rmStatus,
110                          rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO),
111                          done);
112     bLockAcquired = NV_TRUE;
113 
114     pAllocParams = pParams->pAllocParams;
115     hVASpace     = pAllocParams->hVASpace;
116 
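    //
    // The KernelChannelGroup itself is a shared object: KernelChannelGroupApi
    // objects created via copy-construct (dup) point at the same group and
    // take an additional reference on it (see kchangrpapiCopyConstruct_IMPL).
    //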
117     NV_ASSERT_OK_OR_GOTO(rmStatus,
118         serverAllocShareWithHalspecParent(&g_resServ, classInfo(KernelChannelGroup),
119                                           &pShared, staticCast(pGpu, Object)),
120         failed);
121 
122     pKernelChannelGroup = dynamicCast(pShared, KernelChannelGroup);
123     pKernelChannelGroupApi->pKernelChannelGroup = pKernelChannelGroup;
124 
125     if (!gpuIsClassSupported(pGpu, pResourceRef->externalClassId))
126     {
127         NV_PRINTF(LEVEL_ERROR, "class %x not supported\n",
128                   pResourceRef->externalClassId);
129         rmStatus = NV_ERR_NOT_SUPPORTED;
130         goto failed;
131     }
132 
133     pKernelChannelGroupApi->hVASpace = hVASpace;
134 
135     rmStatus = serverGetClientUnderLock(&g_resServ, pParams->hClient, &pClient);
136     if (rmStatus != NV_OK)
137     {
138         NV_PRINTF(LEVEL_ERROR, "Invalid client handle!\n");
139         rmStatus = NV_ERR_INVALID_ARGUMENT;
140         goto failed;
141     }
142 
143     rmStatus = deviceGetByHandle(pClient, pParams->hParent, &pDevice);
144     if (rmStatus != NV_OK)
145     {
146         NV_PRINTF(LEVEL_ERROR, "Invalid parent/device handle!\n");
147         rmStatus = NV_ERR_INVALID_ARGUMENT;
148         goto failed;
149     }
150 
151     pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
152     bMIGInUse = IS_MIG_IN_USE(pGpu);
153 
154     rmEngineType = gpuGetRmEngineType(pAllocParams->engineType);
155 
156     if (kfifoIsPerRunlistChramSupportedInHw(pKernelFifo))
157     {
158         if (!RM_ENGINE_TYPE_IS_VALID(rmEngineType))
159         {
160             NV_PRINTF(LEVEL_NOTICE, "Valid engine Id must be specified while allocating TSGs or bare channels!\n");
161             rmStatus = NV_ERR_INVALID_ARGUMENT;
162             goto failed;
163         }
164 
165         //
166         // If we have a separate channel RAM for each runlist then we need
167         // to determine runlistId from engineId passed by client. This
168         // runlistId is used to associate all future channels in this TSG to
169         // that runlist. Setting the engineType will cause the runlist
170         // corresponding to that engine to be chosen in
171         // kchangrpGetDefaultRunlist_HAL.
172         //
173         pKernelChannelGroup->engineType = rmEngineType;
174     }
175 
    //
    // If MIG is enabled, the client passes a logical engineId relative to its
    // own GPU instance. Convert this logical Id to a global (physical) engine
    // Id, since it is used to set the runlistId.
    //
181     if (bMIGInUse)
182     {
183         // Engine type must be valid for MIG
184         NV_CHECK_OR_ELSE(LEVEL_NOTICE, RM_ENGINE_TYPE_IS_VALID(pKernelChannelGroup->engineType),
185                          rmStatus = NV_ERR_INVALID_STATE; goto failed);
186 
187         NV_CHECK_OK_OR_GOTO(
188             rmStatus,
189             LEVEL_ERROR,
190             kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, pParams->hClient, &ref),
191             failed);
192 
193         NV_CHECK_OK_OR_GOTO(
194             rmStatus,
195             LEVEL_ERROR,
196             kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref,
197                                               rmEngineType,
198                                               &rmEngineType),
199             failed);
200 
201         // Rewrite the engineType with the global engine type
202         pKernelChannelGroup->engineType = rmEngineType;
203         pHeap = ref.pKernelMIGGpuInstance->pMemoryPartitionHeap;
204     }
205 
    if ((pDevice->vaMode != NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES) || (hVASpace != 0))
207     {
208         NV_ASSERT_OK_OR_GOTO(rmStatus,
209             vaspaceGetByHandleOrDeviceDefault(pClient, pParams->hParent, hVASpace, &pVAS),
210             failed);
211 
212         if (pVAS == NULL)
213         {
214             rmStatus = NV_ERR_INVALID_STATE;
215             goto failed;
216         }
217     }
218 
219 
    // The vGPU plugin context flag should only be set on the host when the calling context is the plugin
221     if (gpuIsSriovEnabled(pGpu))
222         pKernelChannelGroup->bIsCallingContextVgpuPlugin = pAllocParams->bIsCallingContextVgpuPlugin;
223 
    if (pKernelChannelGroup->bIsCallingContextVgpuPlugin)
    {
        gfid = GPU_GFID_PF;
    }
    else
    {
        NV_ASSERT_OK_OR_GOTO(rmStatus, vgpuGetCallingContextGfid(pGpu, &gfid), failed);
    }
230 
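    //
    // Set up the context buffer pools backed by the heap selected above (the
    // MIG instance's heap when MIG is active). Pool creation is skipped when
    // RM itself runs on the GSP platform.
    //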
231     if (!RMCFG_FEATURE_PLATFORM_GSP)
232     {
233         NV_ASSERT_OK_OR_GOTO(rmStatus,
234             ctxBufPoolInit(pGpu, pHeap, &pKernelChannelGroup->pCtxBufPool),
235             failed);
236 
237         NV_ASSERT_OK_OR_GOTO(rmStatus,
238             ctxBufPoolInit(pGpu, pHeap, &pKernelChannelGroup->pChannelBufPool),
239             failed);
240     }
241 
242     NV_ASSERT_OK_OR_GOTO(rmStatus,
243                          kchangrpInit(pGpu, pKernelChannelGroup, pVAS, gfid),
244                          failed);
245     bTsgAllocated = NV_TRUE;
246 
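    //
    // Legacy kctxshare handles start out unset; kchangrpapiSetLegacyMode
    // allocates them later if this TSG is placed in legacy mode.
    //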
247     pKernelChannelGroupApi->hLegacykCtxShareSync  = 0;
248     pKernelChannelGroupApi->hLegacykCtxShareAsync = 0;
249 
250     if (hVASpace != 0)
251     {
252         RsResourceRef *pVASpaceRef;
253         rmStatus = clientGetResourceRef(pCallContext->pClient, hVASpace, &pVASpaceRef);
254         NV_ASSERT(rmStatus == NV_OK);
255         if (rmStatus == NV_OK)
256             refAddDependant(pVASpaceRef, pResourceRef);
257     }
258 
259     pKernelChannelGroupApi->hErrorContext    = pAllocParams->hObjectError;
260     pKernelChannelGroupApi->hEccErrorContext = pAllocParams->hObjectEccError;
261 
262     // Default interleave level
263     NV_ASSERT_OK_OR_GOTO(
264         rmStatus,
265         kchangrpSetInterleaveLevel(pGpu, pKernelChannelGroup,
266                                    NVA06C_CTRL_INTERLEAVE_LEVEL_MEDIUM),
267         failed);
268 
    //
    // If ctx buf pools are enabled, filter out partitionable engines
    // that aren't part of our instance.
    //
    // Memory needs to be reserved in the pool only for the buffers of
    // engines that belong to the instance.
    //
276     if (pKernelChannelGroup->pCtxBufPool != NULL &&
277         kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, pKernelChannelGroup->engineType, ref))
278     {
279         // GR Buffers
280         if (RM_ENGINE_TYPE_IS_GR(pKernelChannelGroup->engineType))
281         {
282             KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, RM_ENGINE_TYPE_GR_IDX(pKernelChannelGroup->engineType));
283             NvU32 bufId;
284             portMemSet(&bufInfoList[0], 0, sizeof(CTX_BUF_INFO) * NV_ENUM_SIZE(GR_CTX_BUFFER));
285             bufCount = 0;
286             FOR_EACH_IN_ENUM(GR_CTX_BUFFER, bufId)
287             {
288                 // TODO expose engine class capabilities to kernel RM
289                 if (kgrmgrIsCtxBufSupported(bufId, !IS_MIG_ENABLED(pGpu)))
290                 {
291                     const CTX_BUF_INFO *pBufInfo = kgraphicsGetCtxBufferInfo(pGpu, pKernelGraphics, bufId);
292                     bufInfoList[bufCount] = *pBufInfo;
293                     NV_PRINTF(LEVEL_INFO, "Reserving 0x%llx bytes for GR ctx bufId = %d\n",
294                                   bufInfoList[bufCount].size, bufId);
295                     bufCount++;
296                 }
297             }
298             FOR_EACH_IN_ENUM_END;
299             bReserveMem = NV_TRUE;
300         }
301         else
302         {
            // Reserve memory for the falcon context buffer if the engine has a KernelFalcon object
304             NvU32 ctxBufferSize;
305             if (IS_GSP_CLIENT(pGpu))
306             {
307                 ENGDESCRIPTOR engDesc;
308                 KernelFalcon *pKernelFalcon = NULL;
309 
310                 NV_ASSERT_OK_OR_GOTO(rmStatus,
311                     gpuXlateClientEngineIdToEngDesc(pGpu,
312                                                     pKernelChannelGroup->engineType,
313                                                     &engDesc),
314                     failed);
315 
316                 pKernelFalcon = kflcnGetKernelFalconForEngine(pGpu, engDesc);
317                 if (pKernelFalcon != NULL)
318                 {
319                     ctxBufferSize = pKernelFalcon->ctxBufferSize;
320                     bReserveMem = NV_TRUE;
321                 }
322             }
323 
324             if (bReserveMem)
325             {
326                 bufInfoList[0].size  = ctxBufferSize;
327                 bufInfoList[0].align = RM_PAGE_SIZE;
328                 bufInfoList[0].attr  = RM_ATTR_PAGE_SIZE_4KB;
329                 bufInfoList[0].bContig = NV_TRUE;
330                 NV_PRINTF(LEVEL_INFO, "Reserving 0x%llx bytes for engineType %d (%d) flcn ctx buffer\n",
331                               bufInfoList[0].size, gpuGetNv2080EngineType(pKernelChannelGroup->engineType),
332                               pKernelChannelGroup->engineType);
333                 bufCount++;
334             }
335             else
336             {
337                 NV_PRINTF(LEVEL_INFO, "No buffer reserved for engineType %d (%d) in ctx_buf_pool\n",
338                                   gpuGetNv2080EngineType(pKernelChannelGroup->engineType),
339                                   pKernelChannelGroup->engineType);
340             }
341         }
342     }
343 
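    //
    // Allocate the internal KERNEL_GRAPHICS_CONTEXT object. With MIG enabled
    // this is limited to GR engines; it is skipped entirely on T234D.
    //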
344     if ((!bMIGInUse || RM_ENGINE_TYPE_IS_GR(pKernelChannelGroup->engineType))
345         && !IsT234D(pGpu))
346     {
347         NV_ASSERT_OK_OR_GOTO(rmStatus,
348             pRmApi->AllocWithSecInfo(pRmApi,
349                 pParams->hClient,
350                 RES_GET_HANDLE(pKernelChannelGroupApi),
351                 &pKernelChannelGroupApi->hKernelGraphicsContext,
352                 KERNEL_GRAPHICS_CONTEXT,
353                 NvP64_NULL,
354                 RMAPI_ALLOC_FLAGS_SKIP_RPC,
355                 NvP64_NULL,
356                 &pRmApi->defaultSecInfo),
357             failed);
358     }
359 
360     NV_PRINTF(LEVEL_INFO, "Adding group Id: %d hClient:0x%x\n",
361               pKernelChannelGroup->grpID, pParams->hClient);
362 
363     if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) &&
364         !(pParams->allocFlags & RMAPI_ALLOC_FLAGS_SKIP_RPC))
365     {
366         NV_RM_RPC_ALLOC_OBJECT(pGpu,
367                                pParams->hClient,
368                                pParams->hParent,
369                                pParams->hResource,
370                                pParams->externalClassId,
371                                pAllocParams,
372                                rmStatus);
        //
        // Make sure that a corresponding RPC occurs when freeing the
        // KernelChannelGroupApi. Resource server checks this variable during
        // free and ignores any RPC flags set in resource_list.h
        //
378         staticCast(pKernelChannelGroupApi, RmResource)->bRpcFree = NV_TRUE;
379 
380         if (rmStatus != NV_OK)
381         {
382             NV_PRINTF(LEVEL_ERROR,
383                       "KernelChannelGroupApi alloc RPC to vGpu Host failed\n");
384             goto failed;
385         }
386 
387         if (IS_VIRTUAL_WITH_FULL_SRIOV(pGpu) || IS_GSP_CLIENT(pGpu))
388         {
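            //
            // Describe the per-runqueue fault method buffers allocated here to
            // the host / GSP RM via
            // NVA06C_CTRL_CMD_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS below.
            //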
            NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS params = {0};
391             NvU32 runqueueIdx;
392             NvU32 maxRunqueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo);
393 
394             for (runqueueIdx = 0; runqueueIdx < maxRunqueues; ++runqueueIdx)
395             {
396                 MEMORY_DESCRIPTOR          *pSrcMemDesc;
397                 HW_ENG_FAULT_METHOD_BUFFER *pMthdBuffer;
398                 pMthdBuffer = &pKernelChannelGroup->pMthdBuffers[runqueueIdx];
399                 pSrcMemDesc = pMthdBuffer->pMemDesc;
400 
401                 params.methodBufferMemdesc[runqueueIdx].size = (
402                     pSrcMemDesc->Size);
403                 params.methodBufferMemdesc[runqueueIdx].addressSpace = (
404                     memdescGetAddressSpace(pSrcMemDesc));
405                 params.methodBufferMemdesc[runqueueIdx].cpuCacheAttrib = (
406                     memdescGetCpuCacheAttrib(pSrcMemDesc));
407                 params.methodBufferMemdesc[runqueueIdx].alignment = 1;
408 
409                 if (IS_VIRTUAL_WITH_FULL_SRIOV(pGpu))
410                 {
411                     params.bar2Addr[runqueueIdx] = pMthdBuffer->bar2Addr;
412                     params.methodBufferMemdesc[runqueueIdx].base = (
413                         memdescGetPhysAddr(pSrcMemDesc, AT_CPU, 0));
414                 }
415                 else
416                 {
417                     //
418                     // The case of both vGpu full SRIOV + GSP_CLIENT host is not
419                     // supported. This else branch considers the case of
420                     // GSP_CLIENT only without vGpu.
421                     //
422                     params.methodBufferMemdesc[runqueueIdx].base = (
423                         memdescGetPhysAddr(pSrcMemDesc, AT_GPU, 0));
424                 }
425             }
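            // The loop always runs to completion, so runqueueIdx == maxRunqueues here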
426             params.numValidEntries = runqueueIdx;
427 
428             rmStatus = pRmApi->Control(pRmApi,
429                 pParams->hClient,
430                 RES_GET_HANDLE(pKernelChannelGroupApi),
431                 NVA06C_CTRL_CMD_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS,
432                 &params,
                sizeof(params));
434 
435             if (rmStatus != NV_OK)
436             {
437                 NV_PRINTF(LEVEL_ERROR,
438                     "Control call to update method buffer memdesc failed\n");
439                 goto failed;
440             }
441         }
442     }
443 
444     if (kfifoIsZombieSubctxWarEnabled(pKernelFifo))
445     {
446         kchangrpSetSubcontextZombieState_HAL(pGpu, pKernelChannelGroup, 0, NV_TRUE);
447         kchangrpUpdateSubcontextMask_HAL(pGpu, pKernelChannelGroup, 0, NV_TRUE);
448     }
449 
    // Initialize apiObjList with the original client's KernelChannelGroupApi object
451     listInit(&pKernelChannelGroup->apiObjList, portMemAllocatorGetGlobalNonPaged());
452 
453     if (listAppendValue(&pKernelChannelGroup->apiObjList, &pKernelChannelGroupApi) == NULL)
454     {
455         rmStatus = NV_ERR_INSUFFICIENT_RESOURCES;
456         listClear(&pKernelChannelGroup->apiObjList);
457         goto failed;
458     }
459 
460 failed:
461     if (rmStatus != NV_OK)
462     {
463         if (pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT)
464         {
465             pRmApi->Free(pRmApi, pParams->hClient,
466                          pKernelChannelGroupApi->hKernelGraphicsContext);
467         }
468 
469         if (pKernelChannelGroup != NULL)
470         {
471             if (bTsgAllocated)
472                 kchangrpDestroy(pGpu, pKernelChannelGroup);
473 
474             if (pKernelChannelGroup->pCtxBufPool != NULL)
475                 ctxBufPoolDestroy(&pKernelChannelGroup->pCtxBufPool);
476 
477             if (pKernelChannelGroup->pChannelBufPool != NULL)
478                 ctxBufPoolDestroy(&pKernelChannelGroup->pChannelBufPool);
479 
480         }
481 
482         if (pShared)
483             serverFreeShare(&g_resServ, pShared);
484     }
485 
486 done:
487 
488     if (bLockAcquired)
489         rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
490 
491     if (bReserveMem)
492     {
493         // GPU lock should not be held when reserving memory for ctxBufPool
494         NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
495             ctxBufPoolReserve(pGpu, pKernelChannelGroup->pCtxBufPool, bufInfoList, bufCount));
496     }
497 
498     portMemFree(bufInfoList);
499 
500     return rmStatus;
501 }
502 
503 NV_STATUS
504 kchangrpapiControl_IMPL
505 (
506     KernelChannelGroupApi          *pKernelChannelGroupApi,
507     CALL_CONTEXT                   *pCallContext,
508     RS_RES_CONTROL_PARAMS_INTERNAL *pParams
509 )
510 {
511     RsResourceRef *pResourceRef = RES_GET_REF(pKernelChannelGroupApi);
512 
513     (void)pResourceRef;
514     NV_PRINTF(LEVEL_INFO, "grpID 0x%x handle 0x%x cmd 0x%x\n",
515               pKernelChannelGroupApi->pKernelChannelGroup->grpID,
516               pResourceRef->hResource, pParams->pLegacyParams->cmd);
517 
518     return gpuresControl_IMPL(staticCast(pKernelChannelGroupApi, GpuResource),
519                               pCallContext, pParams);
520 }
521 
522 void
523 kchangrpapiDestruct_IMPL
524 (
525     KernelChannelGroupApi *pKernelChannelGroupApi
526 )
527 {
528     CALL_CONTEXT           *pCallContext;
529     RS_RES_FREE_PARAMS_INTERNAL *pParams;
530     RsResourceRef          *pResourceRef;
531     RsClient               *pClient;
532     KernelChannelGroup *pKernelChannelGroup =
533         pKernelChannelGroupApi->pKernelChannelGroup;
534     OBJGPU                 *pGpu = GPU_RES_GET_GPU(pKernelChannelGroupApi);
535     NV_STATUS               rmStatus = NV_OK;
536     RS_ORDERED_ITERATOR     it;
537     RsShared               *pShared = staticCast(pKernelChannelGroup, RsShared);
538     RM_API                 *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
539 
540     resGetFreeParams(staticCast(pKernelChannelGroupApi, RsResource),
541                      &pCallContext, &pParams);
542     pResourceRef = pCallContext->pResourceRef;
543     pClient = pCallContext->pClient;
544 
545     NV_PRINTF(LEVEL_INFO, "\n");
546 
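    //
    // Only tear down the underlying KernelChannelGroup when this is the last
    // KernelChannelGroupApi referencing it; otherwise just drop this object
    // from the shared group's apiObjList.
    //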
547     // RS-TODO should still free channels?
548     if (serverGetShareRefCount(&g_resServ, pShared) > 1)
549     {
550         // Remove this kchangrpapi object from the list of owners in the shared object
551         listRemoveFirstByValue(&pKernelChannelGroupApi->pKernelChannelGroup->apiObjList, &pKernelChannelGroupApi);
552         goto done;
553     }
554 
555     if (pKernelChannelGroup != NULL)
556         kchangrpSetRealtime_HAL(pGpu, pKernelChannelGroup, NV_FALSE);
557 
558     // If channels still exist in this group, free them
559     // RS-TODO this can be removed after re-parenting support is added
560     it = kchannelGetIter(pClient, pResourceRef);
561     while (clientRefOrderedIterNext(pClient, &it))
562     {
563         NV_STATUS tmpStatus;
564 
565         tmpStatus = pRmApi->Free(pRmApi, pClient->hClient, it.pResourceRef->hResource);
566         if ((tmpStatus != NV_OK) && (rmStatus == NV_OK))
567             rmStatus = tmpStatus;
568     }
569 
570     NV_ASSERT(rmStatus == NV_OK);
571 
572     if (pKernelChannelGroup != NULL)
573     {
574         kchangrpDestroy(pGpu, pKernelChannelGroup);
575 
576         if (pKernelChannelGroup->pCtxBufPool != NULL)
577         {
578             ctxBufPoolRelease(pKernelChannelGroup->pCtxBufPool);
579             ctxBufPoolDestroy(&pKernelChannelGroup->pCtxBufPool);
580         }
581 
582         if (pKernelChannelGroup->pChannelBufPool != NULL)
583         {
584             ctxBufPoolRelease(pKernelChannelGroup->pChannelBufPool);
585             ctxBufPoolDestroy(&pKernelChannelGroup->pChannelBufPool);
586         }
587 
588         listClear(&pKernelChannelGroup->apiObjList);
589     }
590 
591 done:
592     serverFreeShare(&g_resServ, pShared);
593 
594     pParams->status = rmStatus;
595 }
596 
597 NV_STATUS
598 kchangrpapiCopyConstruct_IMPL
599 (
600     KernelChannelGroupApi        *pKernelChannelGroupApi,
601     CALL_CONTEXT                 *pCallContext,
602     RS_RES_ALLOC_PARAMS_INTERNAL *pParams
603 )
604 {
605     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
606     RsClient *pDstClient = pCallContext->pClient;
607     RsResourceRef *pDstRef = pCallContext->pResourceRef;
608     RsResourceRef *pSrcRef = pParams->pSrcRef;
609     KernelChannelGroupApi *pChanGrpSrc = dynamicCast(pSrcRef->pResource,
610                                                      KernelChannelGroupApi);
611     RS_ITERATOR iter;
612     OBJGPU       *pGpu   = GPU_RES_GET_GPU(pKernelChannelGroupApi);
613     NV_STATUS     status = NV_OK;
614     RsResourceRef *pVaspaceRef = NULL;
615     VaSpaceApi *pVaspaceApi = NULL;
616 
617     pKernelChannelGroupApi->hKernelGraphicsContext  = NV01_NULL_OBJECT;
618     pKernelChannelGroupApi->hLegacykCtxShareSync    = NV01_NULL_OBJECT;
619     pKernelChannelGroupApi->hLegacykCtxShareAsync   = NV01_NULL_OBJECT;
620 
621     pKernelChannelGroupApi->pKernelChannelGroup =
622         pChanGrpSrc->pKernelChannelGroup;
623     serverRefShare(&g_resServ,
624         staticCast(pKernelChannelGroupApi->pKernelChannelGroup, RsShared));
625 
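    //
    // Find the destination client's VaSpaceApi that wraps the group's VAS and
    // register the duped object as a dependant of that VAS ref.
    //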
626     iter =  serverutilRefIter(pDstClient->hClient, pDstRef->pParentRef->hResource, classId(VaSpaceApi), RS_ITERATE_DESCENDANTS, NV_TRUE);
627     while (clientRefIterNext(iter.pClient, &iter))
628     {
629         pVaspaceRef = iter.pResourceRef;
630         pVaspaceApi = dynamicCast(pVaspaceRef->pResource, VaSpaceApi);
631         NV_ASSERT_OR_RETURN(pVaspaceApi != NULL, NV_ERR_INVALID_STATE);
632 
633         if (pVaspaceApi->pVASpace ==
634             pKernelChannelGroupApi->pKernelChannelGroup->pVAS)
635         {
636             refAddDependant(pVaspaceRef, pDstRef);
637             break;
638         }
639     }
640 
641     if (pChanGrpSrc->hKernelGraphicsContext != NV01_NULL_OBJECT)
642     {
643         NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
644             pRmApi->DupObject(pRmApi,
645                               pDstClient->hClient,
646                               pDstRef->hResource,
647                               &pKernelChannelGroupApi->hKernelGraphicsContext,
648                               pParams->pSrcClient->hClient,
649                               pChanGrpSrc->hKernelGraphicsContext,
650                               0),
651             fail);
652     }
653 
    //
    // If this channel group is in legacy mode, the new client needs its own
    // handles to the internally allocated sync and async kctxshares
    //
658     if (pChanGrpSrc->pKernelChannelGroup->bLegacyMode)
659     {
660         NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
661             pRmApi->DupObject(pRmApi,
662                               pDstClient->hClient,
663                               pDstRef->hResource,
664                               &pKernelChannelGroupApi->hLegacykCtxShareSync,
665                               pParams->pSrcClient->hClient,
666                               pChanGrpSrc->hLegacykCtxShareSync,
667                               0),
668             fail);
669 
        // All chips have a SYNC kctxshare; some chips won't have an ASYNC kctxshare
671         if (pChanGrpSrc->hLegacykCtxShareAsync != 0)
672         {
673             NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
674                 pRmApi->DupObject(pRmApi,
675                                   pDstClient->hClient,
676                                   pDstRef->hResource,
677                                   &pKernelChannelGroupApi->hLegacykCtxShareAsync,
678                                   pParams->pSrcClient->hClient,
679                                   pChanGrpSrc->hLegacykCtxShareAsync,
680                                   0),
681             fail);
682         }
683     }
684 
685     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
686     {
687         NV_RM_RPC_DUP_OBJECT(pGpu, pDstClient->hClient, pDstRef->pParentRef->hResource, pDstRef->hResource,
688                              pParams->pSrcClient->hClient, pSrcRef->hResource, 0,
689                              NV_TRUE, // automatically issue RPC_FREE on object free
690                              pDstRef, status);
691 
692         if (status != NV_OK)
693             goto fail;
694     }
695 
696     if (listAppendValue(&pKernelChannelGroupApi->pKernelChannelGroup->apiObjList, &pKernelChannelGroupApi) == NULL)
697     {
698         status = NV_ERR_INSUFFICIENT_RESOURCES;
699         goto fail;
700     }
701 
702     return status;
703 
704 fail:
705     if (pKernelChannelGroupApi->hLegacykCtxShareAsync != NV01_NULL_OBJECT)
706     {
707         pRmApi->Free(pRmApi, pDstClient->hClient,
708                      pKernelChannelGroupApi->hLegacykCtxShareAsync);
709     }
710     if (pKernelChannelGroupApi->hLegacykCtxShareSync != NV01_NULL_OBJECT)
711     {
712         pRmApi->Free(pRmApi, pDstClient->hClient,
713                      pKernelChannelGroupApi->hLegacykCtxShareSync);
714     }
715     if (pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT)
716     {
717         pRmApi->Free(pRmApi, pDstClient->hClient,
718                      pKernelChannelGroupApi->hKernelGraphicsContext);
719     }
720 
721     serverFreeShare(&g_resServ,
722         staticCast(pKernelChannelGroupApi->pKernelChannelGroup, RsShared));
723 
724     return status;
725 }
726 
727 NvBool
728 kchangrpapiCanCopy_IMPL
729 (
730     KernelChannelGroupApi *pKernelChannelGroupApi
731 )
732 {
733     return NV_TRUE;
734 }
735 
736 NV_STATUS
737 CliGetChannelGroup
738 (
739     NvHandle                 hClient,
740     NvHandle                 hChanGrp,
741     RsResourceRef          **ppChanGrpRef,
742     NvHandle                *phDevice
743 )
744 {
745     NV_STATUS status;
746     RsClient *pRsClient;
747     RsResourceRef *pResourceRef;
748     RsResourceRef *pParentRef;
749 
750     if (!ppChanGrpRef)
751     {
752         return NV_ERR_INVALID_ARGUMENT;
753     }
754 
755     status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient);
756     NV_ASSERT(status == NV_OK);
757     if (status != NV_OK)
758         return status;
759 
760     status = clientGetResourceRefByType(pRsClient, hChanGrp,
761                                         classId(KernelChannelGroupApi),
762                                         &pResourceRef);
763     if (status != NV_OK)
764         return status;
765 
766     *ppChanGrpRef = pResourceRef;
767 
768     if (phDevice)
769     {
770         pParentRef = pResourceRef->pParentRef;
771         *phDevice = pParentRef->hResource;
772     }
773 
774     return NV_OK;
775 }
776 
777 /*!
778  * @brief Use TSG in legacy mode
779  *
780  * In legacy mode, RM pre-allocates the subcontexts in a TSG.
781  * This is needed for the following reasons:
782  *
 *  1. Subcontexts are also used to represent TSG contexts on pre-VOLTA chips (see below),
 *     but RM clients haven't yet moved to the subcontext model in production code,
 *     so RM implicitly creates the subcontexts for them until they make the switch.
786  *
787  *  2. Pre-VOLTA, we only support one address space in a TSG.
788  *     Preallocating the subcontext prevents accidental use of multiple address spaces within a TSG.
789  *     So we use the vaspace specified/implied at TSG creation to create the subcontexts.
790  *
 *  3. Tests and clients on VOLTA that don't explicitly specify subcontexts need to behave
 *     similarly to previous chips until they allocate the kctxshares themselves.
793  *
794  *  Legacy subcontexts are interpreted in the following ways:
795  *
796  *     VOLTA+            : subcontext 0 is VEID 0, subcontext 1 is VEID 1
797  *     GM20X thru PASCAL : subcontext 0 is SCG type 0, subcontext 1 is SCG type 1
798  *     pre-GM20X         : just a single subcontext 0; no SCG or VEIDs attached to it.
799  *
800  * @param[in] pKernelChannelGroupApi Channel group pointer
801  * @param[in] pGpu                   GPU object pointer
802  * @param[in] pKernelFifo            FIFO object pointer
803  * @param[in] hClient                Client handle
804  *
805  */
806 NV_STATUS
807 kchangrpapiSetLegacyMode_IMPL
808 (
809     KernelChannelGroupApi *pKernelChannelGroupApi,
810     OBJGPU                *pGpu,
811     KernelFifo            *pKernelFifo,
812     NvHandle               hClient
813 )
814 {
815     KernelChannelGroup *pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
816     NvHandle hTsg = RES_GET_HANDLE(pKernelChannelGroupApi);
817     NvHandle hkCtxShare = 0;
818     NV_STATUS status = NV_OK;
819     NvU32 maxSubctx = 0;
820     NvU64 numMax = 0;
821     NvU64 numFree = 0;
822     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
823     KernelChannelGroupApiListIter it;
824 
825     NV_CTXSHARE_ALLOCATION_PARAMETERS kctxshareParams = { 0 };
826 
827     ct_assert(NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC == 0);
828     ct_assert(NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC == 1);
829 
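    //
    // The SYNC/ASYNC allocation flags double as the expected subcontext IDs
    // (0 and 1); the ct_asserts above let the subctxId checks below compare
    // directly against these flags.
    //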
830     NV_ASSERT_OK(pKernelChannelGroup->pSubctxIdHeap->eheapGetSize(
831         pKernelChannelGroup->pSubctxIdHeap,
832         &numMax));
833 
834     NV_ASSERT_OK(pKernelChannelGroup->pSubctxIdHeap->eheapGetFree(
835         pKernelChannelGroup->pSubctxIdHeap,
836         &numFree));
837 
838     NV_ASSERT(numMax ==
839               kfifoChannelGroupGetLocalMaxSubcontext_HAL(pGpu, pKernelFifo,
840                                                          pKernelChannelGroup,
841                                                          NV_FALSE));
842 
843     NV_ASSERT_OR_RETURN(numMax == numFree && numMax != 0, NV_ERR_INVALID_STATE);
844 
845     pKernelChannelGroup->pSubctxIdHeap->eheapDestruct(
846         pKernelChannelGroup->pSubctxIdHeap);
847     //
848     // There should only be 1 (SYNC) or 2 legacy kctxshares (SYNC + ASYNC),
849     // depending on chip
850     //
851     maxSubctx = kfifoChannelGroupGetLocalMaxSubcontext_HAL(pGpu, pKernelFifo,
852                                                            pKernelChannelGroup,
853                                                            NV_TRUE);
854     NV_ASSERT_OR_RETURN(numMax == numFree, NV_ERR_INVALID_STATE);
855     NV_ASSERT(maxSubctx == 1 || maxSubctx == 2);
856 
857     constructObjEHeap(pKernelChannelGroup->pSubctxIdHeap,
858                       0, maxSubctx, sizeof(KernelCtxShare *), 0);
859 
860     pKernelChannelGroup->bLegacyMode = NV_TRUE;
861 
862     // Allocate SYNC
863     hkCtxShare = 0;
864     kctxshareParams.hVASpace = 0;
865     kctxshareParams.flags    = NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC;
866     kctxshareParams.subctxId = 0xFFFFFFFF;
867 
868     NV_ASSERT_OK_OR_GOTO(status,
869                          pRmApi->AllocWithSecInfo(pRmApi,
870                                                   hClient,
871                                                   hTsg,
872                                                   &hkCtxShare,
873                                                   FERMI_CONTEXT_SHARE_A,
874                                                   NV_PTR_TO_NvP64(&kctxshareParams),
875                                                   RMAPI_ALLOC_FLAGS_SKIP_RPC,
876                                                   NvP64_NULL,
877                                                   &pRmApi->defaultSecInfo),
878                          fail);
879 
880     NV_ASSERT(kctxshareParams.subctxId == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC);
881 
882     pKernelChannelGroupApi->hLegacykCtxShareSync = hkCtxShare;
883 
    if (maxSubctx == 2)
885     {
886         // Allocate ASYNC
887         hkCtxShare = 0;
888         kctxshareParams.hVASpace = 0;
889         kctxshareParams.flags    = NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC;
890         kctxshareParams.subctxId = 0xFFFFFFFF;
891 
892         NV_ASSERT_OK_OR_GOTO(status,
893                              pRmApi->AllocWithSecInfo(pRmApi,
894                                                       hClient,
895                                                       hTsg,
896                                                       &hkCtxShare,
897                                                       FERMI_CONTEXT_SHARE_A,
898                                                       NV_PTR_TO_NvP64(&kctxshareParams),
899                                                       RMAPI_ALLOC_FLAGS_SKIP_RPC,
900                                                       NvP64_NULL,
901                                                       &pRmApi->defaultSecInfo),
902                              fail);
903 
904         NV_ASSERT(kctxshareParams.subctxId == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC);
905 
906         pKernelChannelGroupApi->hLegacykCtxShareAsync = hkCtxShare;
907     }
908 
909     NV_ASSERT_OK_OR_GOTO(status,
910                          pKernelChannelGroup->pSubctxIdHeap->eheapGetFree(
911                              pKernelChannelGroup->pSubctxIdHeap,
912                              &numFree),
913                          fail);
914 
915     NV_ASSERT_OR_GOTO(numFree == 0, fail);
916 
917     //
918     // If this channel group has been duped, we need to provide kctxshareApi handles to the
919     // other channelGroupApi objects that share this channel group since the handles will
920     // only work for a single client.
921     //
922     it = listIterAll(&pKernelChannelGroup->apiObjList);
923     while (listIterNext(&it))
924     {
925         KernelChannelGroupApi *pChanGrpDest = *it.pValue;
926 
        if (pChanGrpDest == pKernelChannelGroupApi)
928             continue;
929 
930         NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
931             pRmApi->DupObject(pRmApi,
932                               RES_GET_CLIENT_HANDLE(pChanGrpDest),
933                               RES_GET_HANDLE(pChanGrpDest),
934                               &pChanGrpDest->hLegacykCtxShareSync,
935                               RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi),
936                               pKernelChannelGroupApi->hLegacykCtxShareSync,
937                               0),
938             fail);
939 
940         if (maxSubctx == 2)
941         {
942             NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
943                 pRmApi->DupObject(pRmApi,
944                                   RES_GET_CLIENT_HANDLE(pChanGrpDest),
945                                   RES_GET_HANDLE(pChanGrpDest),
946                                   &pChanGrpDest->hLegacykCtxShareAsync,
947                                   RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi),
948                                   pKernelChannelGroupApi->hLegacykCtxShareAsync,
949                                   0),
950             fail);
951         }
952     }
953 
954     return status;
955 
956 fail:
957     NV_PRINTF(LEVEL_ERROR, "Failed to set channel group in legacy mode.\n");
958 
959     pKernelChannelGroup->bLegacyMode = NV_FALSE;
960 
961     it = listIterAll(&pKernelChannelGroup->apiObjList);
962 
963     while (listIterNext(&it))
964     {
965         KernelChannelGroupApi *pChanGrpIt = *it.pValue;
966 
967         if (pChanGrpIt->hLegacykCtxShareSync != 0)
968         {
969            pRmApi->Free(pRmApi, RES_GET_CLIENT_HANDLE(pChanGrpIt), pChanGrpIt->hLegacykCtxShareSync);
970            pChanGrpIt->hLegacykCtxShareSync = 0;
971         }
972 
973         if (pChanGrpIt->hLegacykCtxShareAsync != 0)
974         {
975            pRmApi->Free(pRmApi, RES_GET_CLIENT_HANDLE(pChanGrpIt), pChanGrpIt->hLegacykCtxShareAsync);
976            pChanGrpIt->hLegacykCtxShareAsync = 0;
977         }
978     }
979 
    if (status == NV_OK)
981     {
982         status = NV_ERR_INVALID_STATE;
983     }
984 
985     return status;
986 }
987 
988 NV_STATUS
989 kchangrpapiCtrlCmdGpFifoSchedule_IMPL
990 (
991     KernelChannelGroupApi              *pKernelChannelGroupApi,
992     NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams
993 )
994 {
995     OBJGPU              *pGpu         = GPU_RES_GET_GPU(pKernelChannelGroupApi);
996     RsResourceRef       *pResourceRef = RES_GET_REF(pKernelChannelGroupApi);
997     KernelChannelGroup  *pKernelChannelGroup = NULL;
998     NV_STATUS            status       = NV_OK;
999     KernelFifo          *pKernelFifo;
1000     CLASSDESCRIPTOR     *pClass       = NULL;
1001     CHANNEL_NODE        *pChanNode    = NULL;
1002     CHANNEL_LIST        *pChanList    = NULL;
1003     NvU32                runlistId    = INVALID_RUNLIST_ID;
1004     RM_API              *pRmApi       = GPU_GET_PHYSICAL_RMAPI(pGpu);
1005 
1006     if (pKernelChannelGroupApi->pKernelChannelGroup == NULL)
1007         return NV_ERR_INVALID_OBJECT;
1008     pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
1009 
1010     if (gpuGetClassByClassId(pGpu, pResourceRef->externalClassId, &pClass) != NV_OK)
1011     {
1012         NV_PRINTF(LEVEL_ERROR, "class %x not supported\n",
1013                   pResourceRef->externalClassId);
1014     }
1015     NV_ASSERT_OR_RETURN((pClass != NULL), NV_ERR_NOT_SUPPORTED);
1016 
1017     //
1018     // Bug 1737765: Prevent Externally Owned Channels from running unless bound
1019     //  It is possible for clients to allocate and schedule channels while
1020     //  skipping the UVM registration step which binds the appropriate
1021     //  allocations in RM. We need to fail channel scheduling if the channels
1022     //  have not been registered with UVM.
1023     //  We include this check for every channel in the group because it is
1024     //  expected that Volta+ may use a separate VAS for each channel.
1025     //
1026 
1027     pChanList = pKernelChannelGroup->pChanList;
1028 
1029     for (pChanNode = pChanList->pHead; pChanNode; pChanNode = pChanNode->pNext)
1030     {
1031         NV_CHECK_OR_RETURN(LEVEL_NOTICE, kchannelIsSchedulable_HAL(pGpu, pChanNode->pKernelChannel),
1032             NV_ERR_INVALID_STATE);
1033     }
1034 
1035     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY);
1036     pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1037     pChanList = pKernelChannelGroup->pChanList;
1038 
1039     //
1040     // Some channels may not have objects allocated on them, so they won't have
1041     // a runlist committed yet.  Force them all onto the same runlist so the
    // low level code knows what to do with them.
1043     //
1044     // First we walk through the channels to see if there is a runlist assigned
1045     // already and if so are the channels consistent.
1046     //
1047     runlistId = pKernelChannelGroup->runlistId; // Start with TSG runlistId
1048     for (pChanNode = pChanList->pHead; pChanNode; pChanNode = pChanNode->pNext)
1049     {
1050         KernelChannel *pKernelChannel = pChanNode->pKernelChannel;
1051 
1052         NV_ASSERT_OR_ELSE(pKernelChannel != NULL, continue);
1053 
1054         if (kchannelIsRunlistSet(pGpu, pKernelChannel))
1055         {
1056             if (runlistId == INVALID_RUNLIST_ID)
1057             {
1058                 runlistId = kchannelGetRunlistId(pKernelChannel);
1059             }
1060             else // Catch if 2 channels in the same TSG have different runlistId
1061             {
1062                 if (runlistId != kchannelGetRunlistId(pKernelChannel))
1063                 {
1064                     NV_PRINTF(LEVEL_ERROR,
                        "Channels in TSG %d have different runlist IDs; this should never happen!\n",
1066                         pKernelChannelGroup->grpID);
1067                     DBG_BREAKPOINT();
1068                 }
1069             }
1070         }
1071     }
1072 
1073     // If no channels have a runlist set, get the default and use it.
1074     if (runlistId == INVALID_RUNLIST_ID)
1075     {
1076         runlistId = kchangrpGetDefaultRunlist_HAL(pGpu, pKernelChannelGroup);
1077     }
1078 
    // Rewrite the TSG runlist ID here; the same runlist ID is applied to all of the TSG's channels below
1080     pKernelChannelGroup->runlistId = runlistId;
1081 
1082     //
1083     // Now go through and force any channels w/o the runlist set to use either
1084     // the default or whatever we found other channels to be allocated on.
1085     //
1086     for (pChanNode = pChanList->pHead; pChanNode; pChanNode = pChanNode->pNext)
1087     {
1088         KernelChannel *pKernelChannel = pChanNode->pKernelChannel;
1089 
1090         NV_ASSERT_OR_ELSE(pKernelChannel != NULL, continue);
1091 
1092         if (!kchannelIsRunlistSet(pGpu, pKernelChannel))
1093         {
1094             kfifoRunlistSetId_HAL(pGpu, pKernelFifo, pKernelChannel, runlistId);
1095         }
1096     }
1097     SLI_LOOP_END
1098 
1099     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
1100     {
1101         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
1102         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
1103         NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi);
1104         NvHandle hObject = RES_GET_HANDLE(pKernelChannelGroupApi);
1105 
1106         NV_RM_RPC_CONTROL(pGpu,
1107                           hClient,
1108                           hObject,
1109                           pRmCtrlParams->cmd,
1110                           pRmCtrlParams->pParams,
1111                           pRmCtrlParams->paramsSize,
1112                           status);
1113         return status;
1114     }
1115 
1116 
    //
    // Do an internal control call to schedule the channel group
    // on Host (Physical) RM
    //
1121     status = pRmApi->Control(pRmApi,
1122                              RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi),
1123                              RES_GET_HANDLE(pKernelChannelGroupApi),
1124                              NVA06C_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE,
1125                              pSchedParams,
1126                              sizeof(NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS));
1127 
1128     return status;
1129 }
1130 
1131 NV_STATUS
1132 kchangrpapiCtrlCmdBind_IMPL
1133 (
1134     KernelChannelGroupApi   *pKernelChannelGroupApi,
1135     NVA06C_CTRL_BIND_PARAMS *pParams
1136 )
1137 {
1138     NV_STATUS     rmStatus = NV_OK;
1139     OBJGPU       *pGpu     = GPU_RES_GET_GPU(pKernelChannelGroupApi);
1140     NvHandle      hClient  = RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi);
1141     CHANNEL_NODE *pChanNode;
1142     RM_ENGINE_TYPE localEngineType;
1143     RM_ENGINE_TYPE globalEngineType;
1144     ENGDESCRIPTOR engineDesc;
1145     NvBool        bMIGInUse = IS_MIG_IN_USE(pGpu);
1146 
1147     NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_INVALID_ARGUMENT);
1148 
1149     localEngineType = globalEngineType = gpuGetRmEngineType(pParams->engineType);
1150 
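    //
    // Under MIG the client-provided engine type is local to its GPU instance;
    // translate it to the global engine type before resolving the engine
    // descriptor and runlist.
    //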
1151     if (bMIGInUse)
1152     {
1153         KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
1154         MIG_INSTANCE_REF ref;
1155 
1156         NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
1157             kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref));
1158 
1159         NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
1160             kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref,
1161                                               localEngineType,
1162                                               &globalEngineType));
1163     }
1164 
1165     NV_PRINTF(LEVEL_INFO,
1166               "Binding TSG %d to Engine %d (%d)\n",
1167               pKernelChannelGroupApi->pKernelChannelGroup->grpID,
1168               gpuGetNv2080EngineType(globalEngineType), globalEngineType);
1169 
1170     // Translate globalEnginetype -> enginedesc
1171     NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
1172         gpuXlateClientEngineIdToEngDesc(pGpu, globalEngineType, &engineDesc));
1173 
1174     // Translate engineDesc -> runlistId for TSG
1175     NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
1176         kfifoEngineInfoXlate_HAL(pGpu, GPU_GET_KERNEL_FIFO(pGpu),
1177             ENGINE_INFO_TYPE_ENG_DESC,
1178             engineDesc,
1179             ENGINE_INFO_TYPE_RUNLIST,
1180             &pKernelChannelGroupApi->pKernelChannelGroup->runlistId));
1181 
1182     for (pChanNode =
1183              pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead;
1184          pChanNode != NULL;
1185          pChanNode = pChanNode->pNext)
1186     {
1187         NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
1188             kchannelBindToRunlist(pChanNode->pKernelChannel,
1189                                   localEngineType,
1190                                   engineDesc));
1191         if (rmStatus != NV_OK)
1192         {
1193             break;
1194         }
1195     }
1196 
1197     return rmStatus;
1198 }
1199 
1200 NV_STATUS
1201 kchangrpapiCtrlCmdGetTimeslice_IMPL
1202 (
1203     KernelChannelGroupApi        *pKernelChannelGroupApi,
1204     NVA06C_CTRL_TIMESLICE_PARAMS *pTsParams
1205 )
1206 {
1207     KernelChannelGroup *pKernelChannelGroup = NULL;
1208 
1209     if (pKernelChannelGroupApi->pKernelChannelGroup == NULL)
1210         return NV_ERR_INVALID_OBJECT;
1211     pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
1212 
1213     pTsParams->timesliceUs = pKernelChannelGroup->timesliceUs;
1214 
1215     return NV_OK;
1216 }
1217 
1218 NV_STATUS
1219 kchangrpapiCtrlCmdSetTimeslice_IMPL
1220 (
1221     KernelChannelGroupApi        *pKernelChannelGroupApi,
1222     NVA06C_CTRL_TIMESLICE_PARAMS *pTsParams
1223 )
1224 {
1225     OBJGPU             *pGpu                = GPU_RES_GET_GPU(pKernelChannelGroupApi);
1226     RsResourceRef      *pResourceRef        = RES_GET_REF(pKernelChannelGroupApi);
1227     KernelChannelGroup *pKernelChannelGroup = NULL;
1228     NV_STATUS           status              = NV_OK;
1229     CLASSDESCRIPTOR    *pClass              = NULL;
1230     RM_API             *pRmApi              = GPU_GET_PHYSICAL_RMAPI(pGpu);
1231 
1232     if (pKernelChannelGroupApi->pKernelChannelGroup == NULL)
1233         return NV_ERR_INVALID_OBJECT;
1234     pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
1235 
1236     if (gpuGetClassByClassId(pGpu, pResourceRef->externalClassId, &pClass) != NV_OK)
1237     {
1238         NV_PRINTF(LEVEL_ERROR, "class %x not supported\n",
1239                   pResourceRef->externalClassId);
1240     }
1241     NV_ASSERT_OR_RETURN((pClass != NULL), NV_ERR_NOT_SUPPORTED);
1242 
1243     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
1244     {
1245         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
1246         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
1247         NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi);
1248         NvHandle hObject = RES_GET_HANDLE(pKernelChannelGroupApi);
1249         NVA06C_CTRL_TIMESLICE_PARAMS *pParams = (NVA06C_CTRL_TIMESLICE_PARAMS *)(pRmCtrlParams->pParams);
1250 
1251         NV_RM_RPC_CONTROL(pGpu,
1252                           hClient,
1253                           hObject,
1254                           pRmCtrlParams->cmd,
1255                           pRmCtrlParams->pParams,
1256                           pRmCtrlParams->paramsSize,
1257                           status);
1258 
1259         // Update guest RM's internal bookkeeping with the timeslice.
1260         if (status == NV_OK)
1261         {
1262             pKernelChannelGroup->timesliceUs = pParams->timesliceUs;
1263         }
1264 
1265         return status;
1266     }
1267 
    //
    // Do an internal control call to set the timeslice
    // on Host (Physical) RM
    //
1272     status = pRmApi->Control(pRmApi,
1273                              RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi),
1274                              RES_GET_HANDLE(pKernelChannelGroupApi),
1275                              NVA06C_CTRL_CMD_INTERNAL_SET_TIMESLICE,
1276                              pTsParams,
1277                              sizeof(NVA06C_CTRL_TIMESLICE_PARAMS));
1278 
1279     return status;
1280 }
1281 
1282 NV_STATUS
1283 kchangrpapiCtrlCmdGetInfo_IMPL
1284 (
1285     KernelChannelGroupApi       *pKernelChannelGroupApi,
1286     NVA06C_CTRL_GET_INFO_PARAMS *pParams
1287 )
1288 {
1289     KernelChannelGroup *pKernelChannelGroup = NULL;
1290 
1291     if (pKernelChannelGroupApi->pKernelChannelGroup == NULL)
1292         return NV_ERR_INVALID_OBJECT;
1293     pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
1294 
1295     pParams->tsgID = pKernelChannelGroup->grpID;
1296 
1297     return NV_OK;
1298 }
1299 
1300 NV_STATUS
1301 kchangrpapiCtrlCmdSetInterleaveLevel_IMPL
1302 (
1303     KernelChannelGroupApi               *pKernelChannelGroupApi,
1304     NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams
1305 )
1306 {
1307     OBJGPU          *pGpu         = GPU_RES_GET_GPU(pKernelChannelGroupApi);
1308     RsResourceRef   *pResourceRef = RES_GET_REF(pKernelChannelGroupApi);
1309     KernelChannelGroup *pKernelChannelGroup =
1310         pKernelChannelGroupApi->pKernelChannelGroup;
1311     PCLASSDESCRIPTOR pClass       = NULL;
1312     NV_STATUS        status       = NV_OK;
1313 
1314     if (gpuGetClassByClassId(pGpu, pResourceRef->externalClassId, &pClass) != NV_OK)
1315     {
1316         NV_PRINTF(LEVEL_ERROR, "class %x not supported\n",
1317                   pResourceRef->externalClassId);
1318     }
1319     NV_ASSERT_OR_RETURN((pClass != NULL), NV_ERR_NOT_SUPPORTED);
1320 
1321     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
1322     {
1323         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
1324         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
1325         NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi);
1326         NvHandle hObject = RES_GET_HANDLE(pKernelChannelGroupApi);
1327 
1328         NV_RM_RPC_CONTROL(pGpu,
1329                           hClient,
1330                           hObject,
1331                           pRmCtrlParams->cmd,
1332                           pRmCtrlParams->pParams,
1333                           pRmCtrlParams->paramsSize,
1334                           status);
1335         NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, NV_ERR_NOT_SUPPORTED);
1336     }
1337 
1338     status = kchangrpSetInterleaveLevel(pGpu, pKernelChannelGroup, pParams->tsgInterleaveLevel);
1339 
1340     return status;
1341 }
1342 
1343 NV_STATUS
1344 kchangrpapiCtrlCmdGetInterleaveLevel_IMPL
1345 (
1346     KernelChannelGroupApi               *pKernelChannelGroupApi,
1347     NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams
1348 )
1349 {
1350     KernelChannelGroup *pKernelChannelGroup = NULL;
1351     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannelGroupApi);
1352     NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
1353 
1354     if (pKernelChannelGroupApi->pKernelChannelGroup == NULL)
1355         return NV_ERR_INVALID_OBJECT;
1356     pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
1357 
1358     pParams->tsgInterleaveLevel = pKernelChannelGroup->pInterleaveLevel[subdevInst];
1359 
1360     return NV_OK;
1361 }
1362 
/*!
 * @brief Handler for NVA06C_CTRL_CMD_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS
 *
 * This is currently unimplemented; it is split out as a separate change for bug 200691429.
 */
1368 NV_STATUS
1369 kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers_IMPL
1370 (
1371     KernelChannelGroupApi *pKernelChannelGroupApi,
1372     NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS *pParams
1373 )
1374 {
1375     NV_PRINTF(LEVEL_INFO,
1376         "bug 200691429: kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers_IMPL received\n");
1377     return NV_OK;
1378 }
1379