1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 // FIXME XXX
25 #define NVOC_KERNEL_GRAPHICS_CONTEXT_H_PRIVATE_ACCESS_ALLOWED
26 #define NVOC_KERNEL_CHANNEL_H_PRIVATE_ACCESS_ALLOWED
27 
28 #include "kernel/gpu/fifo/kernel_channel.h"
29 
30 #include "kernel/core/locks.h"
31 #include "gpu/subdevice/subdevice.h"
32 #include "kernel/diagnostics/gpu_acct.h"
33 #include "kernel/gpu/conf_compute/conf_compute.h"
34 #include "kernel/gpu/device/device.h"
35 #include "kernel/gpu/fifo/kernel_ctxshare.h"
36 #include "kernel/gpu/fifo/kernel_channel_group.h"
37 #include "kernel/gpu/gr/kernel_graphics.h"
38 #include "kernel/gpu/mem_mgr/context_dma.h"
39 #include "kernel/gpu/mem_mgr/heap.h"
40 #include "kernel/gpu/mem_mgr/mem_mgr.h"
41 #include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
42 #include "kernel/gpu/rc/kernel_rc.h"
43 #include "kernel/mem_mgr/ctx_buf_pool.h"
44 #include "kernel/mem_mgr/gpu_vaspace.h"
45 #include "kernel/rmapi/event.h"
46 #include "kernel/rmapi/rmapi.h"
47 #include "kernel/rmapi/rs_utils.h"
48 #include "kernel/virtualization/hypervisor/hypervisor.h"
49 #include "gpu/bus/kern_bus.h"
50 #include "gpu/mem_mgr/virt_mem_allocator.h"
51 #include "objtmr.h"
52 
53 #include "class/cl0090.h"   // KERNEL_GRAPHICS_CONTEXT
54 #include "class/cl906fsw.h" // GF100_GPFIFO
55 #include "class/cla06c.h"   // KEPLER_CHANNEL_GROUP_A
56 #include "class/cla06f.h"   // KEPLER_CHANNEL_GPFIFO_A
57 #include "class/cla06fsw.h" // KEPLER_CHANNEL_GPFIFO_A
58 #include "class/cla16f.h"   // KEPLER_CHANNEL_GPFIFO_B
59 #include "class/cla16fsw.h" // KEPLER_CHANNEL_GPFIFO_B
60 #include "class/clb06f.h"   // MAXWELL_CHANNEL_GPFIFO_A
61 #include "class/clb06fsw.h" // MAXWELL_CHANNEL_GPFIFO_A
62 #include "class/clc06f.h"   // PASCAL_CHANNEL_GPFIFO_A
63 #include "class/clc06fsw.h" // PASCAL_CHANNEL_GPFIFO_A
64 #include "class/clc36f.h"   // VOLTA_CHANNEL_GPFIFO_A
65 #include "class/clc36fsw.h" // VOLTA_CHANNEL_GPFIFO_A
66 #include "class/clc46f.h"   // TURING_CHANNEL_GPFIFO_A
67 #include "class/clc46fsw.h" // TURING_CHANNEL_GPFIFO_A
68 #include "class/clc56f.h"   // AMPERE_CHANNEL_GPFIFO_A
69 #include "class/clc56fsw.h" // AMPERE_CHANNEL_GPFIFO_A
70 #include "class/clc572.h"   // PHYSICAL_CHANNEL_GPFIFO
71 #include "class/clc86f.h"   // HOPPER_CHANNEL_GPFIFO_A
72 #include "class/clc86fsw.h" // HOPPER_CHANNEL_GPFIFO_A
73 
74 #include "ctrl/ctrl906f.h"
75 #include "ctrl/ctrlc46f.h"
76 #include "ctrl/ctrlc86f.h"
77 
78 #include "Nvcm.h"
79 #include "libraries/resserv/resserv.h"
80 #include "libraries/resserv/rs_client.h"
81 #include "libraries/resserv/rs_resource.h"
82 #include "libraries/resserv/rs_server.h"
83 #include "nvRmReg.h"
84 #include "nvstatuscodes.h"
85 #include "vgpu/rpc.h"
86 
87 // Instmem static functions
88 static NV_STATUS _kchannelAllocHalData(OBJGPU *pGpu, KernelChannel *pKernelChannel);
89 static void      _kchannelFreeHalData(OBJGPU *pGpu, KernelChannel *pKernelChannel);
90 static NV_STATUS _kchannelAllocOrDescribeInstMem(
91     KernelChannel  *pKernelChannel,
92     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams);
93 static NV_STATUS _kchannelDescribeMemDescsFromParams(
94     OBJGPU *pGpu,
95     KernelChannel *pKernelChannel,
96     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams);
97 static NV_STATUS _kchannelDescribeMemDescsHeavySriov(OBJGPU *pGpu, KernelChannel *pKernelChannel);
98 static NV_STATUS _kchannelSendChannelAllocRpc(
99     KernelChannel *pKernelChannel,
100     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams,
101     KernelChannelGroup *pKernelChannelGroup,
102     NvBool bFullSriov);
103 
104 static NV_STATUS _kchannelSetupNotifyActions(KernelChannel *pKernelChannel,
105                                              NvU32 classNum);
106 static void _kchannelCleanupNotifyActions(KernelChannel *pKernelChannel);
107 static NV_STATUS _kchannelNotifyOfChid(OBJGPU *pGpu, KernelChannel *pKernelChannel, RsClient *pRsClient);
108 static NV_STATUS _kchannelGetUserMemDesc(OBJGPU *pGpu, KernelChannel *pKernelChannel, PMEMORY_DESCRIPTOR *ppMemDesc);
109 static void _kchannelUpdateFifoMapping(KernelChannel    *pKernelChannel,
110                                        OBJGPU           *pGpu,
111                                        NvBool            bKernel,
112                                        NvP64             cpuAddress,
113                                        NvP64             priv,
114                                        NvU64             cpuMapLength,
115                                        NvU32             flags,
116                                        NvHandle          hSubdevice,
117                                        RsCpuMapping     *pMapping);
118 
119 /*!
120  * @brief Construct a new KernelChannel, which also creates a Channel.
121  *
122  * @param[in,out]  pCallContext     The call context
123  * @param[in,out]  pParams          Params for the *_CHANNEL_GPFIFO class
124  *                                  object being created
125  *
126  * @returns NV_OK on success, specific error code on failure.
127  */
128 NV_STATUS
129 kchannelConstruct_IMPL
130 (
131     KernelChannel *pKernelChannel,
132     CALL_CONTEXT *pCallContext,
133     RS_RES_ALLOC_PARAMS_INTERNAL *pParams
134 )
135 {
136     OBJGPU                 *pGpu             = GPU_RES_GET_GPU(pKernelChannel);
137     OBJSYS                 *pSys             = SYS_GET_INSTANCE();
138     KernelMIGManager       *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
139     KernelFifo             *pKernelFifo      = GPU_GET_KERNEL_FIFO(pGpu);
140     RsClient               *pRsClient        = pCallContext->pClient;
141     RmClient               *pRmClient        = NULL;
142     RsResourceRef          *pResourceRef     = pCallContext->pResourceRef;
143     RsResourceRef          *pKernelCtxShareRef = NULL;
144     NV_STATUS               status;
145     RM_API                 *pRmApi           = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
146     NvHandle                hClient          = pRsClient->hClient;
147     NvHandle                hParent          = pResourceRef->pParentRef->hResource;
148     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams = pParams->pAllocParams;
149     RsResourceRef          *pChanGrpRef      = NULL;
150     KernelChannelGroupApi  *pKernelChannelGroupApi = NULL;
151     NvHandle                hKernelCtxShare  = pChannelGpfifoParams->hContextShare;
152     NvBool                  bTsgAllocated    = NV_FALSE;
153     NvHandle                hChanGrp         = NV01_NULL_OBJECT;
154     RsResourceRef          *pDeviceRef       = NULL;
155     RsResourceRef          *pVASpaceRef      = NULL;
156     KernelGraphicsContext  *pKernelGraphicsContext = NULL;
157     NvBool                  bMIGInUse;
158     KernelChannelGroup     *pKernelChannelGroup = NULL;
159     NvU32                   chID             = ~0;
160     NvU32                   flags            = pChannelGpfifoParams->flags;
161     RM_ENGINE_TYPE          globalRmEngineType = RM_ENGINE_TYPE_NULL;
162     NvU32                   verifFlags2      = 0;
163     NvBool                  bChidAllocated   = NV_FALSE;
164     NvBool                  bLockAcquired    = NV_FALSE;
165     NvBool                  bNotifyActionsSetup = NV_FALSE;
166     CTX_BUF_POOL_INFO      *pChannelBufPool  = NULL;
167     CTX_BUF_INFO            bufInfo          = {0};
168     NvBool                  bRpcAllocated    = NV_FALSE;
169     NvBool                  bFullSriov       = IS_VIRTUAL_WITH_SRIOV(pGpu) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu);
170     NvBool                  bAddedToGroup    = NV_FALSE;
171     NvU32                   callingContextGfid;
172     Device                 *pDevice;
173 
174     // We only support physical channels.
175     NV_ASSERT_OR_RETURN(FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_TYPE, _PHYSICAL, flags),
176         NV_ERR_NOT_SUPPORTED);
177 
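    // Initialize default KernelChannel state; several fields are derived from the allocation flags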
178     pKernelChannel->refCount = 1;
179     pKernelChannel->bIsContextBound = NV_FALSE;
180     pKernelChannel->nextObjectClassID = 0;
181     pKernelChannel->subctxId = 0;
182     pKernelChannel->bSkipCtxBufferAlloc = FLD_TEST_DRF(OS04, _FLAGS,
183                                                        _SKIP_CTXBUFFER_ALLOC, _TRUE, flags);
184     pKernelChannel->cid = portAtomicIncrementU32(&pSys->currentCid);
185     pKernelChannel->runqueue = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, flags);
186     pKernelChannel->engineType = RM_ENGINE_TYPE_NULL;
187     pChannelGpfifoParams->cid = pKernelChannel->cid;
188     NV_ASSERT_OK_OR_GOTO(status, refFindAncestorOfType(pResourceRef, classId(Device), &pDeviceRef), cleanup);
189     NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &callingContextGfid));
190 
191     pDevice = dynamicCast(pDeviceRef->pResource, Device);
192 
    // Internal fields must be cleared when the RMAPI call comes from a client
194     if (!hypervisorIsVgxHyper() || IS_GSP_CLIENT(pGpu))
195         pChannelGpfifoParams->hPhysChannelGroup = NV01_NULL_OBJECT;
196     pChannelGpfifoParams->internalFlags = 0;
197     portMemSet(&pChannelGpfifoParams->errorNotifierMem, 0,
198                sizeof pChannelGpfifoParams->errorNotifierMem);
199     portMemSet(&pChannelGpfifoParams->eccErrorNotifierMem, 0,
200                sizeof pChannelGpfifoParams->eccErrorNotifierMem);
201     pChannelGpfifoParams->ProcessID = 0;
202     pChannelGpfifoParams->SubProcessID = 0;
203     portMemSet(pChannelGpfifoParams->encryptIv, 0, sizeof(pChannelGpfifoParams->encryptIv));
204     portMemSet(pChannelGpfifoParams->decryptIv, 0, sizeof(pChannelGpfifoParams->decryptIv));
205     portMemSet(pChannelGpfifoParams->hmacNonce, 0, sizeof(pChannelGpfifoParams->hmacNonce));
206 
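    // Resolve the RmClient backing this allocation; the channel inherits its user info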
207     pRmClient = dynamicCast(pRsClient, RmClient);
208     if (pRmClient == NULL)
209     {
210         return NV_ERR_OBJECT_NOT_FOUND;
211     }
212     pKernelChannel->pUserInfo = pRmClient->pUserInfo;
213 
214     //
215     // GSP-RM needs privilegeLevel passed in as an alloc param because it cannot
216     // check pRmClient for kernel/admin.
217     // Other platforms check pRmClient to determine privilegeLevel.
218     //
219     if (RMCFG_FEATURE_PLATFORM_GSP)
220     {
221         // Guest-RM clients can allocate a privileged channel to perform
222         // actions such as updating page tables in physical mode or scrubbing.
223         // Security for these channels is enforced by VMMU and IOMMU
224         if (gpuIsSriovEnabled(pGpu) && IS_GFID_VF(callingContextGfid) &&
225                 FLD_TEST_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, flags))
226         {
227             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN;
228         }
229         else
230         {
231             pKernelChannel->privilegeLevel =
232                 DRF_VAL(_KERNELCHANNEL, _ALLOC_INTERNALFLAGS, _PRIVILEGE, pChannelGpfifoParams->internalFlags);
233         }
234 
        // In GSP, all vGPU channels simply use the GFID as the ProcessID
236         if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && IS_GFID_VF(callingContextGfid))
237         {
238             pKernelChannel->ProcessID = callingContextGfid;
239         }
240         else
241         {
242             pKernelChannel->ProcessID = pChannelGpfifoParams->ProcessID;
243         }
244 
245         pKernelChannel->SubProcessID = pChannelGpfifoParams->SubProcessID;
246     }
247     else
248     {
249         RS_PRIV_LEVEL privLevel = pCallContext->secInfo.privLevel;
250         if (privLevel >= RS_PRIV_LEVEL_KERNEL)
251         {
252             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL;
253             pChannelGpfifoParams->flags = FLD_SET_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, pChannelGpfifoParams->flags);
254         }
255         else if (rmclientIsAdmin(pRmClient, privLevel) || hypervisorCheckForObjectAccess(hClient))
256         {
257             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN;
258             pChannelGpfifoParams->flags = FLD_SET_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, pChannelGpfifoParams->flags);
259         }
260         else
261         {
262             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER;
263         }
264 
265         pKernelChannel->ProcessID = pRmClient->ProcID;
266         pKernelChannel->SubProcessID = pRmClient->SubProcessID;
267     }
268 
269     // Context share and vaspace handles can't be active at the same time.
270     if ((hKernelCtxShare != NV01_NULL_OBJECT) && (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT))
271     {
272         NV_PRINTF(LEVEL_ERROR,
273                   "Both context share and vaspace handles can't be valid at the same time\n");
274         return NV_ERR_INVALID_ARGUMENT;
275     }
276 
277     bMIGInUse = IS_MIG_IN_USE(pGpu);
278 
279     //
280     // The scrubber is allocated by Kernel RM in offload mode, and is disabled
281     // completely on GSP, so it is not possible for GSP to determine whether
282     // this allocation should be allowed or not. CPU RM can and should properly
283     // check this.
284     //
285     if (IS_MIG_ENABLED(pGpu) && !RMCFG_FEATURE_PLATFORM_GSP && !bMIGInUse)
286     {
287         NvBool bTopLevelScrubberEnabled = NV_FALSE;
288         NvBool bTopLevelScrubberConstructed = NV_FALSE;
289         MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
290 
291         if (memmgrIsPmaInitialized(pMemoryManager))
292         {
293             Heap *pHeap = GPU_GET_HEAP(pGpu);
294             NvU32 pmaConfigs = PMA_QUERY_SCRUB_ENABLED | PMA_QUERY_SCRUB_VALID;
295             NV_ASSERT_OK(pmaQueryConfigs(&pHeap->pmaObject, &pmaConfigs));
296             bTopLevelScrubberEnabled = (pmaConfigs & PMA_QUERY_SCRUB_ENABLED) != 0x0;
297             bTopLevelScrubberConstructed = (pmaConfigs & PMA_QUERY_SCRUB_VALID) != 0x0;
298         }
299 
300         //
301         // Exception: Top level scrubber must be initialized before
302         // GPU instances can be created, and therefore must be allowed to
303         // create a CE context if the scrubber is supported.
304         //
305 
306         if (!bTopLevelScrubberEnabled || bTopLevelScrubberConstructed ||
307             !kchannelCheckIsKernel(pKernelChannel))
308         {
309             NV_PRINTF(LEVEL_ERROR,
310                       "Channel allocation not allowed when MIG is enabled without GPU instancing\n");
311             return NV_ERR_INVALID_STATE;
312         }
313     }
314 
315     // Find the TSG, or create the TSG if we need to wrap it
316     status = clientGetResourceRefByType(pRsClient, hParent,
317                                         classId(KernelChannelGroupApi),
318                                         &pChanGrpRef);
319     if (status != NV_OK)
320     {
321         NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS tsgParams = { 0 };
322 
323         // Context share can only be used with a TSG channel
324         if (hKernelCtxShare != NV01_NULL_OBJECT)
325         {
326             NV_PRINTF(LEVEL_ERROR,
327                       "Non-TSG channels can't use context share\n");
328             status = NV_ERR_INVALID_ARGUMENT;
329             goto cleanup;
330         }
331 
332         tsgParams.hVASpace = pChannelGpfifoParams->hVASpace;
333         tsgParams.engineType = pChannelGpfifoParams->engineType;
        // The vGPU plugin context flag should only be set if the calling context is the vGPU plugin
335         if (gpuIsSriovEnabled(pGpu))
336         {
337             tsgParams.bIsCallingContextVgpuPlugin = FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_VGPU_PLUGIN_CONTEXT, _TRUE, pChannelGpfifoParams->flags);
338         }
339         //
340         // Internally allocate a TSG to wrap this channel. There is no point
341         // in mirroring this allocation in the host, as the channel is
342         // already mirrored.
343         //
344         status = pRmApi->AllocWithSecInfo(pRmApi,
345             hClient,
346             hParent,
347             &pChannelGpfifoParams->hPhysChannelGroup,
348             KEPLER_CHANNEL_GROUP_A,
349             NV_PTR_TO_NvP64(&tsgParams),
350             sizeof(tsgParams),
351             RMAPI_ALLOC_FLAGS_SKIP_RPC,
352             NvP64_NULL,
353             &pRmApi->defaultSecInfo);
354 
355         NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
356         bTsgAllocated = NV_TRUE;
357         hChanGrp = pChannelGpfifoParams->hPhysChannelGroup;
358 
359         status = clientGetResourceRefByType(pRsClient, hChanGrp,
360                                             classId(KernelChannelGroupApi),
361                                             &pChanGrpRef);
362         NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
363 
364         pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
365                                              KernelChannelGroupApi);
366         pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
367         pKernelChannelGroup->bAllocatedByRm = NV_TRUE;
368     }
369     else
370     {
371         hChanGrp = hParent;
372         pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
373                                              KernelChannelGroupApi);
374         if (pKernelChannelGroupApi == NULL ||
375             pKernelChannelGroupApi->pKernelChannelGroup == NULL)
376         {
377             NV_PRINTF(LEVEL_ERROR, "Invalid KernelChannelGroup* for channel 0x%x\n",
378                       pResourceRef->hResource);
379             status = NV_ERR_INVALID_POINTER;
380             NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
381         }
382         pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
383 
384         // TSG channel should specify a context share object, rather than vaspace directly
385         if (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT)
386         {
387             NV_PRINTF(LEVEL_ERROR,
388                       "TSG channels can't use an explicit vaspace\n");
389             status = NV_ERR_INVALID_ARGUMENT;
390             NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
391         }
392     }
393     pKernelChannel->pKernelChannelGroupApi = pKernelChannelGroupApi;
394 
395     NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE);
396     NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_STATE);
397 
    //
    // Reserve memory for the channel instance block from PMA into a pool
    // tied to the channel's parent TSG.
    // RM will later allocate memory for the instance block from this pool.
    //
403     pChannelBufPool = pKernelChannelGroup->pChannelBufPool;
404     if (pChannelBufPool != NULL)
405     {
406         NvBool bIsScrubSkipped;
407         NvBool bRequestScrubSkip = FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_SKIP_SCRUBBER, _TRUE, pChannelGpfifoParams->flags);
408 
409         if (bRequestScrubSkip)
410         {
411             if (!kchannelCheckIsKernel(pKernelChannel))
412             {
413                 status = NV_ERR_INVALID_ARGUMENT;
414                 NV_PRINTF(LEVEL_ERROR, "Only kernel priv clients can skip scrubber\n");
415                 goto cleanup;
416             }
417 
            //
            // If this is the first channel in the TSG, set up the ctx buf pool to skip scrubbing.
            // For subsequent channels, the setting must match the ctx buf pool's state.
            //
422             if (pKernelChannelGroup->chanCount == 0)
423             {
424                 ctxBufPoolSetScrubSkip(pChannelBufPool, NV_TRUE);
425                 NV_PRINTF(LEVEL_INFO, "Skipping scrubber for all allocations on this context\n");
426             }
427         }
428 
429         bIsScrubSkipped = ctxBufPoolIsScrubSkipped(pChannelBufPool);
430         if (bIsScrubSkipped ^ bRequestScrubSkip)
431         {
432             status = NV_ERR_INVALID_ARGUMENT;
433             NV_PRINTF(LEVEL_ERROR, "Mismatch between channel and parent TSG's policy on skipping scrubber\n");
434             NV_PRINTF(LEVEL_ERROR, "scrubbing %s skipped for TSG and %s for channel\n", (bIsScrubSkipped ? "is" : "is not"),
435                 (bRequestScrubSkip ? "is" : "is not"));
436             goto cleanup;
437         }
438         NV_ASSERT_OK_OR_GOTO(status,
439                              kfifoGetInstMemInfo_HAL(pKernelFifo, &bufInfo.size, &bufInfo.align, NULL, NULL, NULL),
440                              cleanup);
441         bufInfo.attr = RM_ATTR_PAGE_SIZE_DEFAULT;
442         NV_ASSERT_OK_OR_GOTO(status, ctxBufPoolReserve(pGpu, pChannelBufPool, &bufInfo, 1), cleanup);
443     }
444     else
445     {
446         NV_PRINTF(LEVEL_INFO, "Not using ctx buf pool\n");
447     }
448 
    //--------------------------------------------------------------------------
    // We acquire the GPU lock below.
    // From here on, do not return early; use goto cleanup.
    //--------------------------------------------------------------------------
453 
454     NV_ASSERT_OK_OR_GOTO(status,
455         rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO),
456         cleanup);
457     bLockAcquired = NV_TRUE;
458 
459     //
460     // Initialize the notification indices used for different notifications
461     //
462     pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR]
463         = NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR;
464     pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN]
465         = NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN;
466 
467     // Bake channel group error handlers into the channel
468     pKernelChannel->hErrorContext = pChannelGpfifoParams->hObjectError;
469     pKernelChannel->hEccErrorContext = pChannelGpfifoParams->hObjectEccError;
470 
471     if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT)
472     {
473         pKernelChannel->hErrorContext = (
474             pKernelChannel->pKernelChannelGroupApi->hErrorContext);
475     }
476     if (pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT)
477     {
478         pKernelChannel->hEccErrorContext = (
479             pKernelChannel->pKernelChannelGroupApi->hEccErrorContext);
480     }
481 
482     if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT)
483     {
484         pKernelChannel->errorContextType = ERROR_NOTIFIER_TYPE_NONE;
485     }
486     else if (!RMCFG_FEATURE_PLATFORM_GSP)
487     {
488         NV_ASSERT_OK(kchannelGetNotifierInfo(pGpu, pDevice,
489             pKernelChannel->hErrorContext,
490             &pKernelChannel->pErrContextMemDesc,
491             &pKernelChannel->errorContextType,
492             &pKernelChannel->errorContextOffset));
493         NV_ASSERT(pKernelChannel->errorContextType !=
494                   ERROR_NOTIFIER_TYPE_NONE);
495     }
496     if (pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT)
497     {
498         pKernelChannel->eccErrorContextType = ERROR_NOTIFIER_TYPE_NONE;
499     }
500     else if (!RMCFG_FEATURE_PLATFORM_GSP)
501     {
502         NV_ASSERT_OK(kchannelGetNotifierInfo(pGpu, pDevice,
503             pKernelChannel->hEccErrorContext,
504             &pKernelChannel->pEccErrContextMemDesc,
505             &pKernelChannel->eccErrorContextType,
506             &pKernelChannel->eccErrorContextOffset));
507         NV_ASSERT(pKernelChannel->eccErrorContextType !=
508                   ERROR_NOTIFIER_TYPE_NONE);
509     }
510 
511     if (IS_GSP_CLIENT(pGpu) || bFullSriov)
512     {
513         if (pKernelChannel->hErrorContext != NV01_NULL_OBJECT)
514         {
515             pChannelGpfifoParams->errorNotifierMem.base = (
516                 memdescGetPhysAddr(pKernelChannel->pErrContextMemDesc,
517                                    AT_GPU, 0) +
518                 pKernelChannel->errorContextOffset);
519             pChannelGpfifoParams->errorNotifierMem.size = (
520                 pKernelChannel->pErrContextMemDesc->Size -
521                 pKernelChannel->errorContextOffset);
522             pChannelGpfifoParams->errorNotifierMem.addressSpace =
523                 memdescGetAddressSpace(pKernelChannel->pErrContextMemDesc);
524             pChannelGpfifoParams->errorNotifierMem.cacheAttrib =
525                 memdescGetCpuCacheAttrib(pKernelChannel->pErrContextMemDesc);
526 
527         }
528         if (pKernelChannel->hEccErrorContext != NV01_NULL_OBJECT)
529         {
530             pChannelGpfifoParams->eccErrorNotifierMem.base = (
531                 memdescGetPhysAddr(pKernelChannel->pEccErrContextMemDesc,
532                                    AT_GPU, 0) +
533                 pKernelChannel->eccErrorContextOffset);
534             pChannelGpfifoParams->eccErrorNotifierMem.size = (
535                 pKernelChannel->pEccErrContextMemDesc->Size -
536                 pKernelChannel->eccErrorContextOffset);
537             pChannelGpfifoParams->eccErrorNotifierMem.addressSpace =
538                 memdescGetAddressSpace(pKernelChannel->pEccErrContextMemDesc);
539             pChannelGpfifoParams->eccErrorNotifierMem.cacheAttrib =
540                 memdescGetCpuCacheAttrib(pKernelChannel->pEccErrContextMemDesc);
541         }
542 
543         pChannelGpfifoParams->internalFlags = FLD_SET_DRF_NUM(
544             _KERNELCHANNEL_ALLOC, _INTERNALFLAGS, _ERROR_NOTIFIER_TYPE,
545             pKernelChannel->errorContextType,
546             pChannelGpfifoParams->internalFlags);
547         pChannelGpfifoParams->internalFlags = FLD_SET_DRF_NUM(
548             _KERNELCHANNEL_ALLOC, _INTERNALFLAGS, _ECC_ERROR_NOTIFIER_TYPE,
549             pKernelChannel->eccErrorContextType,
550             pChannelGpfifoParams->internalFlags);
551     }
552 
553     //
554     // The error context types should be set on all RM configurations
555     // (GSP/baremetal/CPU-GSP client)
556     //
557     NV_ASSERT(pKernelChannel->errorContextType != ERROR_NOTIFIER_TYPE_UNKNOWN);
558     NV_ASSERT(pKernelChannel->eccErrorContextType !=
559               ERROR_NOTIFIER_TYPE_UNKNOWN);
560 
561 
562     if ((pKernelChannelGroup->chanCount != 0) &&
563         (( pKernelChannelGroup->bLegacyMode && (hKernelCtxShare != NV01_NULL_OBJECT)) ||
564          (!pKernelChannelGroup->bLegacyMode && (hKernelCtxShare == NV01_NULL_OBJECT))))
565     {
        //
        // Check that this channel allocation's choice of specifying (or not)
        // a user-allocated context share matches that of any previous channel
        // allocations in this group.
        //
        // A channel group cannot have a mix of channels, some specifying a
        // user-allocated context share and some using an RM-allocated
        // context share.
        //
576         NV_PRINTF(LEVEL_NOTICE,
577             "All channels in a channel group must specify a CONTEXT_SHARE if any one of them specifies it\n");
578         status = NV_ERR_INVALID_ARGUMENT;
579         goto cleanup;
580     }
581 
582     // Get KernelCtxShare (supplied or legacy)
583     if (hKernelCtxShare != NV01_NULL_OBJECT)
584     {
585         // Get object pointers from supplied hKernelCtxShare.
586         NV_ASSERT_OK_OR_GOTO(status,
587             clientGetResourceRefByType(pRsClient,
588                                        hKernelCtxShare,
589                                        classId(KernelCtxShareApi),
590                                        &pKernelCtxShareRef),
591             cleanup);
592 
        //
        // If hKernelCtxShare is nonzero, the ChannelGroup is not internal
        // either, so the context share should have hParent (the TSG) as its
        // parent.
        //
597         NV_ASSERT_TRUE_OR_GOTO(status,
598             pKernelCtxShareRef->pParentRef != NULL &&
599                 pKernelCtxShareRef->pParentRef->hResource == hParent,
600             NV_ERR_INVALID_OBJECT_PARENT,
601             cleanup);
602     }
603     else
604     {
605         NvU32 subctxFlag;
606         NvHandle hLegacyKernelCtxShare;
607 
608         if (!pKernelChannelGroup->bLegacyMode)
609         {
610             //
611             // Set this ChannelGroup to legacy mode and get the KernelCtxShare
612             // from it.
613             //
614             NV_ASSERT_OK_OR_GOTO(status,
615                 kchangrpapiSetLegacyMode(pKernelChannelGroupApi,
616                                          pGpu, pKernelFifo, hClient),
617                 cleanup);
618         }
619 
620         subctxFlag = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_THREAD, flags);
621         hLegacyKernelCtxShare = (subctxFlag ==
622                            NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC) ?
623                               pKernelChannelGroupApi->hLegacykCtxShareSync :
624                               pKernelChannelGroupApi->hLegacykCtxShareAsync;
625 
626         NV_ASSERT_OK_OR_GOTO(status,
627             clientGetResourceRefByType(pRsClient,
628                                        hLegacyKernelCtxShare,
629                                        classId(KernelCtxShareApi),
630                                        &pKernelCtxShareRef),
631             cleanup);
632     }
633 
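    // Resolve the context share object; the channel's VA space comes from its share data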
634     pKernelChannel->pKernelCtxShareApi = dynamicCast(
635         pKernelCtxShareRef->pResource,
636         KernelCtxShareApi);
637     NV_ASSERT_TRUE_OR_GOTO(status,
638                            pKernelChannel->pKernelCtxShareApi != NULL,
639                            NV_ERR_INVALID_OBJECT,
640                            cleanup);
641     NV_ASSERT_TRUE_OR_GOTO(status,
642                            pKernelChannel->pKernelCtxShareApi->pShareData !=
643                                NULL,
644                            NV_ERR_INVALID_OBJECT,
645                            cleanup);
646     pKernelChannel->pVAS = pKernelChannel->pKernelCtxShareApi->pShareData->pVAS;
647     NV_ASSERT_TRUE_OR_GOTO(status,
648                            pKernelChannel->pVAS != NULL,
649                            NV_ERR_INVALID_OBJECT,
650                            cleanup);
651 
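    //
    // With per-runlist channel RAM, the channel's engine type must match its
    // parent TSG's engine type; validate the requested engine type (converting
    // it to a global engine type when MIG is in use) and then inherit the
    // TSG's engine type.
    //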
652     if (kfifoIsPerRunlistChramSupportedInHw(pKernelFifo))
653     {
654         // TSG should always have a valid engine Id.
655         NV_ASSERT_TRUE_OR_GOTO(status,
656             RM_ENGINE_TYPE_IS_VALID(pKernelChannelGroup->engineType),
657             NV_ERR_INVALID_STATE,
658             cleanup);
659 
660         if (NV2080_ENGINE_TYPE_IS_VALID(pChannelGpfifoParams->engineType))
661         {
662             globalRmEngineType = gpuGetRmEngineType(pChannelGpfifoParams->engineType);
663             // Convert it to global engine id if MIG is enabled
664             if (bMIGInUse)
665             {
666                 MIG_INSTANCE_REF ref;
667 
668                 NV_CHECK_OK_OR_GOTO(
669                     status,
670                     LEVEL_ERROR,
671                     kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
672                                                     pDevice, &ref),
673                     cleanup);
674 
675                 NV_CHECK_OK_OR_GOTO(
676                     status,
677                     LEVEL_ERROR,
678                     kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref,
679                                                       globalRmEngineType,
680                                                       &globalRmEngineType),
681                     cleanup);
682             }
683 
            // Return an error if the TSG engine ID does NOT match the channel engine ID
685             if (globalRmEngineType != pKernelChannelGroup->engineType)
686             {
687                 NV_PRINTF(LEVEL_ERROR,
688                     "Engine type of channel = 0x%x (0x%x) not compatible with engine type of TSG = 0x%x (0x%x)\n",
689                     gpuGetNv2080EngineType(pChannelGpfifoParams->engineType),
690                     pChannelGpfifoParams->engineType,
691                     gpuGetNv2080EngineType(pKernelChannelGroup->engineType),
692                     pKernelChannelGroup->engineType);
693 
694                 status = NV_ERR_INVALID_ARGUMENT;
695                 goto cleanup;
696             }
697         }
698 
699         // Assign the engine type from the parent TSG
700         pKernelChannel->engineType = pKernelChannelGroup->engineType;
701     }
702 
703     // Determine initial runlist ID (based on engine type if provided or inherited from TSG)
704     pKernelChannel->runlistId = kfifoGetDefaultRunlist_HAL(pGpu, pKernelFifo, pKernelChannel->engineType);
705 
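    //
    // Confidential Compute: for CC-secure channels, retrieve the client key
    // material bundle and return its IVs and HMAC nonce through the alloc
    // params.
    //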
706     pKernelChannel->bCCSecureChannel = FLD_TEST_DRF(OS04, _FLAGS, _CC_SECURE, _TRUE, flags);
707     if (pKernelChannel->bCCSecureChannel)
708     {
709         ConfidentialCompute* pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
710 
        // Return early if the GPU is not ready to accept work
712         if (pConfCompute && kchannelCheckIsUserMode(pKernelChannel)
713             && !confComputeAcceptClientRequest(pGpu, pConfCompute))
714         {
715             return NV_ERR_NOT_READY;
716         }
717 
718         status = kchannelRetrieveKmb_HAL(pGpu, pKernelChannel, ROTATE_IV_ALL_VALID,
719                                          NV_TRUE, &pKernelChannel->clientKmb);
720         NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
721 
722         portMemCopy(pChannelGpfifoParams->encryptIv,
723                     sizeof(pChannelGpfifoParams->encryptIv),
724                     pKernelChannel->clientKmb.encryptBundle.iv,
725                     sizeof(pKernelChannel->clientKmb.encryptBundle.iv));
726 
727         portMemCopy(pChannelGpfifoParams->decryptIv,
728                     sizeof(pChannelGpfifoParams->decryptIv),
729                     pKernelChannel->clientKmb.decryptBundle.iv,
730                     sizeof(pKernelChannel->clientKmb.decryptBundle.iv));
731 
732         portMemCopy(pChannelGpfifoParams->hmacNonce,
733                     sizeof(pChannelGpfifoParams->hmacNonce),
734                     pKernelChannel->clientKmb.hmacBundle.nonce,
735                     sizeof(pKernelChannel->clientKmb.hmacBundle.nonce));
736 
737     }
738 
739     // Set TLS state and BAR0 window if we are working with Gr
740     if (bMIGInUse && RM_ENGINE_TYPE_IS_GR(pKernelChannel->engineType))
741     {
742         NV_ASSERT_OK(kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
743                                                      pDevice, &pKernelChannel->partitionRef));
744     }
745 
    // Allocate the ChID (except legacy vGPU, which allocates the ChID on the host)
747     if (!IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
748     {
749         status = kchannelAllocHwID_HAL(pGpu, pKernelChannel, hClient,
750                                        flags, verifFlags2, chID);
751 
752         if (status != NV_OK)
753         {
754             NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id 0x%x for hClient 0x%x hKernelChannel 0x%x \n",
755                                    chID, hClient, pResourceRef->hResource);
756             DBG_BREAKPOINT();
757             goto cleanup;
758 
759         }
760 
761         chID = pKernelChannel->ChID;
762         bChidAllocated = NV_TRUE;
763     }
764 
    //
    // RPC-allocate the channel on the host in legacy vGPU / heavy SRIOV so
    // that the instmem details can be obtained from it.
    //
768     if (IS_VIRTUAL(pGpu) && (!bFullSriov))
769     {
770         NV_ASSERT_OK_OR_GOTO(status,
771                              _kchannelSendChannelAllocRpc(pKernelChannel,
772                                                           pChannelGpfifoParams,
773                                                           pKernelChannelGroup,
774                                                           bFullSriov),
775                              cleanup);
776         bRpcAllocated = NV_TRUE;
777     }
778 
    // Legacy vGPU: allocate the ChID that the host provided
780     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
781     {
782         chID = pKernelChannel->ChID;
783 
784         status = kchannelAllocHwID_HAL(pGpu, pKernelChannel, hClient,
785                                        flags, verifFlags2, chID);
786 
787         if (status != NV_OK)
788         {
789             NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id 0x%x for hClient 0x%x hKernelChannel 0x%x \n",
790                       chID, hClient, pResourceRef->hResource);
791             chID = ~0;
792             DBG_BREAKPOINT();
793             goto cleanup;
794         }
795 
796         bChidAllocated = NV_TRUE;
797     }
798 
799     //
800     // Do instmem setup here
801     // (Requires the channel to be created on the host if legacy VGPU / Heavy SRIOV.
802     // Does not require a Channel object.)
803     //
804     NV_ASSERT_OK_OR_GOTO(status,
805         _kchannelAllocOrDescribeInstMem(pKernelChannel, pChannelGpfifoParams),
806         cleanup);
807 
808     // Join the channel group here
809     NV_ASSERT_OK_OR_GOTO(status,
810         kchangrpAddChannel(pGpu, pKernelChannelGroup, pKernelChannel),
811         cleanup);
812     bAddedToGroup = NV_TRUE;
813 
814     // Assign to the same runlistId as the KernelChannelGroup if it's already determined
815     if (pKernelChannelGroup->bRunlistAssigned)
816     {
817         SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
818         {
819             NV_ASSERT_OK_OR_ELSE(status,
820                 kfifoRunlistSetId_HAL(pGpu,
821                                       GPU_GET_KERNEL_FIFO(pGpu),
822                                       pKernelChannel,
823                                       pKernelChannelGroup->runlistId),
824                 SLI_LOOP_GOTO(cleanup));
825         }
826         SLI_LOOP_END
    }
828 
829     // Allocate the physical channel
830     NV_ASSERT_OK_OR_GOTO(status,
831         kchannelAllocChannel_HAL(pKernelChannel, pChannelGpfifoParams),
832         cleanup);
833 
834     // Set up pNotifyActions
835     _kchannelSetupNotifyActions(pKernelChannel, pResourceRef->externalClassId);
836     bNotifyActionsSetup = NV_TRUE;
837 
838     // Initialize the userd length
839     if (!pKernelChannel->bClientAllocatedUserD)
840     {
841         NvU64 temp_offset;
842 
843         kchannelGetUserdInfo_HAL(pGpu,
844                                  pKernelChannel,
845                                  NULL,
846                                  &temp_offset,
847                                  &pKernelChannel->userdLength);
848     }
849     else
850     {
851         kfifoGetUserdSizeAlign_HAL(pKernelFifo, (NvU32*)&pKernelChannel->userdLength, NULL);
852     }
853 
854     // Set GPU accounting
855     if (RMCFG_MODULE_GPUACCT &&
856         pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON))
857     {
858         GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(SYS_GET_INSTANCE());
859 
860         gpuacctSetProcType(pGpuAcct,
861                            pGpu->gpuInstance,
862                            pRmClient->ProcID,
863                            pRmClient->SubProcessID,
864                            NV_GPUACCT_PROC_TYPE_GPU);
865     }
866 
867     //
868     // RPC to allocate the channel on GSPFW/host.
869     // (Requires a Channel object but only for hPhysChannel.)
870     //
871     if (IS_GSP_CLIENT(pGpu) || bFullSriov)
872     {
873         NV_ASSERT_OK_OR_GOTO(status,
874                              _kchannelSendChannelAllocRpc(pKernelChannel,
875                                                           pChannelGpfifoParams,
876                                                           pKernelChannelGroup,
877                                                           bFullSriov),
878                              cleanup);
879         bRpcAllocated = NV_TRUE;
880     }
881 
882     if (kfifoIsPerRunlistChramEnabled(pKernelFifo) ||
883         (gpuIsCCorApmFeatureEnabled(pGpu) || bMIGInUse))
884     {
885         SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
886         {
887             KernelFifo *pTempKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
            //
            // If we have a separate channel RAM for each runlist, then we need to set
            // the runlistId, as we already picked a ChID from channel RAM based on this
            // runlistId. This also ensures the runlistId is not overridden later to a
            // different value.
            //
893             NV_ASSERT_OK_OR_GOTO(status,
894                 kfifoRunlistSetId_HAL(pGpu, pTempKernelFifo, pKernelChannel, pKernelChannel->runlistId),
895                 cleanup);
896         }
897         SLI_LOOP_END;
898     }
899 
    //
    // If we allocated this group, we want to free the KernelChannel first,
    // so we should set the KernelChannel as its dependent.
    //
904     if (bTsgAllocated)
905     {
906         NV_ASSERT_OK_OR_GOTO(status, refAddDependant(pChanGrpRef, pResourceRef), cleanup);
907     }
908 
909     // We depend on VASpace if it was provided
910     if (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT)
911     {
912         NV_ASSERT_OK_OR_GOTO(status,
913             clientGetResourceRef(pRsClient,
914                                  pChannelGpfifoParams->hVASpace,
915                                  &pVASpaceRef),
916             cleanup);
917         NV_ASSERT_TRUE_OR_GOTO(status,
918                                pVASpaceRef != NULL,
919                                NV_ERR_INVALID_OBJECT,
920                                cleanup);
921         NV_ASSERT_OK_OR_GOTO(status,
922                              refAddDependant(pVASpaceRef, pResourceRef),
923                              cleanup);
924     }
925 
    //
    // If a KernelCtxShare was provided, we depend on it (and if we created it,
    // then we also want the KernelChannel to be freed first).
    //
930     if (pKernelChannel->pKernelCtxShareApi != NULL)
931     {
932         NV_ASSERT_OK_OR_GOTO(
933             status,
934             refAddDependant(RES_GET_REF(pKernelChannel->pKernelCtxShareApi), pResourceRef),
935             cleanup);
936     }
937 
938     pKernelChannel->hKernelGraphicsContext = pKernelChannelGroupApi->hKernelGraphicsContext;
939     if (pKernelChannel->hKernelGraphicsContext != NV01_NULL_OBJECT)
940     {
941         NV_ASSERT_OK_OR_GOTO(status,
942             kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext),
943             cleanup);
944 
945         NV_ASSERT_OK_OR_GOTO(status,
946             refAddDependant(RES_GET_REF(pKernelGraphicsContext), pResourceRef),
947             cleanup);
948     }
949 
950     if (pChannelGpfifoParams->hObjectError != 0)
951     {
952         NV_ASSERT_OK_OR_GOTO(
953             status,
954             _kchannelNotifyOfChid(pGpu, pKernelChannel, pRsClient),
955             cleanup);
956     }
957 
958     // Cache the hVASpace for this channel in the KernelChannel object
959     pKernelChannel->hVASpace = pKernelChannel->pKernelCtxShareApi->hVASpace;
960 
961 cleanup:
962     if (bLockAcquired)
963         rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
964 
965     // These fields are only needed internally; clear them here
966     pChannelGpfifoParams->hPhysChannelGroup = 0;
967     pChannelGpfifoParams->internalFlags = 0;
968     portMemSet(&pChannelGpfifoParams->errorNotifierMem, 0,
969                sizeof pChannelGpfifoParams->errorNotifierMem);
970     portMemSet(&pChannelGpfifoParams->eccErrorNotifierMem, 0,
971                sizeof pChannelGpfifoParams->eccErrorNotifierMem);
972     pChannelGpfifoParams->ProcessID = 0;
973     pChannelGpfifoParams->SubProcessID = 0;
974     portMemSet(pChannelGpfifoParams->encryptIv, 0, sizeof(pChannelGpfifoParams->encryptIv));
975     portMemSet(pChannelGpfifoParams->decryptIv, 0, sizeof(pChannelGpfifoParams->decryptIv));
976     portMemSet(pChannelGpfifoParams->hmacNonce, 0, sizeof(pChannelGpfifoParams->hmacNonce));
977 
978     // Free the allocated resources if there was an error
979     if (status != NV_OK)
980     {
981         if (bNotifyActionsSetup)
982         {
983             _kchannelCleanupNotifyActions(pKernelChannel);
984         }
985 
986         // Remove any dependencies we may have added; we don't want our destructor called when freeing anything below
987         if (pKernelGraphicsContext != NULL)
988         {
989             refRemoveDependant(RES_GET_REF(pKernelGraphicsContext), pResourceRef);
990         }
991         if (pKernelChannel->pKernelCtxShareApi != NULL)
992         {
993             refRemoveDependant(RES_GET_REF(pKernelChannel->pKernelCtxShareApi), pResourceRef);
994         }
995         if (pVASpaceRef != NULL)
996         {
997             refRemoveDependant(pVASpaceRef, pResourceRef);
998         }
999         if (bTsgAllocated)
1000         {
1001             refRemoveDependant(pChanGrpRef, pResourceRef);
1002         }
1003 
1004         if (bAddedToGroup)
1005         {
1006             kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel);
1007         }
1008 
1009         if (RMCFG_FEATURE_PLATFORM_GSP)
1010         {
1011             // Free memdescs created during construct on GSP path.
1012             memdescFree(pKernelChannel->pErrContextMemDesc);
1013             memdescDestroy(pKernelChannel->pErrContextMemDesc);
1014             memdescFree(pKernelChannel->pEccErrContextMemDesc);
1015             memdescDestroy(pKernelChannel->pEccErrContextMemDesc);
1016         }
1017         pKernelChannel->pErrContextMemDesc = NULL;
1018         pKernelChannel->pEccErrContextMemDesc = NULL;
1019 
1020         if (bRpcAllocated)
1021         {
1022             NV_RM_RPC_FREE_ON_ERROR(pGpu, hClient, hParent, RES_GET_HANDLE(pKernelChannel));
1023         }
1024 
1025         _kchannelFreeHalData(pGpu, pKernelChannel);
1026 
1027         if (pChannelBufPool != NULL)
1028         {
1029             ctxBufPoolRelease(pChannelBufPool);
1030         }
1031 
1032         if (bTsgAllocated)
1033         {
1034             pRmApi->Free(pRmApi, hClient, hChanGrp);
1035         }
1036 
1037         if (bChidAllocated)
1038         {
1039             kchannelFreeHwID_HAL(pGpu, pKernelChannel);
1040         }
1041     }
1042 
1043     return status;
1044 }
1045 
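/*!
 * @brief Destruct a KernelChannel, undoing the work done during construct.
 *
 * Frees the GSP-created notifier memdescs, RPC-frees the channel on GSP/host,
 * detaches the graphics context, removes the channel from its channel group
 * (freeing the group if RM allocated it), and releases the HW channel ID.
 */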
1046 void
1047 kchannelDestruct_IMPL
1048 (
1049     KernelChannel *pKernelChannel
1050 )
1051 {
1052     CALL_CONTEXT                *pCallContext;
1053     RS_RES_FREE_PARAMS_INTERNAL *pParams;
1054     NvHandle                     hClient;
1055     RM_API                      *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
1056     OBJGPU                      *pGpu   = GPU_RES_GET_GPU(pKernelChannel);
1057     NV_STATUS                    status = NV_OK;
1058     KernelChannelGroup          *pKernelChannelGroup = NULL;
1059 
1060     resGetFreeParams(staticCast(pKernelChannel, RsResource), &pCallContext, &pParams);
1061     hClient = pCallContext->pClient->hClient;
1062 
1063     if (RMCFG_FEATURE_PLATFORM_GSP)
1064     {
1065         // Free memdescs created during construct on GSP path.
1066         memdescFree(pKernelChannel->pErrContextMemDesc);
1067         memdescDestroy(pKernelChannel->pErrContextMemDesc);
1068         memdescFree(pKernelChannel->pEccErrContextMemDesc);
1069         memdescDestroy(pKernelChannel->pEccErrContextMemDesc);
1070     }
1071     pKernelChannel->pErrContextMemDesc = NULL;
1072     pKernelChannel->pEccErrContextMemDesc = NULL;
1073 
1074     // GSP and vGPU support
1075     if ((IS_GSP_CLIENT(pGpu) || IS_VIRTUAL(pGpu)))
1076     {
        //
        // GSP:
        //
        // The method buffer is allocated by CPU-RM during TSG construct,
        // but is mapped to invisible BAR2 in GSP during channel construct.
        // During free, the BAR2 mapping must first be unmapped in GSP,
        // and then the method buffer is freed on the CPU.
        // This RPC call is especially required for the internal channel case,
        // where channelDestruct calls free for its TSG.
        //
1087         NV_RM_RPC_FREE(pGpu,
1088                        hClient,
1089                        RES_GET_PARENT_HANDLE(pKernelChannel),
1090                        RES_GET_HANDLE(pKernelChannel),
1091                        status);
1092     }
1093 
1094     {
1095         KernelGraphicsContext *pKernelGraphicsContext;
1096 
1097         // Perform GR ctx cleanup tasks on channel destruction
1098         if ((kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext) == NV_OK) &&
1099             kgrctxIsValid(pGpu, pKernelGraphicsContext, pKernelChannel))
1100         {
1101             shrkgrctxDetach(pGpu, pKernelGraphicsContext->pShared, pKernelGraphicsContext, pKernelChannel);
1102         }
1103     }
1104 
1105     _kchannelCleanupNotifyActions(pKernelChannel);
1106 
1107     _kchannelFreeHalData(pGpu, pKernelChannel);
1108 
1109     NV_ASSERT(pKernelChannel->pKernelChannelGroupApi != NULL);
1110 
1111     pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup;
1112 
1113     NV_ASSERT(pKernelChannelGroup != NULL);
1114 
1115     // remove channel from the group
1116     kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel);
1117 
1118     // Free the Ctx Buf pool
1119     if (pKernelChannelGroup->pChannelBufPool != NULL)
1120     {
1121         ctxBufPoolRelease(pKernelChannelGroup->pChannelBufPool);
1122     }
1123 
1124     // Free the channel group, if we alloced it
1125     if (pKernelChannelGroup->bAllocatedByRm)
1126     {
1127         pRmApi->Free(pRmApi, hClient,
1128                      RES_GET_HANDLE(pKernelChannel->pKernelChannelGroupApi));
1129         pKernelChannelGroup = NULL;
1130         pKernelChannel->pKernelChannelGroupApi = NULL;
1131     }
1132 
1133     kchannelFreeHwID_HAL(pGpu, pKernelChannel);
1134 
1135     NV_ASSERT(pKernelChannel->refCount == 1);
1136 }
1137 
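/*!
 * @brief Map the channel's RM-allocated USERD into the caller's address space.
 *
 * Channels with client-allocated USERD cannot be mapped through this path.
 */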
1138 NV_STATUS
1139 kchannelMap_IMPL
1140 (
1141     KernelChannel     *pKernelChannel,
1142     CALL_CONTEXT      *pCallContext,
1143     RS_CPU_MAP_PARAMS *pParams,
1144     RsCpuMapping      *pCpuMapping
1145 )
1146 {
1147     OBJGPU *pGpu;
1148     NV_STATUS rmStatus;
1149     RsClient *pRsClient = pCallContext->pClient;
1150     RmClient *pRmClient = dynamicCast(pRsClient, RmClient);
1151     GpuResource *pGpuResource;
1152 
1153     NV_ASSERT_OR_RETURN(!pKernelChannel->bClientAllocatedUserD, NV_ERR_INVALID_REQUEST);
1154 
1155     rmStatus = gpuresGetByDeviceOrSubdeviceHandle(pRsClient,
1156                                                   pCpuMapping->pContextRef->hResource,
1157                                                   &pGpuResource);
1158     if (rmStatus != NV_OK)
1159         return rmStatus;
1160 
1161     pGpu = GPU_RES_GET_GPU(pGpuResource);
1162     GPU_RES_SET_THREAD_BC_STATE(pGpuResource);
1163 
    // If the FIFO mapping flags are default, validate and use the offset/length passed in
1165     if (DRF_VAL(OS33, _FLAGS, _FIFO_MAPPING, pCpuMapping->flags) == NVOS33_FLAGS_FIFO_MAPPING_DEFAULT)
1166     {
1167         // Validate the offset and limit passed in.
1168         if (pCpuMapping->offset >= pKernelChannel->userdLength)
1169             return NV_ERR_INVALID_BASE;
1170         if (pCpuMapping->length == 0)
1171             return NV_ERR_INVALID_LIMIT;
1172         if (pCpuMapping->offset + pCpuMapping->length > pKernelChannel->userdLength)
1173             return NV_ERR_INVALID_LIMIT;
1174     }
1175     else
1176     {
1177         pCpuMapping->offset = 0x0;
1178         pCpuMapping->length = pKernelChannel->userdLength;
1179     }
1180 
1181     rmStatus = kchannelMapUserD(pGpu, pKernelChannel,
1182                                 rmclientGetCachedPrivilege(pRmClient),
1183                                 pCpuMapping->offset,
1184                                 pCpuMapping->pPrivate->protect,
1185                                 &pCpuMapping->pLinearAddress,
1186                                 &(pCpuMapping->pPrivate->pPriv));
1187 
1188     if (rmStatus != NV_OK)
1189         return rmStatus;
1190 
1191     // Save off the mapping
1192     _kchannelUpdateFifoMapping(pKernelChannel,
1193                                pGpu,
1194                                (pRsClient->type == CLIENT_TYPE_KERNEL),
1195                                pCpuMapping->pLinearAddress,
1196                                pCpuMapping->pPrivate->pPriv,
1197                                pCpuMapping->length,
1198                                pCpuMapping->flags,
1199                                pCpuMapping->pContextRef->hResource,
1200                                pCpuMapping);
1201 
1202     return NV_OK;
1203 }
1204 
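/*!
 * @brief Unmap a USERD CPU mapping previously created through kchannelMap.
 */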
1205 NV_STATUS
1206 kchannelUnmap_IMPL
1207 (
1208     KernelChannel *pKernelChannel,
1209     CALL_CONTEXT  *pCallContext,
1210     RsCpuMapping  *pCpuMapping
1211 )
1212 {
1213     OBJGPU   *pGpu;
1214     RsClient *pRsClient = pCallContext->pClient;
1215     RmClient *pRmClient = dynamicCast(pRsClient, RmClient);
1216 
1217     if (pKernelChannel->bClientAllocatedUserD)
1218     {
1219         DBG_BREAKPOINT();
1220         return NV_ERR_INVALID_REQUEST;
1221     }
1222 
1223     pGpu = pCpuMapping->pPrivate->pGpu;
1224 
1225     kchannelUnmapUserD(pGpu,
1226                        pKernelChannel,
1227                        rmclientGetCachedPrivilege(pRmClient),
1228                        &pCpuMapping->pLinearAddress,
1229                        &pCpuMapping->pPrivate->pPriv);
1230 
1231     return NV_OK;
1232 }
1233 
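/*!
 * @brief Report the address space in which this channel's USERD resides.
 */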
1234 NV_STATUS
1235 kchannelGetMapAddrSpace_IMPL
1236 (
1237     KernelChannel    *pKernelChannel,
1238     CALL_CONTEXT     *pCallContext,
1239     NvU32             mapFlags,
1240     NV_ADDRESS_SPACE *pAddrSpace
1241 )
1242 {
1243     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1244     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1245     NvU32 userdAperture;
1246     NvU32 userdAttribute;
1247 
1248     NV_ASSERT_OK_OR_RETURN(kfifoGetUserdLocation_HAL(pKernelFifo,
1249                                                      &userdAperture,
1250                                                      &userdAttribute));
1251     if (pAddrSpace)
1252         *pAddrSpace = userdAperture;
1253 
1254     return NV_OK;
1255 }
1256 
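/*!
 * @brief Supply the USERD memory descriptor used for DMA mappings of this channel.
 *
 * Only broadcast mappings are supported, and only when the FIFO supports
 * map-DMA of USERD.
 */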
1257 NV_STATUS
1258 kchannelGetMemInterMapParams_IMPL
1259 (
1260     KernelChannel              *pKernelChannel,
1261     RMRES_MEM_INTER_MAP_PARAMS *pParams
1262 )
1263 {
1264     OBJGPU            *pGpu = pParams->pGpu;
1265     KernelFifo        *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1266     MEMORY_DESCRIPTOR *pSrcMemDesc = NULL;
1267     NV_STATUS          status;
1268 
1269     if (pParams->bSubdeviceHandleProvided)
1270     {
1271         NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings of USERD not supported.\n");
1272         return NV_ERR_NOT_SUPPORTED;
1273     }
1274 
1275     if (!kfifoIsUserdMapDmaSupported(pKernelFifo))
1276         return NV_ERR_INVALID_OBJECT_HANDLE;
1277 
1278     status = _kchannelGetUserMemDesc(pGpu, pKernelChannel, &pSrcMemDesc);
1279     if (status != NV_OK)
1280         return status;
1281 
1282     pParams->pSrcMemDesc = pSrcMemDesc;
1283     pParams->pSrcGpu = pSrcMemDesc->pGpu;
1284 
1285     return NV_OK;
1286 }
1287 
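/*!
 * @brief Validate a DMA unmap request for this channel (broadcast only, and
 * only when the FIFO supports map-DMA of USERD).
 */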
1288 NV_STATUS
1289 kchannelCheckMemInterUnmap_IMPL
1290 (
1291     KernelChannel *pKernelChannel,
1292     NvBool         bSubdeviceHandleProvided
1293 )
1294 {
1295     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1296     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1297 
1298     if (bSubdeviceHandleProvided)
1299     {
1300         NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings of channels not supported.\n");
1301         return NV_ERR_NOT_SUPPORTED;
1302     }
1303 
1305     if (!kfifoIsUserdMapDmaSupported(pKernelFifo))
1306         return NV_ERR_INVALID_OBJECT_HANDLE;
1307 
1308     return NV_OK;
1309 }
1310 
1311 /**
1312  * @brief Creates an iterator to iterate all channels in a given scope.
1313  *
 * Iterates over all channels under a given scope.  For a device it will loop
 * through all channels that are descendants of the device (including children
 * of channel groups).  For a channel group it will only iterate over the
 * channels within that group.  Ordering is ensured for channel groups: all
 * channels within a channel group are iterated together before moving on to
 * another channel group or channel.
1320  *
1321  * @param[in]  pClient
1322  * @param[in]  pScopeRef The resource that defines the scope of iteration
1323  */
1324 RS_ORDERED_ITERATOR
1325 kchannelGetIter
1326 (
1327     RsClient      *pClient,
1328     RsResourceRef *pScopeRef
1329 )
1330 {
1331     return clientRefOrderedIter(pClient, pScopeRef, classId(KernelChannel), NV_TRUE);
1332 }
1333 
1334 /**
 * @brief Given a client, a parent handle, and a KernelChannel handle,
 * retrieves the KernelChannel object
1337  *
1338  * @param[in]  hClient
1339  * @param[in]  hParent              Device or Channel Group parent
1340  * @param[in]  hKernelChannel
1341  * @param[out] ppKernelChannel      Valid iff NV_OK is returned.
1342  *
1343  * @return  NV_OK if successful, appropriate error otherwise
1344  */
1345 NV_STATUS
1346 CliGetKernelChannelWithDevice
1347 (
1348     RsClient       *pClient,
1349     NvHandle        hParent,
1350     NvHandle        hKernelChannel,
1351     KernelChannel **ppKernelChannel
1352 )
1353 {
1354     RsResourceRef *pParentRef;
1355     RsResourceRef *pResourceRef;
1356     KernelChannel *pKernelChannel;
1357 
1358     if (ppKernelChannel == NULL)
1359         return NV_ERR_INVALID_ARGUMENT;
1360 
1361     *ppKernelChannel = NULL;
1362 
1363     NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pClient, hKernelChannel, &pResourceRef));
1364 
1365     pKernelChannel = dynamicCast(pResourceRef->pResource, KernelChannel);
1366     NV_CHECK_OR_RETURN(LEVEL_INFO, pKernelChannel != NULL, NV_ERR_OBJECT_NOT_FOUND);
1367 
1368     pParentRef = pResourceRef->pParentRef;
1369     NV_CHECK_OR_RETURN(LEVEL_INFO, pParentRef != NULL, NV_ERR_OBJECT_NOT_FOUND);
1370 
    //
    // Check that the parent matches the requested handle.  hParent may be the
    // channel's immediate parent (a Device or a ChannelGroup), or it may be
    // the owning Device even when the immediate parent is a ChannelGroup.
    //
1376     NV_CHECK_OR_RETURN(LEVEL_INFO, (pParentRef->hResource == hParent) ||
1377                      (RES_GET_HANDLE(GPU_RES_GET_DEVICE(pKernelChannel)) == hParent),
1378                          NV_ERR_OBJECT_NOT_FOUND);
1379 
1380     *ppKernelChannel = pKernelChannel;
1381     return NV_OK;
1382 } // end of CliGetKernelChannelWithDevice()
1383 
1384 
1385 /**
 * @brief Given a classNum, this routine returns various SDK-specific values
 * for that class.
1388  *
1389  * @param[in]   classNum
1390  * @param[out]  pClassInfo
1391  */
1392 void
1393 CliGetChannelClassInfo
1394 (
1395     NvU32 classNum,
1396     CLI_CHANNEL_CLASS_INFO *pClassInfo
1397 )
1398 {
1399     switch (classNum)
1400     {
1401         case GF100_CHANNEL_GPFIFO:
1402         {
1403             pClassInfo->notifiersMaxCount  = NV906F_NOTIFIERS_MAXCOUNT;
1404             pClassInfo->eventActionDisable = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1405             pClassInfo->eventActionSingle  = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1406             pClassInfo->eventActionRepeat  = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1407             pClassInfo->rcNotifierIndex    = NV906F_NOTIFIERS_RC;
1408             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1409             break;
1410         }
1411         case KEPLER_CHANNEL_GPFIFO_A:
1412         {
1413             pClassInfo->notifiersMaxCount  = NVA06F_NOTIFIERS_MAXCOUNT;
1414             pClassInfo->eventActionDisable = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1415             pClassInfo->eventActionSingle  = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1416             pClassInfo->eventActionRepeat  = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1417             pClassInfo->rcNotifierIndex    = NVA06F_NOTIFIERS_RC;
1418             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1419             break;
1420         }
1421         case KEPLER_CHANNEL_GPFIFO_B:
1422         {
1423             pClassInfo->notifiersMaxCount  = NVA16F_NOTIFIERS_MAXCOUNT;
1424             pClassInfo->eventActionDisable = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1425             pClassInfo->eventActionSingle  = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1426             pClassInfo->eventActionRepeat  = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1427             pClassInfo->rcNotifierIndex    = NVA16F_NOTIFIERS_RC;
1428             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1429             break;
1430         }
1431         case MAXWELL_CHANNEL_GPFIFO_A:
1432         {
1433             pClassInfo->notifiersMaxCount  = NVB06F_NOTIFIERS_MAXCOUNT;
1434             pClassInfo->eventActionDisable = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1435             pClassInfo->eventActionSingle  = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1436             pClassInfo->eventActionRepeat  = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1437             pClassInfo->rcNotifierIndex    = NVB06F_NOTIFIERS_RC;
1438             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1439             break;
1440         }
1441         case PASCAL_CHANNEL_GPFIFO_A:
1442         {
1443             pClassInfo->notifiersMaxCount  = NVC06F_NOTIFIERS_MAXCOUNT;
1444             pClassInfo->eventActionDisable = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1445             pClassInfo->eventActionSingle  = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1446             pClassInfo->eventActionRepeat  = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1447             pClassInfo->rcNotifierIndex    = NVC06F_NOTIFIERS_RC;
1448             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1449             break;
1450         }
1451         case VOLTA_CHANNEL_GPFIFO_A:
1452         {
1453             pClassInfo->notifiersMaxCount  = NVC36F_NOTIFIERS_MAXCOUNT;
1454             pClassInfo->eventActionDisable = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1455             pClassInfo->eventActionSingle  = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1456             pClassInfo->eventActionRepeat  = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1457             pClassInfo->rcNotifierIndex    = NVC36F_NOTIFIERS_RC;
1458             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1459             break;
1460         }
1461         case TURING_CHANNEL_GPFIFO_A:
1462         {
1463             pClassInfo->notifiersMaxCount  = NVC46F_NOTIFIERS_MAXCOUNT;
1464             pClassInfo->eventActionDisable = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1465             pClassInfo->eventActionSingle  = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1466             pClassInfo->eventActionRepeat  = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1467             pClassInfo->rcNotifierIndex    = NVC46F_NOTIFIERS_RC;
1468             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1469             break;
1470         }
1471         case AMPERE_CHANNEL_GPFIFO_A:
1472         {
1473             pClassInfo->notifiersMaxCount  = NVC56F_NOTIFIERS_MAXCOUNT;
1474             pClassInfo->eventActionDisable = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1475             pClassInfo->eventActionSingle  = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1476             pClassInfo->eventActionRepeat  = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1477             pClassInfo->rcNotifierIndex    = NVC56F_NOTIFIERS_RC;
1478             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1479             break;
1480         }
1481         case HOPPER_CHANNEL_GPFIFO_A:
1482         {
1483             pClassInfo->notifiersMaxCount  = NVC86F_NOTIFIERS_MAXCOUNT;
1484             pClassInfo->eventActionDisable = NVC86F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1485             pClassInfo->eventActionSingle  = NVC86F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1486             pClassInfo->eventActionRepeat  = NVC86F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1487             pClassInfo->rcNotifierIndex    = NVC86F_NOTIFIERS_RC;
1488             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1489             break;
1490         }
1491 
        //
        // This class is internal only and does not make sense here; call with
        // the class type seen by the client, not the internal type.
        //
1496         case PHYSICAL_CHANNEL_GPFIFO:
1497             NV_PRINTF(LEVEL_ERROR,
1498                       "Invalid class for CliGetChannelClassInfo\n");
1499 
1500         default:
1501         {
1502             pClassInfo->notifiersMaxCount  = 0;
1503             pClassInfo->eventActionDisable = 0;
1504             pClassInfo->eventActionSingle  = 0;
1505             pClassInfo->eventActionRepeat  = 0;
1506             pClassInfo->rcNotifierIndex    = 0;
1507             pClassInfo->classType          = CHANNEL_CLASS_TYPE_DMA;
1508             break;
1509         }
1510     }
1511 }
1512 
1513 
1514 /**
1515  * @brief Returns the next KernelChannel from the iterator.
1516  *
1517  * Iterates over runlist IDs and ChIDs and returns the next KernelChannel found
1518  * on the heap, if any.
1519  *
 * (An error is returned if ppKernelChannel is NULL; *ppKernelChannel is
 * guaranteed to be non-NULL when NV_OK is returned.)
1521  *
1522  * @param[in] pGpu
1523  * @param[in] pIt                   the channel iterator
1524  * @param[out] ppKernelChannel      returns a KernelChannel *
1525  *
 * @return NV_OK if a channel was found and returned, error otherwise
1527  */
1528 NV_STATUS kchannelGetNextKernelChannel
1529 (
1530     OBJGPU              *pGpu,
1531     CHANNEL_ITERATOR    *pIt,
1532     KernelChannel      **ppKernelChannel
1533 )
1534 {
1535     KernelChannel *pKernelChannel;
1536     KernelFifo    *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1537 
1538     if (ppKernelChannel == NULL)
1539         return NV_ERR_INVALID_ARGUMENT;
1540 
1541     *ppKernelChannel = NULL;
1542 
1543     while (pIt->runlistId < pIt->numRunlists)
1544     {
1545         CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, pIt->runlistId);
1546 
1547         if (pChidMgr == NULL)
1548         {
1549             pIt->runlistId++;
1550             continue;
1551         }
1552 
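        // Walk every channel ID managed by this runlist's CHID manager.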
1553         pIt->numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr);
1554         while (pIt->physicalChannelID < pIt->numChannels)
1555         {
1556             pKernelChannel = kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo,
1557                 pChidMgr, pIt->physicalChannelID);
1558             pIt->physicalChannelID++;
1559 
1560             //
1561             // This iterator can be used during an interrupt, when a KernelChannel may
1562             // be in the process of being destroyed. Don't return it if so.
1563             //
1564             if (pKernelChannel == NULL)
1565                 continue;
1566             if (!kchannelIsValid_HAL(pKernelChannel))
1567                 continue;
1568 
1569             *ppKernelChannel = pKernelChannel;
1570             return NV_OK;
1571         }
1572 
1573         pIt->runlistId++;
1574         // Reset channel index to 0 for next runlist
1575         pIt->physicalChannelID = 0;
1576     }
1577 
1578     return NV_ERR_OBJECT_NOT_FOUND;
1579 }
1580 
1581 /**
1582  * @brief Finds the corresponding KernelChannel given client object and channel handle
1583  *
1584  * Looks in client object store for the channel handle.  Scales with total
1585  * number of registered objects in the client, not just the number of channels.
1586  *
1587  * @param[in]  pClient
 * @param[in]  hKernelChannel a KernelChannel handle
1589  * @param[out] ppKernelChannel
1590  *
1591  * @return NV_STATUS
1592  */
1593 NV_STATUS
1594 CliGetKernelChannel
1595 (
1596     RsClient       *pClient,
1597     NvHandle        hKernelChannel,
1598     KernelChannel **ppKernelChannel
1599 )
1600 {
1601     NV_STATUS      status;
1602     RsResourceRef *pResourceRef;
1603 
1604     *ppKernelChannel = NULL;
1605 
1606     status = clientGetResourceRef(pClient, hKernelChannel, &pResourceRef);
1607     if (status != NV_OK)
1608     {
1609         return status;
1610     }
1611 
1612     *ppKernelChannel = dynamicCast(pResourceRef->pResource, KernelChannel);
1613     NV_CHECK_OR_RETURN(LEVEL_INFO,
1614                        *ppKernelChannel != NULL,
1615                        NV_ERR_INVALID_CHANNEL);
1616     return NV_OK;
1617 }
1618 
1619 /*!
1620  * @brief Notify client that channel is stopped.
1621  *
 * @param[in] pKernelChannel
1623  */
1624 NV_STATUS
1625 kchannelNotifyRc_IMPL
1626 (
1627     KernelChannel *pKernelChannel
1628 )
1629 {
1630     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1631     RM_ENGINE_TYPE rmEngineType = RM_ENGINE_TYPE_NULL;
1632     NV_STATUS rmStatus = NV_OK;
1633 
1634     if (IS_GFID_VF(kchannelGetGfid(pKernelChannel)))
1635     {
1636         NV_PRINTF(LEVEL_INFO, "Notification for channel 0x%x stop is already performed on guest-RM\n",
1637                   kchannelGetDebugTag(pKernelChannel));
1638         return NV_OK;
1639     }
1640 
1641     if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT &&
1642         pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT)
1643     {
1644         NV_PRINTF(LEVEL_WARNING, "Channel 0x%x has no notifier set\n",
1645                   kchannelGetDebugTag(pKernelChannel));
1646         return NV_OK;
1647     }
1648 
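    // Report the channel's engine type if it is known; otherwise leave RM_ENGINE_TYPE_NULL.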
1649     if (RM_ENGINE_TYPE_IS_VALID(kchannelGetEngineType(pKernelChannel)))
1650     {
1651         rmEngineType = kchannelGetEngineType(pKernelChannel);
1652     }
1653     rmStatus = krcErrorSetNotifier(pGpu, GPU_GET_KERNEL_RC(pGpu),
1654                                    pKernelChannel,
1655                                    ROBUST_CHANNEL_PREEMPTIVE_REMOVAL,
1656                                    rmEngineType,
1657                                    RC_NOTIFIER_SCOPE_CHANNEL);
1658     if (rmStatus != NV_OK)
1659     {
1660         NV_PRINTF(LEVEL_ERROR,
1661             "Failed to set error notifier for channel 0x%x with error 0x%x.\n",
1662             kchannelGetDebugTag(pKernelChannel), rmStatus);
1663     }
1664     return rmStatus;
1665 }
1666 
1667 /**
 * @brief Writes the notifier specified by index
1669  *
1670  * @param[in] pKernelChannel
1671  * @param[in] notifyIndex
1672  * @param[in] pNotifyParams
 * @param[in] notifyParamsSize
1674  */
1675 void kchannelNotifyGeneric_IMPL
1676 (
1677     KernelChannel *pKernelChannel,
1678     NvU32          notifyIndex,
1679     void          *pNotifyParams,
1680     NvU32          notifyParamsSize
1681 )
1682 {
1683     OBJGPU                 *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1684     ContextDma             *pContextDma;
1685     EVENTNOTIFICATION      *pEventNotification;
1686     CLI_CHANNEL_CLASS_INFO  classInfo;
1687 
1688     CliGetChannelClassInfo(RES_GET_EXT_CLASS_ID(pKernelChannel), &classInfo);
1689 
1690     // validate notifyIndex
1691     NV_CHECK_OR_RETURN_VOID(LEVEL_INFO, notifyIndex < classInfo.notifiersMaxCount);
1692 
1693     // handle notification if client wants it
1694     if (pKernelChannel->pNotifyActions[notifyIndex] != classInfo.eventActionDisable)
1695     {
1696         // get notifier context dma for the channel
1697         if (ctxdmaGetByHandle(RES_GET_CLIENT(pKernelChannel),
1698                               pKernelChannel->hErrorContext,
1699                               &pContextDma) == NV_OK)
1700         {
1701             // make sure it's big enough
1702             if (pContextDma->Limit >=
1703                 ((classInfo.notifiersMaxCount * sizeof (NvNotification)) - 1))
1704             {
1705                 // finally, write out the notifier
1706                 notifyFillNotifierArray(pGpu, pContextDma,
1707                                         0x0, 0x0, 0x0,
1708                                         notifyIndex);
1709             }
1710         }
1711     }
1712 
1713     // handle event if client wants it
1714     pEventNotification = inotifyGetNotificationList(staticCast(pKernelChannel, INotifier));
1715     if (pEventNotification != NULL)
1716     {
1717         // ping any events on the list of type notifyIndex
1718         osEventNotification(pGpu, pEventNotification, notifyIndex, pNotifyParams, notifyParamsSize);
1719     }
1720 
1721     // reset if single shot notify action
1722     if (pKernelChannel->pNotifyActions[notifyIndex] == classInfo.eventActionSingle)
1723         pKernelChannel->pNotifyActions[notifyIndex] = classInfo.eventActionDisable;
1724 
1725     return;
1726 }
1727 
1728 /*!
1729  * @brief Stop channel and notify client
1730  *
 * @param[in] pKernelChannel
1732  * @param[in] pStopChannelParams
1733  */
1734 NV_STATUS
1735 kchannelCtrlCmdStopChannel_IMPL
1736 (
1737     KernelChannel *pKernelChannel,
1738     NVA06F_CTRL_STOP_CHANNEL_PARAMS *pStopChannelParams
1739 )
1740 {
1741     NV_STATUS     rmStatus      = NV_OK;
1742     OBJGPU       *pGpu          = GPU_RES_GET_GPU(pKernelChannel);
1743     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
1744     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
1745 
1746     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
1747     {
1748 
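        //
        // The physical channel state is owned by the host (vGPU) or GSP RM,
        // so forward the stop request over RPC.
        //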
1749         NV_RM_RPC_CONTROL(pGpu,
1750                           pRmCtrlParams->hClient,
1751                           RES_GET_HANDLE(pKernelChannel),
1752                           pRmCtrlParams->cmd,
1753                           pRmCtrlParams->pParams,
1754                           pRmCtrlParams->paramsSize,
1755                           rmStatus);
1756         if (rmStatus != NV_OK)
1757             return rmStatus;
1758     }
1759     else
1760     {
1761         NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
1762             kchannelFwdToInternalCtrl_HAL(pGpu,
1763                                           pKernelChannel,
1764                                           NVA06F_CTRL_CMD_INTERNAL_STOP_CHANNEL,
1765                                           pRmCtrlParams));
1766     }
1767 
1768     NV_ASSERT_OK_OR_RETURN(kchannelNotifyRc_HAL(pKernelChannel));
1769 
1770     return NV_OK;
1771 }
1772 
1773 /*!
1774  * @brief Helper to get type and memdesc of a channel notifier (memory/ctxdma)
1775  */
1776 NV_STATUS
1777 kchannelGetNotifierInfo
1778 (
1779     OBJGPU             *pGpu,
1780     Device             *pDevice,
1781     NvHandle            hErrorContext,
1782     MEMORY_DESCRIPTOR **ppMemDesc,
1783     ErrorNotifierType  *pNotifierType,
1784     NvU64              *pOffset
1785 )
1786 {
1787     RsClient   *pRsClient   = RES_GET_CLIENT(pDevice);
1788     NvHandle    hDevice     = RES_GET_HANDLE(pDevice);
1789     ContextDma *pContextDma = NULL;
1790     Memory     *pMemory     = NULL;
1791 
1792     NV_ASSERT_OR_RETURN(ppMemDesc != NULL, NV_ERR_INVALID_PARAMETER);
1793     NV_ASSERT_OR_RETURN(pNotifierType != NULL, NV_ERR_INVALID_PARAMETER);
1794 
1795     *ppMemDesc = NULL;
1796     *pNotifierType = ERROR_NOTIFIER_TYPE_UNKNOWN;
1797     *pOffset = 0;
1798 
1799     if (hErrorContext == NV01_NULL_OBJECT)
1800     {
1801         *pNotifierType = ERROR_NOTIFIER_TYPE_NONE;
1802         return NV_OK;
1803     }
1804 
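    //
    // The error notifier can be backed either by a Memory object or by a
    // ContextDma; try the Memory object lookup first.
    //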
1805     if (memGetByHandleAndDevice(pRsClient, hErrorContext, hDevice, &pMemory) ==
1806         NV_OK)
1807     {
1808         if (memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_VIRTUAL)
1809         {
1810             //
1811             // GPUVA case: Get the underlying DMA mapping in this case. In GSP
1812             // client mode + SLI, GSP won't be able to write to notifiers on
1813             // other GPUs.
1814             //
1815             NvU64 offset;
1816             NvU32 subdeviceInstance;
1817             NvU64 notifyGpuVA = memdescGetPhysAddr(pMemory->pMemDesc,
1818                                                    AT_GPU_VA, 0);
1819             CLI_DMA_MAPPING_INFO *pDmaMappingInfo;
1820             NvBool bFound;
1821 
1822             bFound = CliGetDmaMappingInfo(
1823                 pRsClient,
1824                 RES_GET_HANDLE(pDevice),
1825                 RES_GET_HANDLE(pMemory),
1826                 notifyGpuVA,
1827                 gpumgrGetDeviceGpuMask(pGpu->deviceInstance),
1828                 &pDmaMappingInfo);
1829 
1830             if (!bFound)
1831             {
1832                 NV_PRINTF(LEVEL_ERROR,
1833                           "Cannot find DMA mapping for GPU_VA notifier\n");
1834                 return NV_ERR_INVALID_STATE;
1835             }
1836 
1837             offset = notifyGpuVA - pDmaMappingInfo->DmaOffset;
1838             if (offset + sizeof(NOTIFICATION) > pDmaMappingInfo->pMemDesc->Size)
1839             {
1840                 NV_PRINTF(LEVEL_ERROR,
1841                     "Notifier does not fit within DMA mapping for GPU_VA\n");
1842                 return NV_ERR_INVALID_STATE;
1843             }
1844 
1845             subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(
1846                 gpumgrGetParentGPU(pGpu));
1847             SLI_LOOP_START(SLI_LOOP_FLAGS_NONE)
1848             if (IsSLIEnabled(pGpu) && IS_GSP_CLIENT(pGpu))
1849             {
1850                 NV_PRINTF(LEVEL_ERROR, "GSP does not support SLI\n");
1851                 return NV_ERR_NOT_SUPPORTED;
1852             }
1853             SLI_LOOP_END
1854 
1855             if (!pDmaMappingInfo->KernelVAddr[subdeviceInstance])
1856             {
1857                 NV_PRINTF(LEVEL_ERROR,
1858                           "Kernel VA addr mapping not present for notifier\n");
1859                 return NV_ERR_INVALID_STATE;
1860             }
1861             *ppMemDesc = pDmaMappingInfo->pMemDesc;
1862             // The notifier format here is struct NOTIFICATION, same as ctxdma
1863             *pNotifierType = ERROR_NOTIFIER_TYPE_CTXDMA;
1864             *pOffset = offset;
1865         }
1866         else
1867         {
1868             *ppMemDesc = pMemory->pMemDesc;
1869             *pNotifierType = ERROR_NOTIFIER_TYPE_MEMORY;
1870         }
1871         return NV_OK;
1872     }
1873 
1874     if (ctxdmaGetByHandle(pRsClient, hErrorContext, &pContextDma) == NV_OK)
1875     {
1876         *ppMemDesc = pContextDma->pMemDesc;
1877         *pNotifierType = ERROR_NOTIFIER_TYPE_CTXDMA;
1878         return NV_OK;
1879     }
1880 
1881     return NV_ERR_OBJECT_NOT_FOUND;
1882 }
1883 
1884 /*!
1885  * @brief  Check if the client that owns this channel is in user mode.
1886  *
1887  * This replaces using call context for privilege checking,
1888  * and is callable from both CPU and GSP.
1889  *
 * @param[in] pKernelChannel
 *
 * @returns NV_TRUE if owned by user mode, NV_FALSE otherwise.
1894  */
1895 NvBool
1896 kchannelCheckIsUserMode_IMPL
1897 (
1898     KernelChannel *pKernelChannel
1899 )
1900 {
1901     return (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER) ||
1902            (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN);
1903 }
1904 
1905 /*!
1906  * @brief  Check if the client that owns this channel is kernel.
1907  *
1908  * This replaces using call context for privilege checking,
1909  * and is callable from both CPU and GSP.
1910  *
 * @param[in] pKernelChannel
 *
 * @returns NV_TRUE if owned by kernel, NV_FALSE otherwise.
1915  */
1916 NvBool
1917 kchannelCheckIsKernel_IMPL
1918 (
1919     KernelChannel *pKernelChannel
1920 )
1921 {
1922     return pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL;
1923 }
1924 
1925 /*!
1926  * @brief  Check if the client that owns this channel is admin.
1927  *
1928  * This replaces using call context for admin privilege checking,
 * and is callable from both CPU and GSP.
1930  *
 * @param[in] pKernelChannel
 *
 * @returns NV_TRUE if owned by admin, NV_FALSE otherwise.
1935  */
1936 NvBool
1937 kchannelCheckIsAdmin_IMPL
1938 (
1939     KernelChannel *pKernelChannel
1940 )
1941 {
1942     return (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL) ||
1943            (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN);
1944 }
1945 
1946 
1947 /*!
1948  * @brief  Check if the channel is bound to its resources.
1949  *
 * This makes sure the channel has gone through the UVM registration step before it can be scheduled.
1951  * This applies only to UVM owned channels.
1952  *
 * @param[in] pGpu
 * @param[in] pKernelChannel
1955  *
1956  * @returns NV_TRUE if bound.
1957  */
1958 NvBool
1959 kchannelIsSchedulable_IMPL
1960 (
1961     OBJGPU *pGpu,
1962     KernelChannel *pKernelChannel
1963 )
1964 {
1965     OBJGVASPACE *pGVAS = NULL;
1966     NvU32        engineDesc = 0;
1967     NvU32        gfId;
1968 
1969     gfId = kchannelGetGfid(pKernelChannel);
1970     if (IS_GFID_VF(gfId))
1971     {
1972         NV_PRINTF(LEVEL_INFO, "Check for channel schedulability for channel 0x%x is already performed on guest-RM\n",
1973                   kchannelGetDebugTag(pKernelChannel));
1974         return NV_TRUE;
1975     }
1976 
1977     pGVAS = dynamicCast(pKernelChannel->pVAS, OBJGVASPACE);
1978 
1979     //
1980     // It should be an error to have allocated and attempt to schedule a
1981     // channel without having allocated a GVAS. We ignore this check on
1982     // AMODEL, which has its own dummy AVAS.
1983     //
1984     NV_ASSERT_OR_RETURN(pGVAS != NULL || IS_MODS_AMODEL(pGpu), NV_FALSE);
1985 
1986     NV_ASSERT_OR_RETURN(kchannelGetEngine_HAL(pGpu, pKernelChannel, &engineDesc) == NV_OK, NV_FALSE);
1987 
1988     if (pGVAS != NULL && gvaspaceIsExternallyOwned(pGVAS) && IS_GR(engineDesc) && !pKernelChannel->bIsContextBound)
1989     {
1990         NV_PRINTF(LEVEL_ERROR,
                  "Cannot schedule externally-owned channel 0x%x with unbound allocations!\n",
1992                   kchannelGetDebugTag(pKernelChannel));
1993         return NV_FALSE;
1994     }
1995     return NV_TRUE;
1996 }
1997 
1998 // Alloc pFifoHalData
1999 static NV_STATUS
2000 _kchannelAllocHalData
2001 (
2002     OBJGPU        *pGpu,
2003     KernelChannel *pKernelChannel
2004 )
2005 {
2006     portMemSet(pKernelChannel->pFifoHalData, 0, sizeof(pKernelChannel->pFifoHalData));
2007 
2008     // Alloc 1 page of instmem per GPU instance
2009     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2010 
2011     pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = portMemAllocNonPaged(sizeof(FIFO_INSTANCE_BLOCK));
2012 
2013     NV_ASSERT_OR_ELSE(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] != NULL,
2014             SLI_LOOP_GOTO(failed));
2015 
2016     portMemSet(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)], 0, sizeof(FIFO_INSTANCE_BLOCK));
2017 
2018     SLI_LOOP_END
2019 
2020     return NV_OK;
2021 
2022 failed:
2023     DBG_BREAKPOINT();
2024     _kchannelFreeHalData(pGpu, pKernelChannel);
2025     return NV_ERR_NO_MEMORY;
2026 }
2027 
2028 // Free memdescs and pFifoHalData, if any
2029 static void
2030 _kchannelFreeHalData
2031 (
2032     OBJGPU        *pGpu,
2033     KernelChannel *pKernelChannel
2034 )
2035 {
2036     // Unmap / delete memdescs
2037     kchannelDestroyMem_HAL(pGpu, pKernelChannel);
2038 
2039     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2040     kchannelDestroyUserdMemDesc(pGpu, pKernelChannel);
2041 
2042     // Free pFifoHalData
2043     portMemFree(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]);
2044     pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = NULL;
2045     SLI_LOOP_END
2046 }
2047 
2048 // Returns the proper VerifFlags for kchannelAllocMem
2049 static NvU32
2050 _kchannelgetVerifFlags
2051 (
2052     OBJGPU                                    *pGpu,
2053     NV_CHANNEL_ALLOC_PARAMS    *pChannelGpfifoParams
2054 )
2055 {
2056     NvU32 verifFlags = 0;
2057 
2058     return verifFlags;
2059 }
2060 
2061 // Allocate and describe instance memory
2062 static NV_STATUS
2063 _kchannelAllocOrDescribeInstMem
2064 (
2065     KernelChannel  *pKernelChannel,
2066     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams
2067 )
2068 {
2069     OBJGPU                *pGpu        = GPU_RES_GET_GPU(pKernelChannel);
2070     KernelFifo            *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
2071     KernelChannelGroupApi *pKernelChannelGroupApi = pKernelChannel->pKernelChannelGroupApi;
2072     KernelChannelGroup    *pKernelChannelGroup    = pKernelChannelGroupApi->pKernelChannelGroup;
2073     NvU32                  gfid       = pKernelChannelGroup->gfid;
2074     NV_STATUS              status;
2075     NvHandle               hClient = RES_GET_CLIENT_HANDLE(pKernelChannel);
2076 
2077     // Alloc pFifoHalData
2078     NV_ASSERT_OK_OR_RETURN(_kchannelAllocHalData(pGpu, pKernelChannel));
2079 
    //
    // GSP RM, and host RM on a full SRIOV setup, will not be aware of the
    // client-allocated USERD handles; the handle is translated on the GSP
    // client (or SRIOV guest), and GSP RM or the full-SRIOV host RM will get
    // the translated addresses, which it will later describe with memdescs.
    //
    // However, it is still client-allocated USERD from the perspective of GSP
    // RM or host RM on a full SRIOV setup, so set the flag accordingly.
    //
2088     if (!RMCFG_FEATURE_PLATFORM_GSP &&
2089         !(IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
2090     {
2091         pKernelChannel->bClientAllocatedUserD = NV_FALSE;
2092         NV_ASSERT_OK_OR_GOTO(status,
2093                 kchannelCreateUserdMemDescBc_HAL(pGpu, pKernelChannel, hClient,
2094                     pChannelGpfifoParams->hUserdMemory,
2095                     pChannelGpfifoParams->userdOffset),
2096                 failed);
2097     }
2098     else
2099     {
2100         pKernelChannel->bClientAllocatedUserD = NV_TRUE;
2101     }
2102 
2103     // Alloc/describe instmem memdescs depending on platform
2104     if (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
2105     {
2106         // On Heavy SRIOV, describe memdescs using RPC
2107         NV_ASSERT_OK_OR_GOTO(status,
2108                 _kchannelDescribeMemDescsHeavySriov(pGpu, pKernelChannel),
2109                 failed);
2110     }
2111     else if (RMCFG_FEATURE_PLATFORM_GSP ||
2112         (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
2113     {
2114         // On GSPFW or non-heavy SRIOV, describe memdescs from params
2115         NV_ASSERT_OK_OR_GOTO(status,
2116                 _kchannelDescribeMemDescsFromParams(pGpu, pKernelChannel, pChannelGpfifoParams),
2117                 failed);
2118     }
2119     else if (!IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
2120     {
2121         // On baremetal, GSP client, or SRIOV host, alloc mem
2122         NV_ASSERT_OK_OR_GOTO(status,
2123                 kchannelAllocMem_HAL(pGpu,
2124                                      pKernelChannel,
2125                                      pChannelGpfifoParams->flags,
2126                                      _kchannelgetVerifFlags(pGpu, pChannelGpfifoParams)),
2127                 failed);
2128     }
2129 
2130     // Setup USERD
2131     if (IS_VIRTUAL(pGpu))
2132     {
2133         PMEMORY_DESCRIPTOR pUserdSubDeviceMemDesc =
2134                 pKernelChannel->pUserdSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
2135         NvBool bFullSriov = IS_VIRTUAL_WITH_SRIOV(pGpu) &&
2136             !gpuIsWarBug200577889SriovHeavyEnabled(pGpu);
2137 
        // Clear USERD if it is in SYSMEM, or if it is in FB on a full SRIOV setup (without the BUG 200577889 WAR)
2139         if (pUserdSubDeviceMemDesc != NULL &&
2140                 ((memdescGetAddressSpace(pUserdSubDeviceMemDesc) == ADDR_SYSMEM)
2141                 || ((memdescGetAddressSpace(pUserdSubDeviceMemDesc) == ADDR_FBMEM) && bFullSriov)))
2142         {
2143             kfifoSetupUserD_HAL(pGpu, pKernelFifo, pUserdSubDeviceMemDesc);
2144         }
2145     }
2146     return NV_OK;
2147 
2148 failed:
2149     _kchannelFreeHalData(pGpu, pKernelChannel);
2150     return status;
2151 }
2152 
2153 /**
 * @brief Create and describe the channel instance memory, RAMFC, and USERD
 *        memdescs using the info in pChannelGpfifoParams
2156  *
2157  * @param pGpu                  : OBJGPU pointer
2158  * @param pKernelChannel        : KernelChannel pointer
 * @param pChannelGpfifoParams  : Pointer to channel allocation params
2160  */
2161 static NV_STATUS
2162 _kchannelDescribeMemDescsFromParams
2163 (
2164     OBJGPU                                 *pGpu,
2165     KernelChannel                          *pKernelChannel,
2166     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams
2167 )
2168 {
2169     NV_STATUS               status         = NV_OK;
2170     FIFO_INSTANCE_BLOCK    *pInstanceBlock = NULL;
2171     NvU32                   subDevInst;
2172     NvU32                   gfid           = GPU_GFID_PF;
2173     NvU32                   runqueue;
2174     KernelChannelGroupApi *pKernelChannelGroupApi =
2175         pKernelChannel->pKernelChannelGroupApi;
2176 
2177     NV_ASSERT_OR_RETURN((pKernelChannelGroupApi != NULL), NV_ERR_INVALID_STATE);
2178     gfid = pKernelChannelGroupApi->pKernelChannelGroup->gfid;
2179 
2180     NV_ASSERT_OR_RETURN(RMCFG_FEATURE_PLATFORM_GSP ||
2181                         (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)),
2182                         NV_ERR_INVALID_STATE);
2183 
2184     NV_ASSERT_OR_RETURN((pChannelGpfifoParams != NULL), NV_ERR_INVALID_ARGUMENT);
2185 
2186     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2187 
2188     subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
2189 
2190     pInstanceBlock = (FIFO_INSTANCE_BLOCK*) pKernelChannel->pFifoHalData[subDevInst];
2191 
2192     // Create memory descriptor for the instance memory
2193     status = memdescCreate(&pInstanceBlock->pInstanceBlockDesc, pGpu,
2194                            pChannelGpfifoParams->instanceMem.size, 1 , NV_TRUE,
2195                            pChannelGpfifoParams->instanceMem.addressSpace,
2196                            pChannelGpfifoParams->instanceMem.cacheAttrib,
2197                            MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2198 
2199     if (status != NV_OK)
2200     {
2201         NV_PRINTF(LEVEL_ERROR,
2202                   "Unable to allocate instance memory descriptor!\n");
2203         SLI_LOOP_RETURN(status);
2204     }
2205 
2206     memdescDescribe(pInstanceBlock->pInstanceBlockDesc, pChannelGpfifoParams->instanceMem.addressSpace,
2207                     pChannelGpfifoParams->instanceMem.base, pChannelGpfifoParams->instanceMem.size);
2208 
2209 
2210     // Create memory descriptor for the ramfc
2211     status = memdescCreate(&pInstanceBlock->pRamfcDesc, pGpu,
2212                            pChannelGpfifoParams->ramfcMem.size, 1 , NV_TRUE,
2213                            pChannelGpfifoParams->ramfcMem.addressSpace,
2214                            pChannelGpfifoParams->ramfcMem.cacheAttrib,
2215                            MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2216 
2217     if (status != NV_OK)
2218     {
2219         NV_PRINTF(LEVEL_ERROR,
                  "Unable to allocate RAMFC memory descriptor!\n");
2221         SLI_LOOP_RETURN(status);
2222     }
2223 
2224     memdescDescribe(pInstanceBlock->pRamfcDesc, pChannelGpfifoParams->ramfcMem.addressSpace,
2225                     pChannelGpfifoParams->ramfcMem.base, pChannelGpfifoParams->ramfcMem.size);
2226 
2227     // Create userd memory descriptor
2228     status = memdescCreate(&pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], pGpu,
2229                            pChannelGpfifoParams->userdMem.size, 1 , NV_TRUE,
2230                            pChannelGpfifoParams->userdMem.addressSpace,
2231                            pChannelGpfifoParams->userdMem.cacheAttrib,
2232                            MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2233 
2234     if (status != NV_OK)
2235     {
2236         NV_PRINTF(LEVEL_ERROR,
                  "Unable to allocate USERD memory descriptor!\n");
2238         SLI_LOOP_RETURN(status);
2239     }
2240 
2241     memdescDescribe(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst],
2242                     pChannelGpfifoParams->userdMem.addressSpace,
2243                     pChannelGpfifoParams->userdMem.base, pChannelGpfifoParams->userdMem.size);
2244 
2245     if (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
2246     {
        /*
         * For full SRIOV, guest RM allocates and sends the instance, RAMFC and USERD memory.
         * Set the MEMDESC_FLAGS_GUEST_ALLOCATED flag in the memory descriptors.
         */
2251         memdescSetFlag(pInstanceBlock->pInstanceBlockDesc, MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE);
2252         memdescSetFlag(pInstanceBlock->pRamfcDesc, MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE);
2253         memdescSetFlag(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE);
2254     }
2255 
2256     // Create method buffer memory descriptor
2257     runqueue = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, pChannelGpfifoParams->flags);
2258     if (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
2259     {
2260         pKernelChannelGroupApi->pKernelChannelGroup->pMthdBuffers[runqueue]
2261             .bar2Addr = pChannelGpfifoParams->mthdbufMem.base;
2262     }
2263     else if (pKernelChannelGroupApi->pKernelChannelGroup
2264                  ->pMthdBuffers[runqueue].pMemDesc == NULL)
2265     {
2266         NV_ASSERT(pChannelGpfifoParams->mthdbufMem.size > 0);
2267         NV_ASSERT(pChannelGpfifoParams->mthdbufMem.base != 0);
2268         status = memdescCreate(&pKernelChannelGroupApi->pKernelChannelGroup
2269                                     ->pMthdBuffers[runqueue].pMemDesc,
2270                                pGpu,
2271                                pChannelGpfifoParams->mthdbufMem.size,
2272                                1,
2273                                NV_TRUE,
2274                                pChannelGpfifoParams->mthdbufMem.addressSpace,
2275                                pChannelGpfifoParams->mthdbufMem.cacheAttrib,
2276                                MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2277 
2278         if (status != NV_OK)
2279         {
2280             NV_PRINTF(LEVEL_ERROR,
                      "Unable to allocate method buffer memory descriptor!\n");
2282             SLI_LOOP_RETURN(status);
2283         }
2284         memdescDescribe(pKernelChannelGroupApi->pKernelChannelGroup
2285                             ->pMthdBuffers[runqueue].pMemDesc,
2286                         pChannelGpfifoParams->mthdbufMem.addressSpace,
2287                         pChannelGpfifoParams->mthdbufMem.base,
2288                         pChannelGpfifoParams->mthdbufMem.size);
2289     }
2290 
2291     NV_PRINTF(LEVEL_INFO,
2292               "hChannel 0x%x hClient 0x%x, Class ID 0x%x "
2293               "Instance Block @ 0x%llx (%s %x) "
2294               "USERD @ 0x%llx "
2295               "for subdevice %d\n",
2296               RES_GET_HANDLE(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel),
2297               memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0),
2298               memdescGetApertureString(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2299               (NvU32)(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2300               (pKernelChannel->pUserdSubDeviceMemDesc[subDevInst] == NULL) ? 0x0LL :
2301               memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], AT_GPU, 0LL), subDevInst);
2302 
2303     SLI_LOOP_END
2304 
2305     return status;
2306 }
2307 
2308 /**
 * @brief Create and describe the channel instance memory, RAMFC, and USERD
 *        memdescs using an RPC, for a heavy SRIOV guest
2311  *
2312  * @param pGpu                  : OBJGPU pointer
2313  * @param pKernelChannel        : KernelChannel pointer
2314  */
2315 static NV_STATUS
2316 _kchannelDescribeMemDescsHeavySriov
2317 (
2318     OBJGPU               *pGpu,
2319     KernelChannel        *pKernelChannel
2320 )
2321 {
2322     NV_STATUS               status         = NV_OK;
2323     FIFO_INSTANCE_BLOCK    *pInstanceBlock = NULL;
2324     NvU32                   subDevInst;
2325     Subdevice              *pSubDevice;
2326     NvHandle                hSubDevice     = 0;
2327     NvU32                   apert          = ADDR_UNKNOWN;
2328     NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS memInfoParams;
2329     Device                 *pDevice = GPU_RES_GET_DEVICE(pKernelChannel);
2330 
2331     NV_ASSERT_OR_RETURN(IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu),
2332             NV_ERR_INVALID_STATE);
2333 
2334     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2335 
2336     subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
2337 
2338     pInstanceBlock = (FIFO_INSTANCE_BLOCK*) pKernelChannel->pFifoHalData[subDevInst];
2339 
    //
    // In SRIOV-enabled systems, MMU fault interrupts for guest contexts are received and handled in the guest.
    // In order to correctly find the faulting channel, the faulting instance address has to be compared against
    // the list of allocated channels. But since contexts are currently allocated in the host during
    // channelConstruct, we need the context info from the host and save it locally for the above channel lookup
    // to pass. This piece of code uses GET_CHANNEL_MEM_INFO to fetch that info and updates pFifoHalData with the
    // relevant details.
    //
2347 
2348     portMemSet(&memInfoParams, 0, sizeof(NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS));
2349     memInfoParams.hChannel = RES_GET_HANDLE(pKernelChannel);
2350 
2351     status = subdeviceGetByInstance(RES_GET_CLIENT(pKernelChannel),
2352                                     RES_GET_HANDLE(pDevice),
2353                                     subDevInst,
2354                                     &pSubDevice);
2355     if (status != NV_OK)
2356     {
2357         NV_PRINTF(LEVEL_ERROR, "Unable to get subdevice object.\n");
2358         DBG_BREAKPOINT();
2359         SLI_LOOP_RETURN(status);
2360     }
2361 
2362     GPU_RES_SET_THREAD_BC_STATE(pSubDevice);
2363 
2364     hSubDevice = RES_GET_HANDLE(pSubDevice);
2365 
2366     NV_RM_RPC_CONTROL(pGpu,
2367                       RES_GET_CLIENT_HANDLE(pKernelChannel),
2368                       hSubDevice,
2369                       NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO,
2370                       &memInfoParams,
2371                       sizeof(NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS),
2372                       status);
2373     if (status != NV_OK)
2374     {
2375         NV_PRINTF(LEVEL_ERROR,
2376                   "RM Control call to fetch channel meminfo failed, hKernelChannel 0x%x\n",
2377                   RES_GET_HANDLE(pKernelChannel));
2378         DBG_BREAKPOINT();
2379         SLI_LOOP_RETURN(status);
2380     }
2381 
2382     // Find the aperture
2383     if (memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_VIDMEM)
2384     {
2385         apert = ADDR_FBMEM;
2386     }
2387     else if ((memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_COH) ||
2388              (memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_NCOH))
2389     {
2390         apert = ADDR_SYSMEM;
2391     }
2392     else
2393     {
2394         NV_PRINTF(LEVEL_ERROR,
2395                   "Unknown aperture, hClient 0x%x, hKernelChannel 0x%x\n",
2396                   RES_GET_CLIENT_HANDLE(pKernelChannel),
2397                   RES_GET_HANDLE(pKernelChannel));
2398         status = NV_ERR_INVALID_ARGUMENT;
2399         DBG_BREAKPOINT();
2400         SLI_LOOP_RETURN(status);
2401     }
2402 
2403     status = memdescCreate(&pInstanceBlock->pInstanceBlockDesc, pGpu,
2404                            memInfoParams.chMemInfo.inst.size, 1 , NV_TRUE,
2405                            apert, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2406 
2407     if (status != NV_OK)
2408     {
2409         NV_PRINTF(LEVEL_ERROR,
2410                   "Unable to allocate instance memory descriptor!\n");
2411         SLI_LOOP_RETURN(status);
2412     }
2413 
2414     memdescDescribe(pInstanceBlock->pInstanceBlockDesc, apert, memInfoParams.chMemInfo.inst.base, memInfoParams.chMemInfo.inst.size);
2415 
2416     NV_PRINTF(LEVEL_INFO,
2417               "hChannel 0x%x hClient 0x%x, Class ID 0x%x "
2418               "Instance Block @ 0x%llx (%s %x) "
2419               "USERD @ 0x%llx "
2420               "for subdevice %d\n",
2421               RES_GET_HANDLE(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel),
2422               memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0),
2423               memdescGetApertureString(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2424               (NvU32)(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2425               (pKernelChannel->pUserdSubDeviceMemDesc[subDevInst] == NULL) ? 0x0LL :
2426               memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], AT_GPU, 0LL), subDevInst);
2427 
2428     SLI_LOOP_END
2429 
2430     return status;
2431 }
2432 
2433 static NV_STATUS
2434 _kchannelSendChannelAllocRpc
2435 (
2436     KernelChannel *pKernelChannel,
2437     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams,
2438     KernelChannelGroup *pKernelChannelGroup,
2439     NvBool bFullSriov
2440 )
2441 {
2442     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
2443     NV_CHANNEL_ALLOC_PARAMS *pRpcParams;
2444     NV_STATUS status = NV_OK;
2445 
2446     pRpcParams = portMemAllocNonPaged(sizeof(*pRpcParams));
2447     NV_ASSERT_OR_RETURN(pRpcParams != NULL, NV_ERR_NO_MEMORY);
2448     portMemSet(pRpcParams, 0, sizeof(*pRpcParams));
2449 
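    // Copy over the allocation parameters that the host / GSP RM needs to construct the channel.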
2450     pRpcParams->hObjectError      = pChannelGpfifoParams->hObjectError;
2451     pRpcParams->hObjectBuffer     = 0;
2452     pRpcParams->gpFifoOffset      = pChannelGpfifoParams->gpFifoOffset;
2453     pRpcParams->gpFifoEntries     = pChannelGpfifoParams->gpFifoEntries;
2454     pRpcParams->flags             = pChannelGpfifoParams->flags;
2455     pRpcParams->hContextShare     = pChannelGpfifoParams->hContextShare;
2456     pRpcParams->hVASpace          = pChannelGpfifoParams->hVASpace;
2457     pRpcParams->engineType        = pChannelGpfifoParams->engineType;
2458     pRpcParams->subDeviceId       = pChannelGpfifoParams->subDeviceId;
2459     pRpcParams->hObjectEccError   = pChannelGpfifoParams->hObjectEccError;
2460     pRpcParams->hPhysChannelGroup = pChannelGpfifoParams->hPhysChannelGroup;
2461     pRpcParams->internalFlags     = pChannelGpfifoParams->internalFlags;
2462 
2463     portMemCopy((void*)pRpcParams->hUserdMemory,
2464                 sizeof(NvHandle) * NV2080_MAX_SUBDEVICES,
2465                 (const void*)pChannelGpfifoParams->hUserdMemory,
2466                 sizeof(NvHandle) * NV2080_MAX_SUBDEVICES);
2467 
2468     portMemCopy((void*)pRpcParams->userdOffset,
2469                 sizeof(NvU64) * NV2080_MAX_SUBDEVICES,
2470                 (const void*)pChannelGpfifoParams->userdOffset,
2471                 sizeof(NvU64) * NV2080_MAX_SUBDEVICES);
2472 
2473     if (pKernelChannel->bCCSecureChannel)
2474     {
2475         portMemCopy((void*)pRpcParams->encryptIv,
2476                     sizeof(pRpcParams->encryptIv),
2477                     (const void*)pChannelGpfifoParams->encryptIv,
2478                     sizeof(pChannelGpfifoParams->encryptIv));
2479 
2480         portMemCopy((void*)pRpcParams->decryptIv,
2481                     sizeof(pRpcParams->decryptIv),
2482                     (const void*)pChannelGpfifoParams->decryptIv,
2483                     sizeof(pChannelGpfifoParams->decryptIv));
2484 
2485         portMemCopy((void*)pRpcParams->hmacNonce,
2486                     sizeof(pRpcParams->hmacNonce),
2487                     (const void*)pChannelGpfifoParams->hmacNonce,
2488                     sizeof(pChannelGpfifoParams->hmacNonce));
2489     }
2490 
2491     //
    // These fields are only filled out for GSP client or full SRIOV,
    // i.e. when the guest independently allocates the ChID and instmem.
2494     //
2495     if (IS_GSP_CLIENT(pGpu) || bFullSriov)
2496     {
2497         NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
2498         FIFO_INSTANCE_BLOCK *pInstanceBlock = pKernelChannel->pFifoHalData[subdevInst];
2499         NvU32 runqueue  = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, pChannelGpfifoParams->flags);
2500 
2501         NV_ASSERT_TRUE_OR_GOTO(status,
2502                                pInstanceBlock != NULL,
2503                                NV_ERR_INVALID_STATE,
2504                                cleanup);
2505 
2506         portMemCopy(&pRpcParams->errorNotifierMem,
2507                     sizeof pRpcParams->errorNotifierMem,
2508                     &(pChannelGpfifoParams->errorNotifierMem),
2509                     sizeof pChannelGpfifoParams->errorNotifierMem);
2510         portMemCopy(&pRpcParams->eccErrorNotifierMem,
2511                     sizeof pRpcParams->eccErrorNotifierMem,
2512                     &(pChannelGpfifoParams->eccErrorNotifierMem),
2513                     sizeof pChannelGpfifoParams->eccErrorNotifierMem);
2514 
2515         // Fill the instance block
2516         if (pInstanceBlock)
2517         {
2518             pRpcParams->instanceMem.base =
2519                             memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0);
2520             pRpcParams->instanceMem.size = pInstanceBlock->pInstanceBlockDesc->Size;
2521             pRpcParams->instanceMem.addressSpace =
2522                             memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc);
2523             pRpcParams->instanceMem.cacheAttrib =
2524                             memdescGetCpuCacheAttrib(pInstanceBlock->pInstanceBlockDesc);
2525 
2526             pRpcParams->ramfcMem.base =
2527                             memdescGetPhysAddr(pInstanceBlock->pRamfcDesc,  AT_GPU, 0);
2528             pRpcParams->ramfcMem.size = pInstanceBlock->pRamfcDesc->Size;
2529             pRpcParams->ramfcMem.addressSpace =
2530                             memdescGetAddressSpace(pInstanceBlock->pRamfcDesc);
2531             pRpcParams->ramfcMem.cacheAttrib =
2532                             memdescGetCpuCacheAttrib(pInstanceBlock->pRamfcDesc);
2533         }
2534 
2535         // Fill the userd memory descriptor
2536         if (pKernelChannel->pUserdSubDeviceMemDesc[subdevInst])
2537         {
2538             pRpcParams->userdMem.base =
2539                             memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst], AT_GPU, 0);
2540             pRpcParams->userdMem.size = pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]->Size;
2541             pRpcParams->userdMem.addressSpace =
2542                             memdescGetAddressSpace(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]);
2543             pRpcParams->userdMem.cacheAttrib =
2544                             memdescGetCpuCacheAttrib(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]);
2545         }
2546 
2547         // Fill the method buffer memory descriptor
2548         if (pKernelChannelGroup->pMthdBuffers != NULL &&
2549             pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc != NULL)
2550         {
2551             if (bFullSriov)
2552             {
2553                 pRpcParams->mthdbufMem.base =
2554                     pKernelChannelGroup->pMthdBuffers[runqueue].bar2Addr;
2555                 pRpcParams->mthdbufMem.size =
2556                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc->Size;
2557                 pRpcParams->mthdbufMem.addressSpace = ADDR_VIRTUAL;
2558                 pRpcParams->mthdbufMem.cacheAttrib = 0;
2559             }
2560             else
2561             {
2562                 pRpcParams->mthdbufMem.base = memdescGetPhysAddr(
2563                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc,
2564                     AT_GPU, 0);
2565                 pRpcParams->mthdbufMem.size =
2566                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc->Size;
2567                 pRpcParams->mthdbufMem.addressSpace = memdescGetAddressSpace(
2568                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc);
2569                 pRpcParams->mthdbufMem.cacheAttrib = memdescGetCpuCacheAttrib(
2570                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc);
2571             }
2572         }
2573 
2574         if (IS_GSP_CLIENT(pGpu))
2575         {
2576             //
2577             // Setting these param flags will make the Physical RMAPI use our
2578             // ChID (which is already decided)
2579             //
2580 
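            //
            // A USERD page holds multiple fixed-size USERD entries (e.g. 4K / 512B),
            // so split the ChID into a USERD page index and an index within the page.
            //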
2581             NvU32 numChannelsPerUserd = NVBIT(DRF_SIZE(NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE)); //  1<<3 -> 4K / 512B
2582 
2583             pRpcParams->flags = FLD_SET_DRF(OS04, _FLAGS,
2584                     _CHANNEL_USERD_INDEX_FIXED, _FALSE, pRpcParams->flags);
2585             pRpcParams->flags = FLD_SET_DRF(OS04, _FLAGS,
2586                     _CHANNEL_USERD_INDEX_PAGE_FIXED, _TRUE, pRpcParams->flags);
2587             pRpcParams->flags = FLD_SET_DRF_NUM(OS04, _FLAGS,
2588                     _CHANNEL_USERD_INDEX_VALUE, pKernelChannel->ChID % numChannelsPerUserd, pRpcParams->flags);
2589             pRpcParams->flags = FLD_SET_DRF_NUM(OS04, _FLAGS,
2590                     _CHANNEL_USERD_INDEX_PAGE_VALUE, pKernelChannel->ChID / numChannelsPerUserd, pRpcParams->flags);
2591 
2592             // GSP client needs to pass in privilege level as an alloc param since GSP-RM cannot check this
2593             pRpcParams->internalFlags =
2594                 FLD_SET_DRF_NUM(_KERNELCHANNEL, _ALLOC_INTERNALFLAGS, _PRIVILEGE,
2595                     pKernelChannel->privilegeLevel, pRpcParams->internalFlags);
2596             pRpcParams->ProcessID = pKernelChannel->ProcessID;
2597             pRpcParams->SubProcessID= pKernelChannel->SubProcessID;
2598         }
2599     }
2600 
2601     NV_RM_RPC_ALLOC_CHANNEL(pGpu,
2602                             RES_GET_CLIENT_HANDLE(pKernelChannel),
2603                             RES_GET_PARENT_HANDLE(pKernelChannel),
2604                             RES_GET_HANDLE(pKernelChannel),
2605                             RES_GET_EXT_CLASS_ID(pKernelChannel),
2606                             pRpcParams,
2607                             &pKernelChannel->ChID,
2608                             status);
2609     NV_ASSERT_OK_OR_GOTO(status, status, cleanup);
2610 
2611     NV_PRINTF(LEVEL_INFO,
2612         "Alloc Channel chid %d, hClient:0x%x, hParent:0x%x, hObject:0x%x, hClass:0x%x\n",
2613         pKernelChannel->ChID,
2614         RES_GET_CLIENT_HANDLE(pKernelChannel),
2615         RES_GET_PARENT_HANDLE(pKernelChannel),
2616         RES_GET_HANDLE(pKernelChannel),
2617         RES_GET_EXT_CLASS_ID(pKernelChannel));
2618 
2619 cleanup:
2620     portMemFree(pRpcParams);
2621 
2622     return status;
2623 }
2624 
2625 /*!
2626  * @brief Bind a single channel to a runlist
2627  *
2628  * This is a helper function for kchannelCtrlCmdBind and kchangrpapiCtrlCmdBind
2629  */
2630 NV_STATUS kchannelBindToRunlist_IMPL
2631 (
2632     KernelChannel *pKernelChannel,
2633     RM_ENGINE_TYPE localRmEngineType,
2634     ENGDESCRIPTOR  engineDesc
2635 )
2636 {
2637     OBJGPU    *pGpu;
2638     NV_STATUS  status = NV_OK;
2639 
2640     NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT);
2641     pGpu = GPU_RES_GET_GPU(pKernelChannel);
2642 
2643     // copied from setRunlistIdByEngineType
2644     if ((engineDesc == ENG_SW) || (engineDesc == ENG_BUS))
2645     {
2646         return NV_OK;
2647     }
2648 
2649     //
2650     // vGPU:
2651     //
2652     // Since vGPU does all real hardware management in the
2653     // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true),
2654     // do an RPC to the host to do the hardware update.
2655     //
2656     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
2657     {
2658         NVA06F_CTRL_BIND_PARAMS params;
2659 
2660         params.engineType = gpuGetNv2080EngineType(localRmEngineType);
2661 
2662         NV_RM_RPC_CONTROL(pGpu,
2663                           RES_GET_CLIENT_HANDLE(pKernelChannel),
2664                           RES_GET_HANDLE(pKernelChannel),
2665                           NVA06F_CTRL_CMD_BIND,
2666                           &params,
2667                           sizeof(params),
2668                           status);
2669 
2670         NV_ASSERT_OR_RETURN(status == NV_OK, status);
2671     }
2672 
2673     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
2674 
2675     status = kfifoRunlistSetIdByEngine_HAL(pGpu, GPU_GET_KERNEL_FIFO(pGpu),
2676                                            pKernelChannel, engineDesc);
2677 
2678     if (status != NV_OK)
2679     {
2680         NV_PRINTF(LEVEL_ERROR,
2681                   "Failed to set RunlistID 0x%08x for channel 0x%08x\n",
2682                   engineDesc, kchannelGetDebugTag(pKernelChannel));
2683         SLI_LOOP_BREAK;
2684     }
2685 
2686     SLI_LOOP_END;
2687 
2688     return status;
2689 }
2690 
2691 //
2692 // channelCtrlCmdEventSetNotification
2693 //
// This command handles set notification operations for the following
// channel and display classes:
2696 //
2697 //    NV50_DISPLAY             (Class: NV5070)
2698 //    GF100_CHANNEL_GPFIFO     (Class: NV906F)
2699 //    KEPLER_CHANNEL_GPFIFO_A  (Class: NVA06F)
2700 //    KEPLER_CHANNEL_GPFIFO_B  (Class: NVA16F)
2701 //    KEPLER_CHANNEL_GPFIFO_C  (Class: NVA26F)
2702 //    MAXWELL_CHANNEL_GPFIFO_A (Class: NVB06F)
2703 //    PASCAL_CHANNEL_GPFIFO_A  (Class: NVC06F)
2704 //
2705 NV_STATUS
2706 kchannelCtrlCmdEventSetNotification_IMPL
2707 (
2708     KernelChannel *pKernelChannel,
2709     NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams
2710 )
2711 {
2712     CLI_CHANNEL_CLASS_INFO classInfo;
2713     CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
2714     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
2715 
    // NV01_EVENT must have been plugged into this channel
2717     if (inotifyGetNotificationList(staticCast(pKernelChannel, INotifier)) == NULL)
2718     {
2719         NV_PRINTF(LEVEL_INFO, "cmd 0x%x: no event list\n", pRmCtrlParams->cmd);
2720         return NV_ERR_INVALID_STATE;
2721     }
2722 
2723     // get channel class-specific properties
2724     CliGetChannelClassInfo(REF_VAL(NVXXXX_CTRL_CMD_CLASS, pRmCtrlParams->cmd),
2725                            &classInfo);
2726 
2727     if (pSetEventParams->event >= classInfo.notifiersMaxCount)
2728     {
2729         NV_PRINTF(LEVEL_INFO, "bad event 0x%x\n", pSetEventParams->event);
2730         return NV_ERR_INVALID_ARGUMENT;
2731     }
2732 
2733     if ((pSetEventParams->action == classInfo.eventActionSingle) ||
2734         (pSetEventParams->action == classInfo.eventActionRepeat))
2735     {
2736         // must be in disabled state to transition to an active state
2737         if (pKernelChannel->pNotifyActions[pSetEventParams->event] != classInfo.eventActionDisable)
2738         {
2739             return NV_ERR_INVALID_STATE;
2740         }
2741 
2742         pKernelChannel->pNotifyActions[pSetEventParams->event] = pSetEventParams->action;
2743     }
2744     else if (pSetEventParams->action == classInfo.eventActionDisable)
2745     {
2746         pKernelChannel->pNotifyActions[pSetEventParams->event] = pSetEventParams->action;
2747     }
2748     else
2749     {
2750         return NV_ERR_INVALID_ARGUMENT;
2751     }
2752 
2753     return NV_OK;
2754 }
2755 
2756 NV_STATUS
2757 kchannelCtrlCmdGetClassEngineid_IMPL
2758 (
2759     KernelChannel *pKernelChannel,
2760     NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams
2761 )
2762 {
2763     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
2764     KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
2765     NV_STATUS status = NV_OK;
2766     RM_ENGINE_TYPE rmEngineType;
2767 
2768     //
2769     // MODS uses hObject 0 to figure out if this call is supported or not.
    // In the SRIOV VF scenario, the plugin asserts if the host returns an
    // error code for a control call. This is a temporary workaround until
    // MODS submits a proper fix.
2773     //
2774     if (pParams->hObject == NV01_NULL_OBJECT)
2775     {
2776         return NV_ERR_OBJECT_NOT_FOUND;
2777     }
2778 
2779     NV_CHECK_OR_RETURN(LEVEL_ERROR,
2780                        pParams->hObject != RES_GET_CLIENT_HANDLE(pKernelChannel),
2781                        NV_ERR_INVALID_ARGUMENT);
2782 
2783     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) ||
2784         (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
2785     {
2786         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
2787         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
2788 
2789         NV_RM_RPC_CONTROL(pGpu,
2790                           pRmCtrlParams->hClient,
2791                           RES_GET_HANDLE(pKernelChannel),
2792                           pRmCtrlParams->cmd,
2793                           pRmCtrlParams->pParams,
2794                           pRmCtrlParams->paramsSize,
2795                           status);
2796         return status;
2797     }
2798 
2799     NV_ASSERT_OK_OR_RETURN(
2800         kchannelGetClassEngineID_HAL(pGpu, pKernelChannel, pParams->hObject,
2801                                  &pParams->classEngineID,
2802                                  &pParams->classID,
2803                                  &rmEngineType));
2804 
2805     pParams->engineID = gpuGetNv2080EngineType(rmEngineType);
2806 
2807     if (IS_MIG_IN_USE(pGpu) &&
2808         kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, rmEngineType))
2809     {
2810         MIG_INSTANCE_REF ref;
2811         RM_ENGINE_TYPE localRmEngineType;
2812 
2813         NV_ASSERT_OK_OR_RETURN(
2814             kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
2815                                             GPU_RES_GET_DEVICE(pKernelChannel),
2816                                             &ref));
2817 
2818         NV_ASSERT_OK_OR_RETURN(
2819             kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref,
2820                                               rmEngineType,
2821                                               &localRmEngineType));
2822 
2823         NV_PRINTF(LEVEL_INFO, "Overriding global engine type 0x%x to local engine type 0x%x (0x%x) due to MIG\n",
2824                   pParams->engineID, gpuGetNv2080EngineType(localRmEngineType), localRmEngineType);
2825 
2826         pParams->engineID = gpuGetNv2080EngineType(localRmEngineType);
2827     }
2828 
2829     return status;
2830 }
2831 
2832 NV_STATUS
2833 kchannelCtrlCmdResetChannel_IMPL
2834 (
2835     KernelChannel *pKernelChannel,
2836     NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams
2837 )
2838 {
2839     NV_STATUS status    = NV_OK;
2840     OBJGPU   *pGpu      = GPU_RES_GET_GPU(pKernelChannel);
2841     CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
2842     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
2843 
2844     if (!(pRmCtrlParams->bInternal ||
2845           pResetChannelParams->resetReason <
2846               NV906F_CTRL_CMD_RESET_CHANNEL_REASON_ENUM_MAX))
2847     {
2848         return NV_ERR_INVALID_PARAMETER;
2849     }
2850 
2851     //
2852     // All real hardware management is done in the host.
2853     // Do an RPC to the host to do the hardware update and return.
2854     //
2855     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
2856     {
2857         NV_RM_RPC_CONTROL(pGpu,
2858                           pRmCtrlParams->hClient,
2859                           RES_GET_HANDLE(pKernelChannel),
2860                           NV906F_CTRL_CMD_RESET_CHANNEL,
2861                           pResetChannelParams,
2862                           pRmCtrlParams->paramsSize,
2863                           status);
2864         return status;
2865     }
2866 
2867     //
2868     // Do an internal control call to do channel reset
2869     // on Host (Physical) RM
2870     //
2871     return kchannelFwdToInternalCtrl_HAL(pGpu,
2872                                          pKernelChannel,
2873                                          NVA06F_CTRL_CMD_INTERNAL_RESET_CHANNEL,
2874                                          pRmCtrlParams);
2875 }
2876 
2877 //
2878 // channelCtrlCmdEventSetTrigger
2879 //
// This command handles set trigger operations for the following GPFIFO
// classes:
2882 //
2883 //    KEPLER_CHANNEL_GPFIFO_A  (Class: NVA06F)
2884 //    KEPLER_CHANNEL_GPFIFO_B  (Class: NVA16F)
2885 //    KEPLER_CHANNEL_GPFIFO_C  (Class: NVA26F)
2886 //    MAXWELL_CHANNEL_GPFIFO_A (Class: NVB06F)
2887 //    PASCAL_CHANNEL_GPFIFO_A  (Class: NVC06F)
2888 //
2889 NV_STATUS
2890 kchannelCtrlCmdEventSetTrigger_IMPL
2891 (
2892     KernelChannel *pKernelChannel
2893 )
2894 {
2895     kchannelNotifyGeneric(pKernelChannel, NVA06F_NOTIFIERS_SW, NULL, 0);
2896 
2897     return NV_OK;
2898 }
2899 
2900 NV_STATUS
2901 kchannelCtrlCmdGpFifoSchedule_IMPL
2902 (
2903     KernelChannel *pKernelChannel,
2904     NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams
2905 )
2906 {
2907     OBJGPU       *pGpu          = GPU_RES_GET_GPU(pKernelChannel);
2908     NV_STATUS     rmStatus      = NV_OK;
2909     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
2910     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
2911 
2912     //
2913     // Bug 1737765: Prevent Externally Owned Channels from running unless bound
2914     //  It is possible for clients to allocate and schedule channels while
2915     //  skipping the UVM registration step which binds the appropriate
2916     //  allocations in RM. We need to fail channel scheduling if the channels
2917     //  have not been registered with UVM.
2918     //  This check is performed on baremetal, CPU-RM and guest-RM
2919     //
2920     NV_ASSERT_OR_RETURN(kchannelIsSchedulable_HAL(pGpu, pKernelChannel), NV_ERR_INVALID_STATE);
2921 
2922     //
2923     // If this was a host-only channel we'll have never set the runlist id, so
2924     // force it here to ensure it is immutable now that the channel is scheduled.
2925     //
2926     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
2927     kchannelSetRunlistSet(pGpu, pKernelChannel, NV_TRUE);
2928     SLI_LOOP_END
2929 
2930 
2931     //
2932     // All real hardware management is done in the host.
2933     // Do an RPC to the host to do the hardware update and return.
2934     //
2935     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
2936     {
2937 
2938         NV_RM_RPC_CONTROL(pGpu,
2939                           RES_GET_CLIENT_HANDLE(pKernelChannel),
2940                           RES_GET_HANDLE(pKernelChannel),
2941                           NVA06F_CTRL_CMD_GPFIFO_SCHEDULE,
2942                           pRmCtrlParams->pParams,
2943                           pRmCtrlParams->paramsSize,
2944                           rmStatus);
2945 
2946         return rmStatus;
2947     }
2948 
2949     //
    // Do an internal control call to schedule the channel
    // on Host (Physical) RM
2952     //
2953     return kchannelFwdToInternalCtrl_HAL(pGpu,
2954                                          pKernelChannel,
2955                                          NVA06F_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE,
2956                                          pRmCtrlParams);
2957 }
2958 
2959 NV_STATUS
2960 kchannelCtrlCmdGetEngineCtxSize_IMPL
2961 (
2962     KernelChannel *pKernelChannel,
2963     NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS *pCtxSizeParams
2964 )
2965 {
2966     return NV_ERR_NOT_SUPPORTED;
2967 }
2968 
2969 NV_STATUS
2970 kchannelCtrlCmdSetErrorNotifier_IMPL
2971 (
2972     KernelChannel *pKernelChannel,
2973     NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS *pSetErrorNotifierParams
2974 )
2975 {
2976     OBJGPU   *pGpu = GPU_RES_GET_GPU(pKernelChannel);
2977     RC_NOTIFIER_SCOPE scope;
2978     NV_STATUS rmStatus = NV_OK;
2979 
2980     NV_PRINTF(LEVEL_INFO,
2981               "calling setErrorNotifier on channel: 0x%x, broadcast to TSG: %s\n",
2982               kchannelGetDebugTag(pKernelChannel),
2983               pSetErrorNotifierParams->bNotifyEachChannelInTSG ? "true" : "false");
2984 
2985     scope = pSetErrorNotifierParams->bNotifyEachChannelInTSG ?
2986                 RC_NOTIFIER_SCOPE_TSG :
2987                 RC_NOTIFIER_SCOPE_CHANNEL;
2988 
2989     rmStatus = krcErrorSetNotifier(pGpu, GPU_GET_KERNEL_RC(pGpu),
2990                                    pKernelChannel,
2991                                    ROBUST_CHANNEL_GR_EXCEPTION,
2992                                    kchannelGetEngineType(pKernelChannel),
2993                                    scope);
2994     return rmStatus;
2995 }
2996 
2997 NV_STATUS
2998 kchannelCtrlCmdBind_IMPL
2999 (
3000     KernelChannel *pKernelChannel,
3001     NVA06F_CTRL_BIND_PARAMS *pParams
3002 )
3003 {
3004     RM_ENGINE_TYPE globalRmEngineType;
3005     RM_ENGINE_TYPE localRmEngineType;
3006     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3007     NvBool bMIGInUse = IS_MIG_IN_USE(pGpu);
3008     NV_STATUS rmStatus = NV_OK;
3009     ENGDESCRIPTOR engineDesc;
3010 
3011     if (!pParams)
3012         return NV_ERR_INVALID_ARGUMENT;
3013 
3014     // Check if channel belongs to TSG that is not internal RM TSG
3015     if (!pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm)
3016     {
        // This may be a valid request if we added a new channel to a TSG that
        // is already running. In that case we just have to check that it uses
        // the same runlist as the whole TSG; we do that in fifoRunlistSetId().
3021         NV_PRINTF(LEVEL_INFO,
3022                   "Bind requested for channel %d belonging to TSG %d.\n",
3023                   kchannelGetDebugTag(pKernelChannel),
3024                   pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->grpID);
3025     }
3026 
3027     localRmEngineType = globalRmEngineType = gpuGetRmEngineType(pParams->engineType);
3028 
3029     if (bMIGInUse)
3030     {
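        //
        // Under MIG, clients pass an instance-local engine type. Translate it
        // to the global engine type for the engine descriptor lookup below;
        // the runlist bind itself still uses the local type.
        //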
3031         KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
3032         MIG_INSTANCE_REF ref;
3033 
3034         NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
3035             kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
3036                                             GPU_RES_GET_DEVICE(pKernelChannel),
3037                                             &ref));
3038 
3039         NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
3040             kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, localRmEngineType,
3041                                               &globalRmEngineType));
3042 
3043     }
3044 
3045     NV_PRINTF(LEVEL_INFO, "Binding Channel %d to Engine %d\n",
3046               kchannelGetDebugTag(pKernelChannel), globalRmEngineType);
3047 
3048     // Translate globalRmEngineType -> enginedesc
3049     NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
3050         gpuXlateClientEngineIdToEngDesc(pGpu, globalRmEngineType, &engineDesc));
3051 
3052     if (rmStatus == NV_OK)
3053     {
3054         NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
3055             kchannelBindToRunlist(pKernelChannel, localRmEngineType, engineDesc));
3056     }
3057 
3058     return rmStatus;
3059 }
3060 
3061 NV_STATUS
3062 kchannelCtrlCmdSetInterleaveLevel_IMPL
3063 (
3064     KernelChannel *pKernelChannel,
3065     NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams
3066 )
3067 {
3068     OBJGPU          *pGpu         = GPU_RES_GET_GPU(pKernelChannel);
3069     NV_STATUS        status       = NV_OK;
3070 
3071     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
3072     {
3073         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
3074         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3075 
3076         NV_RM_RPC_CONTROL(pGpu,
3077                           RES_GET_CLIENT_HANDLE(pKernelChannel),
3078                           RES_GET_HANDLE(pKernelChannel),
3079                           pRmCtrlParams->cmd,
3080                           pRmCtrlParams->pParams,
3081                           pRmCtrlParams->paramsSize,
3082                           status);
3083         NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, NV_ERR_NOT_SUPPORTED);
3084     }
3085 
3086     status = kchangrpSetInterleaveLevel(pGpu, pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup, pParams->channelInterleaveLevel);
3087 
3088     return status;
3089 }
3090 
3091 NV_STATUS
3092 kchannelCtrlCmdGetInterleaveLevel_IMPL
3093 (
3094     KernelChannel *pKernelChannel,
3095     NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams
3096 )
3097 {
3098     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3099 
3100     pParams->channelInterleaveLevel =
3101         pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pInterleaveLevel[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3102 
3103     return NV_OK;
3104 }
3105 
3106 NV_STATUS
3107 kchannelCtrlCmdGpfifoGetWorkSubmitToken_IMPL
3108 (
3109     KernelChannel *pKernelChannel,
3110     NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *pTokenParams
3111 )
3112 {
3113     NV_STATUS     rmStatus      = NV_OK;
3114     OBJGPU       *pGpu          = GPU_RES_GET_GPU(pKernelChannel);
3115     KernelFifo   *pKernelFifo   = GPU_GET_KERNEL_FIFO(pGpu);
3116     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
3117     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3118     NvBool bIsMIGEnabled        = IS_MIG_ENABLED(pGpu);
3119 
3120     NvBool bIsModsVgpu          = NV_FALSE;
3121 
3122     NvBool bIsVgpuRpcNeeded     = (bIsModsVgpu || (IS_VIRTUAL(pGpu) &&
3123                                   !(IS_VIRTUAL_WITH_SRIOV(pGpu) && !bIsMIGEnabled &&
3124                                     kfifoIsPerRunlistChramEnabled(pKernelFifo)))) &&
3125                                     (!pKernelFifo->bGuestGenenratesWorkSubmitToken);
3126     //
3127     // vGPU:
    // If required, call into the host to get the work submit token.
3129     //
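    // The RPC is needed for legacy (non-SRIOV) vGPU, or for SRIOV vGPU when
    // MIG is enabled or per-runlist channel RAM is unavailable, and only when
    // the guest does not generate the work submit token itself.
    //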
3130     if (bIsVgpuRpcNeeded)
3131     {
3132         NV_RM_RPC_CONTROL(pGpu,
3133                           pRmCtrlParams->hClient,
3134                           RES_GET_HANDLE(pKernelChannel),
3135                           pRmCtrlParams->cmd,
3136                           pRmCtrlParams->pParams,
3137                           pRmCtrlParams->paramsSize,
3138                           rmStatus);
3139         //
3140         // All done if error or for non-MODS vGPU guest (host did notification in RPC).
3141         // GSP FW is not able to perform the notification, nor is MODS vGPU host,
3142         // so it still needs to be handled by the client/guest outside the RPC.
3143         //
3144         if (rmStatus != NV_OK)
3145         {
3146             return rmStatus;
3147         }
3148 
3149         if (IS_VIRTUAL(pGpu))
3150         {
3151             return rmStatus;
3152         }
3153     }
3154 
3155     if (!bIsModsVgpu || pKernelFifo->bGuestGenenratesWorkSubmitToken)
3156     {
3157         NV_ASSERT_OR_RETURN(pKernelChannel->pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE);
3158         NV_ASSERT_OR_RETURN(pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup != NULL, NV_ERR_INVALID_STATE);
3159         rmStatus = kfifoGenerateWorkSubmitToken_HAL(pGpu, pKernelFifo, pKernelChannel,
3160                                                     &pTokenParams->workSubmitToken,
3161                                                     pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bIsCallingContextVgpuPlugin);
3162         NV_CHECK_OR_RETURN(LEVEL_INFO, rmStatus == NV_OK, rmStatus);
3163     }
3164 
3165     rmStatus = kchannelNotifyWorkSubmitToken(pGpu, pKernelChannel, pTokenParams->workSubmitToken);
3166     return rmStatus;
3167 }
3168 
3169 NV_STATUS
3170 kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex_IMPL
3171 (
3172     KernelChannel *pKernelChannel,
3173     NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *pParams
3174 )
3175 {
3176     NV_STATUS   rmStatus    = NV_OK;
3177     OBJGPU     *pGpu        = GPU_RES_GET_GPU(pKernelChannel);
3178 
3179     //
3180     // vGPU:
3181     //
    // Since the vGPU plugin is required to update the notifier for the guest,
    // send an RPC to host RM for the plugin to hook.
3184     // RPC not needed for SR-IOV vGpu.
3185     //
3186     // GSP-RM:
3187     //
3188     // Notification is done in CPU-RM, so RPC is not made to FW-RM.
3189     //
3190     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
3191     NvBool bIsMIGEnabled    = IS_MIG_ENABLED(pGpu);
3192     NvBool bIsVgpuRpcNeeded = IS_VIRTUAL(pGpu) &&
3193                               !(IS_VIRTUAL_WITH_SRIOV(pGpu) && !bIsMIGEnabled &&
3194                                 kfifoIsPerRunlistChramEnabled(pKernelFifo));
3195     if (bIsVgpuRpcNeeded)
3196     {
3197         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
3198         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3199 
3200         NV_RM_RPC_CONTROL(pGpu,
3201                           pRmCtrlParams->hClient,
3202                           RES_GET_HANDLE(pKernelChannel),
3203                           pRmCtrlParams->cmd,
3204                           pRmCtrlParams->pParams,
3205                           pRmCtrlParams->paramsSize,
3206                           rmStatus);
3207         return rmStatus;
3208     }
3209 
3210     rmStatus = kchannelUpdateWorkSubmitTokenNotifIndex(pGpu, pKernelChannel, pParams->index);
3211     return rmStatus;
3212 }
3213 
3214 NV_STATUS
3215 kchannelRegisterChild_IMPL
3216 (
3217     KernelChannel     *pKernelChannel,
3218     ChannelDescendant *pObject
3219 )
3220 {
3221     NvU16 firstObjectClassID;
3222     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3223     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
3224 
3225     //
3226     // On recent GPU architectures such as FERMI, SetObject operations
3227     // require an EngineID:ClassID tuple as an argument, rather than
3228     // an object handle. In order to be able to differentiate between
3229     // different instances of any given software class, the ClassID
3230     // field needs to be unique within the FIFO context. The code below
3231     // attempts to find a qualifying 16-bit ClassID.
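    //
    // The loop below scans forward from nextObjectClassID (skipping 0) and
    // wraps around the 16-bit space; if it comes back to its starting value
    // without finding an unused ClassID, it fails with
    // NV_ERR_INSUFFICIENT_RESOURCES.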
3232     //
3233     if (pObject->resourceDesc.engDesc == ENG_SW)
3234     {
3235         RS_ORDERED_ITERATOR it;
3236         RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3237         ChannelDescendant *pMatchingObject = NULL;
3238 
3239         firstObjectClassID = pKernelChannel->nextObjectClassID;
3240 
3241         do
3242         {
3243             if (++pKernelChannel->nextObjectClassID == firstObjectClassID)
3244             {
3245                 NV_PRINTF(LEVEL_ERROR, "channel %08x:%08x: out of handles!\n",
3246                           RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_HANDLE(pKernelChannel));
3247                 return NV_ERR_INSUFFICIENT_RESOURCES;
3248             }
3249             if (pKernelChannel->nextObjectClassID == 0)
3250                 continue;
3251 
3252             it = clientRefOrderedIter(pClient, RES_GET_REF(pKernelChannel), classId(ChannelDescendant), NV_FALSE);
3253 
3254             while (clientRefOrderedIterNext(pClient, &it))
3255             {
3256                 pMatchingObject = dynamicCast(it.pResourceRef->pResource, ChannelDescendant);
3257                 NV_ASSERT_OR_ELSE(pMatchingObject != NULL, continue);
3258 
3259                 if ((pMatchingObject->resourceDesc.engDesc == ENG_SW) &&
3260                     (pMatchingObject->classID == pKernelChannel->nextObjectClassID))
3261                 {
3262                     break;
3263                 }
3264 
3265                 pMatchingObject = NULL;
3266             }
3267         }
3268         while (pMatchingObject != NULL);
3269 
3270         pObject->classID = pKernelChannel->nextObjectClassID;
3271     }
3272 
3273     return kfifoAddObject_HAL(pGpu, pKernelFifo, pObject);
3274 }
3275 
3276 NV_STATUS
3277 kchannelDeregisterChild_IMPL
3278 (
3279     KernelChannel     *pKernelChannel,
3280     ChannelDescendant *pObject
3281 )
3282 {
3283     NV_STATUS status = NV_OK;
3284     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3285     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
3286 
3287     status = kfifoDeleteObject_HAL(pGpu, pKernelFifo, pObject);
3288     if (status != NV_OK)
3289     {
3290         NV_PRINTF(LEVEL_ERROR, "Could not delete hal resources with object\n");
3291         DBG_BREAKPOINT();
3292     }
3293 
3294     return status;
3295 }
3296 
3297 void
3298 kchannelGetChildIterator
3299 (
3300     KernelChannel *pKernelChannel,
3301     NvU32 classID,
3302     RM_ENGINE_TYPE engineID,
3303     KernelChannelChildIterator *pIter
3304 )
3305 {
3306     RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3307     NV_ASSERT_OR_RETURN_VOID(pIter != NULL);
3308 
3309     portMemSet(pIter, 0, sizeof(*pIter));
3310     pIter->classID = classID;
3311     pIter->engineID = engineID;
3312     pIter->rsIter = clientRefOrderedIter(pClient, RES_GET_REF(pKernelChannel), classId(ChannelDescendant), NV_FALSE);
3313 }
3314 
3315 ChannelDescendant *
3316 kchannelGetNextChild
3317 (
3318     KernelChannelChildIterator *pIter
3319 )
3320 {
3321     ChannelDescendant *pChild;
3322 
3323     NV_ASSERT_OR_RETURN(pIter != NULL, NULL);
3324 
3325     while (clientRefOrderedIterNext(pIter->rsIter.pClient, &pIter->rsIter))
3326     {
3327         pChild = dynamicCast(pIter->rsIter.pResourceRef->pResource, ChannelDescendant);
3328         NV_ASSERT_OR_RETURN(pChild != NULL, NULL);
3329 
3330         // Continue to the next child if it doesn't match these filters:
3331         if (pIter->engineID != pChild->resourceDesc.engDesc)
3332             continue;
3333         if (pIter->classID != 0)
3334         {
3335             if ((RES_GET_EXT_CLASS_ID(pChild) != pIter->classID) &&
3336                 (pChild->classID != pIter->classID))
3337                 continue;
3338         }
3339 
3340         // Yield this matching child
3341         return pChild;
3342     }
3343 
3344     return NULL;
3345 }
3346 
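/**
 * @brief Return the first child of the channel matching the given classID and
 *        engineID filters, or NULL if no such child exists.
 */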
3347 ChannelDescendant *
3348 kchannelGetOneChild
3349 (
3350     KernelChannel *pKernelChannel,
3351     NvU32          classID,
3352     NvU32          engineID
3353 )
3354 {
3355     KernelChannelChildIterator iter;
3356 
3357     kchannelGetChildIterator(pKernelChannel, classID, engineID, &iter);
3358     return kchannelGetNextChild(&iter);
3359 }
3360 
3361 /**
3362  * @brief Gets object iterator for a channel or channel group
3363  *
3364  * @param[in] pKernelChannel
3365  * @param[in] classNum
3366  * @param[in] engDesc
3367  * @param[out] pIt
3368  *
3369  */
3370 void
3371 kchannelGetChildIterOverGroup
3372 (
3373     KernelChannel                   *pKernelChannel,
3374     NvU32                            classNum,
3375     NvU32                            engDesc,
3376     KernelChannelChildIterOverGroup *pIt
3377 )
3378 {
3379     NV_ASSERT_OR_RETURN_VOID(pIt != NULL);
3380     portMemSet(pIt, 0, sizeof(*pIt));
3381 
3382     NV_ASSERT_OR_RETURN_VOID(pKernelChannel != NULL);
3383 
3384     pIt->classNum = classNum;
3385     pIt->engDesc = engDesc;
3386 
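    // Begin iteration at the head of the channel group's channel list; for a
    // TSG this walks every channel in the group.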
3387     pIt->channelNode.pKernelChannel =
3388         pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pKernelChannel;
3389     pIt->channelNode.pNext =
3390         pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pNext;
3391 
3392     kchannelGetChildIterator(pIt->channelNode.pKernelChannel, pIt->classNum, pIt->engDesc, &pIt->kchannelIter);
3393 }
3394 
3395 /**
3396  * @brief Get the next object based on given class/engine tag.
3397  * When the class number is 0, it is ignored.
3398  *
3399  * @param[in] pIt
3400  *
3401  * Returns: found child or NULL
3402  *
3403  */
3404 ChannelDescendant *
3405 kchannelGetNextChildOverGroup
3406 (
3407     KernelChannelChildIterOverGroup *pIt
3408 )
3409 {
3410     PCHANNEL_NODE pHead = NULL;
3411     ChannelDescendant *pObject = NULL;
3412 
3413     NV_ASSERT_OR_RETURN(pIt != NULL, NULL);
3414 
3415     // Start iterating from the given object (if any) of the given channel.
3416     pHead = &pIt->channelNode;
3417 
3418     while ((pHead != NULL) && (pHead->pKernelChannel != NULL))
3419     {
3420         pObject = kchannelGetNextChild(&pIt->kchannelIter);
3421 
3422         if (pObject != NULL)
3423             break;
3424 
3425         //
3426         // If there are no more objects to inspect in the given channel,
3427         // move to the next channel (if any, for TSGs).
3428         //
3429         pHead = pHead->pNext;
3430         if (pHead != NULL)
3431         {
3432             NV_ASSERT_OR_ELSE(pHead->pKernelChannel != NULL, break);
3433             // Re-initialize the channeldescendant iterator based on this channel
3434             kchannelGetChildIterator(pHead->pKernelChannel, pIt->classNum, pIt->engDesc, &pIt->kchannelIter);
3435         }
3436     }
3437 
3438     // Cache off the next channel to start searching from in future iterations.
3439     pIt->channelNode.pKernelChannel = pHead ? pHead->pKernelChannel : NULL;
3440     pIt->channelNode.pNext = pHead ? pHead->pNext : NULL;
3441 
3442     return pObject;
3443 }
3444 
3445 NV_STATUS
3446 kchannelFindChildByHandle
3447 (
3448     KernelChannel *pKernelChannel,
3449     NvHandle hResource,
3450     ChannelDescendant **ppObject
3451 )
3452 {
3453     RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3454     RsResourceRef *pResourceRef = NULL;
3455 
3456     NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, clientGetResourceRef(pClient, hResource, &pResourceRef));
3457 
3458     NV_CHECK_OR_RETURN(LEVEL_ERROR, pResourceRef->pParentRef->hResource == RES_GET_HANDLE(pKernelChannel), NV_ERR_OBJECT_NOT_FOUND);
3459 
3460     *ppObject = dynamicCast(pResourceRef->pResource, ChannelDescendant);
3461     NV_CHECK_OR_RETURN(LEVEL_ERROR, *ppObject != NULL, NV_ERR_OBJECT_NOT_FOUND);
3462 
3463     return NV_OK;
3464 }
3465 
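/*!
 * @brief Clear a VA list, optionally unmapping each tracked virtual address
 *        from its VA space first.
 */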
3466 static NV_STATUS
3467 _kchannelClearVAList
3468 (
3469     OBJGPU          *pGpu,
3470     VA_LIST         *pVaList,
3471     NvBool           bUnmap
3472 )
3473 {
3474     //
3475     // Subcontext handling
    // We need to unmap the mappings on all the subcontexts, since this call
    // will be made only on one of the TSG channels.
3477     //
3478     if (bUnmap)
3479     {
3480         OBJVASPACE *pVas;
3481         NvU64 vAddr;
3482 
3483         FOR_EACH_IN_VADDR_LIST(pVaList, pVas, vAddr)
3484         {
3485             dmaUnmapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVas, vAddr);
3486         }
3487         FOR_EACH_IN_VADDR_LIST_END(pVaList, pVas, vAddr);
3488     }
3489 
3490     vaListClear(pVaList);
3491 
3492     return NV_OK;
3493 }
3494 
3495 /**
3496  * @brief Set or clear the Engine Context Memdesc.
3497  *
 * The memdesc should be committed to hardware afterwards using
 * channelCommitEngineContext(), and should be unmapped with
 * kchannelUnmapEngineCtxBuf() before being cleared or changed.
3500  *
3501  * @param[in] pGpu
3502  * @param[in] pKernelChannel
3503  * @param[in] engDesc
3504  * @param[in] pMemDesc                the new memdesc to assign, or NULL to clear
3505  *
3506  * Returns: status
3507  */
3508 NV_STATUS
3509 kchannelSetEngineContextMemDesc_IMPL
3510 (
3511     OBJGPU             *pGpu,
3512     KernelChannel      *pKernelChannel,
3513     NvU32               engDesc,
3514     MEMORY_DESCRIPTOR  *pMemDesc
3515 )
3516 {
3517     NV_STATUS status = NV_OK;
3518     ENGINE_CTX_DESCRIPTOR *pEngCtxDesc;
3519     KernelChannelGroup *pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup;
3520 
3521     NV_PRINTF(LEVEL_INFO,
3522               "ChID %x engDesc 0x%x pMemDesc %p\n",
3523               kchannelGetDebugTag(pKernelChannel), engDesc, pMemDesc);
3524 
3525     NV_ASSERT_OR_RETURN(engDesc != ENG_FIFO, NV_ERR_INVALID_PARAMETER);
3526 
3527     if (IS_GR(engDesc))
3528     {
3529         NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel));
3530     }
3531 
3532     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
3533 
3534     // Get or allocate the EngCtxDesc
3535     pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3536 
3537     if (pEngCtxDesc == NULL && pMemDesc == NULL)
3538     {
3539         // There is no need to clean up or alloc anything.
3540         SLI_LOOP_CONTINUE;
3541     }
3542 
3543     if (pEngCtxDesc != NULL)
3544     {
3545         // Cleanup for the engDesc context that existed before
3546         if (pEngCtxDesc->pMemDesc != NULL)
3547         {
3548             memdescFree(pEngCtxDesc->pMemDesc);
3549             memdescDestroy(pEngCtxDesc->pMemDesc);
3550         }
3551 
3553     }
3554     else
3555     {
3556         NV_ASSERT_OK_OR_ELSE(status,
3557             kchangrpAllocEngineContextDescriptor(pGpu, pKernelChannelGroup),
3558             SLI_LOOP_GOTO(fail));
3559         pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3560         NV_ASSERT_OR_ELSE(pEngCtxDesc != NULL, status = NV_ERR_NO_MEMORY; SLI_LOOP_GOTO(fail));
3561     }
3562 
3563     if (pMemDesc != NULL)
3564     {
3565         // We are setting a memdesc
3566         if (pMemDesc->Allocated > 0)
3567             pMemDesc->Allocated++;
3568         memdescAddRef(pMemDesc);
3569 
3570         if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL)
3571         {
3572             NvU64 virtAddr;
3573 
3574             // Since the memdesc is already virtual, we do not manage it
3575             status = vaListSetManaged(&pEngCtxDesc->vaList, NV_FALSE);
3576             NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail));
3577 
3578             // memdescGetPhysAddr of a virtual memdesc is a virtual addr
3579             virtAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0);
3580             status = vaListAddVa(&pEngCtxDesc->vaList, pKernelChannel->pVAS, virtAddr);
3581             NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail));
3582         }
3583     }
3584 
3585     // Assign the memdesc (or NULL)
3586     pEngCtxDesc->pMemDesc = pMemDesc;
3587     pEngCtxDesc->engDesc = engDesc;
3588 
3589     SLI_LOOP_END
3590 
3591 fail:
3592     return status;
3593 }
3594 
3595 /**
3596  * @brief Unmaps everything from the Engine Context Memdesc.
3597  *
3598  * @param[in] pGpu
3599  * @param[in] pKernelChannel
3600  * @param[in] engDesc
3601  *
3602  * Returns: status
3603  */
3604 NV_STATUS
3605 kchannelUnmapEngineCtxBuf_IMPL
3606 (
3607     OBJGPU             *pGpu,
3608     KernelChannel      *pKernelChannel,
3609     NvU32               engDesc
3610 )
3611 {
3612     NV_STATUS status = NV_OK;
3613     ENGINE_CTX_DESCRIPTOR *pEngCtxDesc;
3614 
3615     NV_PRINTF(LEVEL_INFO,
3616               "ChID %x engDesc 0x%x\n",
3617               kchannelGetDebugTag(pKernelChannel), engDesc);
3618 
3619     NV_ASSERT_OR_RETURN(engDesc != ENG_FIFO, NV_ERR_INVALID_PARAMETER);
3620 
3621     if (IS_GR(engDesc))
3622     {
3623         NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel));
3624     }
3625 
3626     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
3627     pEngCtxDesc = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3628 
    // If there is no EngCtxDesc or MemDesc, nothing was ever mapped, so there
    // is nothing to unmap here.
3630     if ((pEngCtxDesc == NULL) || (pEngCtxDesc->pMemDesc == NULL))
3631     {
3632         SLI_LOOP_CONTINUE;
3633     }
3634 
3635     // Clear VA list, including unmap if managed
3636     status = _kchannelClearVAList(pGpu, &pEngCtxDesc->vaList, vaListGetManaged(&pEngCtxDesc->vaList));
3637     NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail));
3638 
3639     SLI_LOOP_END
3640 
3641 fail:
3642     return status;
3643 }
3644 
3645 // Check that BcState stays consistent for GR channel engine context
3646 NV_STATUS
3647 kchannelCheckBcStateCurrent_IMPL
3648 (
3649     OBJGPU        *pGpu,
3650     KernelChannel *pKernelChannel
3651 )
3652 {
3653 #define KERNEL_CHANNEL_BCSTATE_UNINITIALIZED (0)
3654 #define KERNEL_CHANNEL_BCSTATE_DISABLED (1)
3655 #define KERNEL_CHANNEL_BCSTATE_ENABLED (2)
3656 
3657     NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu);
3658     NvU8   channelBcStateEnum = bBcState ? KERNEL_CHANNEL_BCSTATE_ENABLED : KERNEL_CHANNEL_BCSTATE_DISABLED;
3659 
3660     NV_PRINTF(
3661         LEVEL_INFO,
3662         "GPU = %d, ChID = %d, bcStateCurrent = %d, channelBcStateEnum = %d\n",
3663         pGpu->gpuInstance,
3664         kchannelGetDebugTag(pKernelChannel),
3665         pKernelChannel->bcStateCurrent,
3666         channelBcStateEnum);
3667 
3668     // Check that the BC status did not change - 0 = first call, 1 = disable, 2 = enable.
3669     if (pKernelChannel->bcStateCurrent == KERNEL_CHANNEL_BCSTATE_UNINITIALIZED)
3670     {
3671         pKernelChannel->bcStateCurrent = channelBcStateEnum;
3672     }
3673     NV_ASSERT_OR_RETURN(pKernelChannel->bcStateCurrent == channelBcStateEnum, NV_ERR_INVALID_STATE);
3674 
3675     return NV_OK;
3676 }
3677 
// Map the Engine Context Memdesc and add its VAddr to the VA list
3679 NV_STATUS
3680 kchannelMapEngineCtxBuf_IMPL
3681 (
3682     OBJGPU      *pGpu,
3683     KernelChannel *pKernelChannel,
3684     NvU32        engDesc
3685 )
3686 {
3687     OBJVASPACE            *pVAS           = NULL;
3688     NV_STATUS              status         = NV_OK;
3689     ENGINE_CTX_DESCRIPTOR *pEngCtx;
3690     NvU64                  addr;
3691     MEMORY_DESCRIPTOR     *pTempMemDesc;
3692     OBJGVASPACE           *pGVAS;
3693     KernelFifo            *pKernelFifo    = GPU_GET_KERNEL_FIFO(pGpu);
3694 
3695     NV_ASSERT_OR_RETURN(engDesc != ENG_FIFO, NV_ERR_INVALID_ARGUMENT);
3696 
3697     if (IS_GR(engDesc))
3698     {
3699         NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel));
3700     }
3701 
3702     NV_PRINTF(LEVEL_INFO, "ChID %d engDesc %s (0x%x) \n",
3703               kchannelGetDebugTag(pKernelChannel),
3704               kfifoGetEngineName_HAL(GPU_GET_KERNEL_FIFO(pGpu), ENGINE_INFO_TYPE_ENG_DESC, engDesc),
3705               engDesc);
3706 
3707     pVAS = pKernelChannel->pVAS;
3708     pGVAS = dynamicCast(pVAS, OBJGVASPACE);
3709     NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_INVALID_STATE);
3710 
3711     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
3712 
3713     pEngCtx = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3714     NV_ASSERT_OR_ELSE(pEngCtx != NULL, status = NV_ERR_INVALID_STATE; goto fail);
3715 
3716     pTempMemDesc = pEngCtx->pMemDesc;
3717     NV_ASSERT_OR_ELSE(pTempMemDesc != NULL, status = NV_ERR_INVALID_STATE; goto fail);
3718 
3719     //
    // For a virtual context, the UMD has already allocated/mapped the engine
    // context, so simply look up the existing VAddr.
3722     //
3723 
3724     status = vaListFindVa(&pEngCtx->vaList, pVAS, &addr);
3725     if (status == NV_OK)
3726     {
3727         // VAddr already exists and needs no action
3728         SLI_LOOP_CONTINUE;
3729     }
3730     else if (status == NV_ERR_OBJECT_NOT_FOUND)
3731     {
3732         NvU32 flags = DMA_ALLOC_VASPACE_NONE;
3733         if (gvaspaceIsExternallyOwned(pGVAS))
3734         {
            // We should never end up here if the VA space is externally owned!
3736             NV_ASSERT_FAILED("Externally owned object not found");
3737             status = NV_ERR_INVALID_OPERATION;
3738             goto fail;
3739         }
3740 
3741         kfifoGetCtxBufferMapFlags_HAL(pGpu, pKernelFifo, engDesc, &flags);
3742 
3743         status = dmaMapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, pTempMemDesc, &addr,
3744             flags, DMA_UPDATE_VASPACE_FLAGS_NONE);
3745         if (status != NV_OK)
3746         {
3747             NV_PRINTF(LEVEL_ERROR,
3748                       "Could not map context buffer for engDesc 0x%x\n",
3749                       engDesc);
3750             goto fail;
3751         }
3752         else
3753         {
3754             status = vaListAddVa(&pEngCtx->vaList, pVAS, addr);
3755             NV_ASSERT(status == NV_OK);
3756         }
3757     }
3758     else
3759     {
3760         NV_ASSERT_OK_FAILED("vaListFindVa", status);
3761         goto fail;
3762     }
3763 
3764 fail:
3765     if (status != NV_OK)
3766     {
3767         SLI_LOOP_BREAK;
3768     }
3769     SLI_LOOP_END
3770 
3771     return status;
3772 }
3773 
3774 /**
3775  * @brief Updates the notifier index with which to update the work submit
3776  *        notifier on request.
3777  *
 * @param[in] pGpu              OBJGPU
3779  * @param[in] pKernelChannel    KernelChannel
3780  * @param[in] index             Updated notifier index
3781  *
3782  * @return NV_OK
3783  *         NV_ERR_OUT_OF_RANGE if index is beyond the bounds of the notifier
3784  */
3785 NV_STATUS
3786 kchannelUpdateWorkSubmitTokenNotifIndex_IMPL
3787 (
3788     OBJGPU *pGpu,
3789     KernelChannel *pKernelChannel,
3790     NvU32 index
3791 )
3792 {
3793     NvHandle hNotifier;
3794     RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3795     Memory *pMemory;
3796     ContextDma *pContextDma;
3797     NvU32 addressSpace;
3798     NvU64 notificationBufferSize;
3799     Device *pDevice;
3800 
3801     hNotifier = pKernelChannel->hErrorContext;
3802 
3803     // Clobbering error notifier index is illegal
3804     NV_CHECK_OR_RETURN(LEVEL_INFO, index != NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR,
3805                      NV_ERR_INVALID_ARGUMENT);
3806 
3807     // Check for integer overflows
3808     if (((index + 1) < index) ||
3809         !portSafeMulU64(index + 1, sizeof(NvNotification), &notificationBufferSize))
3810     {
3811         return NV_ERR_OUT_OF_RANGE;
3812     }
3813 
3814     pDevice = GPU_RES_GET_DEVICE(pKernelChannel);
3815 
3816     if (NV_OK == memGetByHandleAndDevice(pClient, hNotifier, RES_GET_HANDLE(pDevice), &pMemory))
3817     {
3818         addressSpace = memdescGetAddressSpace(pMemory->pMemDesc);
3819 
3820         NV_CHECK_OR_RETURN(LEVEL_INFO, pMemory->Length >= notificationBufferSize,
3821                          NV_ERR_OUT_OF_RANGE);
3822         switch (addressSpace)
3823         {
3824             case ADDR_VIRTUAL:
3825             {
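                //
                // For a virtual address, validate the requested notifier slot
                // against the size of the underlying DMA mapping rather than
                // the Memory object's length.
                //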
3826                 NvU64 physAddr = memdescGetPhysAddr(pMemory->pMemDesc, AT_GPU_VA, 0);
3827                 PCLI_DMA_MAPPING_INFO pDmaMappingInfo;
3828 
3829                 NV_CHECK_OR_RETURN(LEVEL_INFO,
3830                     CliGetDmaMappingInfo(pClient,
3831                                          RES_GET_HANDLE(pDevice),
3832                                          RES_GET_HANDLE(pMemory),
3833                                          physAddr,
3834                                          gpumgrGetDeviceGpuMask(pGpu->deviceInstance),
3835                                          &pDmaMappingInfo),
3836                     NV_ERR_GENERIC);
3837 
3838                 NV_CHECK_OR_RETURN(LEVEL_INFO, pDmaMappingInfo->pMemDesc->Size >= notificationBufferSize,
3839                                  NV_ERR_OUT_OF_RANGE);
3840                 break;
3841             }
3842             case ADDR_FBMEM:
3843                 // fall through
3844             case ADDR_SYSMEM:
3845                 // Covered by check prior to switch/case
3846                 break;
3847             default:
3848                 return NV_ERR_NOT_SUPPORTED;
3849         }
3850     }
3851     else if (NV_OK == ctxdmaGetByHandle(pClient, hNotifier, &pContextDma))
3852     {
3853         NV_CHECK_OR_RETURN(LEVEL_INFO, pContextDma->Limit >= (notificationBufferSize - 1),
3854                          NV_ERR_OUT_OF_RANGE);
3855     }
3856     else
3857     {
3858         return NV_ERR_OBJECT_NOT_FOUND;
3859     }
3860 
3861     pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN]
3862         = index;
3863 
3864     return NV_OK;
3865 }
3866 
3867 /**
3868  * @brief Updates the work submit notifier passed to the channel during channel
3869  *        creation with the new work submit token.
3870  *
 * @param[in] pGpu              OBJGPU
3872  * @param[in] pKernelChannel    KernelChannel
3873  * @param[in] token             Work submit token to notify clients of
3874  *
3875  * @return NV_OK on successful notify
3876  *         NV_OK if client has not set up the doorbell notifier. This should
3877  *         be an error once all clients have been updated.
3878  */
3879 NV_STATUS
3880 kchannelNotifyWorkSubmitToken_IMPL
3881 (
3882     OBJGPU *pGpu,
3883     KernelChannel *pKernelChannel,
3884     NvU32 token
3885 )
3886 {
3887     MEMORY_DESCRIPTOR *pNotifierMemDesc = pKernelChannel->pErrContextMemDesc;
3888     NV_ADDRESS_SPACE addressSpace;
3889     NvU16 notifyStatus = 0x0;
3890     NvU32 index;
3891     OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
3892     NvU64 time;
3893     MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
3894     KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
3895     TRANSFER_SURFACE surf = {0};
3896     NvNotification *pNotifier = NULL;
3897     NvBool bMemEndTransfer = NV_FALSE;
3898 
3899     if (pNotifierMemDesc == NULL)
3900         return NV_OK;
3901 
3902     index = pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN];
3903 
3904     notifyStatus =
3905         FLD_SET_DRF(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _IN_PROGRESS, _TRUE, notifyStatus);
3906     notifyStatus =
3907         FLD_SET_DRF_NUM(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _VALUE, 0xFFFF, notifyStatus);
3908 
3909     addressSpace = memdescGetAddressSpace(pNotifierMemDesc);
3910     if (RMCFG_FEATURE_PLATFORM_GSP)
3911         NV_ASSERT_OR_RETURN(addressSpace == ADDR_FBMEM, NV_ERR_INVALID_STATE);
3912 
3913     //
3914     // If clients did not allocate enough memory for the doorbell
3915     // notifier, return NV_OK so as not to regress older clients
3916     //
3917     NV_CHECK_OR_RETURN(LEVEL_INFO, memdescGetSize(pNotifierMemDesc) >= (index + 1) * sizeof(NvNotification), NV_OK);
3918 
3919     pNotifier = (NvNotification *)memdescGetKernelMapping(pNotifierMemDesc);
3920     if (pNotifier == NULL)
3921     {
3922         surf.pMemDesc = pNotifierMemDesc;
3923         surf.offset = index * sizeof(NvNotification);
3924 
3925         pNotifier =
3926             (NvNotification *) memmgrMemBeginTransfer(pMemoryManager, &surf,
3927                                                       sizeof(NvNotification),
3928                                                       TRANSFER_FLAGS_SHADOW_ALLOC);
3929         NV_ASSERT_OR_RETURN(pNotifier != NULL, NV_ERR_INVALID_STATE);
3930         bMemEndTransfer = NV_TRUE;
3931     }
3932     else
3933     {
3934         //
        // If a CPU pointer has been passed by the caller, ensure that the
        // notifier is in sysmem or, if it is in vidmem, that BAR access to it
        // is not blocked (for HCC)
3938         //
3939         NV_ASSERT_OR_RETURN(
3940             memdescGetAddressSpace(pNotifierMemDesc) == ADDR_SYSMEM ||
3941             !kbusIsBarAccessBlocked(pKernelBus), NV_ERR_INVALID_ARGUMENT);
3942         pNotifier = &pNotifier[index];
3943     }
3944 
3945     tmrGetCurrentTime(pTmr, &time);
3946 
3947     notifyFillNvNotification(pGpu, pNotifier, token, 0,
3948                              notifyStatus, NV_TRUE, time);
3949 
3950     if (bMemEndTransfer)
3951     {
3952         memmgrMemEndTransfer(pMemoryManager, &surf, sizeof(NvNotification), 0);
3953     }
3954 
3955     return NV_OK;
3956 }
3957 
3958 /**
3959  * @brief Alloc and set up pNotifyActions
3960  *
3961  * @param[in]  pKernelChannel
 * @param[in]  classNum            Channel class
3963  *
3964  * @return  NV_OK or error code
3965  */
3966 static NV_STATUS
3967 _kchannelSetupNotifyActions
3968 (
3969     KernelChannel *pKernelChannel,
3970     NvU32          classNum
3971 )
3972 {
3973     CLI_CHANNEL_CLASS_INFO classInfo;
3974 
3975     // Allocate notifier action table for the maximum supported by this class
3976     CliGetChannelClassInfo(classNum, &classInfo);
3977     if (classInfo.notifiersMaxCount > 0)
3978     {
3979         pKernelChannel->pNotifyActions = portMemAllocNonPaged(
3980                                    classInfo.notifiersMaxCount * sizeof(*pKernelChannel->pNotifyActions));
3981         if (pKernelChannel->pNotifyActions == NULL)
3982             return NV_ERR_NO_MEMORY;
3983 
3984         portMemSet(pKernelChannel->pNotifyActions, 0,
3985                  classInfo.notifiersMaxCount * sizeof(*pKernelChannel->pNotifyActions));
3986     }
3987 
3988     return NV_OK;
3989 } // end of _kchannelSetupNotifyActions()
3990 
3991 /**
3992  * @brief Cleans up pNotifyActions
3993  *
3994  * @param[in] pKernelChannel
3995  */
3996 static void
3997 _kchannelCleanupNotifyActions
3998 (
3999     KernelChannel *pKernelChannel
4000 )
4001 {
4002     // free memory associated with notify actions table
4003     portMemFree(pKernelChannel->pNotifyActions);
4004     pKernelChannel->pNotifyActions = NULL;
4005 } // end of _kchannelCleanupNotifyActions()
4006 
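/*!
 * @brief Write the channel's ChID into the error context notifier, if the
 *        error context handle refers to a ContextDma.
 */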
4007 static NV_STATUS
4008 _kchannelNotifyOfChid
4009 (
4010     OBJGPU *pGpu,
4011     KernelChannel *pKernelChannel,
4012     RsClient *pRsClient
4013 )
4014 {
4015     ContextDma *pContextDma;
4016 
4017     //
    // Return the ChID to the driver via the error context DMA.
    //
    // We need to update this when the virtual channel gets mapped in.
4021     //
4022 
4023     if ((ctxdmaGetByHandle(pRsClient, pKernelChannel->hErrorContext, &pContextDma)) == NV_OK)
4024     {
4025         NV_CHECK_OR_RETURN(LEVEL_INFO, pContextDma->Limit >= sizeof(NvNotification) - 1, NV_ERR_INVALID_ARGUMENT);
4026         notifyFillNotifier(pGpu, pContextDma, pKernelChannel->ChID, 0, NV_OK);
4027     }
4028 
4029     return NV_OK;
4030 }
4031 
4032 NvU32
4033 kchannelGetGfid_IMPL
4034 (
4035     KernelChannel *pKernelChannel
4036 )
4037 {
4038     return pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->gfid;
4039 }
4040 
4041 NvBool
4042 kchannelIsCpuMapped
4043 (
4044     OBJGPU *pGpu,
4045     KernelChannel *pKernelChannel
4046 )
4047 {
4048     return !!(pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &
4049               KERNEL_CHANNEL_SW_STATE_CPU_MAP);
4050 }
4051 
4052 void
4053 kchannelSetCpuMapped
4054 (
4055     OBJGPU *pGpu,
4056     KernelChannel *pKernelChannel,
4057     NvBool bCpuMapped
4058 )
4059 {
4060     if (bCpuMapped)
4061     {
4062         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] |=
4063               KERNEL_CHANNEL_SW_STATE_CPU_MAP;
4064     }
4065     else
4066     {
4067         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &=
4068               ~(KERNEL_CHANNEL_SW_STATE_CPU_MAP);
4069     }
4070 }
4071 
4072 NvBool
4073 kchannelIsRunlistSet
4074 (
4075     OBJGPU *pGpu,
4076     KernelChannel *pKernelChannel
4077 )
4078 {
4079     return !!(pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &
4080               KERNEL_CHANNEL_SW_STATE_RUNLIST_SET);
4081 }
4082 
4083 void
4084 kchannelSetRunlistSet
4085 (
4086     OBJGPU *pGpu,
4087     KernelChannel *pKernelChannel,
4088     NvBool bRunlistSet
4089 )
4090 {
4091     if (bRunlistSet)
4092     {
4093         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] |=
4094               KERNEL_CHANNEL_SW_STATE_RUNLIST_SET;
4095     }
4096     else
4097     {
4098         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &=
4099               ~(KERNEL_CHANNEL_SW_STATE_RUNLIST_SET);
4100     }
4101 }
4102 
4103 NV_STATUS
4104 kchannelGetChannelPhysicalState_KERNEL
4105 (
4106     OBJGPU *pGpu,
4107     KernelChannel *pKernelChannel,
4108     NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams
4109 )
4110 {
4111     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
4112     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams;
4113     NV_STATUS status = NV_OK;
4114 
4115     // Get the physical state from GSP
4116     NV_RM_RPC_CONTROL(pGpu,
4117                       pRmCtrlParams->hClient,
4118                       pRmCtrlParams->hObject,
4119                       pRmCtrlParams->cmd,
4120                       pRmCtrlParams->pParams,
4121                       pRmCtrlParams->paramsSize,
4122                       status);
4123     NV_ASSERT_OK_OR_RETURN(status);
4124 
4125     return NV_OK;
4126 }
4127 
4128 NV_STATUS
4129 kchannelMapUserD_IMPL
4130 (
4131     OBJGPU         *pGpu,
4132     KernelChannel  *pKernelChannel,
4133     RS_PRIV_LEVEL   privLevel,
4134     NvU64           offset,
4135     NvU32           protect,
4136     NvP64          *ppCpuVirtAddr,
4137     NvP64          *ppPriv
4138 )
4139 {
4140     NV_STATUS status      = NV_OK;
4141     NvU64     userBase;
4142     NvU64     userOffset;
4143     NvU64     userSize;
4144     NvU32     cachingMode = NV_MEMORY_UNCACHED;
4145 
    // If USERD is allocated by the client, RM has nothing to map here.
4147     if (pKernelChannel->bClientAllocatedUserD)
4148     {
4149         return NV_OK;
4150     }
4151 
4152     status = kchannelGetUserdInfo_HAL(pGpu, pKernelChannel,
4153                                       &userBase, &userOffset, &userSize);
4154 
4155     if (status != NV_OK)
4156         return status;
4157 
4158 
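    //
    // USERD lives either in BAR0 register space (userBase matches the GPU's
    // register base) or in BAR1; choose the mapping path accordingly.
    //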
4159     if (userBase == pGpu->busInfo.gpuPhysAddr)
4160     {
4161         // Create a mapping of BAR0
4162         status = osMapGPU(pGpu, privLevel, NvU64_LO32(userOffset+offset),
4163                  NvU64_LO32(userSize), protect, ppCpuVirtAddr, ppPriv);
4164         goto done;
4165     }
4166 
4167     if (pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING))
4168     {
4169         cachingMode = NV_MEMORY_CACHED;
4170     }
4171 
4172     //
4173     // If userBase is not bar0, then it is bar1 and we create a regular memory
4174     // mapping.
4175     //
4176     if (privLevel >= RS_PRIV_LEVEL_KERNEL)
4177     {
4178         status = osMapPciMemoryKernel64(pGpu, userBase + userOffset + offset,
4179                                         userSize, protect, ppCpuVirtAddr, cachingMode);
4180     }
4181     else
4182     {
4183         status = osMapPciMemoryUser(pGpu->pOsGpuInfo,
4184                                     userBase + userOffset + offset,
4185                                     userSize, protect, ppCpuVirtAddr,
4186                                     ppPriv, cachingMode);
4187     }
4188     if (!((status == NV_OK) && *ppCpuVirtAddr))
4189     {
4190         NV_PRINTF(LEVEL_ERROR,
4191                   "BAR1 offset 0x%llx for USERD of channel %x could not be cpu mapped\n",
4192                   userOffset, kchannelGetDebugTag(pKernelChannel));
4193     }
4194 
4195 done:
4196 
4197     // Indicate channel is mapped
4198     if (status == NV_OK)
4199     {
4200             SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
4201             kchannelSetCpuMapped(pGpu, pKernelChannel, NV_TRUE);
4202             SLI_LOOP_END
4203     }
4204 
4205     return status;
4206 }
4207 
4208 void
4209 kchannelUnmapUserD_IMPL
4210 (
4211     OBJGPU         *pGpu,
4212     KernelChannel  *pKernelChannel,
4213     RS_PRIV_LEVEL   privLevel,
4214     NvP64          *ppCpuVirtAddr,
4215     NvP64          *ppPriv
4216 )
4217 {
4218     NV_STATUS status;
4219     NvU64     userBase;
4220     NvU64     userOffset;
4221     NvU64     userSize;
4222 
4223     if (pKernelChannel->bClientAllocatedUserD)
4224     {
4225         return;
4226     }
4227 
4228     status = kchannelGetUserdInfo_HAL(pGpu, pKernelChannel,
4229                                       &userBase, &userOffset, &userSize);
4230 
4231     NV_ASSERT_OR_RETURN_VOID(status == NV_OK);
4232 
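    //
    // Undo the mapping the same way it was created: osUnmapGPU() for USERD in
    // BAR0, PCI memory unmap for USERD in BAR1.
    //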
4233     if (userBase == pGpu->busInfo.gpuPhysAddr)
4234     {
4235         osUnmapGPU(pGpu->pOsGpuInfo, privLevel, *ppCpuVirtAddr,
4236                    NvU64_LO32(userSize), *ppPriv);
4237     }
4238     else
4239     {
        // GF100+: unmap the CPU virtual mapping of USERD in BAR1
4242         if (privLevel >= RS_PRIV_LEVEL_KERNEL)
4243         {
4244             osUnmapPciMemoryKernel64(pGpu, *ppCpuVirtAddr);
4245         }
4246         else
4247         {
4248             osUnmapPciMemoryUser(pGpu->pOsGpuInfo, *ppCpuVirtAddr,
4249                                  userSize, *ppPriv);
4250         }
4251     }
4252 
    // Indicate channel is no longer mapped
4254     kchannelSetCpuMapped(pGpu, pKernelChannel, NV_FALSE);
4255     return;
4256 }
4257 
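/*!
 * @brief Return the per-subdevice instance memory descriptor for this channel.
 *
 * @param[in]  pGpu
 * @param[in]  pKernelChannel
 * @param[out] ppMemDesc       Memory descriptor, or NULL with NV_ERR_INVALID_STATE
 */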
4258 static NV_STATUS
4259 _kchannelGetUserMemDesc
4260 (
4261     OBJGPU             *pGpu,
4262     KernelChannel      *pKernelChannel,
4263     PMEMORY_DESCRIPTOR *ppMemDesc
4264 )
4265 {
4266     NV_ASSERT_OR_RETURN(ppMemDesc != NULL, NV_ERR_INVALID_STATE);
4267     *ppMemDesc = NULL;
4268 
4269     NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_STATE);
4270 
4271     *ppMemDesc = pKernelChannel->pInstSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
4272 
4273     return *ppMemDesc ? NV_OK : NV_ERR_INVALID_STATE;
4274 }
4275 
4276 /*!
4277  * @brief Retrieve a KernelChannel from either a KernelChannel or TSG handle. KernelChannel is
4278  * checked first. If TSG is provided, the head of the TSG is returned.
4279  *
4280  * @param[in]  pClient            Client object
4281  * @param[in]  hDual              NvHandle either to TSG or to KernelChannel
4282  * @param[out] ppKernelChannel    Referenced KernelChannel
4283  */
4284 NV_STATUS
4285 kchannelGetFromDualHandle_IMPL
4286 (
4287     RsClient        *pClient,
4288     NvHandle         hDual,
4289     KernelChannel  **ppKernelChannel
4290 )
4291 {
4292     KernelChannel *pKernelChannel;
4293     RsResourceRef *pChanGrpRef;
4294 
4295     NV_ASSERT_OR_RETURN(ppKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT);
4296 
4297     *ppKernelChannel = NULL;
4298 
4299     if (CliGetKernelChannel(pClient, hDual, &pKernelChannel) == NV_OK)
4300     {
4301         *ppKernelChannel = pKernelChannel;
4302         return NV_OK;
4303     }
4304 
4305     if (CliGetChannelGroup(pClient->hClient, hDual, &pChanGrpRef, NULL) == NV_OK)
4306     {
4307         KernelChannelGroupApi *pKernelChannelGroupApi = dynamicCast(
4308             pChanGrpRef->pResource,
4309             KernelChannelGroupApi);
4310 
4311         NV_ASSERT_OR_RETURN(
4312             (pKernelChannelGroupApi != NULL) &&
4313                 (pKernelChannelGroupApi->pKernelChannelGroup != NULL),
4314             NV_ERR_INVALID_ARGUMENT);
4315 
4316         if (pKernelChannelGroupApi->pKernelChannelGroup->chanCount == 0)
4317             return NV_ERR_INVALID_ARGUMENT;
4318 
4319         *ppKernelChannel =
4320             pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pKernelChannel;
4321         NV_ASSERT_OR_RETURN(*ppKernelChannel != NULL, NV_ERR_INVALID_STATE);
4322 
4323         return NV_OK;
4324     }
4325 
4326     return NV_ERR_OBJECT_NOT_FOUND;
4327 }
4328 
4329 /*!
4330  * @brief Retrieve a KernelChannel from either a KernelChannel or TSG handle. KernelChannel is
4331  * checked first. If TSG is provided, the head of the TSG is returned. If
4332  * KernelChannel handle is provided, it must not be part of a client-allocated TSG.
4333  *
4334  * @param[in]  pClient            Client object
4335  * @param[in]  hDual              NvHandle either to TSG or to bare Channel
4336  * @param[out] ppKernelChannel    Referenced KernelChannel
4337  */
4338 NV_STATUS
4339 kchannelGetFromDualHandleRestricted_IMPL
4340 (
4341     RsClient        *pClient,
4342     NvHandle         hDual,
4343     KernelChannel  **ppKernelChannel
4344 )
4345 {
4346     NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
4347         kchannelGetFromDualHandle(pClient, hDual, ppKernelChannel));
4348     if ((RES_GET_HANDLE(*ppKernelChannel) == hDual) &&
4349         (((*ppKernelChannel)->pKernelChannelGroupApi->pKernelChannelGroup != NULL) &&
4350          !(*ppKernelChannel)->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm))
4351     {
4352         NV_PRINTF(LEVEL_ERROR, "channel handle 0x%08x is part of a channel group, not allowed!\n",
4353                   RES_GET_HANDLE(*ppKernelChannel));
4354         return NV_ERR_INVALID_ARGUMENT;
4355     }
4356     return NV_OK;
4357 }
4358 
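/*!
 * @brief Populate an RsCpuMapping with the details of a channel's CPU mapping.
 */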
4359 static void
4360 _kchannelUpdateFifoMapping
4361 (
4362     KernelChannel    *pKernelChannel,
4363     OBJGPU           *pGpu,
4364     NvBool            bKernel,
4365     NvP64             cpuAddress,
4366     NvP64             priv,
4367     NvU64             cpuMapLength,
4368     NvU32             flags,
4369     NvHandle          hSubdevice,
4370     RsCpuMapping     *pMapping
4371 )
4372 {
4373     pMapping->pPrivate->pGpu      = pGpu;
4374     pMapping->pPrivate->bKernel   = bKernel;
    pMapping->processId           = osGetCurrentProcess();
4376     pMapping->pLinearAddress      = cpuAddress;
4377     pMapping->pPrivate->pPriv     = priv;
4378     pMapping->length              = cpuMapLength;
4379     pMapping->flags               = flags;
4380     pMapping->pContext            = (void*)(NvUPtr)pKernelChannel->ChID;
4381 }
4382 
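/*!
 * @brief Retrieve the key material bundle (KMB) for this channel from the
 *        Confidential Compute key store.
 */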
4383 NV_STATUS kchannelRetrieveKmb_KERNEL
4384 (
4385     OBJGPU *pGpu,
4386     KernelChannel *pKernelChannel,
4387     ROTATE_IV_TYPE rotateOperation,
4388     NvBool includeSecrets,
4389     CC_KMB *keyMaterialBundle
4390 )
4391 {
4392     ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu);
4393 
4394     NV_ASSERT(pCC != NULL);
4395 
4396     return (confComputeKeyStoreRetrieveViaChannel_HAL(pCC, pKernelChannel, rotateOperation,
4397                                                       includeSecrets, keyMaterialBundle));
4398 }
4399 
4400 /*!
4401  * @brief Get KMB for secure channel
4402  *
4403  * @param[in] pKernelChannnel
4404  * @param[out] pGetKmbParams
4405  */
4406 NV_STATUS
4407 kchannelCtrlCmdGetKmb_KERNEL
4408 (
4409     KernelChannel *pKernelChannel,
4410     NVC56F_CTRL_CMD_GET_KMB_PARAMS *pGetKmbParams
4411 )
4412 {
4413     if (!pKernelChannel->bCCSecureChannel)
4414     {
4415         return NV_ERR_NOT_SUPPORTED;
4416     }
4417 
4418     portMemCopy((void*)(&pGetKmbParams->kmb), sizeof(CC_KMB),
4419                 (const void*)(&pKernelChannel->clientKmb), sizeof(CC_KMB));
4420 
    return NV_OK;
4423 }
4424 
4425 /*!
4426  * @brief      Rotate the IVs for the given secure channel
4427  *
 * @param[in]     pKernelChannel   Secure channel whose IVs are to be rotated
 * @param[in,out] pRotateIvParams  Rotation type on input; updated IVs on output
4430  *
4431  * @return     NV_OK on success
4432  * @return     NV_ERR_NOT_SUPPORTED if channel is not a secure channel.
4433  */
4434 NV_STATUS
4435 kchannelCtrlRotateSecureChannelIv_KERNEL
4436 (
4437     KernelChannel *pKernelChannel,
4438     NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV_PARAMS *pRotateIvParams
4439 )
4440 {
4441     NV_STATUS            status            = NV_OK;
4442     OBJGPU              *pGpu              = GPU_RES_GET_GPU(pKernelChannel);
4443     ConfidentialCompute *pCC               = GPU_GET_CONF_COMPUTE(pGpu);
4444     ROTATE_IV_TYPE       rotateIvOperation = pRotateIvParams->rotateIvType;
4445 
4446     if (!pKernelChannel->bCCSecureChannel)
4447     {
4448         return NV_ERR_NOT_SUPPORTED;
4449     }
4450 
4451     NV_PRINTF(LEVEL_INFO, "Rotating IV in CPU-RM.\n");
4452 
4453     status = confComputeKeyStoreRetrieveViaChannel_HAL(
4454         pCC, pKernelChannel, rotateIvOperation, NV_TRUE, &pKernelChannel->clientKmb);
4455 
4456     if (status != NV_OK)
4457     {
4458         return status;
4459     }
4460 
4461     portMemSet(pRotateIvParams, 0, sizeof(*pRotateIvParams));
4462 
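    // Pass the newly rotated CPU-side IVs to physical RM through the control params.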
4463     portMemCopy(pRotateIvParams->updatedKmb.encryptBundle.iv,
4464                 sizeof(pRotateIvParams->updatedKmb.encryptBundle.iv),
4465                 pKernelChannel->clientKmb.encryptBundle.iv,
4466                 sizeof(pKernelChannel->clientKmb.encryptBundle.iv));
4467 
4468     portMemCopy(pRotateIvParams->updatedKmb.decryptBundle.iv,
4469                 sizeof(pRotateIvParams->updatedKmb.decryptBundle.iv),
4470                 pKernelChannel->clientKmb.decryptBundle.iv,
4471                 sizeof(pKernelChannel->clientKmb.decryptBundle.iv));
4472 
4473     pRotateIvParams->rotateIvType = rotateIvOperation;
4474 
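    //
    // Forward the request to physical RM (GSP-RM when GSP offload is enabled)
    // so the GPU-side IVs are rotated to match.
    //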
4475     NV_RM_RPC_CONTROL(pGpu,
4476                       RES_GET_CLIENT_HANDLE(pKernelChannel),
4477                       RES_GET_HANDLE(pKernelChannel),
4478                       NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV,
4479                       pRotateIvParams,
4480                       sizeof(*pRotateIvParams),
4481                       status);
4482 
4483     if (status != NV_OK)
4484     {
4485         return status;
4486     }
4487 
4488     if ((rotateIvOperation == ROTATE_IV_ALL_VALID) || (rotateIvOperation == ROTATE_IV_ENCRYPT))
4489     {
4490         portMemCopy(&pRotateIvParams->updatedKmb.encryptBundle,
4491                     sizeof(pRotateIvParams->updatedKmb.encryptBundle),
4492                     &pKernelChannel->clientKmb.encryptBundle,
4493                     sizeof(pKernelChannel->clientKmb.encryptBundle));
4494     }
4495 
4496     if ((rotateIvOperation == ROTATE_IV_ALL_VALID) || (rotateIvOperation == ROTATE_IV_DECRYPT))
4497     {
4498         portMemCopy(&pRotateIvParams->updatedKmb.decryptBundle,
4499                     sizeof(pRotateIvParams->updatedKmb.decryptBundle),
4500                     &pKernelChannel->clientKmb.decryptBundle,
4501                     sizeof(pKernelChannel->clientKmb.decryptBundle));
4502     }
4503 
    return NV_OK;
4506 }
4507 
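/*!
 * @brief      Rotate the GPU-side IVs for the given secure channel (physical RM)
 *
 * @param[in]     pKernelChannel
 * @param[in,out] pRotateIvParams
 *
 * @return     NV_OK on success
 */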
4508 NV_STATUS
4509 kchannelCtrlRotateSecureChannelIv_PHYSICAL
4510 (
4511     KernelChannel *pKernelChannel,
4512     NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV_PARAMS *pRotateIvParams
4513 )
4514 {
4515     NV_STATUS status;
4516 
4517     NV_PRINTF(LEVEL_INFO, "Rotating IV in GSP-RM.\n");
4518 
    //
    // CPU-side encrypt IV corresponds to GPU-side decrypt IV.
    // CPU-side decrypt IV corresponds to GPU-side encrypt IV.
    //
4521     status =
4522         kchannelRotateSecureChannelIv_HAL(pKernelChannel,
4523                                           pRotateIvParams->rotateIvType,
4524                                           pRotateIvParams->updatedKmb.decryptBundle.iv,
4525                                           pRotateIvParams->updatedKmb.encryptBundle.iv);
4526     if (status != NV_OK)
4527     {
4528         return status;
4529     }
4530 
4531     return NV_OK;
4532 }
4533