1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 // FIXME XXX
25 #define NVOC_KERNEL_GRAPHICS_CONTEXT_H_PRIVATE_ACCESS_ALLOWED
26 #define NVOC_KERNEL_CHANNEL_H_PRIVATE_ACCESS_ALLOWED
27 
28 #include "kernel/gpu/fifo/kernel_channel.h"
29 
30 #include "kernel/core/locks.h"
31 #include "kernel/diagnostics/gpu_acct.h"
32 #include "kernel/gpu/conf_compute/conf_compute.h"
33 #include "kernel/gpu/device/device.h"
34 #include "kernel/gpu/fifo/kernel_ctxshare.h"
35 #include "kernel/gpu/fifo/kernel_channel_group.h"
36 #include "kernel/gpu/gr/kernel_graphics.h"
37 #include "kernel/gpu/mem_mgr/context_dma.h"
38 #include "kernel/gpu/mem_mgr/heap.h"
39 #include "kernel/gpu/mem_mgr/mem_mgr.h"
40 #include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
41 #include "kernel/gpu/rc/kernel_rc.h"
42 #include "kernel/mem_mgr/ctx_buf_pool.h"
43 #include "kernel/mem_mgr/gpu_vaspace.h"
44 #include "kernel/rmapi/event.h"
45 #include "kernel/rmapi/rmapi.h"
46 #include "kernel/rmapi/rs_utils.h"
47 #include "kernel/virtualization/hypervisor/hypervisor.h"
48 #include "gpu/bus/kern_bus.h"
49 #include "gpu/mem_mgr/virt_mem_allocator.h"
50 #include "objtmr.h"
51 
52 #include "class/cl0090.h"   // KERNEL_GRAPHICS_CONTEXT
53 #include "class/cl906fsw.h" // GF100_GPFIFO
54 #include "class/cla06c.h"   // KEPLER_CHANNEL_GROUP_A
55 #include "class/cla06f.h"   // KEPLER_CHANNEL_GPFIFO_A
56 #include "class/cla06fsw.h" // KEPLER_CHANNEL_GPFIFO_A
57 #include "class/cla16f.h"   // KEPLER_CHANNEL_GPFIFO_B
58 #include "class/cla16fsw.h" // KEPLER_CHANNEL_GPFIFO_B
59 #include "class/clb06f.h"   // MAXWELL_CHANNEL_GPFIFO_A
60 #include "class/clb06fsw.h" // MAXWELL_CHANNEL_GPFIFO_A
61 #include "class/clc06f.h"   // PASCAL_CHANNEL_GPFIFO_A
62 #include "class/clc06fsw.h" // PASCAL_CHANNEL_GPFIFO_A
63 #include "class/clc36f.h"   // VOLTA_CHANNEL_GPFIFO_A
64 #include "class/clc36fsw.h" // VOLTA_CHANNEL_GPFIFO_A
65 #include "class/clc46f.h"   // TURING_CHANNEL_GPFIFO_A
66 #include "class/clc46fsw.h" // TURING_CHANNEL_GPFIFO_A
67 #include "class/clc56f.h"   // AMPERE_CHANNEL_GPFIFO_A
68 #include "class/clc56fsw.h" // AMPERE_CHANNEL_GPFIFO_A
69 #include "class/clc572.h"   // PHYSICAL_CHANNEL_GPFIFO
70 #include "class/clc86f.h"   // HOPPER_CHANNEL_GPFIFO_A
71 #include "class/clc86fsw.h" // HOPPER_CHANNEL_GPFIFO_A
72 
73 #include "ctrl/ctrl906f.h"
74 #include "ctrl/ctrlc46f.h"
75 #include "ctrl/ctrlc86f.h"
76 
77 #include "Nvcm.h"
78 #include "libraries/resserv/resserv.h"
79 #include "libraries/resserv/rs_client.h"
80 #include "libraries/resserv/rs_resource.h"
81 #include "libraries/resserv/rs_server.h"
82 #include "nvRmReg.h"
83 #include "nvstatuscodes.h"
84 #include "vgpu/rpc.h"
85 
86 // Instmem static functions
87 static NV_STATUS _kchannelAllocHalData(OBJGPU *pGpu, KernelChannel *pKernelChannel);
88 static void      _kchannelFreeHalData(OBJGPU *pGpu, KernelChannel *pKernelChannel);
89 static NV_STATUS _kchannelAllocOrDescribeInstMem(
90     KernelChannel  *pKernelChannel,
91     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams);
92 static NV_STATUS _kchannelDescribeMemDescsFromParams(
93     OBJGPU *pGpu,
94     KernelChannel *pKernelChannel,
95     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams);
96 static NV_STATUS _kchannelDescribeMemDescsHeavySriov(OBJGPU *pGpu, KernelChannel *pKernelChannel);
97 static NV_STATUS _kchannelSendChannelAllocRpc(
98     KernelChannel *pKernelChannel,
99     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams,
100     KernelChannelGroup *pKernelChannelGroup,
101     NvBool bFullSriov);
102 
103 static NV_STATUS _kchannelSetupNotifyActions(KernelChannel *pKernelChannel,
104                                              NvU32 classNum);
105 static void _kchannelCleanupNotifyActions(KernelChannel *pKernelChannel);
106 static NV_STATUS _kchannelNotifyOfChid(OBJGPU *pGpu, KernelChannel *pKernelChannel, RsClient *pRsClient);
107 static NV_STATUS _kchannelGetUserMemDesc(OBJGPU *pGpu, KernelChannel *pKernelChannel, PMEMORY_DESCRIPTOR *ppMemDesc);
108 static void _kchannelUpdateFifoMapping(KernelChannel    *pKernelChannel,
109                                        OBJGPU           *pGpu,
110                                        NvBool            bKernel,
111                                        NvP64             cpuAddress,
112                                        NvP64             priv,
113                                        NvU64             cpuMapLength,
114                                        NvU32             flags,
115                                        NvHandle          hSubdevice,
116                                        RsCpuMapping     *pMapping);
117 
118 /*!
119  * @brief Construct a new KernelChannel, which also creates a Channel.
120  *
121  * @param[in,out]  pCallContext     The call context
122  * @param[in,out]  pParams          Params for the *_CHANNEL_GPFIFO class
123  *                                  object being created
124  *
125  * @returns NV_OK on success, specific error code on failure.
126  */
127 NV_STATUS
128 kchannelConstruct_IMPL
129 (
130     KernelChannel *pKernelChannel,
131     CALL_CONTEXT *pCallContext,
132     RS_RES_ALLOC_PARAMS_INTERNAL *pParams
133 )
134 {
135     OBJGPU                 *pGpu             = GPU_RES_GET_GPU(pKernelChannel);
136     OBJSYS                 *pSys             = SYS_GET_INSTANCE();
137     KernelMIGManager       *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
138     KernelFifo             *pKernelFifo      = GPU_GET_KERNEL_FIFO(pGpu);
139     RsClient               *pRsClient        = pCallContext->pClient;
140     RmClient               *pRmClient        = NULL;
141     RsResourceRef          *pResourceRef     = pCallContext->pResourceRef;
142     RsResourceRef          *pKernelCtxShareRef = NULL;
143     NV_STATUS               status;
144     RM_API                 *pRmApi           = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
145     NvHandle                hClient          = pRsClient->hClient;
146     NvHandle                hParent          = pResourceRef->pParentRef->hResource;
147     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams = pParams->pAllocParams;
148     RsResourceRef          *pChanGrpRef      = NULL;
149     KernelChannelGroupApi  *pKernelChannelGroupApi = NULL;
150     NvHandle                hKernelCtxShare  = pChannelGpfifoParams->hContextShare;
151     NvBool                  bTsgAllocated    = NV_FALSE;
152     NvHandle                hChanGrp         = NV01_NULL_OBJECT;
153     RsResourceRef          *pDeviceRef       = NULL;
154     RsResourceRef          *pVASpaceRef      = NULL;
155     KernelGraphicsContext  *pKernelGraphicsContext = NULL;
156     NvBool                  bMIGInUse;
157     KernelChannelGroup     *pKernelChannelGroup = NULL;
158     NvU32                   chID             = ~0;
159     NvU32                   flags            = pChannelGpfifoParams->flags;
160     RM_ENGINE_TYPE          globalRmEngineType = RM_ENGINE_TYPE_NULL;
161     NvU32                   verifFlags2      = 0;
162     NvBool                  bChidAllocated   = NV_FALSE;
163     NvBool                  bLockAcquired    = NV_FALSE;
164     NvBool                  bNotifyActionsSetup = NV_FALSE;
165     CTX_BUF_POOL_INFO      *pChannelBufPool  = NULL;
166     CTX_BUF_INFO            bufInfo          = {0};
167     NvBool                  bRpcAllocated    = NV_FALSE;
168     NvBool                  bFullSriov       = IS_VIRTUAL_WITH_SRIOV(pGpu) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu);
169     NvBool                  bAddedToGroup    = NV_FALSE;
170     NvU32                   callingContextGfid;
171     Device                 *pDevice;
172 
173     // We only support physical channels.
174     NV_ASSERT_OR_RETURN(FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_TYPE, _PHYSICAL, flags),
175         NV_ERR_NOT_SUPPORTED);
176 
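    //
    // Initialize basic channel bookkeeping from the allocation flags before
    // doing any allocation that would require cleanup on failure.
    //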
177     pKernelChannel->refCount = 1;
178     pKernelChannel->bIsContextBound = NV_FALSE;
179     pKernelChannel->nextObjectClassID = 0;
180     pKernelChannel->subctxId = 0;
181     pKernelChannel->bSkipCtxBufferAlloc = FLD_TEST_DRF(OS04, _FLAGS,
182                                                        _SKIP_CTXBUFFER_ALLOC, _TRUE, flags);
183     pKernelChannel->cid = portAtomicIncrementU32(&pSys->currentCid);
184     pKernelChannel->runqueue = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, flags);
185     pKernelChannel->engineType = RM_ENGINE_TYPE_NULL;
186     pChannelGpfifoParams->cid = pKernelChannel->cid;
187     NV_ASSERT_OK_OR_GOTO(status, refFindAncestorOfType(pResourceRef, classId(Device), &pDeviceRef), cleanup);
188     NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &callingContextGfid));
189 
190     pDevice = dynamicCast(pDeviceRef->pResource, Device);
191 
    // Internal fields must be cleared when the RMAPI call comes from a client
193     if (!hypervisorIsVgxHyper() || IS_GSP_CLIENT(pGpu))
194         pChannelGpfifoParams->hPhysChannelGroup = NV01_NULL_OBJECT;
195     pChannelGpfifoParams->internalFlags = 0;
196     portMemSet(&pChannelGpfifoParams->errorNotifierMem, 0,
197                sizeof pChannelGpfifoParams->errorNotifierMem);
198     portMemSet(&pChannelGpfifoParams->eccErrorNotifierMem, 0,
199                sizeof pChannelGpfifoParams->eccErrorNotifierMem);
200     pChannelGpfifoParams->ProcessID = 0;
201     pChannelGpfifoParams->SubProcessID = 0;
202     portMemSet(pChannelGpfifoParams->encryptIv, 0, sizeof(pChannelGpfifoParams->encryptIv));
203     portMemSet(pChannelGpfifoParams->decryptIv, 0, sizeof(pChannelGpfifoParams->decryptIv));
204     portMemSet(pChannelGpfifoParams->hmacNonce, 0, sizeof(pChannelGpfifoParams->hmacNonce));
205 
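    // Resolve the RmClient so user info, process IDs, and privilege level can
    // be derived below.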
206     pRmClient = dynamicCast(pRsClient, RmClient);
207     if (pRmClient == NULL)
208     {
209         return NV_ERR_OBJECT_NOT_FOUND;
210     }
211     pKernelChannel->pUserInfo = pRmClient->pUserInfo;
212 
213     //
214     // GSP-RM needs privilegeLevel passed in as an alloc param because it cannot
215     // check pRmClient for kernel/admin.
216     // Other platforms check pRmClient to determine privilegeLevel.
217     //
218     if (RMCFG_FEATURE_PLATFORM_GSP)
219     {
220         // Guest-RM clients can allocate a privileged channel to perform
221         // actions such as updating page tables in physical mode or scrubbing.
222         // Security for these channels is enforced by VMMU and IOMMU
223         if (gpuIsSriovEnabled(pGpu) && IS_GFID_VF(callingContextGfid) &&
224                 FLD_TEST_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, flags))
225         {
226             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN;
227         }
228         else
229         {
230             pKernelChannel->privilegeLevel =
231                 DRF_VAL(_KERNELCHANNEL, _ALLOC_INTERNALFLAGS, _PRIVILEGE, pChannelGpfifoParams->internalFlags);
232         }
233 
        // In GSP, all vGPU channels simply use the GFID as the processID
235         if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && IS_GFID_VF(callingContextGfid))
236         {
237             pKernelChannel->ProcessID = callingContextGfid;
238         }
239         else
240         {
241             pKernelChannel->ProcessID = pChannelGpfifoParams->ProcessID;
242         }
243 
244         pKernelChannel->SubProcessID = pChannelGpfifoParams->SubProcessID;
245     }
246     else
247     {
248         RS_PRIV_LEVEL privLevel = pCallContext->secInfo.privLevel;
249         if (privLevel >= RS_PRIV_LEVEL_KERNEL)
250         {
251             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL;
252             pChannelGpfifoParams->flags = FLD_SET_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, pChannelGpfifoParams->flags);
253         }
254         else if (rmclientIsAdmin(pRmClient, privLevel) || hypervisorCheckForObjectAccess(hClient))
255         {
256             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN;
257             pChannelGpfifoParams->flags = FLD_SET_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, pChannelGpfifoParams->flags);
258         }
259         else
260         {
261             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER;
262         }
263 
264         pKernelChannel->ProcessID = pRmClient->ProcID;
265         pKernelChannel->SubProcessID = pRmClient->SubProcessID;
266     }
267 
268     // Context share and vaspace handles can't be active at the same time.
269     if ((hKernelCtxShare != NV01_NULL_OBJECT) && (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT))
270     {
271         NV_PRINTF(LEVEL_ERROR,
272                   "Both context share and vaspace handles can't be valid at the same time\n");
273         return NV_ERR_INVALID_ARGUMENT;
274     }
275 
276     bMIGInUse = IS_MIG_IN_USE(pGpu);
277 
278     //
279     // The scrubber is allocated by Kernel RM in offload mode, and is disabled
280     // completely on GSP, so it is not possible for GSP to determine whether
281     // this allocation should be allowed or not. CPU RM can and should properly
282     // check this.
283     //
284     if (IS_MIG_ENABLED(pGpu) && !RMCFG_FEATURE_PLATFORM_GSP && !bMIGInUse)
285     {
286         NvBool bTopLevelScrubberEnabled = NV_FALSE;
287         NvBool bTopLevelScrubberConstructed = NV_FALSE;
288         MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
289 
290         if (memmgrIsPmaInitialized(pMemoryManager))
291         {
292             Heap *pHeap = GPU_GET_HEAP(pGpu);
293             NvU32 pmaConfigs = PMA_QUERY_SCRUB_ENABLED | PMA_QUERY_SCRUB_VALID;
294             NV_ASSERT_OK(pmaQueryConfigs(&pHeap->pmaObject, &pmaConfigs));
295             bTopLevelScrubberEnabled = (pmaConfigs & PMA_QUERY_SCRUB_ENABLED) != 0x0;
296             bTopLevelScrubberConstructed = (pmaConfigs & PMA_QUERY_SCRUB_VALID) != 0x0;
297         }
298 
299         //
300         // Exception: Top level scrubber must be initialized before
301         // GPU instances can be created, and therefore must be allowed to
302         // create a CE context if the scrubber is supported.
303         //
304 
305         if (!bTopLevelScrubberEnabled || bTopLevelScrubberConstructed ||
306             !kchannelCheckIsKernel(pKernelChannel))
307         {
308             NV_PRINTF(LEVEL_ERROR,
309                       "Channel allocation not allowed when MIG is enabled without GPU instancing\n");
310             return NV_ERR_INVALID_STATE;
311         }
312     }
313 
    // Find the TSG, or create a TSG to wrap this channel if needed
315     status = clientGetResourceRefByType(pRsClient, hParent,
316                                         classId(KernelChannelGroupApi),
317                                         &pChanGrpRef);
318     if (status != NV_OK)
319     {
320         NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS tsgParams = { 0 };
321 
322         // Context share can only be used with a TSG channel
323         if (hKernelCtxShare != NV01_NULL_OBJECT)
324         {
325             NV_PRINTF(LEVEL_ERROR,
326                       "Non-TSG channels can't use context share\n");
327             status = NV_ERR_INVALID_ARGUMENT;
328             goto cleanup;
329         }
330 
331         tsgParams.hVASpace = pChannelGpfifoParams->hVASpace;
332         tsgParams.engineType = pChannelGpfifoParams->engineType;
333         // vGpu plugin context flag should only be set if context is plugin
334         if (gpuIsSriovEnabled(pGpu))
335         {
336             tsgParams.bIsCallingContextVgpuPlugin = FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_VGPU_PLUGIN_CONTEXT, _TRUE, pChannelGpfifoParams->flags);
337         }
338         //
339         // Internally allocate a TSG to wrap this channel. There is no point
340         // in mirroring this allocation in the host, as the channel is
341         // already mirrored.
342         //
343         status = pRmApi->AllocWithSecInfo(pRmApi,
344             hClient,
345             hParent,
346             &pChannelGpfifoParams->hPhysChannelGroup,
347             KEPLER_CHANNEL_GROUP_A,
348             NV_PTR_TO_NvP64(&tsgParams),
349             sizeof(tsgParams),
350             RMAPI_ALLOC_FLAGS_SKIP_RPC,
351             NvP64_NULL,
352             &pRmApi->defaultSecInfo);
353 
354         NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
355         bTsgAllocated = NV_TRUE;
356         hChanGrp = pChannelGpfifoParams->hPhysChannelGroup;
357 
358         status = clientGetResourceRefByType(pRsClient, hChanGrp,
359                                             classId(KernelChannelGroupApi),
360                                             &pChanGrpRef);
361         NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
362 
363         pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
364                                              KernelChannelGroupApi);
365         pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
366         pKernelChannelGroup->bAllocatedByRm = NV_TRUE;
367     }
368     else
369     {
370         hChanGrp = hParent;
371         pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
372                                              KernelChannelGroupApi);
373         if (pKernelChannelGroupApi == NULL ||
374             pKernelChannelGroupApi->pKernelChannelGroup == NULL)
375         {
376             NV_PRINTF(LEVEL_ERROR, "Invalid KernelChannelGroup* for channel 0x%x\n",
377                       pResourceRef->hResource);
378             status = NV_ERR_INVALID_POINTER;
379             NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
380         }
381         pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
382 
        // A TSG channel should specify a context share object rather than a vaspace directly
384         if (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT)
385         {
386             NV_PRINTF(LEVEL_ERROR,
387                       "TSG channels can't use an explicit vaspace\n");
388             status = NV_ERR_INVALID_ARGUMENT;
389             NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
390         }
391     }
392     pKernelChannel->pKernelChannelGroupApi = pKernelChannelGroupApi;
393 
394     NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE);
395     NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_STATE);
396 
397     //
    // Reserve memory for the channel instance block from PMA
    // into a pool tied to the channel's parent TSG.
    // RM will later allocate the instance block memory from this pool.
401     //
402     pChannelBufPool = pKernelChannelGroup->pChannelBufPool;
403     if (pChannelBufPool != NULL)
404     {
405         NvBool bIsScrubSkipped;
406         NvBool bRequestScrubSkip = FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_SKIP_SCRUBBER, _TRUE, pChannelGpfifoParams->flags);
407 
408         if (bRequestScrubSkip)
409         {
410             if (!kchannelCheckIsKernel(pKernelChannel))
411             {
412                 status = NV_ERR_INVALID_ARGUMENT;
413                 NV_PRINTF(LEVEL_ERROR, "Only kernel priv clients can skip scrubber\n");
414                 goto cleanup;
415             }
416 
417             //
            // If this is the first channel in the TSG, set up the ctx buf pool to skip scrubbing.
            // For subsequent channels, the setting must match the ctx buf pool's state.
420             //
421             if (pKernelChannelGroup->chanCount == 0)
422             {
423                 ctxBufPoolSetScrubSkip(pChannelBufPool, NV_TRUE);
424                 NV_PRINTF(LEVEL_INFO, "Skipping scrubber for all allocations on this context\n");
425             }
426         }
427 
428         bIsScrubSkipped = ctxBufPoolIsScrubSkipped(pChannelBufPool);
429         if (bIsScrubSkipped ^ bRequestScrubSkip)
430         {
431             status = NV_ERR_INVALID_ARGUMENT;
432             NV_PRINTF(LEVEL_ERROR, "Mismatch between channel and parent TSG's policy on skipping scrubber\n");
433             NV_PRINTF(LEVEL_ERROR, "scrubbing %s skipped for TSG and %s for channel\n", (bIsScrubSkipped ? "is" : "is not"),
434                 (bRequestScrubSkip ? "is" : "is not"));
435             goto cleanup;
436         }
437         NV_ASSERT_OK_OR_GOTO(status,
438                              kfifoGetInstMemInfo_HAL(pKernelFifo, &bufInfo.size, &bufInfo.align, NULL, NULL, NULL),
439                              cleanup);
440         bufInfo.attr = RM_ATTR_PAGE_SIZE_DEFAULT;
441         NV_ASSERT_OK_OR_GOTO(status, ctxBufPoolReserve(pGpu, pChannelBufPool, &bufInfo, 1), cleanup);
442     }
443     else
444     {
445         NV_PRINTF(LEVEL_INFO, "Not using ctx buf pool\n");
446     }
447 
448     //--------------------------------------------------------------------------
    // We acquire the GPU lock below.
    // From here on, do not return early; use goto cleanup.
451     //--------------------------------------------------------------------------
452 
453     NV_ASSERT_OK_OR_GOTO(status,
454         rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO),
455         cleanup);
456     bLockAcquired = NV_TRUE;
457 
458     //
459     // Initialize the notification indices used for different notifications
460     //
461     pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR]
462         = NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR;
463     pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN]
464         = NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN;
465 
466     // Bake channel group error handlers into the channel
467     pKernelChannel->hErrorContext = pChannelGpfifoParams->hObjectError;
468     pKernelChannel->hEccErrorContext = pChannelGpfifoParams->hObjectEccError;
469 
470     if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT)
471     {
472         pKernelChannel->hErrorContext = (
473             pKernelChannel->pKernelChannelGroupApi->hErrorContext);
474     }
475     if (pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT)
476     {
477         pKernelChannel->hEccErrorContext = (
478             pKernelChannel->pKernelChannelGroupApi->hEccErrorContext);
479     }
480 
481     if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT)
482     {
483         pKernelChannel->errorContextType = ERROR_NOTIFIER_TYPE_NONE;
484     }
485     else if (!RMCFG_FEATURE_PLATFORM_GSP)
486     {
487         NV_ASSERT_OK(kchannelGetNotifierInfo(pGpu, pRsClient,
488             pKernelChannel->hErrorContext,
489             &pKernelChannel->pErrContextMemDesc,
490             &pKernelChannel->errorContextType,
491             &pKernelChannel->errorContextOffset));
492         NV_ASSERT(pKernelChannel->errorContextType !=
493                   ERROR_NOTIFIER_TYPE_NONE);
494     }
495     if (pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT)
496     {
497         pKernelChannel->eccErrorContextType = ERROR_NOTIFIER_TYPE_NONE;
498     }
499     else if (!RMCFG_FEATURE_PLATFORM_GSP)
500     {
501         NV_ASSERT_OK(kchannelGetNotifierInfo(pGpu, pRsClient,
502             pKernelChannel->hEccErrorContext,
503             &pKernelChannel->pEccErrContextMemDesc,
504             &pKernelChannel->eccErrorContextType,
505             &pKernelChannel->eccErrorContextOffset));
506         NV_ASSERT(pKernelChannel->eccErrorContextType !=
507                   ERROR_NOTIFIER_TYPE_NONE);
508     }
509 
510     if (IS_GSP_CLIENT(pGpu) || bFullSriov)
511     {
512         if (pKernelChannel->hErrorContext != NV01_NULL_OBJECT)
513         {
514             pChannelGpfifoParams->errorNotifierMem.base = (
515                 memdescGetPhysAddr(pKernelChannel->pErrContextMemDesc,
516                                    AT_GPU, 0) +
517                 pKernelChannel->errorContextOffset);
518             pChannelGpfifoParams->errorNotifierMem.size = (
519                 pKernelChannel->pErrContextMemDesc->Size -
520                 pKernelChannel->errorContextOffset);
521             pChannelGpfifoParams->errorNotifierMem.addressSpace =
522                 memdescGetAddressSpace(pKernelChannel->pErrContextMemDesc);
523             pChannelGpfifoParams->errorNotifierMem.cacheAttrib =
524                 memdescGetCpuCacheAttrib(pKernelChannel->pErrContextMemDesc);
525 
526         }
527         if (pKernelChannel->hEccErrorContext != NV01_NULL_OBJECT)
528         {
529             pChannelGpfifoParams->eccErrorNotifierMem.base = (
530                 memdescGetPhysAddr(pKernelChannel->pEccErrContextMemDesc,
531                                    AT_GPU, 0) +
532                 pKernelChannel->eccErrorContextOffset);
533             pChannelGpfifoParams->eccErrorNotifierMem.size = (
534                 pKernelChannel->pEccErrContextMemDesc->Size -
535                 pKernelChannel->eccErrorContextOffset);
536             pChannelGpfifoParams->eccErrorNotifierMem.addressSpace =
537                 memdescGetAddressSpace(pKernelChannel->pEccErrContextMemDesc);
538             pChannelGpfifoParams->eccErrorNotifierMem.cacheAttrib =
539                 memdescGetCpuCacheAttrib(pKernelChannel->pEccErrContextMemDesc);
540         }
541 
542         pChannelGpfifoParams->internalFlags = FLD_SET_DRF_NUM(
543             _KERNELCHANNEL_ALLOC, _INTERNALFLAGS, _ERROR_NOTIFIER_TYPE,
544             pKernelChannel->errorContextType,
545             pChannelGpfifoParams->internalFlags);
546         pChannelGpfifoParams->internalFlags = FLD_SET_DRF_NUM(
547             _KERNELCHANNEL_ALLOC, _INTERNALFLAGS, _ECC_ERROR_NOTIFIER_TYPE,
548             pKernelChannel->eccErrorContextType,
549             pChannelGpfifoParams->internalFlags);
550     }
551 
552     //
553     // The error context types should be set on all RM configurations
554     // (GSP/baremetal/CPU-GSP client)
555     //
556     NV_ASSERT(pKernelChannel->errorContextType != ERROR_NOTIFIER_TYPE_UNKNOWN);
557     NV_ASSERT(pKernelChannel->eccErrorContextType !=
558               ERROR_NOTIFIER_TYPE_UNKNOWN);
559 
560 
561     if ((pKernelChannelGroup->chanCount != 0) &&
562         (( pKernelChannelGroup->bLegacyMode && (hKernelCtxShare != NV01_NULL_OBJECT)) ||
563          (!pKernelChannelGroup->bLegacyMode && (hKernelCtxShare == NV01_NULL_OBJECT))))
564     {
565         //
        // Check whether this channel allocation's use (or not) of a user-
        // allocated context share matches previous channel allocations (if
        // any) in this group.
570         //
571         // A channel group cannot have a mix of channels with some of them
572         // specifying a user allocated context share and some having RM
573         // allocated context share.
574         //
575         NV_PRINTF(LEVEL_NOTICE,
576             "All channels in a channel group must specify a CONTEXT_SHARE if any one of them specifies it\n");
577         status = NV_ERR_INVALID_ARGUMENT;
578         goto cleanup;
579     }
580 
581     // Get KernelCtxShare (supplied or legacy)
582     if (hKernelCtxShare != NV01_NULL_OBJECT)
583     {
584         // Get object pointers from supplied hKernelCtxShare.
585         NV_ASSERT_OK_OR_GOTO(status,
586             clientGetResourceRefByType(pRsClient,
587                                        hKernelCtxShare,
588                                        classId(KernelCtxShareApi),
589                                        &pKernelCtxShareRef),
590             cleanup);
591 
592         //
593         // If hKernelCtxShare is nonzero, the ChannelGroup is not internal
594         // either, so it should have the same parent as hParent.
595         //
596         NV_ASSERT_OR_ELSE(
597             pKernelCtxShareRef->pParentRef != NULL &&
598                 pKernelCtxShareRef->pParentRef->hResource == hParent,
599             status = NV_ERR_INVALID_OBJECT_PARENT;
600             goto cleanup);
601     }
602     else
603     {
604         NvU32 subctxFlag;
605         NvHandle hLegacyKernelCtxShare;
606 
607         if (!pKernelChannelGroup->bLegacyMode)
608         {
609             //
610             // Set this ChannelGroup to legacy mode and get the KernelCtxShare
611             // from it.
612             //
613             NV_ASSERT_OK_OR_GOTO(status,
614                 kchangrpapiSetLegacyMode(pKernelChannelGroupApi,
615                                          pGpu, pKernelFifo, hClient),
616                 cleanup);
617         }
618 
619         subctxFlag = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_THREAD, flags);
620         hLegacyKernelCtxShare = (subctxFlag ==
621                            NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC) ?
622                               pKernelChannelGroupApi->hLegacykCtxShareSync :
623                               pKernelChannelGroupApi->hLegacykCtxShareAsync;
624 
625         NV_ASSERT_OK_OR_GOTO(status,
626             clientGetResourceRefByType(pRsClient,
627                                        hLegacyKernelCtxShare,
628                                        classId(KernelCtxShareApi),
629                                        &pKernelCtxShareRef),
630             cleanup);
631     }
632 
633     pKernelChannel->pKernelCtxShareApi = dynamicCast(pKernelCtxShareRef->pResource, KernelCtxShareApi);
634     NV_ASSERT_OR_ELSE(pKernelChannel->pKernelCtxShareApi != NULL, status = NV_ERR_INVALID_OBJECT; goto cleanup);
635     NV_ASSERT_OR_ELSE(pKernelChannel->pKernelCtxShareApi->pShareData != NULL, status = NV_ERR_INVALID_OBJECT; goto cleanup);
636     pKernelChannel->pVAS = pKernelChannel->pKernelCtxShareApi->pShareData->pVAS;
637     NV_ASSERT_OR_ELSE(pKernelChannel->pVAS != NULL, status = NV_ERR_INVALID_OBJECT; goto cleanup);
638 
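    //
    // With per-runlist channel RAM, the channel inherits the parent TSG's
    // engine type (validating any engine type the client supplied against it),
    // since the channel ID space is tied to a specific runlist.
    //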
639     if (kfifoIsPerRunlistChramSupportedInHw(pKernelFifo))
640     {
641         // TSG should always have a valid engine Id.
642         if (!RM_ENGINE_TYPE_IS_VALID(pKernelChannelGroup->engineType))
643         {
644             NV_ASSERT(
645                 RM_ENGINE_TYPE_IS_VALID(pKernelChannelGroup->engineType));
646             status = NV_ERR_INVALID_STATE;
647             goto cleanup;
648         }
649 
650         if (NV2080_ENGINE_TYPE_IS_VALID(pChannelGpfifoParams->engineType))
651         {
652             globalRmEngineType = gpuGetRmEngineType(pChannelGpfifoParams->engineType);
653             // Convert it to global engine id if MIG is enabled
654             if (bMIGInUse)
655             {
656                 MIG_INSTANCE_REF ref;
657 
658                 NV_CHECK_OK_OR_GOTO(
659                     status,
660                     LEVEL_ERROR,
661                     kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
662                                                     pDevice, &ref),
663                     cleanup);
664 
665                 NV_CHECK_OK_OR_GOTO(
666                     status,
667                     LEVEL_ERROR,
668                     kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref,
669                                                       globalRmEngineType,
670                                                       &globalRmEngineType),
671                     cleanup);
672             }
673 
            // Throw an error if the TSG engine Id does NOT match the channel engine Id
675             if (globalRmEngineType != pKernelChannelGroup->engineType)
676             {
677                 NV_PRINTF(LEVEL_ERROR,
678                     "Engine type of channel = 0x%x (0x%x) not compatible with engine type of TSG = 0x%x (0x%x)\n",
679                     gpuGetNv2080EngineType(pChannelGpfifoParams->engineType),
680                     pChannelGpfifoParams->engineType,
681                     gpuGetNv2080EngineType(pKernelChannelGroup->engineType),
682                     pKernelChannelGroup->engineType);
683 
684                 status = NV_ERR_INVALID_ARGUMENT;
685                 goto cleanup;
686             }
687         }
688 
689         // Assign the engine type from the parent TSG
690         pKernelChannel->engineType = pKernelChannelGroup->engineType;
691     }
692 
693     // Determine initial runlist ID (based on engine type if provided or inherited from TSG)
694     pKernelChannel->runlistId = kfifoGetDefaultRunlist_HAL(pGpu, pKernelFifo, pKernelChannel->engineType);
695 
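    //
    // For Confidential Compute secure channels, retrieve the client key
    // material bundle and populate the encrypt/decrypt IVs and HMAC nonce in
    // the allocation params (consumed before they are scrubbed at cleanup).
    //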
696     pKernelChannel->bCCSecureChannel = FLD_TEST_DRF(OS04, _FLAGS, _CC_SECURE, _TRUE, flags);
697     if (pKernelChannel->bCCSecureChannel)
698     {
699         ConfidentialCompute* pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
700 
        //
        // Bail out if the GPU is not ready to accept work. The GPU lock is
        // already held at this point, so exit through the cleanup path rather
        // than returning early.
        //
        if (pConfCompute && kchannelCheckIsUserMode(pKernelChannel)
            && !confComputeAcceptClientRequest(pGpu, pConfCompute))
        {
            status = NV_ERR_NOT_READY;
            goto cleanup;
        }
707 
708         status = kchannelRetrieveKmb_HAL(pGpu, pKernelChannel, ROTATE_IV_ALL_VALID,
709                                          NV_TRUE, &pKernelChannel->clientKmb);
710         NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
711 
712         portMemCopy(pChannelGpfifoParams->encryptIv,
713                     sizeof(pChannelGpfifoParams->encryptIv),
714                     pKernelChannel->clientKmb.encryptBundle.iv,
715                     sizeof(pKernelChannel->clientKmb.encryptBundle.iv));
716 
717         portMemCopy(pChannelGpfifoParams->decryptIv,
718                     sizeof(pChannelGpfifoParams->decryptIv),
719                     pKernelChannel->clientKmb.decryptBundle.iv,
720                     sizeof(pKernelChannel->clientKmb.decryptBundle.iv));
721 
722         portMemCopy(pChannelGpfifoParams->hmacNonce,
723                     sizeof(pChannelGpfifoParams->hmacNonce),
724                     pKernelChannel->clientKmb.hmacBundle.nonce,
725                     sizeof(pKernelChannel->clientKmb.hmacBundle.nonce));
726 
727     }
728 
729     // Set TLS state and BAR0 window if we are working with Gr
730     if (bMIGInUse && RM_ENGINE_TYPE_IS_GR(pKernelChannel->engineType))
731     {
732         NV_ASSERT_OK(kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
733                                                      pDevice, &pKernelChannel->partitionRef));
734     }
735 
    // Allocate the ChID (except for legacy VGPU, which allocates the ChID on the host)
737     if (!IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
738     {
739         status = kchannelAllocHwID_HAL(pGpu, pKernelChannel, hClient,
740                                        flags, verifFlags2, chID);
741 
742         if (status != NV_OK)
743         {
744             NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id 0x%x for hClient 0x%x hKernelChannel 0x%x \n",
745                                    chID, hClient, pResourceRef->hResource);
746             DBG_BREAKPOINT();
747             goto cleanup;
748 
749         }
750 
751         chID = pKernelChannel->ChID;
752         bChidAllocated = NV_TRUE;
753     }
754 
755     //
    // RPC-allocate the channel in legacy VGPU / Heavy SRIOV so that instmem details can be obtained from it
757     //
758     if (IS_VIRTUAL(pGpu) && (!bFullSriov))
759     {
760         NV_ASSERT_OK_OR_GOTO(status,
761                              _kchannelSendChannelAllocRpc(pKernelChannel,
762                                                           pChannelGpfifoParams,
763                                                           pKernelChannelGroup,
764                                                           bFullSriov),
765                              cleanup);
766         bRpcAllocated = NV_TRUE;
767     }
768 
    // Legacy VGPU: allocate the chID that the host provided
770     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
771     {
772         chID = pKernelChannel->ChID;
773 
774         status = kchannelAllocHwID_HAL(pGpu, pKernelChannel, hClient,
775                                        flags, verifFlags2, chID);
776 
777         if (status != NV_OK)
778         {
779             NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id 0x%x for hClient 0x%x hKernelChannel 0x%x \n",
780                       chID, hClient, pResourceRef->hResource);
781             chID = ~0;
782             DBG_BREAKPOINT();
783             goto cleanup;
784         }
785 
786         bChidAllocated = NV_TRUE;
787     }
788 
789     //
790     // Do instmem setup here
791     // (Requires the channel to be created on the host if legacy VGPU / Heavy SRIOV.
792     // Does not require a Channel object.)
793     //
794     NV_ASSERT_OK_OR_GOTO(status,
795         _kchannelAllocOrDescribeInstMem(pKernelChannel, pChannelGpfifoParams),
796         cleanup);
797 
798     // Join the channel group here
799     NV_ASSERT_OK_OR_GOTO(status,
800         kchangrpAddChannel(pGpu, pKernelChannelGroup, pKernelChannel),
801         cleanup);
802     bAddedToGroup = NV_TRUE;
803 
804     // Assign to the same runlistId as the KernelChannelGroup if it's already determined
805     if (pKernelChannelGroup->bRunlistAssigned)
806     {
807         SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
808         {
809             NV_ASSERT_OK_OR_ELSE(status,
810                 kfifoRunlistSetId_HAL(pGpu,
811                                       GPU_GET_KERNEL_FIFO(pGpu),
812                                       pKernelChannel,
813                                       pKernelChannelGroup->runlistId),
814                 SLI_LOOP_GOTO(cleanup));
815         }
816         SLI_LOOP_END
    }
818 
819     // Allocate the physical channel
820     NV_ASSERT_OK_OR_GOTO(status,
821         kchannelAllocChannel_HAL(pKernelChannel, pChannelGpfifoParams),
822         cleanup);
823 
824     // Set up pNotifyActions
825     _kchannelSetupNotifyActions(pKernelChannel, pResourceRef->externalClassId);
826     bNotifyActionsSetup = NV_TRUE;
827 
828     // Initialize the userd length
829     if (!pKernelChannel->bClientAllocatedUserD)
830     {
831         NvU64 temp_offset;
832 
833         kchannelGetUserdInfo_HAL(pGpu,
834                                  pKernelChannel,
835                                  NULL,
836                                  &temp_offset,
837                                  &pKernelChannel->userdLength);
838     }
839     else
840     {
841         kfifoGetUserdSizeAlign_HAL(pKernelFifo, (NvU32*)&pKernelChannel->userdLength, NULL);
842     }
843 
844     // Set GPU accounting
845     if (RMCFG_MODULE_GPUACCT &&
846         pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON))
847     {
848         GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(SYS_GET_INSTANCE());
849 
850         gpuacctSetProcType(pGpuAcct,
851                            pGpu->gpuInstance,
852                            pRmClient->ProcID,
853                            pRmClient->SubProcessID,
854                            NV_GPUACCT_PROC_TYPE_GPU);
855     }
856 
857     //
858     // RPC to allocate the channel on GSPFW/host.
859     // (Requires a Channel object but only for hPhysChannel.)
860     //
861     if (IS_GSP_CLIENT(pGpu) || bFullSriov)
862     {
863         NV_ASSERT_OK_OR_GOTO(status,
864                              _kchannelSendChannelAllocRpc(pKernelChannel,
865                                                           pChannelGpfifoParams,
866                                                           pKernelChannelGroup,
867                                                           bFullSriov),
868                              cleanup);
869         bRpcAllocated = NV_TRUE;
870     }
871 
872     if (kfifoIsPerRunlistChramEnabled(pKernelFifo) ||
873         (gpuIsCCorApmFeatureEnabled(pGpu) || bMIGInUse))
874     {
875         SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
876         {
877             KernelFifo *pTempKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
878             //
879             // If we have a separate channel RAM for each runlist then we need to set
880             // runlistId as we already picked a chID from channel RAM based on this runlistId.
881             // This will also ensure runlistId is not overridden later to a different value
882             //
883             NV_ASSERT_OK_OR_GOTO(status,
884                 kfifoRunlistSetId_HAL(pGpu, pTempKernelFifo, pKernelChannel, pKernelChannel->runlistId),
885                 cleanup);
886         }
887         SLI_LOOP_END;
888     }
889 
890     //
891     // If we alloced this group, we want to free KernelChannel first,
892     // so we should set KernelChannel as its dependent.
893     //
894     if (bTsgAllocated)
895     {
896         NV_ASSERT_OK_OR_GOTO(status, refAddDependant(pChanGrpRef, pResourceRef), cleanup);
897     }
898 
899     // We depend on VASpace if it was provided
900     if (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT)
901     {
902         NV_ASSERT_OK_OR_GOTO(status, clientGetResourceRef(pRsClient, pChannelGpfifoParams->hVASpace, &pVASpaceRef), cleanup);
903         NV_ASSERT_OR_ELSE(pVASpaceRef != NULL, status = NV_ERR_INVALID_OBJECT; goto cleanup);
904 
905         NV_ASSERT_OK_OR_GOTO(status, refAddDependant(pVASpaceRef, pResourceRef), cleanup);
906     }
907 
908     //
909     // If KernelCtxShare was provided, we depend on it (and if we created it then we
910     // also want KernelChannel to be freed first.)
911     //
912     if (pKernelChannel->pKernelCtxShareApi != NULL)
913     {
914         NV_ASSERT_OK_OR_GOTO(
915             status,
916             refAddDependant(RES_GET_REF(pKernelChannel->pKernelCtxShareApi), pResourceRef),
917             cleanup);
918     }
919 
920     pKernelChannel->hKernelGraphicsContext = pKernelChannelGroupApi->hKernelGraphicsContext;
921     if (pKernelChannel->hKernelGraphicsContext != NV01_NULL_OBJECT)
922     {
923         NV_ASSERT_OK_OR_GOTO(status,
924             kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext),
925             cleanup);
926 
927         NV_ASSERT_OK_OR_GOTO(status,
928             refAddDependant(RES_GET_REF(pKernelGraphicsContext), pResourceRef),
929             cleanup);
930     }
931 
932     if (pChannelGpfifoParams->hObjectError != 0)
933     {
934         NV_ASSERT_OK_OR_GOTO(
935             status,
936             _kchannelNotifyOfChid(pGpu, pKernelChannel, pRsClient),
937             cleanup);
938     }
939 
940     // Cache the hVASpace for this channel in the KernelChannel object
941     pKernelChannel->hVASpace = pKernelChannel->pKernelCtxShareApi->hVASpace;
942 
943 cleanup:
944     if (bLockAcquired)
945         rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
946 
947     // These fields are only needed internally; clear them here
948     pChannelGpfifoParams->hPhysChannelGroup = 0;
949     pChannelGpfifoParams->internalFlags = 0;
950     portMemSet(&pChannelGpfifoParams->errorNotifierMem, 0,
951                sizeof pChannelGpfifoParams->errorNotifierMem);
952     portMemSet(&pChannelGpfifoParams->eccErrorNotifierMem, 0,
953                sizeof pChannelGpfifoParams->eccErrorNotifierMem);
954     pChannelGpfifoParams->ProcessID = 0;
955     pChannelGpfifoParams->SubProcessID = 0;
956     portMemSet(pChannelGpfifoParams->encryptIv, 0, sizeof(pChannelGpfifoParams->encryptIv));
957     portMemSet(pChannelGpfifoParams->decryptIv, 0, sizeof(pChannelGpfifoParams->decryptIv));
958     portMemSet(pChannelGpfifoParams->hmacNonce, 0, sizeof(pChannelGpfifoParams->hmacNonce));
959 
960     // Free the allocated resources if there was an error
961     if (status != NV_OK)
962     {
963         if (bNotifyActionsSetup)
964         {
965             _kchannelCleanupNotifyActions(pKernelChannel);
966         }
967 
968         // Remove any dependencies we may have added; we don't want our destructor called when freeing anything below
969         if (pKernelGraphicsContext != NULL)
970         {
971             refRemoveDependant(RES_GET_REF(pKernelGraphicsContext), pResourceRef);
972         }
973         if (pKernelChannel->pKernelCtxShareApi != NULL)
974         {
975             refRemoveDependant(RES_GET_REF(pKernelChannel->pKernelCtxShareApi), pResourceRef);
976         }
977         if (pVASpaceRef != NULL)
978         {
979             refRemoveDependant(pVASpaceRef, pResourceRef);
980         }
981         if (bTsgAllocated)
982         {
983             refRemoveDependant(pChanGrpRef, pResourceRef);
984         }
985 
986         if (bAddedToGroup)
987         {
988             kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel);
989         }
990 
991         if (RMCFG_FEATURE_PLATFORM_GSP)
992         {
993             // Free memdescs created during construct on GSP path.
994             memdescFree(pKernelChannel->pErrContextMemDesc);
995             memdescDestroy(pKernelChannel->pErrContextMemDesc);
996             memdescFree(pKernelChannel->pEccErrContextMemDesc);
997             memdescDestroy(pKernelChannel->pEccErrContextMemDesc);
998         }
999         pKernelChannel->pErrContextMemDesc = NULL;
1000         pKernelChannel->pEccErrContextMemDesc = NULL;
1001 
1002         if (bRpcAllocated)
1003         {
1004             NV_RM_RPC_FREE_ON_ERROR(pGpu, hClient, hParent, RES_GET_HANDLE(pKernelChannel));
1005         }
1006 
1007         _kchannelFreeHalData(pGpu, pKernelChannel);
1008 
1009         if (pChannelBufPool != NULL)
1010         {
1011             ctxBufPoolRelease(pChannelBufPool);
1012         }
1013 
1014         if (bTsgAllocated)
1015         {
1016             pRmApi->Free(pRmApi, hClient, hChanGrp);
1017         }
1018 
1019         if (bChidAllocated)
1020         {
1021             kchannelFreeHwID_HAL(pGpu, pKernelChannel);
1022         }
1023     }
1024 
1025     return status;
1026 }
1027 
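/*!
 * @brief Destruct a KernelChannel.
 *
 * Frees the GSP/host mirror of the channel (via RPC), detaches the channel
 * from its graphics context, tears down notify actions and HAL data, removes
 * the channel from its channel group (freeing the group if RM allocated it),
 * and releases the hardware channel ID.
 */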
1028 void
1029 kchannelDestruct_IMPL
1030 (
1031     KernelChannel *pKernelChannel
1032 )
1033 {
1034     CALL_CONTEXT                *pCallContext;
1035     RS_RES_FREE_PARAMS_INTERNAL *pParams;
1036     NvHandle                     hClient;
1037     RM_API                      *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
1038     OBJGPU                      *pGpu   = GPU_RES_GET_GPU(pKernelChannel);
1039     NV_STATUS                    status = NV_OK;
1040     KernelChannelGroup          *pKernelChannelGroup = NULL;
1041 
1042     resGetFreeParams(staticCast(pKernelChannel, RsResource), &pCallContext, &pParams);
1043     hClient = pCallContext->pClient->hClient;
1044 
1045     if (RMCFG_FEATURE_PLATFORM_GSP)
1046     {
1047         // Free memdescs created during construct on GSP path.
1048         memdescFree(pKernelChannel->pErrContextMemDesc);
1049         memdescDestroy(pKernelChannel->pErrContextMemDesc);
1050         memdescFree(pKernelChannel->pEccErrContextMemDesc);
1051         memdescDestroy(pKernelChannel->pEccErrContextMemDesc);
1052     }
1053     pKernelChannel->pErrContextMemDesc = NULL;
1054     pKernelChannel->pEccErrContextMemDesc = NULL;
1055 
1056     // GSP and vGPU support
1057     if ((IS_GSP_CLIENT(pGpu) || IS_VIRTUAL(pGpu)))
1058     {
1059         //
1060         // GSP:
1061         //
        // The method buffer is allocated by CPU-RM during TSG construct,
        // but mapped to invisible BAR2 in GSP during channel construct.
        // During free, the BAR2 mapping must first be unmapped in GSP,
        // and only then can the method buffer be freed on the CPU.
        // This RPC call is especially required for the internal channel case,
        // where channelDestruct calls free for its TSG.
1068         //
1069         NV_RM_RPC_FREE(pGpu,
1070                        hClient,
1071                        RES_GET_PARENT_HANDLE(pKernelChannel),
1072                        RES_GET_HANDLE(pKernelChannel),
1073                        status);
1074     }
1075 
1076     {
1077         KernelGraphicsContext *pKernelGraphicsContext;
1078 
1079         // Perform GR ctx cleanup tasks on channel destruction
1080         if ((kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext) == NV_OK) &&
1081             kgrctxIsValid(pGpu, pKernelGraphicsContext, pKernelChannel))
1082         {
1083             shrkgrctxDetach(pGpu, pKernelGraphicsContext->pShared, pKernelGraphicsContext, pKernelChannel);
1084         }
1085     }
1086 
1087     _kchannelCleanupNotifyActions(pKernelChannel);
1088 
1089     _kchannelFreeHalData(pGpu, pKernelChannel);
1090 
1091     NV_ASSERT(pKernelChannel->pKernelChannelGroupApi != NULL);
1092 
1093     pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup;
1094 
1095     NV_ASSERT(pKernelChannelGroup != NULL);
1096 
    // Remove the channel from the group
1098     kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel);
1099 
1100     // Free the Ctx Buf pool
1101     if (pKernelChannelGroup->pChannelBufPool != NULL)
1102     {
1103         ctxBufPoolRelease(pKernelChannelGroup->pChannelBufPool);
1104     }
1105 
1106     // Free the channel group, if we alloced it
1107     if (pKernelChannelGroup->bAllocatedByRm)
1108     {
1109         pRmApi->Free(pRmApi, hClient,
1110                      RES_GET_HANDLE(pKernelChannel->pKernelChannelGroupApi));
1111         pKernelChannelGroup = NULL;
1112         pKernelChannel->pKernelChannelGroupApi = NULL;
1113     }
1114 
1115     kchannelFreeHwID_HAL(pGpu, pKernelChannel);
1116 
1117     NV_ASSERT(pKernelChannel->refCount == 1);
1118 }
1119 
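/*!
 * @brief Map the channel's USERD region into the caller's address space.
 *
 * Only supported for RM-allocated USERD; validates the requested offset and
 * length against the USERD size before creating the CPU mapping.
 */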
1120 NV_STATUS
1121 kchannelMap_IMPL
1122 (
1123     KernelChannel     *pKernelChannel,
1124     CALL_CONTEXT      *pCallContext,
1125     RS_CPU_MAP_PARAMS *pParams,
1126     RsCpuMapping      *pCpuMapping
1127 )
1128 {
1129     OBJGPU *pGpu;
1130     NV_STATUS rmStatus;
1131     RsClient *pRsClient = pCallContext->pClient;
1132     RmClient *pRmClient = dynamicCast(pRsClient, RmClient);
1133     GpuResource *pGpuResource;
1134 
1135     NV_ASSERT_OR_RETURN(!pKernelChannel->bClientAllocatedUserD, NV_ERR_INVALID_REQUEST);
1136 
1137     rmStatus = gpuresGetByDeviceOrSubdeviceHandle(pRsClient,
1138                                                   pCpuMapping->pContextRef->hResource,
1139                                                   &pGpuResource);
1140     if (rmStatus != NV_OK)
1141         return rmStatus;
1142 
1143     pGpu = GPU_RES_GET_GPU(pGpuResource);
1144     GPU_RES_SET_THREAD_BC_STATE(pGpuResource);
1145 
    // If the mapping flags are the FIFO default, validate the offset/length passed in.
1147     if (DRF_VAL(OS33, _FLAGS, _FIFO_MAPPING, pCpuMapping->flags) == NVOS33_FLAGS_FIFO_MAPPING_DEFAULT)
1148     {
1149         // Validate the offset and limit passed in.
1150         if (pCpuMapping->offset >= pKernelChannel->userdLength)
1151             return NV_ERR_INVALID_BASE;
1152         if (pCpuMapping->length == 0)
1153             return NV_ERR_INVALID_LIMIT;
1154         if (pCpuMapping->offset + pCpuMapping->length > pKernelChannel->userdLength)
1155             return NV_ERR_INVALID_LIMIT;
1156     }
1157     else
1158     {
1159         pCpuMapping->offset = 0x0;
1160         pCpuMapping->length = pKernelChannel->userdLength;
1161     }
1162 
1163     rmStatus = kchannelMapUserD(pGpu, pKernelChannel,
1164                                 rmclientGetCachedPrivilege(pRmClient),
1165                                 pCpuMapping->offset,
1166                                 pCpuMapping->pPrivate->protect,
1167                                 &pCpuMapping->pLinearAddress,
1168                                 &(pCpuMapping->pPrivate->pPriv));
1169 
1170     if (rmStatus != NV_OK)
1171         return rmStatus;
1172 
1173     // Save off the mapping
1174     _kchannelUpdateFifoMapping(pKernelChannel,
1175                                pGpu,
1176                                (pRsClient->type == CLIENT_TYPE_KERNEL),
1177                                pCpuMapping->pLinearAddress,
1178                                pCpuMapping->pPrivate->pPriv,
1179                                pCpuMapping->length,
1180                                pCpuMapping->flags,
1181                                pCpuMapping->pContextRef->hResource,
1182                                pCpuMapping);
1183 
1184     return NV_OK;
1185 }
1186 
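/*!
 * @brief Unmap a CPU mapping of the channel's USERD region.
 *
 * Only supported for RM-allocated USERD.
 */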
1187 NV_STATUS
1188 kchannelUnmap_IMPL
1189 (
1190     KernelChannel *pKernelChannel,
1191     CALL_CONTEXT  *pCallContext,
1192     RsCpuMapping  *pCpuMapping
1193 )
1194 {
1195     OBJGPU   *pGpu;
1196     RsClient *pRsClient = pCallContext->pClient;
1197     RmClient *pRmClient = dynamicCast(pRsClient, RmClient);
1198 
1199     if (pKernelChannel->bClientAllocatedUserD)
1200     {
1201         DBG_BREAKPOINT();
1202         return NV_ERR_INVALID_REQUEST;
1203     }
1204 
1205     pGpu = pCpuMapping->pPrivate->pGpu;
1206 
1207     kchannelUnmapUserD(pGpu,
1208                        pKernelChannel,
1209                        rmclientGetCachedPrivilege(pRmClient),
1210                        &pCpuMapping->pLinearAddress,
1211                        &pCpuMapping->pPrivate->pPriv);
1212 
1213     return NV_OK;
1214 }
1215 
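/*!
 * @brief Report the address space in which the channel's USERD resides.
 */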
1216 NV_STATUS
1217 kchannelGetMapAddrSpace_IMPL
1218 (
1219     KernelChannel    *pKernelChannel,
1220     CALL_CONTEXT     *pCallContext,
1221     NvU32             mapFlags,
1222     NV_ADDRESS_SPACE *pAddrSpace
1223 )
1224 {
1225     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1226     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1227     NvU32 userdAperture;
1228     NvU32 userdAttribute;
1229 
1230     NV_ASSERT_OK_OR_RETURN(kfifoGetUserdLocation_HAL(pKernelFifo,
1231                                                      &userdAperture,
1232                                                      &userdAttribute));
1233     if (pAddrSpace)
1234         *pAddrSpace = userdAperture;
1235 
1236     return NV_OK;
1237 }
1238 
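/*!
 * @brief Supply the source memory descriptor used to DMA-map USERD.
 *
 * Fails if a subdevice handle is provided (unicast DMA mappings of USERD are
 * not supported) or if USERD DMA mapping is not supported by FIFO.
 */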
1239 NV_STATUS
1240 kchannelGetMemInterMapParams_IMPL
1241 (
1242     KernelChannel              *pKernelChannel,
1243     RMRES_MEM_INTER_MAP_PARAMS *pParams
1244 )
1245 {
1246     OBJGPU            *pGpu = pParams->pGpu;
1247     KernelFifo        *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1248     MEMORY_DESCRIPTOR *pSrcMemDesc = NULL;
1249     NV_STATUS          status;
1250 
1251     if (pParams->bSubdeviceHandleProvided)
1252     {
1253         NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings of USERD not supported.\n");
1254         return NV_ERR_NOT_SUPPORTED;
1255     }
1256 
1257     if (!kfifoIsUserdMapDmaSupported(pKernelFifo))
1258         return NV_ERR_INVALID_OBJECT_HANDLE;
1259 
1260     status = _kchannelGetUserMemDesc(pGpu, pKernelChannel, &pSrcMemDesc);
1261     if (status != NV_OK)
1262         return status;
1263 
1264     pParams->pSrcMemDesc = pSrcMemDesc;
1265     pParams->pSrcGpu = pSrcMemDesc->pGpu;
1266 
1267     return NV_OK;
1268 }
1269 
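/*!
 * @brief Validate that an inter-map unmapping of USERD is supported.
 */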
1270 NV_STATUS
1271 kchannelCheckMemInterUnmap_IMPL
1272 (
1273     KernelChannel *pKernelChannel,
1274     NvBool         bSubdeviceHandleProvided
1275 )
1276 {
1277     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1278     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1279 
1280     if (bSubdeviceHandleProvided)
1281     {
1282         NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings of channels not supported.\n");
1283         return NV_ERR_NOT_SUPPORTED;
1284     }
1285 
1286 
1287     if (!kfifoIsUserdMapDmaSupported(pKernelFifo))
1288         return NV_ERR_INVALID_OBJECT_HANDLE;
1289 
1290     return NV_OK;
1291 }
1292 
1293 /**
1294  * @brief Creates an iterator to iterate all channels in a given scope.
1295  *
1296  * Iterates over all channels under a given scope.  For a device it will loop
1297  * through all channels that are descendants of the device (including children
1298  * of channel groups).  For a channel group it will only iterate over the
1299  * channels within that group.  Ordering is ensured for channel group.
1300  * All channels within a channel group will be iterated together before moving to
1301  * another channel group or channel.
1302  *
1303  * @param[in]  pClient
1304  * @param[in]  pScopeRef The resource that defines the scope of iteration
1305  */
1306 RS_ORDERED_ITERATOR
1307 kchannelGetIter
1308 (
1309     RsClient      *pClient,
1310     RsResourceRef *pScopeRef
1311 )
1312 {
1313     return clientRefOrderedIter(pClient, pScopeRef, classId(KernelChannel), NV_TRUE);
1314 }
1315 
1316 /**
1317  * @brief Given a client, parent, and KernelChannel handle retrieves the
1318  * KernelChannel object
1319  *
1320  * @param[in]  hClient
1321  * @param[in]  hParent              Device or Channel Group parent
1322  * @param[in]  hKernelChannel
1323  * @param[out] ppKernelChannel      Valid iff NV_OK is returned.
1324  *
1325  * @return  NV_OK if successful, appropriate error otherwise
1326  */
1327 NV_STATUS
1328 CliGetKernelChannelWithDevice
1329 (
1330     RsClient       *pClient,
1331     NvHandle        hParent,
1332     NvHandle        hKernelChannel,
1333     KernelChannel **ppKernelChannel
1334 )
1335 {
1336     RsResourceRef *pParentRef;
1337     RsResourceRef *pResourceRef;
1338     KernelChannel *pKernelChannel;
1339 
1340     if (ppKernelChannel == NULL)
1341         return NV_ERR_INVALID_ARGUMENT;
1342 
1343     *ppKernelChannel = NULL;
1344 
1345     NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pClient, hKernelChannel, &pResourceRef));
1346 
1347     pKernelChannel = dynamicCast(pResourceRef->pResource, KernelChannel);
1348     NV_CHECK_OR_RETURN(LEVEL_INFO, pKernelChannel != NULL, NV_ERR_OBJECT_NOT_FOUND);
1349 
1350     pParentRef = pResourceRef->pParentRef;
1351     NV_CHECK_OR_RETURN(LEVEL_INFO, pParentRef != NULL, NV_ERR_OBJECT_NOT_FOUND);
1352 
1353     //
1354     // Check that the parent matches requested handle.  Parent handle can be a
1355     // device or a ChannelGroup.  The first case can match either, the second
1356     // matches a Device when the parent is a ChannelGroup.
1357     //
1358     NV_CHECK_OR_RETURN(LEVEL_INFO, (pParentRef->hResource == hParent) ||
1359                      (RES_GET_HANDLE(GPU_RES_GET_DEVICE(pKernelChannel)) == hParent),
1360                          NV_ERR_OBJECT_NOT_FOUND);
1361 
1362     *ppKernelChannel = pKernelChannel;
1363     return NV_OK;
1364 } // end of CliGetKernelChannelWithDevice()
1365 
1366 
1367 /**
1368  * @brief Given a classNum this routine returns various sdk specific values for
1369  * that class.
1370  *
1371  * @param[in]   classNum
1372  * @param[out]  pClassInfo
1373  */
1374 void
1375 CliGetChannelClassInfo
1376 (
1377     NvU32 classNum,
1378     CLI_CHANNEL_CLASS_INFO *pClassInfo
1379 )
1380 {
1381     switch (classNum)
1382     {
1383         case GF100_CHANNEL_GPFIFO:
1384         {
1385             pClassInfo->notifiersMaxCount  = NV906F_NOTIFIERS_MAXCOUNT;
1386             pClassInfo->eventActionDisable = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1387             pClassInfo->eventActionSingle  = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1388             pClassInfo->eventActionRepeat  = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1389             pClassInfo->rcNotifierIndex    = NV906F_NOTIFIERS_RC;
1390             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1391             break;
1392         }
1393         case KEPLER_CHANNEL_GPFIFO_A:
1394         {
1395             pClassInfo->notifiersMaxCount  = NVA06F_NOTIFIERS_MAXCOUNT;
1396             pClassInfo->eventActionDisable = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1397             pClassInfo->eventActionSingle  = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1398             pClassInfo->eventActionRepeat  = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1399             pClassInfo->rcNotifierIndex    = NVA06F_NOTIFIERS_RC;
1400             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1401             break;
1402         }
1403         case KEPLER_CHANNEL_GPFIFO_B:
1404         {
1405             pClassInfo->notifiersMaxCount  = NVA16F_NOTIFIERS_MAXCOUNT;
1406             pClassInfo->eventActionDisable = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1407             pClassInfo->eventActionSingle  = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1408             pClassInfo->eventActionRepeat  = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1409             pClassInfo->rcNotifierIndex    = NVA16F_NOTIFIERS_RC;
1410             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1411             break;
1412         }
1413         case MAXWELL_CHANNEL_GPFIFO_A:
1414         {
1415             pClassInfo->notifiersMaxCount  = NVB06F_NOTIFIERS_MAXCOUNT;
1416             pClassInfo->eventActionDisable = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1417             pClassInfo->eventActionSingle  = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1418             pClassInfo->eventActionRepeat  = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1419             pClassInfo->rcNotifierIndex    = NVB06F_NOTIFIERS_RC;
1420             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1421             break;
1422         }
1423         case PASCAL_CHANNEL_GPFIFO_A:
1424         {
1425             pClassInfo->notifiersMaxCount  = NVC06F_NOTIFIERS_MAXCOUNT;
1426             pClassInfo->eventActionDisable = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1427             pClassInfo->eventActionSingle  = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1428             pClassInfo->eventActionRepeat  = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1429             pClassInfo->rcNotifierIndex    = NVC06F_NOTIFIERS_RC;
1430             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1431             break;
1432         }
1433         case VOLTA_CHANNEL_GPFIFO_A:
1434         {
1435             pClassInfo->notifiersMaxCount  = NVC36F_NOTIFIERS_MAXCOUNT;
1436             pClassInfo->eventActionDisable = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1437             pClassInfo->eventActionSingle  = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1438             pClassInfo->eventActionRepeat  = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1439             pClassInfo->rcNotifierIndex    = NVC36F_NOTIFIERS_RC;
1440             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1441             break;
1442         }
1443         case TURING_CHANNEL_GPFIFO_A:
1444         {
1445             pClassInfo->notifiersMaxCount  = NVC46F_NOTIFIERS_MAXCOUNT;
1446             pClassInfo->eventActionDisable = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1447             pClassInfo->eventActionSingle  = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1448             pClassInfo->eventActionRepeat  = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1449             pClassInfo->rcNotifierIndex    = NVC46F_NOTIFIERS_RC;
1450             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1451             break;
1452         }
1453         case AMPERE_CHANNEL_GPFIFO_A:
1454         {
1455             pClassInfo->notifiersMaxCount  = NVC56F_NOTIFIERS_MAXCOUNT;
1456             pClassInfo->eventActionDisable = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1457             pClassInfo->eventActionSingle  = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1458             pClassInfo->eventActionRepeat  = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1459             pClassInfo->rcNotifierIndex    = NVC56F_NOTIFIERS_RC;
1460             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1461             break;
1462         }
1463         case HOPPER_CHANNEL_GPFIFO_A:
1464         {
1465             pClassInfo->notifiersMaxCount  = NVC86F_NOTIFIERS_MAXCOUNT;
1466             pClassInfo->eventActionDisable = NVC86F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1467             pClassInfo->eventActionSingle  = NVC86F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1468             pClassInfo->eventActionRepeat  = NVC86F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1469             pClassInfo->rcNotifierIndex    = NVC86F_NOTIFIERS_RC;
1470             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1471             break;
1472         }
1473 
1474         //
1475         // Does not make sense. Call with the class type from the client not the
1476         // internal type
1477         //
1478         case PHYSICAL_CHANNEL_GPFIFO:
1479             NV_PRINTF(LEVEL_ERROR,
1480                       "Invalid class for CliGetChannelClassInfo\n");
1481 
1482         default:
1483         {
1484             pClassInfo->notifiersMaxCount  = 0;
1485             pClassInfo->eventActionDisable = 0;
1486             pClassInfo->eventActionSingle  = 0;
1487             pClassInfo->eventActionRepeat  = 0;
1488             pClassInfo->rcNotifierIndex    = 0;
1489             pClassInfo->classType          = CHANNEL_CLASS_TYPE_DMA;
1490             break;
1491         }
1492     }
1493 }
1494 
1495 
1496 /**
1497  * @brief Returns the next KernelChannel from the iterator.
1498  *
1499  * Iterates over runlist IDs and ChIDs and returns the next KernelChannel found
1500  * on the heap, if any.
1501  *
1502  * (error guaranteed if pointer is NULL; non-NULL pointer guaranteed if NV_OK)
1503  *
1504  * @param[in] pGpu
1505  * @param[in] pIt                   the channel iterator
1506  * @param[out] ppKernelChannel      returns a KernelChannel *
1507  *
1508  * @return NV_OK if the returned pointer is valid or error
1509  */
1510 NV_STATUS kchannelGetNextKernelChannel
1511 (
1512     OBJGPU              *pGpu,
1513     CHANNEL_ITERATOR    *pIt,
1514     KernelChannel      **ppKernelChannel
1515 )
1516 {
1517     KernelChannel *pKernelChannel;
1518     KernelFifo    *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1519 
1520     if (ppKernelChannel == NULL)
1521         return NV_ERR_INVALID_ARGUMENT;
1522 
1523     *ppKernelChannel = NULL;
1524 
1525     while (pIt->runlistId < pIt->numRunlists)
1526     {
1527         CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, pIt->runlistId);
1528 
1529         if (pChidMgr == NULL)
1530         {
1531             pIt->runlistId++;
1532             continue;
1533         }
1534 
1535         pIt->numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr);
1536         while (pIt->physicalChannelID < pIt->numChannels)
1537         {
1538             pKernelChannel = kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo,
1539                 pChidMgr, pIt->physicalChannelID);
1540             pIt->physicalChannelID++;
1541 
1542             //
1543             // This iterator can be used during an interrupt, when a KernelChannel may
1544             // be in the process of being destroyed. Don't return it if so.
1545             //
1546             if (pKernelChannel == NULL)
1547                 continue;
1548             if (!kchannelIsValid_HAL(pKernelChannel))
1549                 continue;
1550 
1551             *ppKernelChannel = pKernelChannel;
1552             return NV_OK;
1553         }
1554 
1555         pIt->runlistId++;
1556         // Reset channel index to 0 for next runlist
1557         pIt->physicalChannelID = 0;
1558     }
1559 
1560     return NV_ERR_OBJECT_NOT_FOUND;
1561 }
1562 
1563 /**
1564  * @brief Finds the corresponding KernelChannel given client object and channel handle
1565  *
1566  * Looks in client object store for the channel handle.  Scales with total
1567  * number of registered objects in the client, not just the number of channels.
1568  *
1569  * @param[in]  pClient
1570  * @param[in]  hKernelChannel a KernelChannel Channel handle
1571  * @param[out] ppKernelChannel
1572  *
1573  * @return NV_STATUS
1574  */
1575 NV_STATUS
1576 CliGetKernelChannel
1577 (
1578     RsClient       *pClient,
1579     NvHandle        hKernelChannel,
1580     KernelChannel **ppKernelChannel
1581 )
1582 {
1583     NV_STATUS      status;
1584     RsResourceRef *pResourceRef;
1585 
1586     *ppKernelChannel = NULL;
1587 
1588     status = clientGetResourceRef(pClient, hKernelChannel, &pResourceRef);
1589     if (status != NV_OK)
1590     {
1591         return status;
1592     }
1593 
1594     *ppKernelChannel = dynamicCast(pResourceRef->pResource, KernelChannel);
1595     NV_CHECK_OR_RETURN(LEVEL_INFO,
1596                        *ppKernelChannel != NULL,
1597                        NV_ERR_INVALID_CHANNEL);
1598     return NV_OK;
1599 }
1600 
1601 /*!
1602  * @brief Notify client that channel is stopped.
1603  *
 * @param[in] pKernelChannel
1605  */
1606 NV_STATUS
1607 kchannelNotifyRc_IMPL
1608 (
1609     KernelChannel *pKernelChannel
1610 )
1611 {
1612     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1613     RM_ENGINE_TYPE rmEngineType = RM_ENGINE_TYPE_NULL;
1614     NV_STATUS rmStatus = NV_OK;
1615 
1616     if (IS_GFID_VF(kchannelGetGfid(pKernelChannel)))
1617     {
1618         NV_PRINTF(LEVEL_INFO, "Notification for channel 0x%x stop is already performed on guest-RM\n",
1619                   kchannelGetDebugTag(pKernelChannel));
1620         return NV_OK;
1621     }
1622 
1623     if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT &&
1624         pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT)
1625     {
1626         NV_PRINTF(LEVEL_WARNING, "Channel 0x%x has no notifier set\n",
1627                   kchannelGetDebugTag(pKernelChannel));
1628         return NV_OK;
1629     }
1630 
1631     if (RM_ENGINE_TYPE_IS_VALID(kchannelGetEngineType(pKernelChannel)))
1632     {
1633         rmEngineType = kchannelGetEngineType(pKernelChannel);
1634     }
1635     rmStatus = krcErrorSetNotifier(pGpu, GPU_GET_KERNEL_RC(pGpu),
1636                                    pKernelChannel,
1637                                    ROBUST_CHANNEL_PREEMPTIVE_REMOVAL,
1638                                    rmEngineType,
1639                                    RC_NOTIFIER_SCOPE_CHANNEL);
1640     if (rmStatus != NV_OK)
1641     {
1642         NV_PRINTF(LEVEL_ERROR,
1643             "Failed to set error notifier for channel 0x%x with error 0x%x.\n",
1644             kchannelGetDebugTag(pKernelChannel), rmStatus);
1645     }
1646     return rmStatus;
1647 }
1648 
1649 /**
1650  * @brief Writes notifier specified by index
1651  *
1652  * @param[in] pKernelChannel
1653  * @param[in] notifyIndex
1654  * @param[in] pNotifyParams
1655  * @parms[in] notifyParamsSize
1656  */
1657 void kchannelNotifyGeneric_IMPL
1658 (
1659     KernelChannel *pKernelChannel,
1660     NvU32          notifyIndex,
1661     void          *pNotifyParams,
1662     NvU32          notifyParamsSize
1663 )
1664 {
1665     OBJGPU                 *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1666     ContextDma             *pContextDma;
1667     EVENTNOTIFICATION      *pEventNotification;
1668     CLI_CHANNEL_CLASS_INFO  classInfo;
1669 
1670     CliGetChannelClassInfo(RES_GET_EXT_CLASS_ID(pKernelChannel), &classInfo);
1671 
1672     // validate notifyIndex
1673     NV_CHECK_OR_RETURN_VOID(LEVEL_INFO, notifyIndex < classInfo.notifiersMaxCount);
1674 
1675     // handle notification if client wants it
1676     if (pKernelChannel->pNotifyActions[notifyIndex] != classInfo.eventActionDisable)
1677     {
1678         // get notifier context dma for the channel
1679         if (ctxdmaGetByHandle(RES_GET_CLIENT(pKernelChannel),
1680                               pKernelChannel->hErrorContext,
1681                               &pContextDma) == NV_OK)
1682         {
1683             // make sure it's big enough
1684             if (pContextDma->Limit >=
1685                 ((classInfo.notifiersMaxCount * sizeof (NvNotification)) - 1))
1686             {
1687                 // finally, write out the notifier
1688                 notifyFillNotifierArray(pGpu, pContextDma,
1689                                         0x0, 0x0, 0x0,
1690                                         notifyIndex);
1691             }
1692         }
1693     }
1694 
1695     // handle event if client wants it
1696     pEventNotification = inotifyGetNotificationList(staticCast(pKernelChannel, INotifier));
1697     if (pEventNotification != NULL)
1698     {
1699         // ping any events on the list of type notifyIndex
1700         osEventNotification(pGpu, pEventNotification, notifyIndex, pNotifyParams, notifyParamsSize);
1701     }
1702 
1703     // reset if single shot notify action
1704     if (pKernelChannel->pNotifyActions[notifyIndex] == classInfo.eventActionSingle)
1705         pKernelChannel->pNotifyActions[notifyIndex] = classInfo.eventActionDisable;
1706 
1707     return;
1708 }
1709 
1710 /*!
1711  * @brief Stop channel and notify client
1712  *
 * @param[in] pKernelChannel
1714  * @param[in] pStopChannelParams
1715  */
1716 NV_STATUS
1717 kchannelCtrlCmdStopChannel_IMPL
1718 (
1719     KernelChannel *pKernelChannel,
1720     NVA06F_CTRL_STOP_CHANNEL_PARAMS *pStopChannelParams
1721 )
1722 {
1723     NV_STATUS     rmStatus      = NV_OK;
1724     OBJGPU       *pGpu          = GPU_RES_GET_GPU(pKernelChannel);
1725     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
1726     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
1727 
1728     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
1729     {
1730 
1731         NV_RM_RPC_CONTROL(pGpu,
1732                           pRmCtrlParams->hClient,
1733                           RES_GET_HANDLE(pKernelChannel),
1734                           pRmCtrlParams->cmd,
1735                           pRmCtrlParams->pParams,
1736                           pRmCtrlParams->paramsSize,
1737                           rmStatus);
1738         if (rmStatus != NV_OK)
1739             return rmStatus;
1740     }
1741     else
1742     {
1743         NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
1744             kchannelFwdToInternalCtrl_HAL(pGpu,
1745                                           pKernelChannel,
1746                                           NVA06F_CTRL_CMD_INTERNAL_STOP_CHANNEL,
1747                                           pRmCtrlParams));
1748     }
1749 
1750     NV_ASSERT_OK_OR_RETURN(kchannelNotifyRc_HAL(pKernelChannel));
1751 
1752     return NV_OK;
1753 }
1754 
1755 /*!
1756  * @brief Helper to get type and memdesc of a channel notifier (memory/ctxdma)
1757  */
1758 NV_STATUS
1759 kchannelGetNotifierInfo
1760 (
1761     OBJGPU             *pGpu,
1762     RsClient           *pRsClient,
1763     NvHandle            hErrorContext,
1764     MEMORY_DESCRIPTOR **ppMemDesc,
1765     ErrorNotifierType  *pNotifierType,
1766     NvU64              *pOffset
1767 )
1768 {
1769     NvHandle    hDevice;
1770     Device     *pDevice     = NULL;
1771     ContextDma *pContextDma = NULL;
1772     Memory     *pMemory     = NULL;
1773 
1774     NV_ASSERT_OR_RETURN(ppMemDesc != NULL, NV_ERR_INVALID_PARAMETER);
1775     NV_ASSERT_OR_RETURN(pNotifierType != NULL, NV_ERR_INVALID_PARAMETER);
1776 
1777     *ppMemDesc = NULL;
1778     *pNotifierType = ERROR_NOTIFIER_TYPE_UNKNOWN;
1779     *pOffset = 0;
1780 
1781     if (hErrorContext == NV01_NULL_OBJECT)
1782     {
1783         *pNotifierType = ERROR_NOTIFIER_TYPE_NONE;
1784         return NV_OK;
1785     }
1786 
1787     NV_ASSERT_OK_OR_RETURN(deviceGetByInstance(pRsClient,
1788                                                gpuGetDeviceInstance(pGpu),
1789                                                &pDevice));
1790     hDevice = RES_GET_HANDLE(pDevice);
1791 
1792     if (memGetByHandleAndDevice(pRsClient, hErrorContext, hDevice, &pMemory) ==
1793         NV_OK)
1794     {
1795         if (memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_VIRTUAL)
1796         {
1797             //
1798             // GPUVA case: Get the underlying DMA mapping in this case. In GSP
1799             // client mode + SLI, GSP won't be able to write to notifiers on
1800             // other GPUs.
1801             //
1802             NvU64 offset;
1803             NvU32 subdeviceInstance;
1804             NvU64 notifyGpuVA = memdescGetPhysAddr(pMemory->pMemDesc,
1805                                                    AT_GPU_VA, 0);
1806             CLI_DMA_MAPPING_INFO *pDmaMappingInfo;
1807             NvBool bFound;
1808 
1809             bFound = CliGetDmaMappingInfo(
1810                 pRsClient,
1811                 RES_GET_HANDLE(pDevice),
1812                 RES_GET_HANDLE(pMemory),
1813                 notifyGpuVA,
1814                 gpumgrGetDeviceGpuMask(pGpu->deviceInstance),
1815                 &pDmaMappingInfo);
1816 
1817             if (!bFound)
1818             {
1819                 NV_PRINTF(LEVEL_ERROR,
1820                           "Cannot find DMA mapping for GPU_VA notifier\n");
1821                 return NV_ERR_INVALID_STATE;
1822             }
1823 
1824             offset = notifyGpuVA - pDmaMappingInfo->DmaOffset;
1825             if (offset + sizeof(NOTIFICATION) > pDmaMappingInfo->pMemDesc->Size)
1826             {
1827                 NV_PRINTF(LEVEL_ERROR,
1828                     "Notifier does not fit within DMA mapping for GPU_VA\n");
1829                 return NV_ERR_INVALID_STATE;
1830             }
1831 
1832             subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(
1833                 gpumgrGetParentGPU(pGpu));
1834             SLI_LOOP_START(SLI_LOOP_FLAGS_NONE)
1835             if (IsSLIEnabled(pGpu) && IS_GSP_CLIENT(pGpu))
1836             {
1837                 NV_PRINTF(LEVEL_ERROR, "GSP does not support SLI\n");
1838                 return NV_ERR_NOT_SUPPORTED;
1839             }
1840             SLI_LOOP_END
1841 
1842             if (!pDmaMappingInfo->KernelVAddr[subdeviceInstance])
1843             {
1844                 NV_PRINTF(LEVEL_ERROR,
1845                           "Kernel VA addr mapping not present for notifier\n");
1846                 return NV_ERR_INVALID_STATE;
1847             }
1848             *ppMemDesc = pDmaMappingInfo->pMemDesc;
1849             // The notifier format here is struct NOTIFICATION, same as ctxdma
1850             *pNotifierType = ERROR_NOTIFIER_TYPE_CTXDMA;
1851             *pOffset = offset;
1852         }
1853         else
1854         {
1855             *ppMemDesc = pMemory->pMemDesc;
1856             *pNotifierType = ERROR_NOTIFIER_TYPE_MEMORY;
1857         }
1858         return NV_OK;
1859     }
1860 
1861     if (ctxdmaGetByHandle(pRsClient, hErrorContext, &pContextDma) == NV_OK)
1862     {
1863         *ppMemDesc = pContextDma->pMemDesc;
1864         *pNotifierType = ERROR_NOTIFIER_TYPE_CTXDMA;
1865         return NV_OK;
1866     }
1867 
1868     return NV_ERR_OBJECT_NOT_FOUND;
1869 }
1870 
1871 /*!
1872  * @brief  Check if the client that owns this channel is in user mode.
1873  *
1874  * This replaces using call context for privilege checking,
1875  * and is callable from both CPU and GSP.
1876  *
1877  * @param[in] pGpu
1878  * @param[in] pKernelChannel
1879  *
1880  * @returns NV_TRUE if owned by user mode or NV_FALSE.
1881  */
1882 NvBool
1883 kchannelCheckIsUserMode_IMPL
1884 (
1885     KernelChannel *pKernelChannel
1886 )
1887 {
1888     return (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER) ||
1889            (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN);
1890 }
1891 
1892 /*!
1893  * @brief  Check if the client that owns this channel is kernel.
1894  *
1895  * This replaces using call context for privilege checking,
1896  * and is callable from both CPU and GSP.
1897  *
1898  * @param[in] pGpu
1899  * @param[in] pKernelChannel
1900  *
1901  * @returns NV_TRUE if owned by kernel or NV_FALSE.
1902  */
1903 NvBool
1904 kchannelCheckIsKernel_IMPL
1905 (
1906     KernelChannel *pKernelChannel
1907 )
1908 {
1909     return pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL;
1910 }
1911 
1912 /*!
1913  * @brief  Check if the client that owns this channel is admin.
1914  *
1915  * This replaces using call context for admin privilege checking,
1916  * but is callable from both CPU and GSP.
1917  *
1918  * @param[in] pGpu
1919  * @param[in] pKernelChannel
1920  *
1921  * @returns NV_TRUE if owned by admin or NV_FALSE.
1922  */
1923 NvBool
1924 kchannelCheckIsAdmin_IMPL
1925 (
1926     KernelChannel *pKernelChannel
1927 )
1928 {
1929     return (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL) ||
1930            (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN);
1931 }
1932 
1933 
1934 /*!
1935  * @brief  Check if the channel is bound to its resources.
1936  *
1937  * This is to make sure channel went through the UVM registration step before it can be scheduled.
1938  * This applies only to UVM owned channels.
1939  *
1940  * @param[in] pKernelChannel
1941  * @param[in] pGVAS
1942  *
1943  * @returns NV_TRUE if bound.
1944  */
1945 NvBool
1946 kchannelIsSchedulable_IMPL
1947 (
1948     OBJGPU *pGpu,
1949     KernelChannel *pKernelChannel
1950 )
1951 {
1952     OBJGVASPACE *pGVAS = NULL;
1953     NvU32        engineDesc = 0;
1954     NvU32        gfId;
1955 
1956     gfId = kchannelGetGfid(pKernelChannel);
1957     if (IS_GFID_VF(gfId))
1958     {
1959         NV_PRINTF(LEVEL_INFO, "Check for channel schedulability for channel 0x%x is already performed on guest-RM\n",
1960                   kchannelGetDebugTag(pKernelChannel));
1961         return NV_TRUE;
1962     }
1963 
1964     pGVAS = dynamicCast(pKernelChannel->pVAS, OBJGVASPACE);
1965 
1966     //
1967     // It should be an error to have allocated and attempt to schedule a
1968     // channel without having allocated a GVAS. We ignore this check on
1969     // AMODEL, which has its own dummy AVAS.
1970     //
1971     NV_ASSERT_OR_RETURN(pGVAS != NULL || IS_MODS_AMODEL(pGpu), NV_FALSE);
1972 
1973     NV_ASSERT_OR_RETURN(kchannelGetEngine_HAL(pGpu, pKernelChannel, &engineDesc) == NV_OK, NV_FALSE);
1974 
1975     if (pGVAS != NULL && gvaspaceIsExternallyOwned(pGVAS) && IS_GR(engineDesc) && !pKernelChannel->bIsContextBound)
1976     {
1977         NV_PRINTF(LEVEL_ERROR,
1978                   "Cannot schedule externally-owned channel with unbound allocations :0x%x!\n",
1979                   kchannelGetDebugTag(pKernelChannel));
1980         return NV_FALSE;
1981     }
1982     return NV_TRUE;
1983 }
1984 
1985 // Alloc pFifoHalData
1986 static NV_STATUS
1987 _kchannelAllocHalData
1988 (
1989     OBJGPU        *pGpu,
1990     KernelChannel *pKernelChannel
1991 )
1992 {
1993     portMemSet(pKernelChannel->pFifoHalData, 0, sizeof(pKernelChannel->pFifoHalData));
1994 
1995     // Alloc 1 page of instmem per GPU instance
1996     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
1997 
1998     pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = portMemAllocNonPaged(sizeof(FIFO_INSTANCE_BLOCK));
1999 
2000     NV_ASSERT_OR_ELSE(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] != NULL,
2001             SLI_LOOP_GOTO(failed));
2002 
2003     portMemSet(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)], 0, sizeof(FIFO_INSTANCE_BLOCK));
2004 
2005     SLI_LOOP_END
2006 
2007     return NV_OK;
2008 
2009 failed:
2010     DBG_BREAKPOINT();
2011     _kchannelFreeHalData(pGpu, pKernelChannel);
2012     return NV_ERR_NO_MEMORY;
2013 }
2014 
2015 // Free memdescs and pFifoHalData, if any
2016 static void
2017 _kchannelFreeHalData
2018 (
2019     OBJGPU        *pGpu,
2020     KernelChannel *pKernelChannel
2021 )
2022 {
2023     // Unmap / delete memdescs
2024     kchannelDestroyMem_HAL(pGpu, pKernelChannel);
2025 
2026     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2027     kchannelDestroyUserdMemDesc(pGpu, pKernelChannel);
2028 
2029     // Free pFifoHalData
2030     portMemFree(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]);
2031     pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = NULL;
2032     SLI_LOOP_END
2033 }
2034 
2035 // Returns the proper VerifFlags for kchannelAllocMem
2036 static NvU32
2037 _kchannelgetVerifFlags
2038 (
2039     OBJGPU                                    *pGpu,
2040     NV_CHANNEL_ALLOC_PARAMS    *pChannelGpfifoParams
2041 )
2042 {
2043     NvU32 verifFlags = 0;
2044 
2045     return verifFlags;
2046 }
2047 
2048 // Allocate and describe instance memory
2049 static NV_STATUS
2050 _kchannelAllocOrDescribeInstMem
2051 (
2052     KernelChannel  *pKernelChannel,
2053     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams
2054 )
2055 {
2056     OBJGPU                *pGpu        = GPU_RES_GET_GPU(pKernelChannel);
2057     KernelFifo            *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
2058     KernelChannelGroupApi *pKernelChannelGroupApi = pKernelChannel->pKernelChannelGroupApi;
2059     KernelChannelGroup    *pKernelChannelGroup    = pKernelChannelGroupApi->pKernelChannelGroup;
2060     NvU32                  gfid       = pKernelChannelGroup->gfid;
2061     NV_STATUS              status;
2062     NvHandle               hClient = RES_GET_CLIENT_HANDLE(pKernelChannel);
2063 
2064     // Alloc pFifoHalData
2065     NV_ASSERT_OK_OR_RETURN(_kchannelAllocHalData(pGpu, pKernelChannel));
2066 
2067     //
2068     // GSP RM and host RM on full SRIOV setup will not be aware of the client allocated userd handles,
2069     // translate the handle on client GSP. GSP RM or host RM on full SRIOV setup will get the translated
2070     // addresses which it will later memdescribe.
2071     //
2072     // However it is still client allocated userd from GSP RM or host RM on full SRIOV setup
2073     // perspective so set the flag accordingly.
2074     //
2075     if (!RMCFG_FEATURE_PLATFORM_GSP &&
2076         !(IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
2077     {
2078         pKernelChannel->bClientAllocatedUserD = NV_FALSE;
2079         NV_ASSERT_OK_OR_GOTO(status,
2080                 kchannelCreateUserdMemDescBc_HAL(pGpu, pKernelChannel, hClient,
2081                     pChannelGpfifoParams->hUserdMemory,
2082                     pChannelGpfifoParams->userdOffset),
2083                 failed);
2084     }
2085     else
2086     {
2087         pKernelChannel->bClientAllocatedUserD = NV_TRUE;
2088     }
2089 
2090     // Alloc/describe instmem memdescs depending on platform
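    //   - Heavy-SRIOV guest:                     describe memdescs via RPC to the host
    //   - GSP firmware, or a full-SRIOV VF:      describe memdescs from the alloc params
    //   - Baremetal, GSP client, or SRIOV host:  allocate the memory here
    //   - Legacy (non-SRIOV) vGPU guest:         nothing to allocate on this side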
2091     if (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
2092     {
2093         // On Heavy SRIOV, describe memdescs using RPC
2094         NV_ASSERT_OK_OR_GOTO(status,
2095                 _kchannelDescribeMemDescsHeavySriov(pGpu, pKernelChannel),
2096                 failed);
2097     }
2098     else if (RMCFG_FEATURE_PLATFORM_GSP ||
2099         (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
2100     {
2101         // On GSPFW or non-heavy SRIOV, describe memdescs from params
2102         NV_ASSERT_OK_OR_GOTO(status,
2103                 _kchannelDescribeMemDescsFromParams(pGpu, pKernelChannel, pChannelGpfifoParams),
2104                 failed);
2105     }
2106     else if (!IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
2107     {
2108         // On baremetal, GSP client, or SRIOV host, alloc mem
2109         NV_ASSERT_OK_OR_GOTO(status,
2110                 kchannelAllocMem_HAL(pGpu,
2111                                      pKernelChannel,
2112                                      pChannelGpfifoParams->flags,
2113                                      _kchannelgetVerifFlags(pGpu, pChannelGpfifoParams)),
2114                 failed);
2115     }
2116 
2117     // Setup USERD
2118     if (IS_VIRTUAL(pGpu))
2119     {
2120         PMEMORY_DESCRIPTOR pUserdSubDeviceMemDesc =
2121                 pKernelChannel->pUserdSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
2122         NvBool bFullSriov = IS_VIRTUAL_WITH_SRIOV(pGpu) &&
2123             !gpuIsWarBug200577889SriovHeavyEnabled(pGpu);
2124 
2125         // Clear Userd if it is in FB for SRIOV environment without BUG 200577889 or if in SYSMEM
2126         if (pUserdSubDeviceMemDesc != NULL &&
2127                 ((memdescGetAddressSpace(pUserdSubDeviceMemDesc) == ADDR_SYSMEM)
2128                 || ((memdescGetAddressSpace(pUserdSubDeviceMemDesc) == ADDR_FBMEM) && bFullSriov)))
2129         {
2130             kfifoSetupUserD_HAL(pGpu, pKernelFifo, pUserdSubDeviceMemDesc);
2131         }
2132     }
2133     return NV_OK;
2134 
2135 failed:
2136     _kchannelFreeHalData(pGpu, pKernelChannel);
2137     return status;
2138 }
2139 
2140 /**
2141  * @brief Create and describe channel instance memory ramfc and userd memdescs
2142  *        Done using info in pChanGpfifoParams
2143  *
2144  * @param pGpu                  : OBJGPU pointer
2145  * @param pKernelChannel        : KernelChannel pointer
2146  * @param pChanGpfifoParams     : Pointer to channel allocation params
2147  */
2148 static NV_STATUS
2149 _kchannelDescribeMemDescsFromParams
2150 (
2151     OBJGPU                                 *pGpu,
2152     KernelChannel                          *pKernelChannel,
2153     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams
2154 )
2155 {
2156     NV_STATUS               status         = NV_OK;
2157     FIFO_INSTANCE_BLOCK    *pInstanceBlock = NULL;
2158     NvU32                   subDevInst;
2159     NvU32                   gfid           = GPU_GFID_PF;
2160     NvU32                   runqueue;
2161     KernelChannelGroupApi *pKernelChannelGroupApi =
2162         pKernelChannel->pKernelChannelGroupApi;
2163 
2164     NV_ASSERT_OR_RETURN((pKernelChannelGroupApi != NULL), NV_ERR_INVALID_STATE);
2165     gfid = pKernelChannelGroupApi->pKernelChannelGroup->gfid;
2166 
2167     NV_ASSERT_OR_RETURN(RMCFG_FEATURE_PLATFORM_GSP ||
2168                         (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)),
2169                         NV_ERR_INVALID_STATE);
2170 
2171     NV_ASSERT_OR_RETURN((pChannelGpfifoParams != NULL), NV_ERR_INVALID_ARGUMENT);
2172 
2173     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2174 
2175     subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
2176 
2177     pInstanceBlock = (FIFO_INSTANCE_BLOCK*) pKernelChannel->pFifoHalData[subDevInst];
2178 
2179     // Create memory descriptor for the instance memory
2180     status = memdescCreate(&pInstanceBlock->pInstanceBlockDesc, pGpu,
2181                            pChannelGpfifoParams->instanceMem.size, 1 , NV_TRUE,
2182                            pChannelGpfifoParams->instanceMem.addressSpace,
2183                            pChannelGpfifoParams->instanceMem.cacheAttrib,
2184                            MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2185 
2186     if (status != NV_OK)
2187     {
2188         NV_PRINTF(LEVEL_ERROR,
2189                   "Unable to allocate instance memory descriptor!\n");
2190         SLI_LOOP_RETURN(status);
2191     }
2192 
2193     memdescDescribe(pInstanceBlock->pInstanceBlockDesc, pChannelGpfifoParams->instanceMem.addressSpace,
2194                     pChannelGpfifoParams->instanceMem.base, pChannelGpfifoParams->instanceMem.size);
2195 
2196 
2197     // Create memory descriptor for the ramfc
2198     status = memdescCreate(&pInstanceBlock->pRamfcDesc, pGpu,
2199                            pChannelGpfifoParams->ramfcMem.size, 1 , NV_TRUE,
2200                            pChannelGpfifoParams->ramfcMem.addressSpace,
2201                            pChannelGpfifoParams->ramfcMem.cacheAttrib,
2202                            MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2203 
2204     if (status != NV_OK)
2205     {
2206         NV_PRINTF(LEVEL_ERROR,
2207                   "Unable to allocate instance memory descriptor!\n");
2208         SLI_LOOP_RETURN(status);
2209     }
2210 
2211     memdescDescribe(pInstanceBlock->pRamfcDesc, pChannelGpfifoParams->ramfcMem.addressSpace,
2212                     pChannelGpfifoParams->ramfcMem.base, pChannelGpfifoParams->ramfcMem.size);
2213 
2214     // Create userd memory descriptor
2215     status = memdescCreate(&pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], pGpu,
2216                            pChannelGpfifoParams->userdMem.size, 1 , NV_TRUE,
2217                            pChannelGpfifoParams->userdMem.addressSpace,
2218                            pChannelGpfifoParams->userdMem.cacheAttrib,
2219                            MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2220 
2221     if (status != NV_OK)
2222     {
2223         NV_PRINTF(LEVEL_ERROR,
2224                   "Unable to allocate instance memory descriptor!\n");
2225         SLI_LOOP_RETURN(status);
2226     }
2227 
2228     memdescDescribe(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst],
2229                     pChannelGpfifoParams->userdMem.addressSpace,
2230                     pChannelGpfifoParams->userdMem.base, pChannelGpfifoParams->userdMem.size);
2231 
2232     if (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
2233     {
2234         /*
2235          * For full SRIOV, guest RM allocates and sends istance, ramfc and userd memory.
2236          * Set MEMDESC_FLAGS_GUEST_ALLOCATED flag in memory descriptor
2237          */
2238         memdescSetFlag(pInstanceBlock->pInstanceBlockDesc, MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE);
2239         memdescSetFlag(pInstanceBlock->pRamfcDesc, MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE);
2240         memdescSetFlag(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE);
2241     }
2242 
2243     // Create method buffer memory descriptor
2244     runqueue = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, pChannelGpfifoParams->flags);
2245     if (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
2246     {
2247         pKernelChannelGroupApi->pKernelChannelGroup->pMthdBuffers[runqueue]
2248             .bar2Addr = pChannelGpfifoParams->mthdbufMem.base;
2249     }
2250     else if (pKernelChannelGroupApi->pKernelChannelGroup
2251                  ->pMthdBuffers[runqueue].pMemDesc == NULL)
2252     {
2253         NV_ASSERT(pChannelGpfifoParams->mthdbufMem.size > 0);
2254         NV_ASSERT(pChannelGpfifoParams->mthdbufMem.base != 0);
2255         status = memdescCreate(&pKernelChannelGroupApi->pKernelChannelGroup
2256                                     ->pMthdBuffers[runqueue].pMemDesc,
2257                                pGpu,
2258                                pChannelGpfifoParams->mthdbufMem.size,
2259                                1,
2260                                NV_TRUE,
2261                                pChannelGpfifoParams->mthdbufMem.addressSpace,
2262                                pChannelGpfifoParams->mthdbufMem.cacheAttrib,
2263                                MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2264 
2265         if (status != NV_OK)
2266         {
2267             NV_PRINTF(LEVEL_ERROR,
2268                       "Unable to allocate instance memory descriptor!\n");
2269             SLI_LOOP_RETURN(status);
2270         }
2271         memdescDescribe(pKernelChannelGroupApi->pKernelChannelGroup
2272                             ->pMthdBuffers[runqueue].pMemDesc,
2273                         pChannelGpfifoParams->mthdbufMem.addressSpace,
2274                         pChannelGpfifoParams->mthdbufMem.base,
2275                         pChannelGpfifoParams->mthdbufMem.size);
2276     }
2277 
2278     NV_PRINTF(LEVEL_INFO,
2279               "hChannel 0x%x hClient 0x%x, Class ID 0x%x "
2280               "Instance Block @ 0x%llx (%s %x) "
2281               "USERD @ 0x%llx "
2282               "for subdevice %d\n",
2283               RES_GET_HANDLE(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel),
2284               memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0),
2285               memdescGetApertureString(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2286               (NvU32)(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2287               (pKernelChannel->pUserdSubDeviceMemDesc[subDevInst] == NULL) ? 0x0LL :
2288               memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], AT_GPU, 0LL), subDevInst);
2289 
2290     SLI_LOOP_END
2291 
2292     return status;
2293 }
2294 
2295 /**
2296  * @brief Create and describe channel instance memory ramfc and userd memdescs
2297  *        Done using RPC for Heavy SRIOV guest
2298  *
2299  * @param pGpu                  : OBJGPU pointer
2300  * @param pKernelChannel        : KernelChannel pointer
2301  */
2302 static NV_STATUS
2303 _kchannelDescribeMemDescsHeavySriov
2304 (
2305     OBJGPU               *pGpu,
2306     KernelChannel        *pKernelChannel
2307 )
2308 {
2309     NV_STATUS               status         = NV_OK;
2310     FIFO_INSTANCE_BLOCK    *pInstanceBlock = NULL;
2311     NvU32                   subDevInst;
2312     Subdevice              *pSubDevice;
2313     NvHandle                hSubDevice     = 0;
2314     NvU32                   apert          = ADDR_UNKNOWN;
2315     NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS memInfoParams;
2316     Device                 *pDevice = GPU_RES_GET_DEVICE(pKernelChannel);
2317 
2318     NV_ASSERT_OR_RETURN(IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu),
2319             NV_ERR_INVALID_STATE);
2320 
2321     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2322 
2323     subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
2324 
2325     pInstanceBlock = (FIFO_INSTANCE_BLOCK*) pKernelChannel->pFifoHalData[subDevInst];
2326 
2327     //
2328     // In SRIOV enabled systems, MMU fault interrupts for guest contexts are received and handled in guests.
2329     // Inorder to correctly find the faulting channel, faulting instance address has be compared with list of allocated channels.
2330     // But since contexts are currently allocated in host during channelConstruct, we need
2331     // context info from host and save it locally for the above channel lookup to pass. This piece of code uses GET_CHANNEL_MEM_INFO
2332     // to fetch the info and update pFifoHalData with the relevant details.
2333     //
2334 
2335     portMemSet(&memInfoParams, 0, sizeof(NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS));
2336     memInfoParams.hChannel = RES_GET_HANDLE(pKernelChannel);
2337 
2338     status = subdeviceGetByInstance(RES_GET_CLIENT(pKernelChannel),
2339                                     RES_GET_HANDLE(pDevice),
2340                                     subDevInst,
2341                                     &pSubDevice);
2342     if (status != NV_OK)
2343     {
2344         NV_PRINTF(LEVEL_ERROR, "Unable to get subdevice object.\n");
2345         DBG_BREAKPOINT();
2346         SLI_LOOP_RETURN(status);
2347     }
2348 
2349     GPU_RES_SET_THREAD_BC_STATE(pSubDevice);
2350 
2351     hSubDevice = RES_GET_HANDLE(pSubDevice);
2352 
2353     NV_RM_RPC_CONTROL(pGpu,
2354                       RES_GET_CLIENT_HANDLE(pKernelChannel),
2355                       hSubDevice,
2356                       NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO,
2357                       &memInfoParams,
2358                       sizeof(NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS),
2359                       status);
2360     if (status != NV_OK)
2361     {
2362         NV_PRINTF(LEVEL_ERROR,
2363                   "RM Control call to fetch channel meminfo failed, hKernelChannel 0x%x\n",
2364                   RES_GET_HANDLE(pKernelChannel));
2365         DBG_BREAKPOINT();
2366         SLI_LOOP_RETURN(status);
2367     }
2368 
2369     // Find the aperture
2370     if (memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_VIDMEM)
2371     {
2372         apert = ADDR_FBMEM;
2373     }
2374     else if ((memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_COH) ||
2375              (memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_NCOH))
2376     {
2377         apert = ADDR_SYSMEM;
2378     }
2379     else
2380     {
2381         NV_PRINTF(LEVEL_ERROR,
2382                   "Unknown aperture, hClient 0x%x, hKernelChannel 0x%x\n",
2383                   RES_GET_CLIENT_HANDLE(pKernelChannel),
2384                   RES_GET_HANDLE(pKernelChannel));
2385         status = NV_ERR_INVALID_ARGUMENT;
2386         DBG_BREAKPOINT();
2387         SLI_LOOP_RETURN(status);
2388     }
2389 
2390     status = memdescCreate(&pInstanceBlock->pInstanceBlockDesc, pGpu,
2391                            memInfoParams.chMemInfo.inst.size, 1 , NV_TRUE,
2392                            apert, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2393 
2394     if (status != NV_OK)
2395     {
2396         NV_PRINTF(LEVEL_ERROR,
2397                   "Unable to allocate instance memory descriptor!\n");
2398         SLI_LOOP_RETURN(status);
2399     }
2400 
2401     memdescDescribe(pInstanceBlock->pInstanceBlockDesc, apert, memInfoParams.chMemInfo.inst.base, memInfoParams.chMemInfo.inst.size);
2402 
2403     NV_PRINTF(LEVEL_INFO,
2404               "hChannel 0x%x hClient 0x%x, Class ID 0x%x "
2405               "Instance Block @ 0x%llx (%s %x) "
2406               "USERD @ 0x%llx "
2407               "for subdevice %d\n",
2408               RES_GET_HANDLE(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel),
2409               memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0),
2410               memdescGetApertureString(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2411               (NvU32)(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2412               (pKernelChannel->pUserdSubDeviceMemDesc[subDevInst] == NULL) ? 0x0LL :
2413               memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], AT_GPU, 0LL), subDevInst);
2414 
2415     SLI_LOOP_END
2416 
2417     return status;
2418 }
2419 
2420 static NV_STATUS
2421 _kchannelSendChannelAllocRpc
2422 (
2423     KernelChannel *pKernelChannel,
2424     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams,
2425     KernelChannelGroup *pKernelChannelGroup,
2426     NvBool bFullSriov
2427 )
2428 {
2429     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
2430     NV_CHANNEL_ALLOC_PARAMS *pRpcParams;
2431     NV_STATUS status = NV_OK;
2432 
2433     pRpcParams = portMemAllocNonPaged(sizeof(*pRpcParams));
2434     NV_ASSERT_OR_RETURN(pRpcParams != NULL, NV_ERR_NO_MEMORY);
2435     portMemSet(pRpcParams, 0, sizeof(*pRpcParams));
2436 
2437     pRpcParams->hObjectError      = pChannelGpfifoParams->hObjectError;
2438     pRpcParams->hObjectBuffer     = 0;
2439     pRpcParams->gpFifoOffset      = pChannelGpfifoParams->gpFifoOffset;
2440     pRpcParams->gpFifoEntries     = pChannelGpfifoParams->gpFifoEntries;
2441     pRpcParams->flags             = pChannelGpfifoParams->flags;
2442     pRpcParams->hContextShare     = pChannelGpfifoParams->hContextShare;
2443     pRpcParams->hVASpace          = pChannelGpfifoParams->hVASpace;
2444     pRpcParams->engineType        = pChannelGpfifoParams->engineType;
2445     pRpcParams->subDeviceId       = pChannelGpfifoParams->subDeviceId;
2446     pRpcParams->hObjectEccError   = pChannelGpfifoParams->hObjectEccError;
2447     pRpcParams->hPhysChannelGroup = pChannelGpfifoParams->hPhysChannelGroup;
2448     pRpcParams->internalFlags     = pChannelGpfifoParams->internalFlags;
2449 
2450     portMemCopy((void*)pRpcParams->hUserdMemory,
2451                 sizeof(NvHandle) * NV2080_MAX_SUBDEVICES,
2452                 (const void*)pChannelGpfifoParams->hUserdMemory,
2453                 sizeof(NvHandle) * NV2080_MAX_SUBDEVICES);
2454 
2455     portMemCopy((void*)pRpcParams->userdOffset,
2456                 sizeof(NvU64) * NV2080_MAX_SUBDEVICES,
2457                 (const void*)pChannelGpfifoParams->userdOffset,
2458                 sizeof(NvU64) * NV2080_MAX_SUBDEVICES);
2459 
2460     if (pKernelChannel->bCCSecureChannel)
2461     {
2462         portMemCopy((void*)pRpcParams->encryptIv,
2463                     sizeof(pRpcParams->encryptIv),
2464                     (const void*)pChannelGpfifoParams->encryptIv,
2465                     sizeof(pChannelGpfifoParams->encryptIv));
2466 
2467         portMemCopy((void*)pRpcParams->decryptIv,
2468                     sizeof(pRpcParams->decryptIv),
2469                     (const void*)pChannelGpfifoParams->decryptIv,
2470                     sizeof(pChannelGpfifoParams->decryptIv));
2471 
2472         portMemCopy((void*)pRpcParams->hmacNonce,
2473                     sizeof(pRpcParams->hmacNonce),
2474                     (const void*)pChannelGpfifoParams->hmacNonce,
2475                     sizeof(pChannelGpfifoParams->hmacNonce));
2476     }
2477 
2478     //
2479     // These fields are only filled out for GSP client or full SRIOV
2480     // i.e. the guest independently allocs ChID and instmem
2481     //
2482     if (IS_GSP_CLIENT(pGpu) || bFullSriov)
2483     {
2484         NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
2485         FIFO_INSTANCE_BLOCK *pInstanceBlock = pKernelChannel->pFifoHalData[subdevInst];
2486         NvU32 runqueue  = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, pChannelGpfifoParams->flags);
2487 
2488         NV_ASSERT_OR_ELSE(pInstanceBlock != NULL, status = NV_ERR_INVALID_STATE; goto cleanup);
2489 
2490         portMemCopy(&pRpcParams->errorNotifierMem,
2491                     sizeof pRpcParams->errorNotifierMem,
2492                     &(pChannelGpfifoParams->errorNotifierMem),
2493                     sizeof pChannelGpfifoParams->errorNotifierMem);
2494         portMemCopy(&pRpcParams->eccErrorNotifierMem,
2495                     sizeof pRpcParams->eccErrorNotifierMem,
2496                     &(pChannelGpfifoParams->eccErrorNotifierMem),
2497                     sizeof pChannelGpfifoParams->eccErrorNotifierMem);
2498 
2499         // Fill the instance block
2500         if (pInstanceBlock)
2501         {
2502             pRpcParams->instanceMem.base =
2503                             memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0);
2504             pRpcParams->instanceMem.size = pInstanceBlock->pInstanceBlockDesc->Size;
2505             pRpcParams->instanceMem.addressSpace =
2506                             memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc);
2507             pRpcParams->instanceMem.cacheAttrib =
2508                             memdescGetCpuCacheAttrib(pInstanceBlock->pInstanceBlockDesc);
2509 
2510             pRpcParams->ramfcMem.base =
2511                             memdescGetPhysAddr(pInstanceBlock->pRamfcDesc,  AT_GPU, 0);
2512             pRpcParams->ramfcMem.size = pInstanceBlock->pRamfcDesc->Size;
2513             pRpcParams->ramfcMem.addressSpace =
2514                             memdescGetAddressSpace(pInstanceBlock->pRamfcDesc);
2515             pRpcParams->ramfcMem.cacheAttrib =
2516                             memdescGetCpuCacheAttrib(pInstanceBlock->pRamfcDesc);
2517         }
2518 
2519         // Fill the userd memory descriptor
2520         if (pKernelChannel->pUserdSubDeviceMemDesc[subdevInst])
2521         {
2522             pRpcParams->userdMem.base =
2523                             memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst], AT_GPU, 0);
2524             pRpcParams->userdMem.size = pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]->Size;
2525             pRpcParams->userdMem.addressSpace =
2526                             memdescGetAddressSpace(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]);
2527             pRpcParams->userdMem.cacheAttrib =
2528                             memdescGetCpuCacheAttrib(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]);
2529         }
2530 
2531         // Fill the method buffer memory descriptor
2532         if (pKernelChannelGroup->pMthdBuffers != NULL &&
2533             pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc != NULL)
2534         {
2535             if (bFullSriov)
2536             {
2537                 pRpcParams->mthdbufMem.base =
2538                     pKernelChannelGroup->pMthdBuffers[runqueue].bar2Addr;
2539                 pRpcParams->mthdbufMem.size =
2540                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc->Size;
2541                 pRpcParams->mthdbufMem.addressSpace = ADDR_VIRTUAL;
2542                 pRpcParams->mthdbufMem.cacheAttrib = 0;
2543             }
2544             else
2545             {
2546                 pRpcParams->mthdbufMem.base = memdescGetPhysAddr(
2547                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc,
2548                     AT_GPU, 0);
2549                 pRpcParams->mthdbufMem.size =
2550                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc->Size;
2551                 pRpcParams->mthdbufMem.addressSpace = memdescGetAddressSpace(
2552                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc);
2553                 pRpcParams->mthdbufMem.cacheAttrib = memdescGetCpuCacheAttrib(
2554                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc);
2555             }
2556         }
2557 
2558         if (IS_GSP_CLIENT(pGpu))
2559         {
2560             //
2561             // Setting these param flags will make the Physical RMAPI use our
2562             // ChID (which is already decided)
2563             //
2564 
2565             NvU32 numChannelsPerUserd = NVBIT(DRF_SIZE(NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE)); //  1<<3 -> 4K / 512B
2566 
2567             pRpcParams->flags = FLD_SET_DRF(OS04, _FLAGS,
2568                     _CHANNEL_USERD_INDEX_FIXED, _FALSE, pRpcParams->flags);
2569             pRpcParams->flags = FLD_SET_DRF(OS04, _FLAGS,
2570                     _CHANNEL_USERD_INDEX_PAGE_FIXED, _TRUE, pRpcParams->flags);
2571             pRpcParams->flags = FLD_SET_DRF_NUM(OS04, _FLAGS,
2572                     _CHANNEL_USERD_INDEX_VALUE, pKernelChannel->ChID % numChannelsPerUserd, pRpcParams->flags);
2573             pRpcParams->flags = FLD_SET_DRF_NUM(OS04, _FLAGS,
2574                     _CHANNEL_USERD_INDEX_PAGE_VALUE, pKernelChannel->ChID / numChannelsPerUserd, pRpcParams->flags);
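            //
            // Worked example (with the 3-bit index field noted above, i.e.
            // 8 USERD slots of 512B per 4K page): ChID 19 yields
            // _USERD_INDEX_VALUE = 19 % 8 = 3 and _USERD_INDEX_PAGE_VALUE = 19 / 8 = 2.
            //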
2575 
2576             // GSP client needs to pass in privilege level as an alloc param since GSP-RM cannot check this
2577             pRpcParams->internalFlags =
2578                 FLD_SET_DRF_NUM(_KERNELCHANNEL, _ALLOC_INTERNALFLAGS, _PRIVILEGE,
2579                     pKernelChannel->privilegeLevel, pRpcParams->internalFlags);
2580             pRpcParams->ProcessID = pKernelChannel->ProcessID;
2581             pRpcParams->SubProcessID= pKernelChannel->SubProcessID;
2582         }
2583     }
2584 
2585     NV_RM_RPC_ALLOC_CHANNEL(pGpu, RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_PARENT_HANDLE(pKernelChannel),
2586                             RES_GET_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel),
2587                             pRpcParams, &pKernelChannel->ChID, status);
2588     NV_ASSERT_OR_ELSE(status == NV_OK, goto cleanup);
2589 
2590     NV_PRINTF(LEVEL_INFO,
2591           "Alloc Channel chid %d, hClient:0x%x, "
2592           "hParent:0x%x, hObject:0x%x, hClass:0x%x\n", pKernelChannel->ChID,
2593           RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_PARENT_HANDLE(pKernelChannel),
2594           RES_GET_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel));
2595 
2596 cleanup:
2597     portMemFree(pRpcParams);
2598 
2599     return status;
2600 }
2601 
2602 /*!
2603  * @brief Bind a single channel to a runlist
2604  *
2605  * This is a helper function for kchannelCtrlCmdBind and kchangrpapiCtrlCmdBind
2606  */
2607 NV_STATUS kchannelBindToRunlist_IMPL
2608 (
2609     KernelChannel *pKernelChannel,
2610     RM_ENGINE_TYPE localRmEngineType,
2611     ENGDESCRIPTOR  engineDesc
2612 )
2613 {
2614     OBJGPU    *pGpu;
2615     NV_STATUS  status = NV_OK;
2616 
2617     NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT);
2618     pGpu = GPU_RES_GET_GPU(pKernelChannel);
2619 
2620     // copied from setRunlistIdByEngineType
2621     if ((engineDesc == ENG_SW) || (engineDesc == ENG_BUS))
2622     {
2623         return NV_OK;
2624     }
2625 
2626     //
2627     // vGPU:
2628     //
2629     // Since vGPU does all real hardware management in the
2630     // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true),
2631     // do an RPC to the host to do the hardware update.
2632     //
2633     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
2634     {
2635         NVA06F_CTRL_BIND_PARAMS params;
2636 
2637         params.engineType = gpuGetNv2080EngineType(localRmEngineType);
2638 
2639         NV_RM_RPC_CONTROL(pGpu,
2640                           RES_GET_CLIENT_HANDLE(pKernelChannel),
2641                           RES_GET_HANDLE(pKernelChannel),
2642                           NVA06F_CTRL_CMD_BIND,
2643                           &params,
2644                           sizeof(params),
2645                           status);
2646 
2647         NV_ASSERT_OR_RETURN(status == NV_OK, status);
2648     }
2649 
2650     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
2651 
2652     status = kfifoRunlistSetIdByEngine_HAL(pGpu, GPU_GET_KERNEL_FIFO(pGpu),
2653                                            pKernelChannel, engineDesc);
2654 
2655     if (status != NV_OK)
2656     {
2657         NV_PRINTF(LEVEL_ERROR,
2658                   "Failed to set RunlistID 0x%08x for channel 0x%08x\n",
2659                   engineDesc, kchannelGetDebugTag(pKernelChannel));
2660         SLI_LOOP_BREAK;
2661     }
2662 
2663     SLI_LOOP_END;
2664 
2665     return status;
2666 }
2667 
2668 //
2669 // channelCtrlCmdEventSetNotification
2670 //
// This command handles set notification operations for all tesla, fermi,
// kepler, maxwell, and pascal based gpfifo classes:
2673 //
2674 //    NV50_DISPLAY             (Class: NV5070)
2675 //    GF100_CHANNEL_GPFIFO     (Class: NV906F)
2676 //    KEPLER_CHANNEL_GPFIFO_A  (Class: NVA06F)
2677 //    KEPLER_CHANNEL_GPFIFO_B  (Class: NVA16F)
2678 //    KEPLER_CHANNEL_GPFIFO_C  (Class: NVA26F)
2679 //    MAXWELL_CHANNEL_GPFIFO_A (Class: NVB06F)
2680 //    PASCAL_CHANNEL_GPFIFO_A  (Class: NVC06F)
2681 //
2682 NV_STATUS
2683 kchannelCtrlCmdEventSetNotification_IMPL
2684 (
2685     KernelChannel *pKernelChannel,
2686     NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams
2687 )
2688 {
2689     CLI_CHANNEL_CLASS_INFO classInfo;
2690     CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
2691     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
2692 
2693     // NV01_EVENT must have been plugged into this subdevice
2694     if (inotifyGetNotificationList(staticCast(pKernelChannel, INotifier)) == NULL)
2695     {
2696         NV_PRINTF(LEVEL_INFO, "cmd 0x%x: no event list\n", pRmCtrlParams->cmd);
2697         return NV_ERR_INVALID_STATE;
2698     }
2699 
2700     // get channel class-specific properties
2701     CliGetChannelClassInfo(REF_VAL(NVXXXX_CTRL_CMD_CLASS, pRmCtrlParams->cmd),
2702                            &classInfo);
2703 
2704     if (pSetEventParams->event >= classInfo.notifiersMaxCount)
2705     {
2706         NV_PRINTF(LEVEL_INFO, "bad event 0x%x\n", pSetEventParams->event);
2707         return NV_ERR_INVALID_ARGUMENT;
2708     }
2709 
2710     if ((pSetEventParams->action == classInfo.eventActionSingle) ||
2711         (pSetEventParams->action == classInfo.eventActionRepeat))
2712     {
2713         // must be in disabled state to transition to an active state
2714         if (pKernelChannel->pNotifyActions[pSetEventParams->event] != classInfo.eventActionDisable)
2715         {
2716             return NV_ERR_INVALID_STATE;
2717         }
2718 
2719         pKernelChannel->pNotifyActions[pSetEventParams->event] = pSetEventParams->action;
2720     }
2721     else if (pSetEventParams->action == classInfo.eventActionDisable)
2722     {
2723         pKernelChannel->pNotifyActions[pSetEventParams->event] = pSetEventParams->action;
2724     }
2725     else
2726     {
2727         return NV_ERR_INVALID_ARGUMENT;
2728     }
2729 
2730     return NV_OK;
2731 }
2732 
2733 NV_STATUS
2734 kchannelCtrlCmdGetClassEngineid_IMPL
2735 (
2736     KernelChannel *pKernelChannel,
2737     NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams
2738 )
2739 {
2740     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
2741     KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
2742     NV_STATUS status = NV_OK;
2743     RM_ENGINE_TYPE rmEngineType;
2744 
2745     //
2746     // MODS uses hObject 0 to figure out if this call is supported or not.
    // In the SRIOV VF scenario, the plugin asserts if the host returns an
    // error code for a control call. Adding a temporary workaround until
    // MODS submits a proper fix.
2750     //
2751     if (pParams->hObject == NV01_NULL_OBJECT)
2752     {
2753         return NV_ERR_OBJECT_NOT_FOUND;
2754     }
2755 
2756     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) ||
2757         (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
2758     {
2759         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
2760         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
2761 
2762         NV_RM_RPC_CONTROL(pGpu,
2763                           pRmCtrlParams->hClient,
2764                           RES_GET_HANDLE(pKernelChannel),
2765                           pRmCtrlParams->cmd,
2766                           pRmCtrlParams->pParams,
2767                           pRmCtrlParams->paramsSize,
2768                           status);
2769         return status;
2770     }
2771 
2772     NV_ASSERT_OK_OR_RETURN(
2773         kchannelGetClassEngineID_HAL(pGpu, pKernelChannel, pParams->hObject,
2774                                  &pParams->classEngineID,
2775                                  &pParams->classID,
2776                                  &rmEngineType));
2777 
2778     pParams->engineID = gpuGetNv2080EngineType(rmEngineType);
2779 
2780     if (IS_MIG_IN_USE(pGpu) &&
2781         kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, rmEngineType))
2782     {
2783         MIG_INSTANCE_REF ref;
2784         RM_ENGINE_TYPE localRmEngineType;
2785 
2786         NV_ASSERT_OK_OR_RETURN(
2787             kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
2788                                             GPU_RES_GET_DEVICE(pKernelChannel),
2789                                             &ref));
2790 
2791         NV_ASSERT_OK_OR_RETURN(
2792             kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref,
2793                                               rmEngineType,
2794                                               &localRmEngineType));
2795 
2796         NV_PRINTF(LEVEL_INFO, "Overriding global engine type 0x%x to local engine type 0x%x (0x%x) due to MIG\n",
2797                   pParams->engineID, gpuGetNv2080EngineType(localRmEngineType), localRmEngineType);
2798 
2799         pParams->engineID = gpuGetNv2080EngineType(localRmEngineType);
2800     }
2801 
2802     return status;
2803 }
2804 
2805 NV_STATUS
2806 kchannelCtrlCmdResetChannel_IMPL
2807 (
2808     KernelChannel *pKernelChannel,
2809     NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams
2810 )
2811 {
2812     NV_STATUS status    = NV_OK;
2813     OBJGPU   *pGpu      = GPU_RES_GET_GPU(pKernelChannel);
2814     CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
2815     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
2816 
2817     if (!(pRmCtrlParams->bInternal ||
2818           pResetChannelParams->resetReason <
2819               NV906F_CTRL_CMD_RESET_CHANNEL_REASON_ENUM_MAX))
2820     {
2821         return NV_ERR_INVALID_PARAMETER;
2822     }
2823 
2824     //
2825     // All real hardware management is done in the host.
2826     // Do an RPC to the host to do the hardware update and return.
2827     //
2828     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
2829     {
2830         NV_RM_RPC_CONTROL(pGpu,
2831                           pRmCtrlParams->hClient,
2832                           RES_GET_HANDLE(pKernelChannel),
2833                           NV906F_CTRL_CMD_RESET_CHANNEL,
2834                           pResetChannelParams,
2835                           pRmCtrlParams->paramsSize,
2836                           status);
2837         return status;
2838     }
2839 
2840     //
2841     // Do an internal control call to do channel reset
2842     // on Host (Physical) RM
2843     //
2844     return kchannelFwdToInternalCtrl_HAL(pGpu,
2845                                          pKernelChannel,
2846                                          NVA06F_CTRL_CMD_INTERNAL_RESET_CHANNEL,
2847                                          pRmCtrlParams);
2848 }
2849 
2850 //
2851 // channelCtrlCmdEventSetTrigger
2852 //
// This command handles set trigger operations for all kepler, maxwell, and
// pascal based gpfifo classes:
2855 //
2856 //    KEPLER_CHANNEL_GPFIFO_A  (Class: NVA06F)
2857 //    KEPLER_CHANNEL_GPFIFO_B  (Class: NVA16F)
2858 //    KEPLER_CHANNEL_GPFIFO_C  (Class: NVA26F)
2859 //    MAXWELL_CHANNEL_GPFIFO_A (Class: NVB06F)
2860 //    PASCAL_CHANNEL_GPFIFO_A  (Class: NVC06F)
2861 //
2862 NV_STATUS
2863 kchannelCtrlCmdEventSetTrigger_IMPL
2864 (
2865     KernelChannel *pKernelChannel
2866 )
2867 {
2868     kchannelNotifyGeneric(pKernelChannel, NVA06F_NOTIFIERS_SW, NULL, 0);
2869 
2870     return NV_OK;
2871 }
2872 
2873 NV_STATUS
2874 kchannelCtrlCmdGpFifoSchedule_IMPL
2875 (
2876     KernelChannel *pKernelChannel,
2877     NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams
2878 )
2879 {
2880     OBJGPU       *pGpu          = GPU_RES_GET_GPU(pKernelChannel);
2881     NV_STATUS     rmStatus      = NV_OK;
2882     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
2883     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
2884 
2885     //
2886     // Bug 1737765: Prevent Externally Owned Channels from running unless bound
2887     //  It is possible for clients to allocate and schedule channels while
2888     //  skipping the UVM registration step which binds the appropriate
2889     //  allocations in RM. We need to fail channel scheduling if the channels
2890     //  have not been registered with UVM.
2891     //  This check is performed on baremetal, CPU-RM and guest-RM
2892     //
2893     NV_ASSERT_OR_RETURN(kchannelIsSchedulable_HAL(pGpu, pKernelChannel), NV_ERR_INVALID_STATE);
2894 
2895     //
2896     // If this was a host-only channel we'll have never set the runlist id, so
2897     // force it here to ensure it is immutable now that the channel is scheduled.
2898     //
2899     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
2900     kchannelSetRunlistSet(pGpu, pKernelChannel, NV_TRUE);
2901     SLI_LOOP_END
2902 
2903 
2904     //
2905     // All real hardware management is done in the host.
2906     // Do an RPC to the host to do the hardware update and return.
2907     //
2908     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
2909     {
2910 
2911         NV_RM_RPC_CONTROL(pGpu,
2912                           RES_GET_CLIENT_HANDLE(pKernelChannel),
2913                           RES_GET_HANDLE(pKernelChannel),
2914                           NVA06F_CTRL_CMD_GPFIFO_SCHEDULE,
2915                           pRmCtrlParams->pParams,
2916                           pRmCtrlParams->paramsSize,
2917                           rmStatus);
2918 
2919         return rmStatus;
2920     }
2921 
2922     //
    // Do an internal control call to schedule the GPFIFO
    // on Host (Physical) RM
2925     //
2926     return kchannelFwdToInternalCtrl_HAL(pGpu,
2927                                          pKernelChannel,
2928                                          NVA06F_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE,
2929                                          pRmCtrlParams);
2930 }
2931 
2932 NV_STATUS
2933 kchannelCtrlCmdGetEngineCtxSize_IMPL
2934 (
2935     KernelChannel *pKernelChannel,
2936     NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS *pCtxSizeParams
2937 )
2938 {
2939     return NV_ERR_NOT_SUPPORTED;
2940 }
2941 
2942 NV_STATUS
2943 kchannelCtrlCmdSetErrorNotifier_IMPL
2944 (
2945     KernelChannel *pKernelChannel,
2946     NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS *pSetErrorNotifierParams
2947 )
2948 {
2949     OBJGPU   *pGpu = GPU_RES_GET_GPU(pKernelChannel);
2950     RC_NOTIFIER_SCOPE scope;
2951     NV_STATUS rmStatus = NV_OK;
2952 
2953     NV_PRINTF(LEVEL_INFO,
2954               "calling setErrorNotifier on channel: 0x%x, broadcast to TSG: %s\n",
2955               kchannelGetDebugTag(pKernelChannel),
2956               pSetErrorNotifierParams->bNotifyEachChannelInTSG ? "true" : "false");
2957 
2958     scope = pSetErrorNotifierParams->bNotifyEachChannelInTSG ?
2959                 RC_NOTIFIER_SCOPE_TSG :
2960                 RC_NOTIFIER_SCOPE_CHANNEL;
2961 
2962     rmStatus = krcErrorSetNotifier(pGpu, GPU_GET_KERNEL_RC(pGpu),
2963                                    pKernelChannel,
2964                                    ROBUST_CHANNEL_GR_ERROR_SW_NOTIFY,
2965                                    kchannelGetEngineType(pKernelChannel),
2966                                    scope);
2967     return rmStatus;
2968 }
2969 
2970 NV_STATUS
2971 kchannelCtrlCmdBind_IMPL
2972 (
2973     KernelChannel *pKernelChannel,
2974     NVA06F_CTRL_BIND_PARAMS *pParams
2975 )
2976 {
2977     RM_ENGINE_TYPE globalRmEngineType;
2978     RM_ENGINE_TYPE localRmEngineType;
2979     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
2980     NvBool bMIGInUse = IS_MIG_IN_USE(pGpu);
2981     NV_STATUS rmStatus = NV_OK;
2982     ENGDESCRIPTOR engineDesc;
2983 
2984     if (!pParams)
2985         return NV_ERR_INVALID_ARGUMENT;
2986 
2987     // Check if channel belongs to TSG that is not internal RM TSG
2988     if (!pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm)
2989     {
        // This may be a valid request if we added a new channel to a TSG that
        // is already running. In that case we just have to check that it uses
        // the same runlist as the whole TSG; we do that in fifoRunlistSetId().
2994         NV_PRINTF(LEVEL_INFO,
2995                   "Bind requested for channel %d belonging to TSG %d.\n",
2996                   kchannelGetDebugTag(pKernelChannel),
2997                   pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->grpID);
2998     }
2999 
3000     localRmEngineType = globalRmEngineType = gpuGetRmEngineType(pParams->engineType);
3001 
3002     if (bMIGInUse)
3003     {
3004         KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
3005         MIG_INSTANCE_REF ref;
3006 
3007         NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
3008             kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
3009                                             GPU_RES_GET_DEVICE(pKernelChannel),
3010                                             &ref));
3011 
3012         NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
3013             kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, localRmEngineType,
3014                                               &globalRmEngineType));
3015 
3016     }
3017 
3018     NV_PRINTF(LEVEL_INFO, "Binding Channel %d to Engine %d\n",
3019               kchannelGetDebugTag(pKernelChannel), globalRmEngineType);
3020 
3021     // Translate globalRmEngineType -> enginedesc
3022     NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
3023         gpuXlateClientEngineIdToEngDesc(pGpu, globalRmEngineType, &engineDesc));
3024 
3025     if (rmStatus == NV_OK)
3026     {
3027         NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
3028             kchannelBindToRunlist(pKernelChannel, localRmEngineType, engineDesc));
3029     }
3030 
3031     return rmStatus;
3032 }
3033 
3034 NV_STATUS
3035 kchannelCtrlCmdSetInterleaveLevel_IMPL
3036 (
3037     KernelChannel *pKernelChannel,
3038     NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams
3039 )
3040 {
3041     OBJGPU          *pGpu         = GPU_RES_GET_GPU(pKernelChannel);
3042     NV_STATUS        status       = NV_OK;
3043 
3044     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
3045     {
3046         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
3047         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3048 
3049         NV_RM_RPC_CONTROL(pGpu,
3050                           RES_GET_CLIENT_HANDLE(pKernelChannel),
3051                           RES_GET_HANDLE(pKernelChannel),
3052                           pRmCtrlParams->cmd,
3053                           pRmCtrlParams->pParams,
3054                           pRmCtrlParams->paramsSize,
3055                           status);
3056         NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, NV_ERR_NOT_SUPPORTED);
3057     }
3058 
3059     status = kchangrpSetInterleaveLevel(pGpu, pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup, pParams->channelInterleaveLevel);
3060 
3061     return status;
3062 }
3063 
3064 NV_STATUS
3065 kchannelCtrlCmdGetInterleaveLevel_IMPL
3066 (
3067     KernelChannel *pKernelChannel,
3068     NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams
3069 )
3070 {
3071     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3072 
3073     pParams->channelInterleaveLevel =
3074         pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pInterleaveLevel[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3075 
3076     return NV_OK;
3077 }
3078 
3079 NV_STATUS
3080 kchannelCtrlCmdGpfifoGetWorkSubmitToken_IMPL
3081 (
3082     KernelChannel *pKernelChannel,
3083     NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *pTokenParams
3084 )
3085 {
3086     NV_STATUS     rmStatus      = NV_OK;
3087     OBJGPU       *pGpu          = GPU_RES_GET_GPU(pKernelChannel);
3088     KernelFifo   *pKernelFifo   = GPU_GET_KERNEL_FIFO(pGpu);
3089     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
3090     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3091     NvBool bIsMIGEnabled        = IS_MIG_ENABLED(pGpu);
3092 
3093     NvBool bIsModsVgpu          = NV_FALSE;
3094 
3095     NvBool bIsVgpuRpcNeeded     = bIsModsVgpu || (IS_VIRTUAL(pGpu) &&
3096                                   !(IS_VIRTUAL_WITH_SRIOV(pGpu) && !bIsMIGEnabled &&
3097                                     kfifoIsPerRunlistChramEnabled(pKernelFifo)));
3098     //
3099     // vGPU:
3100     //
    // Since the host is taking care of channel allocations for the guest,
    // we must call into the host to get the work submit token. This should
    // go away once the guest starts managing its own channels.
3104     //
3105     // RPC not needed for SR-IOV vGpu
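    //
    // Put differently (as implied by the expression computed above): the RPC
    // is taken for any non-SR-IOV virtual guest, and for an SR-IOV guest only
    // when MIG is enabled or per-runlist channel RAM is not supported.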
3106     //
3107     if (bIsVgpuRpcNeeded)
3108     {
3109         NV_RM_RPC_CONTROL(pGpu,
3110                           pRmCtrlParams->hClient,
3111                           RES_GET_HANDLE(pKernelChannel),
3112                           pRmCtrlParams->cmd,
3113                           pRmCtrlParams->pParams,
3114                           pRmCtrlParams->paramsSize,
3115                           rmStatus);
3116         //
3117         // All done if error or for non-MODS vGPU guest (host did notification in RPC).
3118         // GSP FW is not able to perform the notification, nor is MODS vGPU host,
3119         // so it still needs to be handled by the client/guest outside the RPC.
3120         //
3121         if (rmStatus != NV_OK)
3122         {
3123             return rmStatus;
3124         }
3125 
3126         if (IS_VIRTUAL(pGpu))
3127         {
3128             return rmStatus;
3129         }
3130     }
3131 
3132     //
3133     // For GSP client or MODS vGPU guest, pTokenParams->workSubmitToken already filled by RPC.
3134     // For baremetal RM, generate it here.
3135     //
3136     if (!bIsModsVgpu)
3137     {
3138         NV_ASSERT_OR_RETURN(pKernelChannel->pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE);
3139         NV_ASSERT_OR_RETURN(pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup != NULL, NV_ERR_INVALID_STATE);
3140         rmStatus = kfifoGenerateWorkSubmitToken_HAL(pGpu, pKernelFifo, pKernelChannel,
3141                                                     &pTokenParams->workSubmitToken,
3142                                                     pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bIsCallingContextVgpuPlugin);
3143         NV_CHECK_OR_RETURN(LEVEL_INFO, rmStatus == NV_OK, rmStatus);
3144     }
3145 
3146     rmStatus = kchannelNotifyWorkSubmitToken(pGpu, pKernelChannel, pTokenParams->workSubmitToken);
3147     return rmStatus;
3148 }
3149 
3150 NV_STATUS
3151 kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex_IMPL
3152 (
3153     KernelChannel *pKernelChannel,
3154     NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *pParams
3155 )
3156 {
3157     NV_STATUS   rmStatus    = NV_OK;
3158     OBJGPU     *pGpu        = GPU_RES_GET_GPU(pKernelChannel);
3159 
3160     //
3161     // vGPU:
3162     //
    // Since the vGPU plugin is required to update the notifier for the guest,
    // send an RPC to host RM so that the plugin can hook it.
3165     // RPC not needed for SR-IOV vGpu.
3166     //
3167     // GSP-RM:
3168     //
3169     // Notification is done in CPU-RM, so RPC is not made to FW-RM.
3170     //
3171     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
3172     NvBool bIsMIGEnabled    = IS_MIG_ENABLED(pGpu);
3173     NvBool bIsVgpuRpcNeeded = IS_VIRTUAL(pGpu) &&
3174                               !(IS_VIRTUAL_WITH_SRIOV(pGpu) && !bIsMIGEnabled &&
3175                                 kfifoIsPerRunlistChramEnabled(pKernelFifo));
3176     if (bIsVgpuRpcNeeded)
3177     {
3178         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
3179         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3180 
3181         NV_RM_RPC_CONTROL(pGpu,
3182                           pRmCtrlParams->hClient,
3183                           RES_GET_HANDLE(pKernelChannel),
3184                           pRmCtrlParams->cmd,
3185                           pRmCtrlParams->pParams,
3186                           pRmCtrlParams->paramsSize,
3187                           rmStatus);
3188         return rmStatus;
3189     }
3190 
3191     rmStatus = kchannelUpdateWorkSubmitTokenNotifIndex(pGpu, pKernelChannel, pParams->index);
3192     return rmStatus;
3193 }
3194 
3195 NV_STATUS
3196 kchannelRegisterChild_IMPL
3197 (
3198     KernelChannel     *pKernelChannel,
3199     ChannelDescendant *pObject
3200 )
3201 {
3202     NvU16 firstObjectClassID;
3203     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3204     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
3205 
3206     //
3207     // On recent GPU architectures such as FERMI, SetObject operations
3208     // require an EngineID:ClassID tuple as an argument, rather than
3209     // an object handle. In order to be able to differentiate between
3210     // different instances of any given software class, the ClassID
3211     // field needs to be unique within the FIFO context. The code below
3212     // attempts to find a qualifying 16-bit ClassID.
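    //
    // Sketch of the search below: starting from nextObjectClassID + 1, each
    // candidate ID is checked against every existing ENG_SW descendant of the
    // channel, and the first ID with no match is used.  Wrapping back around
    // to the starting value (firstObjectClassID) means every 16-bit ID is in
    // use.  ID 0 is skipped, since a classID of 0 is treated elsewhere in
    // this file as "no class filter".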
3213     //
3214     if (pObject->resourceDesc.engDesc == ENG_SW)
3215     {
3216         RS_ORDERED_ITERATOR it;
3217         RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3218         ChannelDescendant *pMatchingObject = NULL;
3219 
3220         firstObjectClassID = pKernelChannel->nextObjectClassID;
3221 
3222         do
3223         {
3224             if (++pKernelChannel->nextObjectClassID == firstObjectClassID)
3225             {
3226                 NV_PRINTF(LEVEL_ERROR, "channel %08x:%08x: out of handles!\n",
3227                           RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_HANDLE(pKernelChannel));
3228                 return NV_ERR_INSUFFICIENT_RESOURCES;
3229             }
3230             if (pKernelChannel->nextObjectClassID == 0)
3231                 continue;
3232 
3233             it = clientRefOrderedIter(pClient, RES_GET_REF(pKernelChannel), classId(ChannelDescendant), NV_FALSE);
3234 
3235             while (clientRefOrderedIterNext(pClient, &it))
3236             {
3237                 pMatchingObject = dynamicCast(it.pResourceRef->pResource, ChannelDescendant);
3238                 NV_ASSERT_OR_ELSE(pMatchingObject != NULL, continue);
3239 
3240                 if ((pMatchingObject->resourceDesc.engDesc == ENG_SW) &&
3241                     (pMatchingObject->classID == pKernelChannel->nextObjectClassID))
3242                 {
3243                     break;
3244                 }
3245 
3246                 pMatchingObject = NULL;
3247             }
3248         }
3249         while (pMatchingObject != NULL);
3250 
3251         pObject->classID = pKernelChannel->nextObjectClassID;
3252     }
3253 
3254     return kfifoAddObject_HAL(pGpu, pKernelFifo, pObject);
3255 }
3256 
3257 NV_STATUS
3258 kchannelDeregisterChild_IMPL
3259 (
3260     KernelChannel     *pKernelChannel,
3261     ChannelDescendant *pObject
3262 )
3263 {
3264     NV_STATUS status = NV_OK;
3265     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3266     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
3267 
3268     status = kfifoDeleteObject_HAL(pGpu, pKernelFifo, pObject);
3269     if (status != NV_OK)
3270     {
3271         NV_PRINTF(LEVEL_ERROR, "Could not delete hal resources with object\n");
3272         DBG_BREAKPOINT();
3273     }
3274 
3275     return status;
3276 }
3277 
3278 void
3279 kchannelGetChildIterator
3280 (
3281     KernelChannel *pKernelChannel,
3282     NvU32 classID,
3283     NvU32 engineID,
3284     KernelChannelChildIterator *pIter
3285 )
3286 {
3287     RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3288     NV_ASSERT_OR_RETURN_VOID(pIter != NULL);
3289 
3290     portMemSet(pIter, 0, sizeof(*pIter));
3291     pIter->classID = classID;
3292     pIter->engineID = engineID;
3293     pIter->rsIter = clientRefOrderedIter(pClient, RES_GET_REF(pKernelChannel), classId(ChannelDescendant), NV_FALSE);
3294 }
3295 
3296 ChannelDescendant *
3297 kchannelGetNextChild
3298 (
3299     KernelChannelChildIterator *pIter
3300 )
3301 {
3302     ChannelDescendant *pChild;
3303 
3304     NV_ASSERT_OR_RETURN(pIter != NULL, NULL);
3305 
3306     while (clientRefOrderedIterNext(pIter->rsIter.pClient, &pIter->rsIter))
3307     {
3308         pChild = dynamicCast(pIter->rsIter.pResourceRef->pResource, ChannelDescendant);
3309         NV_ASSERT_OR_RETURN(pChild != NULL, NULL);
3310 
3311         // Continue to the next child if it doesn't match these filters:
3312         if (pIter->engineID != pChild->resourceDesc.engDesc)
3313             continue;
3314         if (pIter->classID != 0)
3315         {
3316             if ((RES_GET_EXT_CLASS_ID(pChild) != pIter->classID) &&
3317                 (pChild->classID != pIter->classID))
3318                 continue;
3319         }
3320 
3321         // Yield this matching child
3322         return pChild;
3323     }
3324 
3325     return NULL;
3326 }
3327 
3328 ChannelDescendant *
3329 kchannelGetOneChild
3330 (
3331     KernelChannel *pKernelChannel,
3332     NvU32          classID,
3333     NvU32          engineID
3334 )
3335 {
3336     KernelChannelChildIterator iter;
3337 
3338     kchannelGetChildIterator(pKernelChannel, classID, engineID, &iter);
3339     return kchannelGetNextChild(&iter);
3340 }
3341 
3342 /**
3343  * @brief Gets object iterator for a channel or channel group
3344  *
3345  * @param[in] pKernelChannel
3346  * @param[in] classNum
3347  * @param[in] engDesc
3348  * @param[out] pIt
3349  *
3350  */
3351 void
3352 kchannelGetChildIterOverGroup
3353 (
3354     KernelChannel                   *pKernelChannel,
3355     NvU32                            classNum,
3356     NvU32                            engDesc,
3357     KernelChannelChildIterOverGroup *pIt
3358 )
3359 {
3360     NV_ASSERT_OR_RETURN_VOID(pIt != NULL);
3361     portMemSet(pIt, 0, sizeof(*pIt));
3362 
3363     NV_ASSERT_OR_RETURN_VOID(pKernelChannel != NULL);
3364 
3365     pIt->classNum = classNum;
3366     pIt->engDesc = engDesc;
3367 
3368     pIt->channelNode.pKernelChannel =
3369         pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pKernelChannel;
3370     pIt->channelNode.pNext =
3371         pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pNext;
3372 
3373     kchannelGetChildIterator(pIt->channelNode.pKernelChannel, pIt->classNum, pIt->engDesc, &pIt->kchannelIter);
3374 }
3375 
3376 /**
3377  * @brief Get the next object based on given class/engine tag.
3378  * When the class number is 0, it is ignored.
3379  *
3380  * @param[in] pIt
3381  *
3382  * Returns: found child or NULL
3383  *
3384  */
3385 ChannelDescendant *
3386 kchannelGetNextChildOverGroup
3387 (
3388     KernelChannelChildIterOverGroup *pIt
3389 )
3390 {
3391     PCHANNEL_NODE pHead = NULL;
3392     ChannelDescendant *pObject = NULL;
3393 
3394     NV_ASSERT_OR_RETURN(pIt != NULL, NULL);
3395 
3396     // Start iterating from the given object (if any) of the given channel.
3397     pHead = &pIt->channelNode;
3398 
3399     while ((pHead != NULL) && (pHead->pKernelChannel != NULL))
3400     {
3401         pObject = kchannelGetNextChild(&pIt->kchannelIter);
3402 
3403         if (pObject != NULL)
3404             break;
3405 
3406         //
3407         // If there are no more objects to inspect in the given channel,
3408         // move to the next channel (if any, for TSGs).
3409         //
3410         pHead = pHead->pNext;
3411         if (pHead != NULL)
3412         {
3413             NV_ASSERT_OR_ELSE(pHead->pKernelChannel != NULL, break);
3414             // Re-initialize the channeldescendant iterator based on this channel
3415             kchannelGetChildIterator(pHead->pKernelChannel, pIt->classNum, pIt->engDesc, &pIt->kchannelIter);
3416         }
3417     }
3418 
3419     // Cache off the next channel to start searching from in future iterations.
3420     pIt->channelNode.pKernelChannel = pHead ? pHead->pKernelChannel : NULL;
3421     pIt->channelNode.pNext = pHead ? pHead->pNext : NULL;
3422 
3423     return pObject;
3424 }
3425 
3426 NV_STATUS
3427 kchannelFindChildByHandle
3428 (
3429     KernelChannel *pKernelChannel,
3430     NvHandle hResource,
3431     ChannelDescendant **ppObject
3432 )
3433 {
3434     RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3435     RsResourceRef *pResourceRef = NULL;
3436 
3437     NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, clientGetResourceRef(pClient, hResource, &pResourceRef));
3438 
3439     NV_CHECK_OR_RETURN(LEVEL_ERROR, pResourceRef->pParentRef->hResource == RES_GET_HANDLE(pKernelChannel), NV_ERR_OBJECT_NOT_FOUND);
3440 
3441     *ppObject = dynamicCast(pResourceRef->pResource, ChannelDescendant);
3442     NV_CHECK_OR_RETURN(LEVEL_ERROR, *ppObject != NULL, NV_ERR_OBJECT_NOT_FOUND);
3443 
3444     return NV_OK;
3445 }
3446 
3447 static NV_STATUS
3448 _kchannelClearVAList
3449 (
3450     OBJGPU          *pGpu,
3451     VA_LIST         *pVaList,
3452     NvBool           bUnmap
3453 )
3454 {
3455     //
3456     // Subcontext handling
    // We need to unmap the mappings on all the subcontexts, since this call
    // will be made on only one of the TSG channels.
3458     //
3459     if (bUnmap)
3460     {
3461         OBJVASPACE *pVas;
3462         NvU64 vAddr;
3463 
3464         FOR_EACH_IN_VADDR_LIST(pVaList, pVas, vAddr)
3465         {
3466             dmaUnmapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVas, vAddr);
3467         }
3468         FOR_EACH_IN_VADDR_LIST_END(pVaList, pVas, vAddr);
3469     }
3470 
3471     vaListClear(pVaList);
3472 
3473     return NV_OK;
3474 }
3475 
3476 /**
3477  * @brief Set or clear the Engine Context Memdesc.
3478  *
3479  * Should be committed to hardware after this using channelCommitEngineContext().
 * Should be unmapped via kchannelUnmapEngineCtxBuf() before being cleared or changed.
3481  *
3482  * @param[in] pGpu
3483  * @param[in] pKernelChannel
3484  * @param[in] engDesc
3485  * @param[in] pMemDesc                the new memdesc to assign, or NULL to clear
3486  *
3487  * Returns: status
3488  */
3489 NV_STATUS
3490 kchannelSetEngineContextMemDesc_IMPL
3491 (
3492     OBJGPU             *pGpu,
3493     KernelChannel      *pKernelChannel,
3494     NvU32               engDesc,
3495     MEMORY_DESCRIPTOR  *pMemDesc
3496 )
3497 {
3498     NV_STATUS status = NV_OK;
3499     ENGINE_CTX_DESCRIPTOR *pEngCtxDesc;
3500     KernelChannelGroup *pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup;
3501 
3502     NV_PRINTF(LEVEL_INFO,
3503               "ChID %x engDesc 0x%x pMemDesc %p\n",
3504               kchannelGetDebugTag(pKernelChannel), engDesc, pMemDesc);
3505 
3506     NV_ASSERT_OR_RETURN(engDesc != ENG_FIFO, NV_ERR_INVALID_PARAMETER);
3507 
3508     if (IS_GR(engDesc))
3509     {
3510         NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel));
3511     }
3512 
3513     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
3514 
3515     // Get or allocate the EngCtxDesc
3516     pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3517 
3518     if (pEngCtxDesc == NULL && pMemDesc == NULL)
3519     {
3520         // There is no need to clean up or alloc anything.
3521         SLI_LOOP_CONTINUE;
3522     }
3523 
3524     if (pEngCtxDesc != NULL)
3525     {
3526         // Cleanup for the engDesc context that existed before
3527         if (pEngCtxDesc->pMemDesc != NULL)
3528         {
3529             memdescFree(pEngCtxDesc->pMemDesc);
3530             memdescDestroy(pEngCtxDesc->pMemDesc);
3531         }
3532 
3533         //
3534     }
3535     else
3536     {
3537         NV_ASSERT_OK_OR_ELSE(status,
3538             kchangrpAllocEngineContextDescriptor(pGpu, pKernelChannelGroup),
3539             SLI_LOOP_GOTO(fail));
3540         pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3541         NV_ASSERT_OR_ELSE(pEngCtxDesc != NULL, status = NV_ERR_NO_MEMORY; SLI_LOOP_GOTO(fail));
3542     }
3543 
3544     if (pMemDesc != NULL)
3545     {
3546         // We are setting a memdesc
3547         if (pMemDesc->Allocated > 0)
3548             pMemDesc->Allocated++;
3549         memdescAddRef(pMemDesc);
3550 
3551         if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL)
3552         {
3553             NvU64 virtAddr;
3554 
3555             // Since the memdesc is already virtual, we do not manage it
3556             status = vaListSetManaged(&pEngCtxDesc->vaList, NV_FALSE);
3557             NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail));
3558 
3559             // memdescGetPhysAddr of a virtual memdesc is a virtual addr
3560             virtAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0);
3561             status = vaListAddVa(&pEngCtxDesc->vaList, pKernelChannel->pVAS, virtAddr);
3562             NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail));
3563         }
3564     }
3565 
3566     // Assign the memdesc (or NULL)
3567     pEngCtxDesc->pMemDesc = pMemDesc;
3568     pEngCtxDesc->engDesc = engDesc;
3569 
3570     SLI_LOOP_END
3571 
3572 fail:
3573     return status;
3574 }
3575 
3576 /**
3577  * @brief Unmaps everything from the Engine Context Memdesc.
3578  *
3579  * @param[in] pGpu
3580  * @param[in] pKernelChannel
3581  * @param[in] engDesc
3582  *
3583  * Returns: status
3584  */
3585 NV_STATUS
3586 kchannelUnmapEngineCtxBuf_IMPL
3587 (
3588     OBJGPU             *pGpu,
3589     KernelChannel      *pKernelChannel,
3590     NvU32               engDesc
3591 )
3592 {
3593     NV_STATUS status = NV_OK;
3594     ENGINE_CTX_DESCRIPTOR *pEngCtxDesc;
3595 
3596     NV_PRINTF(LEVEL_INFO,
3597               "ChID %x engDesc 0x%x\n",
3598               kchannelGetDebugTag(pKernelChannel), engDesc);
3599 
3600     NV_ASSERT_OR_RETURN(engDesc != ENG_FIFO, NV_ERR_INVALID_PARAMETER);
3601 
3602     if (IS_GR(engDesc))
3603     {
3604         NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel));
3605     }
3606 
3607     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
3608     pEngCtxDesc = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3609 
    // If the EngCtxDesc or its MemDesc is missing, nothing was ever mapped, so there is nothing to unmap
3611     if ((pEngCtxDesc == NULL) || (pEngCtxDesc->pMemDesc == NULL))
3612     {
3613         SLI_LOOP_CONTINUE;
3614     }
3615 
3616     // Clear VA list, including unmap if managed
3617     status = _kchannelClearVAList(pGpu, &pEngCtxDesc->vaList, vaListGetManaged(&pEngCtxDesc->vaList));
3618     NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail));
3619 
3620     SLI_LOOP_END
3621 
3622 fail:
3623     return status;
3624 }
3625 
3626 // Check that BcState stays consistent for GR channel engine context
3627 NV_STATUS
3628 kchannelCheckBcStateCurrent_IMPL
3629 (
3630     OBJGPU        *pGpu,
3631     KernelChannel *pKernelChannel
3632 )
3633 {
3634 #define KERNEL_CHANNEL_BCSTATE_UNINITIALIZED (0)
3635 #define KERNEL_CHANNEL_BCSTATE_DISABLED (1)
3636 #define KERNEL_CHANNEL_BCSTATE_ENABLED (2)
3637 
3638     NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu);
3639     NvU8   channelBcStateEnum = bBcState ? KERNEL_CHANNEL_BCSTATE_ENABLED : KERNEL_CHANNEL_BCSTATE_DISABLED;
3640 
3641     NV_PRINTF(
3642         LEVEL_INFO,
3643         "GPU = %d, ChID = %d, bcStateCurrent = %d, channelBcStateEnum = %d\n",
3644         pGpu->gpuInstance,
3645         kchannelGetDebugTag(pKernelChannel),
3646         pKernelChannel->bcStateCurrent,
3647         channelBcStateEnum);
3648 
3649     // Check that the BC status did not change - 0 = first call, 1 = disable, 2 = enable.
3650     if (pKernelChannel->bcStateCurrent == KERNEL_CHANNEL_BCSTATE_UNINITIALIZED)
3651     {
3652         pKernelChannel->bcStateCurrent = channelBcStateEnum;
3653     }
3654     NV_ASSERT_OR_RETURN(pKernelChannel->bcStateCurrent == channelBcStateEnum, NV_ERR_INVALID_STATE);
3655 
3656     return NV_OK;
3657 }
3658 
// Map the Engine Context Memdesc and add its VAddr
3660 NV_STATUS
3661 kchannelMapEngineCtxBuf_IMPL
3662 (
3663     OBJGPU      *pGpu,
3664     KernelChannel *pKernelChannel,
3665     NvU32        engDesc
3666 )
3667 {
3668     OBJVASPACE            *pVAS           = NULL;
3669     NV_STATUS              status         = NV_OK;
3670     ENGINE_CTX_DESCRIPTOR *pEngCtx;
3671     NvU64                  addr;
3672     MEMORY_DESCRIPTOR     *pTempMemDesc;
3673     OBJGVASPACE           *pGVAS;
3674     KernelFifo            *pKernelFifo    = GPU_GET_KERNEL_FIFO(pGpu);
3675 
3676     NV_ASSERT_OR_RETURN(engDesc != ENG_FIFO, NV_ERR_INVALID_ARGUMENT);
3677 
3678     if (IS_GR(engDesc))
3679     {
3680         NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel));
3681     }
3682 
3683     NV_PRINTF(LEVEL_INFO, "ChID %d engDesc %s (0x%x) \n",
3684               kchannelGetDebugTag(pKernelChannel),
3685               kfifoGetEngineName_HAL(GPU_GET_KERNEL_FIFO(pGpu), ENGINE_INFO_TYPE_ENG_DESC, engDesc),
3686               engDesc);
3687 
3688     pVAS = pKernelChannel->pVAS;
3689     pGVAS = dynamicCast(pVAS, OBJGVASPACE);
3690     NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_INVALID_STATE);
3691 
3692     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
3693 
3694     pEngCtx = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3695     NV_ASSERT_OR_ELSE(pEngCtx != NULL, status = NV_ERR_INVALID_STATE; goto fail);
3696 
3697     pTempMemDesc = pEngCtx->pMemDesc;
3698     NV_ASSERT_OR_ELSE(pTempMemDesc != NULL, status = NV_ERR_INVALID_STATE; goto fail);
3699 
3700     //
    // For a virtual context, the UMD has already allocated/mapped the engine
    // context, so simply get the vaddr.
3703     //
3704 
3705     status = vaListFindVa(&pEngCtx->vaList, pVAS, &addr);
3706     if (status == NV_OK)
3707     {
3708         // VAddr already exists and needs no action
3709         SLI_LOOP_CONTINUE;
3710     }
3711     else if (status == NV_ERR_OBJECT_NOT_FOUND)
3712     {
3713         NvU32 flags = DMA_ALLOC_VASPACE_NONE;
3714         if (gvaspaceIsExternallyOwned(pGVAS))
3715         {
            // We should never end up here if the VA space is externally owned!
3717             NV_ASSERT_FAILED("Externally owned object not found");
3718             status = NV_ERR_INVALID_OPERATION;
3719             goto fail;
3720         }
3721 
3722         kfifoGetCtxBufferMapFlags_HAL(pGpu, pKernelFifo, engDesc, &flags);
3723 
3724         status = dmaMapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, pTempMemDesc, &addr,
3725             flags, DMA_UPDATE_VASPACE_FLAGS_NONE);
3726         if (status != NV_OK)
3727         {
3728             NV_PRINTF(LEVEL_ERROR,
3729                       "Could not map context buffer for engDesc 0x%x\n",
3730                       engDesc);
3731             goto fail;
3732         }
3733         else
3734         {
3735             status = vaListAddVa(&pEngCtx->vaList, pVAS, addr);
3736             NV_ASSERT(status == NV_OK);
3737         }
3738     }
3739     else
3740     {
3741         NV_ASSERT_OK_FAILED("vaListFindVa", status);
3742         goto fail;
3743     }
3744 
3745 fail:
3746     if (status != NV_OK)
3747     {
3748         SLI_LOOP_BREAK;
3749     }
3750     SLI_LOOP_END
3751 
3752     return status;
3753 }
3754 
3755 /**
3756  * @brief Updates the notifier index with which to update the work submit
3757  *        notifier on request.
3758  *
 * @param[in] pGpu              OBJGPU
3760  * @param[in] pKernelChannel    KernelChannel
3761  * @param[in] index             Updated notifier index
3762  *
3763  * @return NV_OK
3764  *         NV_ERR_OUT_OF_RANGE if index is beyond the bounds of the notifier
3765  */
3766 NV_STATUS
3767 kchannelUpdateWorkSubmitTokenNotifIndex_IMPL
3768 (
3769     OBJGPU *pGpu,
3770     KernelChannel *pKernelChannel,
3771     NvU32 index
3772 )
3773 {
3774     NvHandle hNotifier;
3775     RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3776     Memory *pMemory;
3777     ContextDma *pContextDma;
3778     NvU32 addressSpace;
3779     NvU64 notificationBufferSize;
3780     Device *pDevice;
3781 
3782     hNotifier = pKernelChannel->hErrorContext;
3783 
3784     // Clobbering error notifier index is illegal
3785     NV_CHECK_OR_RETURN(LEVEL_INFO, index != NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR,
3786                      NV_ERR_INVALID_ARGUMENT);
3787 
3788     // Check for integer overflows
3789     if (((index + 1) < index) ||
3790         !portSafeMulU64(index + 1, sizeof(NvNotification), &notificationBufferSize))
3791     {
3792         return NV_ERR_OUT_OF_RANGE;
3793     }
3794 
3795     pDevice = GPU_RES_GET_DEVICE(pKernelChannel);
3796 
3797     if (NV_OK == memGetByHandleAndDevice(pClient, hNotifier, RES_GET_HANDLE(pDevice), &pMemory))
3798     {
3799         addressSpace = memdescGetAddressSpace(pMemory->pMemDesc);
3800 
3801         NV_CHECK_OR_RETURN(LEVEL_INFO, pMemory->Length >= notificationBufferSize,
3802                          NV_ERR_OUT_OF_RANGE);
3803         switch (addressSpace)
3804         {
3805             case ADDR_VIRTUAL:
3806             {
3807                 NvU64 physAddr = memdescGetPhysAddr(pMemory->pMemDesc, AT_GPU_VA, 0);
3808                 PCLI_DMA_MAPPING_INFO pDmaMappingInfo;
3809 
3810                 NV_CHECK_OR_RETURN(LEVEL_INFO,
3811                     CliGetDmaMappingInfo(pClient,
3812                                          RES_GET_HANDLE(pDevice),
3813                                          RES_GET_HANDLE(pMemory),
3814                                          physAddr,
3815                                          gpumgrGetDeviceGpuMask(pGpu->deviceInstance),
3816                                          &pDmaMappingInfo),
3817                     NV_ERR_GENERIC);
3818 
3819                 NV_CHECK_OR_RETURN(LEVEL_INFO, pDmaMappingInfo->pMemDesc->Size >= notificationBufferSize,
3820                                  NV_ERR_OUT_OF_RANGE);
3821                 break;
3822             }
3823             case ADDR_FBMEM:
3824                 // fall through
3825             case ADDR_SYSMEM:
3826                 // Covered by check prior to switch/case
3827                 break;
3828             default:
3829                 return NV_ERR_NOT_SUPPORTED;
3830         }
3831     }
3832     else if (NV_OK == ctxdmaGetByHandle(pClient, hNotifier, &pContextDma))
3833     {
3834         NV_CHECK_OR_RETURN(LEVEL_INFO, pContextDma->Limit >= (notificationBufferSize - 1),
3835                          NV_ERR_OUT_OF_RANGE);
3836     }
3837     else
3838     {
3839         return NV_ERR_OBJECT_NOT_FOUND;
3840     }
3841 
3842     pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN]
3843         = index;
3844 
3845     return NV_OK;
3846 }
3847 
3848 /**
3849  * @brief Updates the work submit notifier passed to the channel during channel
3850  *        creation with the new work submit token.
3851  *
 * @param[in] pGpu              OBJGPU
3853  * @param[in] pKernelChannel    KernelChannel
3854  * @param[in] token             Work submit token to notify clients of
3855  *
3856  * @return NV_OK on successful notify
3857  *         NV_OK if client has not set up the doorbell notifier. This should
3858  *         be an error once all clients have been updated.
3859  */
3860 NV_STATUS
3861 kchannelNotifyWorkSubmitToken_IMPL
3862 (
3863     OBJGPU *pGpu,
3864     KernelChannel *pKernelChannel,
3865     NvU32 token
3866 )
3867 {
3868     MEMORY_DESCRIPTOR *pNotifierMemDesc = pKernelChannel->pErrContextMemDesc;
3869     NV_ADDRESS_SPACE addressSpace;
3870     NvU16 notifyStatus = 0x0;
3871     NvU32 index;
3872     OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
3873     NvU64 time;
3874     MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
3875     KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
3876     TRANSFER_SURFACE surf = {0};
3877     NvNotification *pNotifier = NULL;
3878     NvBool bMemEndTransfer = NV_FALSE;
3879 
3880     if (pNotifierMemDesc == NULL)
3881         return NV_OK;
3882 
3883     index = pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN];
3884 
3885     notifyStatus =
3886         FLD_SET_DRF(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _IN_PROGRESS, _TRUE, notifyStatus);
3887     notifyStatus =
3888         FLD_SET_DRF_NUM(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _VALUE, 0xFFFF, notifyStatus);
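
    //
    // The status word built above marks the notification as in-progress with
    // an all-ones value field; the work submit token itself is supplied to
    // notifyFillNvNotification() further below.
    //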
3889 
3890     addressSpace = memdescGetAddressSpace(pNotifierMemDesc);
3891     if (RMCFG_FEATURE_PLATFORM_GSP)
3892         NV_ASSERT_OR_RETURN(addressSpace == ADDR_FBMEM, NV_ERR_INVALID_STATE);
3893 
3894     //
3895     // If clients did not allocate enough memory for the doorbell
3896     // notifier, return NV_OK so as not to regress older clients
3897     //
3898     NV_CHECK_OR_RETURN(LEVEL_INFO, memdescGetSize(pNotifierMemDesc) >= (index + 1) * sizeof(NvNotification), NV_OK);
3899 
3900     pNotifier = (NvNotification *)memdescGetKernelMapping(pNotifierMemDesc);
3901     if (pNotifier == NULL)
3902     {
3903         surf.pMemDesc = pNotifierMemDesc;
3904         surf.offset = index * sizeof(NvNotification);
3905 
3906         pNotifier =
3907             (NvNotification *) memmgrMemBeginTransfer(pMemoryManager, &surf,
3908                                                       sizeof(NvNotification),
3909                                                       TRANSFER_FLAGS_SHADOW_ALLOC);
3910         NV_ASSERT_OR_RETURN(pNotifier != NULL, NV_ERR_INVALID_STATE);
3911         bMemEndTransfer = NV_TRUE;
3912     }
3913     else
3914     {
3915         //
        // If a CPU pointer has been passed by the caller, ensure that the
        // notifier is in sysmem or, if it is in vidmem, that BAR access to it
        // is not blocked (for HCC)
3919         //
3920         NV_ASSERT_OR_RETURN(
3921             memdescGetAddressSpace(pNotifierMemDesc) == ADDR_SYSMEM ||
3922             !kbusIsBarAccessBlocked(pKernelBus), NV_ERR_INVALID_ARGUMENT);
3923         pNotifier = &pNotifier[index];
3924     }
3925 
3926     tmrGetCurrentTime(pTmr, &time);
3927 
3928     notifyFillNvNotification(pGpu, pNotifier, token, 0,
3929                              notifyStatus, NV_TRUE, time);
3930 
3931     if (bMemEndTransfer)
3932     {
3933         memmgrMemEndTransfer(pMemoryManager, &surf, sizeof(NvNotification), 0);
3934     }
3935 
3936     return NV_OK;
3937 }
3938 
3939 /**
3940  * @brief Alloc and set up pNotifyActions
3941  *
3942  * @param[in]  pKernelChannel
 * @param[in]  classNum            Channel class
3944  *
3945  * @return  NV_OK or error code
3946  */
3947 static NV_STATUS
3948 _kchannelSetupNotifyActions
3949 (
3950     KernelChannel *pKernelChannel,
3951     NvU32          classNum
3952 )
3953 {
3954     CLI_CHANNEL_CLASS_INFO classInfo;
3955 
3956     // Allocate notifier action table for the maximum supported by this class
3957     CliGetChannelClassInfo(classNum, &classInfo);
3958     if (classInfo.notifiersMaxCount > 0)
3959     {
3960         pKernelChannel->pNotifyActions = portMemAllocNonPaged(
3961                                    classInfo.notifiersMaxCount * sizeof(*pKernelChannel->pNotifyActions));
3962         if (pKernelChannel->pNotifyActions == NULL)
3963             return NV_ERR_NO_MEMORY;
3964 
3965         portMemSet(pKernelChannel->pNotifyActions, 0,
3966                  classInfo.notifiersMaxCount * sizeof(*pKernelChannel->pNotifyActions));
3967     }
3968 
3969     return NV_OK;
3970 } // end of _kchannelSetupNotifyActions()
3971 
3972 /**
3973  * @brief Cleans up pNotifyActions
3974  *
3975  * @param[in] pKernelChannel
3976  */
3977 static void
3978 _kchannelCleanupNotifyActions
3979 (
3980     KernelChannel *pKernelChannel
3981 )
3982 {
3983     // free memory associated with notify actions table
3984     portMemFree(pKernelChannel->pNotifyActions);
3985     pKernelChannel->pNotifyActions = NULL;
3986 } // end of _kchannelCleanupNotifyActions()
3987 
3988 static NV_STATUS
3989 _kchannelNotifyOfChid
3990 (
3991     OBJGPU *pGpu,
3992     KernelChannel *pKernelChannel,
3993     RsClient *pRsClient
3994 )
3995 {
3996     ContextDma *pContextDma;
3997 
3998     //
    // Return the ChID to the drivers in the error context DMA.
    //
    // We need to update this when the virtual channel gets mapped in.
4002     //
4003 
4004     if ((ctxdmaGetByHandle(pRsClient, pKernelChannel->hErrorContext, &pContextDma)) == NV_OK)
4005     {
4006         NV_CHECK_OR_RETURN(LEVEL_INFO, pContextDma->Limit >= sizeof(NvNotification) - 1, NV_ERR_INVALID_ARGUMENT);
4007         notifyFillNotifier(pGpu, pContextDma, pKernelChannel->ChID, 0, NV_OK);
4008     }
4009 
4010     return NV_OK;
4011 }
4012 
4013 NvU32
4014 kchannelGetGfid_IMPL
4015 (
4016     KernelChannel *pKernelChannel
4017 )
4018 {
4019     return pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->gfid;
4020 }
4021 
4022 NvBool
4023 kchannelIsCpuMapped
4024 (
4025     OBJGPU *pGpu,
4026     KernelChannel *pKernelChannel
4027 )
4028 {
4029     return !!(pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &
4030               KERNEL_CHANNEL_SW_STATE_CPU_MAP);
4031 }
4032 
4033 void
4034 kchannelSetCpuMapped
4035 (
4036     OBJGPU *pGpu,
4037     KernelChannel *pKernelChannel,
4038     NvBool bCpuMapped
4039 )
4040 {
4041     if (bCpuMapped)
4042     {
4043         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] |=
4044               KERNEL_CHANNEL_SW_STATE_CPU_MAP;
4045     }
4046     else
4047     {
4048         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &=
4049               ~(KERNEL_CHANNEL_SW_STATE_CPU_MAP);
4050     }
4051 }
4052 
4053 NvBool
4054 kchannelIsRunlistSet
4055 (
4056     OBJGPU *pGpu,
4057     KernelChannel *pKernelChannel
4058 )
4059 {
4060     return !!(pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &
4061               KERNEL_CHANNEL_SW_STATE_RUNLIST_SET);
4062 }
4063 
4064 void
4065 kchannelSetRunlistSet
4066 (
4067     OBJGPU *pGpu,
4068     KernelChannel *pKernelChannel,
4069     NvBool bRunlistSet
4070 )
4071 {
4072     if (bRunlistSet)
4073     {
4074         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] |=
4075               KERNEL_CHANNEL_SW_STATE_RUNLIST_SET;
4076     }
4077     else
4078     {
4079         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &=
4080               ~(KERNEL_CHANNEL_SW_STATE_RUNLIST_SET);
4081     }
4082 }
4083 
4084 NV_STATUS
4085 kchannelGetChannelPhysicalState_KERNEL
4086 (
4087     OBJGPU *pGpu,
4088     KernelChannel *pKernelChannel,
4089     NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams
4090 )
4091 {
4092     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
4093     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams;
4094     NV_STATUS status = NV_OK;
4095 
4096     // Get the physical state from GSP
4097     NV_RM_RPC_CONTROL(pGpu,
4098                       pRmCtrlParams->hClient,
4099                       pRmCtrlParams->hObject,
4100                       pRmCtrlParams->cmd,
4101                       pRmCtrlParams->pParams,
4102                       pRmCtrlParams->paramsSize,
4103                       status);
4104     NV_ASSERT_OK_OR_RETURN(status);
4105 
4106     return NV_OK;
4107 }
4108 
4109 NV_STATUS
4110 kchannelMapUserD_IMPL
4111 (
4112     OBJGPU         *pGpu,
4113     KernelChannel  *pKernelChannel,
4114     RS_PRIV_LEVEL   privLevel,
4115     NvU64           offset,
4116     NvU32           protect,
4117     NvP64          *ppCpuVirtAddr,
4118     NvP64          *ppPriv
4119 )
4120 {
4121     NV_STATUS status      = NV_OK;
4122     NvU64     userBase;
4123     NvU64     userOffset;
4124     NvU64     userSize;
4125     NvU32     cachingMode = NV_MEMORY_UNCACHED;
4126 
4127     // if USERD is allocated by client
4128     if (pKernelChannel->bClientAllocatedUserD)
4129     {
4130         return NV_OK;
4131     }
4132 
4133     status = kchannelGetUserdInfo_HAL(pGpu, pKernelChannel,
4134                                       &userBase, &userOffset, &userSize);
4135 
4136     if (status != NV_OK)
4137         return status;
4138 
4139 
4140     if (userBase == pGpu->busInfo.gpuPhysAddr)
4141     {
4142         // Create a mapping of BAR0
4143         status = osMapGPU(pGpu, privLevel, NvU64_LO32(userOffset+offset),
4144                  NvU64_LO32(userSize), protect, ppCpuVirtAddr, ppPriv);
4145         goto done;
4146     }
4147 
4148     if (pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING))
4149     {
4150         cachingMode = NV_MEMORY_CACHED;
4151     }
4152 
4153     //
4154     // If userBase is not bar0, then it is bar1 and we create a regular memory
4155     // mapping.
4156     //
4157     if (privLevel >= RS_PRIV_LEVEL_KERNEL)
4158     {
4159         status = osMapPciMemoryKernel64(pGpu, userBase + userOffset + offset,
4160                                         userSize, protect, ppCpuVirtAddr, cachingMode);
4161     }
4162     else
4163     {
4164         status = osMapPciMemoryUser(pGpu->pOsGpuInfo,
4165                                     userBase + userOffset + offset,
4166                                     userSize, protect, ppCpuVirtAddr,
4167                                     ppPriv, cachingMode);
4168     }
4169     if (!((status == NV_OK) && *ppCpuVirtAddr))
4170     {
4171         NV_PRINTF(LEVEL_ERROR,
4172                   "BAR1 offset 0x%llx for USERD of channel %x could not be cpu mapped\n",
4173                   userOffset, kchannelGetDebugTag(pKernelChannel));
4174     }
4175 
4176 done:
4177 
4178     // Indicate channel is mapped
4179     if (status == NV_OK)
4180     {
        SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
        kchannelSetCpuMapped(pGpu, pKernelChannel, NV_TRUE);
        SLI_LOOP_END
4184     }
4185 
4186     return status;
4187 }
4188 
4189 void
4190 kchannelUnmapUserD_IMPL
4191 (
4192     OBJGPU         *pGpu,
4193     KernelChannel  *pKernelChannel,
4194     RS_PRIV_LEVEL   privLevel,
4195     NvP64          *ppCpuVirtAddr,
4196     NvP64          *ppPriv
4197 )
4198 {
4199     NV_STATUS status;
4200     NvU64     userBase;
4201     NvU64     userOffset;
4202     NvU64     userSize;
4203 
4204     if (pKernelChannel->bClientAllocatedUserD)
4205     {
4206         return;
4207     }
4208 
4209     status = kchannelGetUserdInfo_HAL(pGpu, pKernelChannel,
4210                                       &userBase, &userOffset, &userSize);
4211 
4212     NV_ASSERT_OR_RETURN_VOID(status == NV_OK);
4213 
4214     if (userBase == pGpu->busInfo.gpuPhysAddr)
4215     {
4216         osUnmapGPU(pGpu->pOsGpuInfo, privLevel, *ppCpuVirtAddr,
4217                    NvU64_LO32(userSize), *ppPriv);
4218     }
4219     else
4220     {
4221         // GF100+
4222         // Unmap Cpu virt mapping
4223         if (privLevel >= RS_PRIV_LEVEL_KERNEL)
4224         {
4225             osUnmapPciMemoryKernel64(pGpu, *ppCpuVirtAddr);
4226         }
4227         else
4228         {
4229             osUnmapPciMemoryUser(pGpu->pOsGpuInfo, *ppCpuVirtAddr,
4230                                  userSize, *ppPriv);
4231         }
4232     }
4233 
4234     // Indicate the channel is no longer CPU mapped
4235     kchannelSetCpuMapped(pGpu, pKernelChannel, NV_FALSE);
4236     return;
4237 }
4238 
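/*!
 * @brief Return the memory descriptor that backs user mappings of this
 *        channel: the instance memdesc for the subdevice instance of pGpu.
 *
 * @param[in]  pGpu
 * @param[in]  pKernelChannel
 * @param[out] ppMemDesc       Set to the per-subdevice instance memdesc
 *
 * @return NV_OK on success, NV_ERR_INVALID_STATE if no memdesc is present
 */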
4239 static NV_STATUS
4240 _kchannelGetUserMemDesc
4241 (
4242     OBJGPU             *pGpu,
4243     KernelChannel      *pKernelChannel,
4244     PMEMORY_DESCRIPTOR *ppMemDesc
4245 )
4246 {
4247     NV_ASSERT_OR_RETURN(ppMemDesc != NULL, NV_ERR_INVALID_STATE);
4248     *ppMemDesc = NULL;
4249 
4250     NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_STATE);
4251 
4252     *ppMemDesc = pKernelChannel->pInstSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
4253 
4254     return *ppMemDesc ? NV_OK : NV_ERR_INVALID_STATE;
4255 }
4256 
4257 /*!
4258  * @brief Retrieve a KernelChannel from either a KernelChannel or TSG handle. KernelChannel is
4259  * checked first. If TSG is provided, the head of the TSG is returned.
4260  *
4261  * @param[in]  pClient            Client object
4262  * @param[in]  hDual              NvHandle either to TSG or to KernelChannel
4263  * @param[out] ppKernelChannel    Referenced KernelChannel
4264  */
4265 NV_STATUS
4266 kchannelGetFromDualHandle_IMPL
4267 (
4268     RsClient        *pClient,
4269     NvHandle         hDual,
4270     KernelChannel  **ppKernelChannel
4271 )
4272 {
4273     KernelChannel *pKernelChannel;
4274     RsResourceRef *pChanGrpRef;
4275 
4276     NV_ASSERT_OR_RETURN(ppKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT);
4277 
4278     *ppKernelChannel = NULL;
4279 
4280     if (CliGetKernelChannel(pClient, hDual, &pKernelChannel) == NV_OK)
4281     {
4282         *ppKernelChannel = pKernelChannel;
4283         return NV_OK;
4284     }
4285 
4286     if (CliGetChannelGroup(pClient->hClient, hDual, &pChanGrpRef, NULL) == NV_OK)
4287     {
4288         KernelChannelGroupApi *pKernelChannelGroupApi = dynamicCast(
4289             pChanGrpRef->pResource,
4290             KernelChannelGroupApi);
4291 
4292         NV_ASSERT_OR_RETURN(
4293             (pKernelChannelGroupApi != NULL) &&
4294                 (pKernelChannelGroupApi->pKernelChannelGroup != NULL),
4295             NV_ERR_INVALID_ARGUMENT);
4296 
4297         if (pKernelChannelGroupApi->pKernelChannelGroup->chanCount == 0)
4298             return NV_ERR_INVALID_ARGUMENT;
4299 
4300         *ppKernelChannel =
4301             pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pKernelChannel;
4302         NV_ASSERT_OR_RETURN(*ppKernelChannel != NULL, NV_ERR_INVALID_STATE);
4303 
4304         return NV_OK;
4305     }
4306 
4307     return NV_ERR_OBJECT_NOT_FOUND;
4308 }
4309 
4310 /*!
4311  * @brief Retrieve a KernelChannel from either a KernelChannel or TSG handle. KernelChannel is
4312  * checked first. If TSG is provided, the head of the TSG is returned. If
4313  * KernelChannel handle is provided, it must not be part of a client-allocated TSG.
4314  *
4315  * @param[in]  pClient            Client object
4316  * @param[in]  hDual              NvHandle either to TSG or to bare Channel
4317  * @param[out] ppKernelChannel    Referenced KernelChannel
4318  */
4319 NV_STATUS
4320 kchannelGetFromDualHandleRestricted_IMPL
4321 (
4322     RsClient        *pClient,
4323     NvHandle         hDual,
4324     KernelChannel  **ppKernelChannel
4325 )
4326 {
4327     NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
4328         kchannelGetFromDualHandle(pClient, hDual, ppKernelChannel));
4329     if ((RES_GET_HANDLE(*ppKernelChannel) == hDual) &&
4330         (((*ppKernelChannel)->pKernelChannelGroupApi->pKernelChannelGroup != NULL) &&
4331          !(*ppKernelChannel)->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm))
4332     {
4333         NV_PRINTF(LEVEL_ERROR, "channel handle 0x%08x is part of a channel group, not allowed!\n",
4334                   RES_GET_HANDLE(*ppKernelChannel));
4335         return NV_ERR_INVALID_ARGUMENT;
4336     }
4337     return NV_OK;
4338 }
4339 
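/*!
 * @brief Record the bookkeeping for a channel CPU mapping in the supplied
 *        RsCpuMapping: owning GPU, kernel/user origin, CPU address and
 *        length, OS-private cookie, mapping flags, and the channel ID as
 *        the mapping context.
 */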
4340 static void
4341 _kchannelUpdateFifoMapping
4342 (
4343     KernelChannel    *pKernelChannel,
4344     OBJGPU           *pGpu,
4345     NvBool            bKernel,
4346     NvP64             cpuAddress,
4347     NvP64             priv,
4348     NvU64             cpuMapLength,
4349     NvU32             flags,
4350     NvHandle          hSubdevice,
4351     RsCpuMapping     *pMapping
4352 )
4353 {
4354     pMapping->pPrivate->pGpu      = pGpu;
4355     pMapping->pPrivate->bKernel   = bKernel;
4356     pMapping->processId           = osGetCurrentProcess();
4357     pMapping->pLinearAddress      = cpuAddress;
4358     pMapping->pPrivate->pPriv     = priv;
4359     pMapping->length              = cpuMapLength;
4360     pMapping->flags               = flags;
4361     pMapping->pContext            = (void*)(NvUPtr)pKernelChannel->ChID;
4362 }
4363 
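/*!
 * @brief Retrieve the key material bundle (KMB) for a channel from the
 *        Confidential Compute keystore.
 *
 * @param[in]  pGpu
 * @param[in]  pKernelChannel
 * @param[in]  rotateOperation    IV rotation type forwarded to the keystore
 * @param[in]  includeSecrets     Whether key secrets are included in the bundle
 * @param[out] keyMaterialBundle  Receives the KMB for the channel
 */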
4364 NV_STATUS kchannelRetrieveKmb_KERNEL
4365 (
4366     OBJGPU *pGpu,
4367     KernelChannel *pKernelChannel,
4368     ROTATE_IV_TYPE rotateOperation,
4369     NvBool includeSecrets,
4370     CC_KMB *keyMaterialBundle
4371 )
4372 {
4373     ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu);
4374 
4375     NV_ASSERT_OR_RETURN(pCC != NULL, NV_ERR_INVALID_STATE);
4376 
4377     return (confComputeKeyStoreRetrieveViaChannel_HAL(pCC, pKernelChannel, rotateOperation,
4378                                                       includeSecrets, keyMaterialBundle));
4379 }
4380 
4381 /*!
4382  * @brief Get KMB for secure channel
4383  *
4384  * @param[in]  pKernelChannel
4385  * @param[out] pGetKmbParams
4386  */
4387 NV_STATUS
4388 kchannelCtrlCmdGetKmb_KERNEL
4389 (
4390     KernelChannel *pKernelChannel,
4391     NVC56F_CTRL_CMD_GET_KMB_PARAMS *pGetKmbParams
4392 )
4393 {
4394     if (!pKernelChannel->bCCSecureChannel)
4395     {
4396         return NV_ERR_NOT_SUPPORTED;
4397     }
4398 
4399     portMemCopy((void*)(&pGetKmbParams->kmb), sizeof(CC_KMB),
4400                 (const void*)(&pKernelChannel->clientKmb), sizeof(CC_KMB));
4401 
4402     return NV_OK;
4404 }
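
/*
 * Illustrative only: a minimal sketch of how an RM-internal caller might
 * issue the GET_KMB control to read back a secure channel's key material
 * bundle. The handles (hClient, hChannel) and the choice of RMAPI interface
 * are assumptions for the example, not something defined in this file.
 *
 *     NVC56F_CTRL_CMD_GET_KMB_PARAMS params;
 *     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
 *
 *     portMemSet(&params, 0, sizeof(params));
 *     NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
 *         pRmApi->Control(pRmApi, hClient, hChannel,
 *                         NVC56F_CTRL_CMD_GET_KMB,
 *                         &params, sizeof(params)));
 *     // params.kmb now holds the channel's client-side key material bundle.
 */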
4405 
4406 /*!
4407  * @brief      Rotate the IVs for the given secure channel
4408  *
4409  * @param[in]  pKernelChannel
4410  * @param[out] pRotateIvParams
4411  *
4412  * @return     NV_OK on success
4413  * @return     NV_ERR_NOT_SUPPORTED if channel is not a secure channel.
4414  */
4415 NV_STATUS
4416 kchannelCtrlRotateSecureChannelIv_KERNEL
4417 (
4418     KernelChannel *pKernelChannel,
4419     NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV_PARAMS *pRotateIvParams
4420 )
4421 {
4422     NV_STATUS            status            = NV_OK;
4423     OBJGPU              *pGpu              = GPU_RES_GET_GPU(pKernelChannel);
4424     ConfidentialCompute *pCC               = GPU_GET_CONF_COMPUTE(pGpu);
4425     ROTATE_IV_TYPE       rotateIvOperation = pRotateIvParams->rotateIvType;
4426 
4427     if (!pKernelChannel->bCCSecureChannel)
4428     {
4429         return NV_ERR_NOT_SUPPORTED;
4430     }
4431 
4432     NV_PRINTF(LEVEL_INFO, "Rotating IV in CPU-RM.\n");
4433 
4434     status = confComputeKeyStoreRetrieveViaChannel_HAL(
4435         pCC, pKernelChannel, rotateIvOperation, NV_TRUE, &pKernelChannel->clientKmb);
4436 
4437     if (status != NV_OK)
4438     {
4439         return status;
4440     }
4441 
4442     portMemSet(pRotateIvParams, 0, sizeof(*pRotateIvParams));
4443 
4444     portMemCopy(pRotateIvParams->updatedKmb.encryptBundle.iv,
4445                 sizeof(pRotateIvParams->updatedKmb.encryptBundle.iv),
4446                 pKernelChannel->clientKmb.encryptBundle.iv,
4447                 sizeof(pKernelChannel->clientKmb.encryptBundle.iv));
4448 
4449     portMemCopy(pRotateIvParams->updatedKmb.decryptBundle.iv,
4450                 sizeof(pRotateIvParams->updatedKmb.decryptBundle.iv),
4451                 pKernelChannel->clientKmb.decryptBundle.iv,
4452                 sizeof(pKernelChannel->clientKmb.decryptBundle.iv));
4453 
4454     pRotateIvParams->rotateIvType = rotateIvOperation;
4455 
4456     NV_RM_RPC_CONTROL(pGpu,
4457                       RES_GET_CLIENT_HANDLE(pKernelChannel),
4458                       RES_GET_HANDLE(pKernelChannel),
4459                       NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV,
4460                       pRotateIvParams,
4461                       sizeof(*pRotateIvParams),
4462                       status);
4463 
4464     if (status != NV_OK)
4465     {
4466         return status;
4467     }
4468 
4469     if ((rotateIvOperation == ROTATE_IV_ALL_VALID) || (rotateIvOperation == ROTATE_IV_ENCRYPT))
4470     {
4471         portMemCopy(&pRotateIvParams->updatedKmb.encryptBundle,
4472                     sizeof(pRotateIvParams->updatedKmb.encryptBundle),
4473                     &pKernelChannel->clientKmb.encryptBundle,
4474                     sizeof(pKernelChannel->clientKmb.encryptBundle));
4475     }
4476 
4477     if ((rotateIvOperation == ROTATE_IV_ALL_VALID) || (rotateIvOperation == ROTATE_IV_DECRYPT))
4478     {
4479         portMemCopy(&pRotateIvParams->updatedKmb.decryptBundle,
4480                     sizeof(pRotateIvParams->updatedKmb.decryptBundle),
4481                     &pKernelChannel->clientKmb.decryptBundle,
4482                     sizeof(pKernelChannel->clientKmb.decryptBundle));
4483     }
4484 
4485     return NV_OK;
4487 }
4488 
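/*!
 * @brief      Rotate the IVs for the given secure channel on the physical
 *             (GSP-RM) side.
 *
 * The IV buffers are intentionally swapped when calling into the HAL: the
 * CPU-side encrypt IV is the GPU-side decrypt IV, and vice versa.
 *
 * @param[in]     pKernelChannel
 * @param[in,out] pRotateIvParams
 *
 * @return     NV_OK on success, or the status returned by the HAL rotation.
 */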
4489 NV_STATUS
4490 kchannelCtrlRotateSecureChannelIv_PHYSICAL
4491 (
4492     KernelChannel *pKernelChannel,
4493     NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV_PARAMS *pRotateIvParams
4494 )
4495 {
4496     NV_STATUS status;
4497 
4498     NV_PRINTF(LEVEL_INFO, "Rotating IV in GSP-RM.\n");
4499 
4500     // CPU-side encrypt IV corresponds to GPU-side decrypt IV.
4501     // CPU-side decrypt IV corresponds to GPU-side encrypt IV.
4502     status =
4503         kchannelRotateSecureChannelIv_HAL(pKernelChannel,
4504                                           pRotateIvParams->rotateIvType,
4505                                           pRotateIvParams->updatedKmb.decryptBundle.iv,
4506                                           pRotateIvParams->updatedKmb.encryptBundle.iv);
4507     if (status != NV_OK)
4508     {
4509         return status;
4510     }
4511 
4512     return NV_OK;
4513 }
4514