1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #define NVOC_KERNEL_CHANNEL_H_PRIVATE_ACCESS_ALLOWED
25 
26 #include "kernel/gpu/fifo/kernel_channel.h"
27 
28 #include "kernel/core/locks.h"
29 #include "gpu/subdevice/subdevice.h"
30 #include "kernel/diagnostics/gpu_acct.h"
31 #include "kernel/gpu/conf_compute/conf_compute.h"
32 #include "kernel/gpu/device/device.h"
33 #include "kernel/gpu/fifo/kernel_ctxshare.h"
34 #include "kernel/gpu/fifo/kernel_channel_group.h"
35 #include "kernel/gpu/gr/kernel_graphics.h"
36 #include "kernel/gpu/mem_mgr/context_dma.h"
37 #include "kernel/gpu/mem_mgr/heap.h"
38 #include "kernel/gpu/mem_mgr/mem_mgr.h"
39 #include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
40 #include "kernel/gpu/rc/kernel_rc.h"
41 #include "kernel/mem_mgr/ctx_buf_pool.h"
42 #include "kernel/mem_mgr/gpu_vaspace.h"
43 #include "kernel/rmapi/event.h"
44 #include "kernel/rmapi/rmapi.h"
45 #include "kernel/rmapi/rs_utils.h"
46 #include "kernel/virtualization/hypervisor/hypervisor.h"
47 #include "gpu/bus/kern_bus.h"
48 #include "gpu/mem_mgr/virt_mem_allocator.h"
49 #include "objtmr.h"
50 #include "platform/sli/sli.h"
51 
52 #include "class/cl0090.h"   // KERNEL_GRAPHICS_CONTEXT
53 #include "class/cl906fsw.h" // GF100_GPFIFO
54 #include "class/cla06c.h"   // KEPLER_CHANNEL_GROUP_A
55 #include "class/cla06f.h"   // KEPLER_CHANNEL_GPFIFO_A
56 #include "class/cla06fsw.h" // KEPLER_CHANNEL_GPFIFO_A
57 #include "class/cla16f.h"   // KEPLER_CHANNEL_GPFIFO_B
58 #include "class/cla16fsw.h" // KEPLER_CHANNEL_GPFIFO_B
59 #include "class/clb06f.h"   // MAXWELL_CHANNEL_GPFIFO_A
60 #include "class/clb06fsw.h" // MAXWELL_CHANNEL_GPFIFO_A
61 #include "class/clc06f.h"   // PASCAL_CHANNEL_GPFIFO_A
62 #include "class/clc06fsw.h" // PASCAL_CHANNEL_GPFIFO_A
63 #include "class/clc36f.h"   // VOLTA_CHANNEL_GPFIFO_A
64 #include "class/clc36fsw.h" // VOLTA_CHANNEL_GPFIFO_A
65 #include "class/clc46f.h"   // TURING_CHANNEL_GPFIFO_A
66 #include "class/clc46fsw.h" // TURING_CHANNEL_GPFIFO_A
67 #include "class/clc56f.h"   // AMPERE_CHANNEL_GPFIFO_A
68 #include "class/clc56fsw.h" // AMPERE_CHANNEL_GPFIFO_A
69 #include "class/clc572.h"   // PHYSICAL_CHANNEL_GPFIFO
70 #include "class/clc86f.h"   // HOPPER_CHANNEL_GPFIFO_A
71 #include "class/clc86fsw.h" // HOPPER_CHANNEL_GPFIFO_A
72 
73 #include "ctrl/ctrl906f.h"
74 #include "ctrl/ctrlc46f.h"
75 #include "ctrl/ctrlc86f.h"
76 
77 #include "Nvcm.h"
78 #include "libraries/resserv/resserv.h"
79 #include "libraries/resserv/rs_client.h"
80 #include "libraries/resserv/rs_resource.h"
81 #include "libraries/resserv/rs_server.h"
82 #include "nvRmReg.h"
83 #include "nvstatuscodes.h"
84 #include "vgpu/rpc.h"
85 
86 // Instmem static functions
87 static NV_STATUS _kchannelAllocHalData(OBJGPU *pGpu, KernelChannel *pKernelChannel);
88 static void      _kchannelFreeHalData(OBJGPU *pGpu, KernelChannel *pKernelChannel);
89 static NV_STATUS _kchannelAllocOrDescribeInstMem(
90     KernelChannel  *pKernelChannel,
91     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams);
92 static NV_STATUS _kchannelDescribeMemDescsFromParams(
93     OBJGPU *pGpu,
94     KernelChannel *pKernelChannel,
95     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams);
96 static NV_STATUS _kchannelDescribeMemDescsHeavySriov(OBJGPU *pGpu, KernelChannel *pKernelChannel);
97 static NV_STATUS _kchannelSendChannelAllocRpc(
98     KernelChannel *pKernelChannel,
99     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams,
100     KernelChannelGroup *pKernelChannelGroup,
101     NvBool bFullSriov);
102 
103 static NV_STATUS _kchannelSetupNotifyActions(KernelChannel *pKernelChannel,
104                                              NvU32 classNum);
105 static void _kchannelCleanupNotifyActions(KernelChannel *pKernelChannel);
106 static NV_STATUS _kchannelNotifyOfChid(OBJGPU *pGpu, KernelChannel *pKernelChannel, RsClient *pRsClient);
107 static NV_STATUS _kchannelGetUserMemDesc(OBJGPU *pGpu, KernelChannel *pKernelChannel, PMEMORY_DESCRIPTOR *ppMemDesc);
108 static void _kchannelUpdateFifoMapping(KernelChannel    *pKernelChannel,
109                                        OBJGPU           *pGpu,
110                                        NvBool            bKernel,
111                                        NvP64             cpuAddress,
112                                        NvP64             priv,
113                                        NvU64             cpuMapLength,
114                                        NvU32             flags,
115                                        NvHandle          hSubdevice,
116                                        RsCpuMapping     *pMapping);
117 static NvNotification*
118 _kchannelGetKeyRotationNotifier(KernelChannel *pKernelChannel);
119 
120 /*!
121  * @brief Construct a new KernelChannel, which also creates a Channel.
122  *
123  * @param[in,out]  pCallContext     The call context
124  * @param[in,out]  pParams          Params for the *_CHANNEL_GPFIFO class
125  *                                  object being created
126  *
127  * @returns NV_OK on success, specific error code on failure.
128  */
129 NV_STATUS
kchannelConstruct_IMPL(KernelChannel * pKernelChannel,CALL_CONTEXT * pCallContext,RS_RES_ALLOC_PARAMS_INTERNAL * pParams)130 kchannelConstruct_IMPL
131 (
132     KernelChannel *pKernelChannel,
133     CALL_CONTEXT *pCallContext,
134     RS_RES_ALLOC_PARAMS_INTERNAL *pParams
135 )
136 {
137     OBJGPU                 *pGpu             = GPU_RES_GET_GPU(pKernelChannel);
138     OBJSYS                 *pSys             = SYS_GET_INSTANCE();
139     KernelMIGManager       *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
140     KernelFifo             *pKernelFifo      = GPU_GET_KERNEL_FIFO(pGpu);
141     RsClient               *pRsClient        = pCallContext->pClient;
142     RmClient               *pRmClient        = NULL;
143     RsResourceRef          *pResourceRef     = pCallContext->pResourceRef;
144     RsResourceRef          *pKernelCtxShareRef = NULL;
145     NV_STATUS               status;
146     RM_API                 *pRmApi           = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
147     NvHandle                hClient          = pRsClient->hClient;
148     NvHandle                hParent          = pResourceRef->pParentRef->hResource;
149     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams = pParams->pAllocParams;
150     RsResourceRef          *pChanGrpRef      = NULL;
151     KernelChannelGroupApi  *pKernelChannelGroupApi = NULL;
152     NvHandle                hKernelCtxShare  = pChannelGpfifoParams->hContextShare;
153     NvBool                  bTsgAllocated    = NV_FALSE;
154     NvHandle                hChanGrp         = NV01_NULL_OBJECT;
155     RsResourceRef          *pDeviceRef       = NULL;
156     RsResourceRef          *pVASpaceRef      = NULL;
157     KernelGraphicsContext  *pKernelGraphicsContext = NULL;
158     NvBool                  bMIGInUse;
159     KernelChannelGroup     *pKernelChannelGroup = NULL;
160     NvU32                   chID             = ~0;
161     NvU32                   flags            = pChannelGpfifoParams->flags;
162     RM_ENGINE_TYPE          globalRmEngineType = RM_ENGINE_TYPE_NULL;
163     NvU32                   verifFlags2      = 0;
164     NvBool                  bChidAllocated   = NV_FALSE;
165     NvBool                  bLockAcquired    = NV_FALSE;
166     NvBool                  bNotifyActionsSetup = NV_FALSE;
167     CTX_BUF_POOL_INFO      *pChannelBufPool  = NULL;
168     CTX_BUF_INFO            bufInfo          = {0};
169     NvBool                  bRpcAllocated    = NV_FALSE;
170     NvBool                  bFullSriov       = IS_VIRTUAL_WITH_SRIOV(pGpu) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu);
171     NvBool                  bAddedToGroup    = NV_FALSE;
172     NvU32                   callingContextGfid;
173     Device                 *pDevice;
174 
175     // We only support physical channels.
176     NV_ASSERT_OR_RETURN(FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_TYPE, _PHYSICAL, flags),
177         NV_ERR_NOT_SUPPORTED);
178 
179     pKernelChannel->refCount = 1;
180     pKernelChannel->bIsContextBound = NV_FALSE;
181     pKernelChannel->nextObjectClassID = 0;
182     pKernelChannel->subctxId = 0;
183     pKernelChannel->bSkipCtxBufferAlloc = FLD_TEST_DRF(OS04, _FLAGS,
184                                                        _SKIP_CTXBUFFER_ALLOC, _TRUE, flags);
185     pKernelChannel->cid = portAtomicIncrementU32(&pSys->currentCid);
186     pKernelChannel->runqueue = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, flags);
187     pKernelChannel->engineType = RM_ENGINE_TYPE_NULL;
188     pChannelGpfifoParams->cid = pKernelChannel->cid;
189     NV_ASSERT_OK_OR_GOTO(status, refFindAncestorOfType(pResourceRef, classId(Device), &pDeviceRef), cleanup);
190     NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &callingContextGfid));
191 
192     pDevice = dynamicCast(pDeviceRef->pResource, Device);
193 
194     // Internal fields must be cleared when RMAPI call is from client
195     if (!hypervisorIsVgxHyper() || IS_GSP_CLIENT(pGpu))
196         pChannelGpfifoParams->hPhysChannelGroup = NV01_NULL_OBJECT;
197     pChannelGpfifoParams->internalFlags = 0;
198     portMemSet(&pChannelGpfifoParams->errorNotifierMem, 0,
199                sizeof pChannelGpfifoParams->errorNotifierMem);
200     portMemSet(&pChannelGpfifoParams->eccErrorNotifierMem, 0,
201                sizeof pChannelGpfifoParams->eccErrorNotifierMem);
202     pChannelGpfifoParams->ProcessID = 0;
203     pChannelGpfifoParams->SubProcessID = 0;
204     portMemSet(pChannelGpfifoParams->encryptIv, 0, sizeof(pChannelGpfifoParams->encryptIv));
205     portMemSet(pChannelGpfifoParams->decryptIv, 0, sizeof(pChannelGpfifoParams->decryptIv));
206     portMemSet(pChannelGpfifoParams->hmacNonce, 0, sizeof(pChannelGpfifoParams->hmacNonce));
207 
208     pRmClient = dynamicCast(pRsClient, RmClient);
209     if (pRmClient == NULL)
210     {
211         return NV_ERR_OBJECT_NOT_FOUND;
212     }
213     pKernelChannel->pUserInfo = pRmClient->pUserInfo;
214 
215     //
216     // GSP-RM needs privilegeLevel passed in as an alloc param because it cannot
217     // check pRmClient for kernel/admin.
218     // Other platforms check pRmClient to determine privilegeLevel.
219     //
220     if (RMCFG_FEATURE_PLATFORM_GSP)
221     {
222         // Guest-RM clients can allocate a privileged channel to perform
223         // actions such as updating page tables in physical mode or scrubbing.
224         // Security for these channels is enforced by VMMU and IOMMU
225         if (gpuIsSriovEnabled(pGpu) && IS_GFID_VF(callingContextGfid) &&
226                 FLD_TEST_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, flags))
227         {
228             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN;
229         }
230         else
231         {
232             pKernelChannel->privilegeLevel =
233                 DRF_VAL(_KERNELCHANNEL, _ALLOC_INTERNALFLAGS, _PRIVILEGE, pChannelGpfifoParams->internalFlags);
234         }
235 
236         // In GSP, all vGPU channel's will simply consider GFID as the processID
237         if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && IS_GFID_VF(callingContextGfid))
238         {
239             pKernelChannel->ProcessID = callingContextGfid;
240         }
241         else
242         {
243             pKernelChannel->ProcessID = pChannelGpfifoParams->ProcessID;
244         }
245 
246         pKernelChannel->SubProcessID = pChannelGpfifoParams->SubProcessID;
247     }
248     else
249     {
250         RS_PRIV_LEVEL privLevel = pCallContext->secInfo.privLevel;
251         if (privLevel >= RS_PRIV_LEVEL_KERNEL)
252         {
253             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL;
254             pChannelGpfifoParams->flags = FLD_SET_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, pChannelGpfifoParams->flags);
255         }
256         else if (rmclientIsAdmin(pRmClient, privLevel) || hypervisorCheckForObjectAccess(hClient))
257         {
258             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN;
259             pChannelGpfifoParams->flags = FLD_SET_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, pChannelGpfifoParams->flags);
260         }
261         else
262         {
263             pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER;
264         }
265 
266         pKernelChannel->ProcessID = pRmClient->ProcID;
267         pKernelChannel->SubProcessID = pRmClient->SubProcessID;
268     }
269 
270     // Context share and vaspace handles can't be active at the same time.
271     if ((hKernelCtxShare != NV01_NULL_OBJECT) && (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT))
272     {
273         NV_PRINTF(LEVEL_ERROR,
274                   "Both context share and vaspace handles can't be valid at the same time\n");
275         return NV_ERR_INVALID_ARGUMENT;
276     }
277 
278     bMIGInUse = IS_MIG_IN_USE(pGpu);
279 
280     //
281     // The scrubber is allocated by Kernel RM in offload mode, and is disabled
282     // completely on GSP, so it is not possible for GSP to determine whether
283     // this allocation should be allowed or not. CPU RM can and should properly
284     // check this.
285     //
286     if (IS_MIG_ENABLED(pGpu) && !RMCFG_FEATURE_PLATFORM_GSP && !bMIGInUse)
287     {
288         NvBool bTopLevelScrubberEnabled = NV_FALSE;
289         NvBool bTopLevelScrubberConstructed = NV_FALSE;
290         MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
291 
292         if (memmgrIsPmaInitialized(pMemoryManager))
293         {
294             Heap *pHeap = GPU_GET_HEAP(pGpu);
295             NvU32 pmaConfigs = PMA_QUERY_SCRUB_ENABLED | PMA_QUERY_SCRUB_VALID;
296             NV_ASSERT_OK(pmaQueryConfigs(&pHeap->pmaObject, &pmaConfigs));
297             bTopLevelScrubberEnabled = (pmaConfigs & PMA_QUERY_SCRUB_ENABLED) != 0x0;
298             bTopLevelScrubberConstructed = (pmaConfigs & PMA_QUERY_SCRUB_VALID) != 0x0;
299         }
300 
301         //
302         // Exception: Top level scrubber must be initialized before
303         // GPU instances can be created, and therefore must be allowed to
304         // create a CE context if the scrubber is supported.
305         //
306 
307         if (!bTopLevelScrubberEnabled || bTopLevelScrubberConstructed ||
308             !kchannelCheckIsKernel(pKernelChannel))
309         {
310             NV_PRINTF(LEVEL_ERROR,
311                       "Channel allocation not allowed when MIG is enabled without GPU instancing\n");
312             return NV_ERR_INVALID_STATE;
313         }
314     }
315 
316     // Find the TSG, or create the TSG if we need to wrap it
317     status = clientGetResourceRefByType(pRsClient, hParent,
318                                         classId(KernelChannelGroupApi),
319                                         &pChanGrpRef);
320     if (status != NV_OK)
321     {
322         NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS tsgParams = { 0 };
323 
324         // Context share can only be used with a TSG channel
325         if (hKernelCtxShare != NV01_NULL_OBJECT)
326         {
327             NV_PRINTF(LEVEL_ERROR,
328                       "Non-TSG channels can't use context share\n");
329             status = NV_ERR_INVALID_ARGUMENT;
330             goto cleanup;
331         }
332 
333         tsgParams.hVASpace = pChannelGpfifoParams->hVASpace;
334         tsgParams.engineType = pChannelGpfifoParams->engineType;
335         // vGpu plugin context flag should only be set if context is plugin
336         if (gpuIsSriovEnabled(pGpu))
337         {
338             tsgParams.bIsCallingContextVgpuPlugin = FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_VGPU_PLUGIN_CONTEXT, _TRUE, pChannelGpfifoParams->flags);
339         }
340         //
341         // Internally allocate a TSG to wrap this channel. There is no point
342         // in mirroring this allocation in the host, as the channel is
343         // already mirrored.
344         //
345         status = pRmApi->AllocWithSecInfo(pRmApi,
346             hClient,
347             hParent,
348             &pChannelGpfifoParams->hPhysChannelGroup,
349             KEPLER_CHANNEL_GROUP_A,
350             NV_PTR_TO_NvP64(&tsgParams),
351             sizeof(tsgParams),
352             RMAPI_ALLOC_FLAGS_SKIP_RPC,
353             NvP64_NULL,
354             &pRmApi->defaultSecInfo);
355 
356         NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
357         bTsgAllocated = NV_TRUE;
358         hChanGrp = pChannelGpfifoParams->hPhysChannelGroup;
359 
360         status = clientGetResourceRefByType(pRsClient, hChanGrp,
361                                             classId(KernelChannelGroupApi),
362                                             &pChanGrpRef);
363         NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
364 
365         pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
366                                              KernelChannelGroupApi);
367         pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
368         pKernelChannelGroup->bAllocatedByRm = NV_TRUE;
369     }
370     else
371     {
372         hChanGrp = hParent;
373         pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource,
374                                              KernelChannelGroupApi);
375         if (pKernelChannelGroupApi == NULL ||
376             pKernelChannelGroupApi->pKernelChannelGroup == NULL)
377         {
378             NV_PRINTF(LEVEL_ERROR, "Invalid KernelChannelGroup* for channel 0x%x\n",
379                       pResourceRef->hResource);
380             status = NV_ERR_INVALID_POINTER;
381             NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
382         }
383         pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup;
384 
385         // TSG channel should specify a context share object, rather than vaspace directly
386         if (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT)
387         {
388             NV_PRINTF(LEVEL_ERROR,
389                       "TSG channels can't use an explicit vaspace\n");
390             status = NV_ERR_INVALID_ARGUMENT;
391             NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
392         }
393     }
394     pKernelChannel->pKernelChannelGroupApi = pKernelChannelGroupApi;
395 
396     NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE);
397     NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_STATE);
398 
399     //
400     // Reserve memory for channel instance block from PMA
401     // into a pool tied to channel's parent TSG.
402     // RM will later allocate memory for instance block from this pool.
403     //
404     pChannelBufPool = pKernelChannelGroup->pChannelBufPool;
405     if (pChannelBufPool != NULL)
406     {
407         NvBool bIsScrubSkipped;
408         NvBool bRequestScrubSkip = FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_SKIP_SCRUBBER, _TRUE, pChannelGpfifoParams->flags);
409 
410         if (bRequestScrubSkip)
411         {
412             if (!kchannelCheckIsKernel(pKernelChannel))
413             {
414                 status = NV_ERR_INVALID_ARGUMENT;
415                 NV_PRINTF(LEVEL_ERROR, "Only kernel priv clients can skip scrubber\n");
416                 goto cleanup;
417             }
418 
419             //
420             // If this is first channel in the TSG then setup ctx buf pool to skip scrubbing.
421             // For subsequent channels, setting should match with ctx buf pool's state.
422             //
423             if (pKernelChannelGroup->chanCount == 0)
424             {
425                 ctxBufPoolSetScrubSkip(pChannelBufPool, NV_TRUE);
426                 NV_PRINTF(LEVEL_INFO, "Skipping scrubber for all allocations on this context\n");
427             }
428         }
429 
430         bIsScrubSkipped = ctxBufPoolIsScrubSkipped(pChannelBufPool);
431         if (bIsScrubSkipped ^ bRequestScrubSkip)
432         {
433             status = NV_ERR_INVALID_ARGUMENT;
434             NV_PRINTF(LEVEL_ERROR, "Mismatch between channel and parent TSG's policy on skipping scrubber\n");
435             NV_PRINTF(LEVEL_ERROR, "scrubbing %s skipped for TSG and %s for channel\n", (bIsScrubSkipped ? "is" : "is not"),
436                 (bRequestScrubSkip ? "is" : "is not"));
437             goto cleanup;
438         }
439         NV_ASSERT_OK_OR_GOTO(status,
440                              kfifoGetInstMemInfo_HAL(pKernelFifo, &bufInfo.size, &bufInfo.align, NULL, NULL, NULL),
441                              cleanup);
442         bufInfo.attr = RM_ATTR_PAGE_SIZE_DEFAULT;
443         NV_ASSERT_OK_OR_GOTO(status, ctxBufPoolReserve(pGpu, pChannelBufPool, &bufInfo, 1), cleanup);
444     }
445     else
446     {
447         NV_PRINTF(LEVEL_INFO, "Not using ctx buf pool\n");
448     }
449 
450     //--------------------------------------------------------------------------
451     // we acquire the GPU lock below.
452     // From here down do not return early, use goto cleanup
453     //--------------------------------------------------------------------------
454 
455     NV_ASSERT_OK_OR_GOTO(status,
456         rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO),
457         cleanup);
458     bLockAcquired = NV_TRUE;
459 
460     //
461     // Initialize the notification indices used for different notifications
462     //
463     pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR]
464         = NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR;
465     pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN]
466         = NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN;
467 
468     // Bake channel group error handlers into the channel
469     pKernelChannel->hErrorContext = pChannelGpfifoParams->hObjectError;
470     pKernelChannel->hEccErrorContext = pChannelGpfifoParams->hObjectEccError;
471 
472     if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT)
473     {
474         pKernelChannel->hErrorContext = (
475             pKernelChannel->pKernelChannelGroupApi->hErrorContext);
476     }
477     if (pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT)
478     {
479         pKernelChannel->hEccErrorContext = (
480             pKernelChannel->pKernelChannelGroupApi->hEccErrorContext);
481     }
482 
483     if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT)
484     {
485         pKernelChannel->errorContextType = ERROR_NOTIFIER_TYPE_NONE;
486     }
487     else if (!RMCFG_FEATURE_PLATFORM_GSP)
488     {
489         NV_ASSERT_OK(kchannelGetNotifierInfo(pGpu, pDevice,
490             pKernelChannel->hErrorContext,
491             &pKernelChannel->pErrContextMemDesc,
492             &pKernelChannel->errorContextType,
493             &pKernelChannel->errorContextOffset));
494         NV_ASSERT(pKernelChannel->errorContextType !=
495                   ERROR_NOTIFIER_TYPE_NONE);
496     }
497     if (pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT)
498     {
499         pKernelChannel->eccErrorContextType = ERROR_NOTIFIER_TYPE_NONE;
500     }
501     else if (!RMCFG_FEATURE_PLATFORM_GSP)
502     {
503         NV_ASSERT_OK(kchannelGetNotifierInfo(pGpu, pDevice,
504             pKernelChannel->hEccErrorContext,
505             &pKernelChannel->pEccErrContextMemDesc,
506             &pKernelChannel->eccErrorContextType,
507             &pKernelChannel->eccErrorContextOffset));
508         NV_ASSERT(pKernelChannel->eccErrorContextType !=
509                   ERROR_NOTIFIER_TYPE_NONE);
510     }
511 
512     if (IS_GSP_CLIENT(pGpu) || bFullSriov)
513     {
514         if (pKernelChannel->hErrorContext != NV01_NULL_OBJECT)
515         {
516             pChannelGpfifoParams->errorNotifierMem.base = (
517                 memdescGetPhysAddr(pKernelChannel->pErrContextMemDesc,
518                                    AT_GPU, 0) +
519                 pKernelChannel->errorContextOffset);
520             pChannelGpfifoParams->errorNotifierMem.size = (
521                 pKernelChannel->pErrContextMemDesc->Size -
522                 pKernelChannel->errorContextOffset);
523             pChannelGpfifoParams->errorNotifierMem.addressSpace =
524                 memdescGetAddressSpace(pKernelChannel->pErrContextMemDesc);
525             pChannelGpfifoParams->errorNotifierMem.cacheAttrib =
526                 memdescGetCpuCacheAttrib(pKernelChannel->pErrContextMemDesc);
527 
528         }
529         if (pKernelChannel->hEccErrorContext != NV01_NULL_OBJECT)
530         {
531             pChannelGpfifoParams->eccErrorNotifierMem.base = (
532                 memdescGetPhysAddr(pKernelChannel->pEccErrContextMemDesc,
533                                    AT_GPU, 0) +
534                 pKernelChannel->eccErrorContextOffset);
535             pChannelGpfifoParams->eccErrorNotifierMem.size = (
536                 pKernelChannel->pEccErrContextMemDesc->Size -
537                 pKernelChannel->eccErrorContextOffset);
538             pChannelGpfifoParams->eccErrorNotifierMem.addressSpace =
539                 memdescGetAddressSpace(pKernelChannel->pEccErrContextMemDesc);
540             pChannelGpfifoParams->eccErrorNotifierMem.cacheAttrib =
541                 memdescGetCpuCacheAttrib(pKernelChannel->pEccErrContextMemDesc);
542         }
543 
544         pChannelGpfifoParams->internalFlags = FLD_SET_DRF_NUM(
545             _KERNELCHANNEL_ALLOC, _INTERNALFLAGS, _ERROR_NOTIFIER_TYPE,
546             pKernelChannel->errorContextType,
547             pChannelGpfifoParams->internalFlags);
548         pChannelGpfifoParams->internalFlags = FLD_SET_DRF_NUM(
549             _KERNELCHANNEL_ALLOC, _INTERNALFLAGS, _ECC_ERROR_NOTIFIER_TYPE,
550             pKernelChannel->eccErrorContextType,
551             pChannelGpfifoParams->internalFlags);
552     }
553 
554     //
555     // The error context types should be set on all RM configurations
556     // (GSP/baremetal/CPU-GSP client)
557     //
558     NV_ASSERT(pKernelChannel->errorContextType != ERROR_NOTIFIER_TYPE_UNKNOWN);
559     NV_ASSERT(pKernelChannel->eccErrorContextType !=
560               ERROR_NOTIFIER_TYPE_UNKNOWN);
561 
562 
563     if ((pKernelChannelGroup->chanCount != 0) &&
564         (( pKernelChannelGroup->bLegacyMode && (hKernelCtxShare != NV01_NULL_OBJECT)) ||
565          (!pKernelChannelGroup->bLegacyMode && (hKernelCtxShare == NV01_NULL_OBJECT))))
566     {
567         //
568         // Check if this channnel allocation specifying (or not) a user
569         // allocated context share matches with previous channel allocations (if
570         // any) in this group specifiying (or not) a user allocated context
571         // share.
572         //
573         // A channel group cannot have a mix of channels with some of them
574         // specifying a user allocated context share and some having RM
575         // allocated context share.
576         //
577         NV_PRINTF(LEVEL_NOTICE,
578             "All channels in a channel group must specify a CONTEXT_SHARE if any one of them specifies it\n");
579         status = NV_ERR_INVALID_ARGUMENT;
580         goto cleanup;
581     }
582 
583     // Get KernelCtxShare (supplied or legacy)
584     if (hKernelCtxShare != NV01_NULL_OBJECT)
585     {
586         // Get object pointers from supplied hKernelCtxShare.
587         NV_ASSERT_OK_OR_GOTO(status,
588             clientGetResourceRefByType(pRsClient,
589                                        hKernelCtxShare,
590                                        classId(KernelCtxShareApi),
591                                        &pKernelCtxShareRef),
592             cleanup);
593 
594         //
595         // If hKernelCtxShare is nonzero, the ChannelGroup is not internal
596         // either, so it should have the same parent as hParent.
597         //
598         NV_ASSERT_TRUE_OR_GOTO(status,
599             pKernelCtxShareRef->pParentRef != NULL &&
600                 pKernelCtxShareRef->pParentRef->hResource == hParent,
601             NV_ERR_INVALID_OBJECT_PARENT,
602             cleanup);
603     }
604     else
605     {
606         NvU32 subctxFlag;
607         NvHandle hLegacyKernelCtxShare;
608 
609         if (!pKernelChannelGroup->bLegacyMode)
610         {
611             //
612             // Set this ChannelGroup to legacy mode and get the KernelCtxShare
613             // from it.
614             //
615             NV_ASSERT_OK_OR_GOTO(status,
616                 kchangrpapiSetLegacyMode(pKernelChannelGroupApi,
617                                          pGpu, pKernelFifo, hClient),
618                 cleanup);
619         }
620 
621         subctxFlag = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_THREAD, flags);
622         hLegacyKernelCtxShare = (subctxFlag ==
623                            NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC) ?
624                               pKernelChannelGroupApi->hLegacykCtxShareSync :
625                               pKernelChannelGroupApi->hLegacykCtxShareAsync;
626 
627         NV_ASSERT_OK_OR_GOTO(status,
628             clientGetResourceRefByType(pRsClient,
629                                        hLegacyKernelCtxShare,
630                                        classId(KernelCtxShareApi),
631                                        &pKernelCtxShareRef),
632             cleanup);
633     }
634 
635     pKernelChannel->pKernelCtxShareApi = dynamicCast(
636         pKernelCtxShareRef->pResource,
637         KernelCtxShareApi);
638     NV_ASSERT_TRUE_OR_GOTO(status,
639                            pKernelChannel->pKernelCtxShareApi != NULL,
640                            NV_ERR_INVALID_OBJECT,
641                            cleanup);
642     NV_ASSERT_TRUE_OR_GOTO(status,
643                            pKernelChannel->pKernelCtxShareApi->pShareData !=
644                                NULL,
645                            NV_ERR_INVALID_OBJECT,
646                            cleanup);
647     pKernelChannel->pVAS = pKernelChannel->pKernelCtxShareApi->pShareData->pVAS;
648     NV_ASSERT_TRUE_OR_GOTO(status,
649                            pKernelChannel->pVAS != NULL,
650                            NV_ERR_INVALID_OBJECT,
651                            cleanup);
652 
653     if (kfifoIsPerRunlistChramSupportedInHw(pKernelFifo))
654     {
655         // TSG should always have a valid engine Id.
656         NV_ASSERT_TRUE_OR_GOTO(status,
657             RM_ENGINE_TYPE_IS_VALID(pKernelChannelGroup->engineType),
658             NV_ERR_INVALID_STATE,
659             cleanup);
660 
661         if (NV2080_ENGINE_TYPE_IS_VALID(pChannelGpfifoParams->engineType))
662         {
663             globalRmEngineType = gpuGetRmEngineType(pChannelGpfifoParams->engineType);
664             // Convert it to global engine id if MIG is enabled
665             if (bMIGInUse)
666             {
667                 MIG_INSTANCE_REF ref;
668 
669                 NV_CHECK_OK_OR_GOTO(
670                     status,
671                     LEVEL_ERROR,
672                     kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
673                                                     pDevice, &ref),
674                     cleanup);
675 
676                 NV_CHECK_OK_OR_GOTO(
677                     status,
678                     LEVEL_ERROR,
679                     kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref,
680                                                       globalRmEngineType,
681                                                       &globalRmEngineType),
682                     cleanup);
683             }
684 
685             // Throw an error if TSG engine Id does NOT match with channel engine Id
686             if (globalRmEngineType != pKernelChannelGroup->engineType)
687             {
688                 NV_PRINTF(LEVEL_ERROR,
689                     "Engine type of channel = 0x%x (0x%x) not compatible with engine type of TSG = 0x%x (0x%x)\n",
690                     gpuGetNv2080EngineType(pChannelGpfifoParams->engineType),
691                     pChannelGpfifoParams->engineType,
692                     gpuGetNv2080EngineType(pKernelChannelGroup->engineType),
693                     pKernelChannelGroup->engineType);
694 
695                 status = NV_ERR_INVALID_ARGUMENT;
696                 goto cleanup;
697             }
698         }
699 
700         // Assign the engine type from the parent TSG
701         pKernelChannel->engineType = pKernelChannelGroup->engineType;
702     }
703 
704     // Determine initial runlist ID (based on engine type if provided or inherited from TSG)
705     pKernelChannel->runlistId = kfifoGetDefaultRunlist_HAL(pGpu, pKernelFifo, pKernelChannel->engineType);
706 
707     pKernelChannel->bCCSecureChannel = FLD_TEST_DRF(OS04, _FLAGS, _CC_SECURE, _TRUE, flags);
708     pKernelChannel->bUseScrubKey = FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_SKIP_SCRUBBER, _TRUE, pChannelGpfifoParams->flags);
709     if (pKernelChannel->bCCSecureChannel)
710     {
711         ConfidentialCompute* pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
712 
713         // return early if gpu is not ready to accept work
714         if (pConfCompute && kchannelCheckIsUserMode(pKernelChannel)
715             && !confComputeAcceptClientRequest(pGpu, pConfCompute))
716         {
717             return NV_ERR_NOT_READY;
718         }
719 
720         if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
721         {
722             KEY_ROTATION_STATUS state;
723             NvU32 h2dKey;
724             NV_ASSERT_OK_OR_GOTO(status,
725                                  confComputeGetKeyPairByChannel(pGpu, pConfCompute, pKernelChannel, &h2dKey, NULL),
726                                  cleanup);
727             NV_ASSERT_OK_OR_GOTO(status,
728                                  confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &state),
729                                  cleanup);
730             if (state != KEY_ROTATION_STATUS_IDLE)
731             {
732                 status = NV_ERR_KEY_ROTATION_IN_PROGRESS;
733                 goto cleanup;
734             }
735         }
736         status = kchannelRetrieveKmb_HAL(pGpu, pKernelChannel, ROTATE_IV_ALL_VALID,
737                                          NV_TRUE, &pKernelChannel->clientKmb);
738         NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
739 
740         portMemCopy(pChannelGpfifoParams->encryptIv,
741                     sizeof(pChannelGpfifoParams->encryptIv),
742                     pKernelChannel->clientKmb.encryptBundle.iv,
743                     sizeof(pKernelChannel->clientKmb.encryptBundle.iv));
744 
745         portMemCopy(pChannelGpfifoParams->decryptIv,
746                     sizeof(pChannelGpfifoParams->decryptIv),
747                     pKernelChannel->clientKmb.decryptBundle.iv,
748                     sizeof(pKernelChannel->clientKmb.decryptBundle.iv));
749 
750         portMemCopy(pChannelGpfifoParams->hmacNonce,
751                     sizeof(pChannelGpfifoParams->hmacNonce),
752                     pKernelChannel->clientKmb.hmacBundle.nonce,
753                     sizeof(pKernelChannel->clientKmb.hmacBundle.nonce));
754 
755     }
756 
757     // Set TLS state and BAR0 window if we are working with Gr
758     if (bMIGInUse && RM_ENGINE_TYPE_IS_GR(pKernelChannel->engineType))
759     {
760         NV_ASSERT_OK(kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
761                                                      pDevice, &pKernelChannel->partitionRef));
762     }
763 
764     // Allocate the ChId (except legacy VGPU which allocates ChID on the host)
765     if (!IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
766     {
767         status = kchannelAllocHwID_HAL(pGpu, pKernelChannel, hClient,
768                                        flags, verifFlags2, chID);
769 
770         if (status != NV_OK)
771         {
772             NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id 0x%x for hClient 0x%x hKernelChannel 0x%x \n",
773                                    chID, hClient, pResourceRef->hResource);
774             DBG_BREAKPOINT();
775             goto cleanup;
776 
777         }
778 
779         chID = pKernelChannel->ChID;
780         bChidAllocated = NV_TRUE;
781     }
782 
783     //
784     // RPC alloc the channel in legacy VGPU / Heavy SRIOV so that instmem details can be gotten from it
785     //
786     if (IS_VIRTUAL(pGpu) && (!bFullSriov))
787     {
788         NV_ASSERT_OK_OR_GOTO(status,
789                              _kchannelSendChannelAllocRpc(pKernelChannel,
790                                                           pChannelGpfifoParams,
791                                                           pKernelChannelGroup,
792                                                           bFullSriov),
793                              cleanup);
794         bRpcAllocated = NV_TRUE;
795     }
796 
797     // Legacy VGPU: allocate chid that the host provided
798     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
799     {
800         chID = pKernelChannel->ChID;
801 
802         status = kchannelAllocHwID_HAL(pGpu, pKernelChannel, hClient,
803                                        flags, verifFlags2, chID);
804 
805         if (status != NV_OK)
806         {
807             NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id 0x%x for hClient 0x%x hKernelChannel 0x%x \n",
808                       chID, hClient, pResourceRef->hResource);
809             chID = ~0;
810             DBG_BREAKPOINT();
811             goto cleanup;
812         }
813 
814         bChidAllocated = NV_TRUE;
815     }
816 
817     //
818     // Do instmem setup here
819     // (Requires the channel to be created on the host if legacy VGPU / Heavy SRIOV.
820     // Does not require a Channel object.)
821     //
822     NV_ASSERT_OK_OR_GOTO(status,
823         _kchannelAllocOrDescribeInstMem(pKernelChannel, pChannelGpfifoParams),
824         cleanup);
825 
826     // Join the channel group here
827     NV_ASSERT_OK_OR_GOTO(status,
828         kchangrpAddChannel(pGpu, pKernelChannelGroup, pKernelChannel),
829         cleanup);
830     bAddedToGroup = NV_TRUE;
831 
832     // Assign to the same runlistId as the KernelChannelGroup if it's already determined
833     if (pKernelChannelGroup->bRunlistAssigned)
834     {
835         SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
836         {
837             NV_ASSERT_OK_OR_ELSE(status,
838                 kfifoRunlistSetId_HAL(pGpu,
839                                       GPU_GET_KERNEL_FIFO(pGpu),
840                                       pKernelChannel,
841                                       pKernelChannelGroup->runlistId),
842                 SLI_LOOP_GOTO(cleanup));
843         }
844         SLI_LOOP_END
845      }
846 
847     // Allocate the physical channel
848     NV_ASSERT_OK_OR_GOTO(status,
849         kchannelAllocChannel_HAL(pKernelChannel, pChannelGpfifoParams),
850         cleanup);
851 
852     // Set up pNotifyActions
853     NV_ASSERT_OK_OR_GOTO(status,
854         _kchannelSetupNotifyActions(pKernelChannel, pResourceRef->externalClassId),
855         cleanup);
856     bNotifyActionsSetup = NV_TRUE;
857 
858     // Initialize the userd length
859     if (!pKernelChannel->bClientAllocatedUserD)
860     {
861         NvU64 temp_offset;
862 
863         kchannelGetUserdInfo_HAL(pGpu,
864                                  pKernelChannel,
865                                  NULL,
866                                  &temp_offset,
867                                  &pKernelChannel->userdLength);
868     }
869     else
870     {
871         kfifoGetUserdSizeAlign_HAL(pKernelFifo, (NvU32*)&pKernelChannel->userdLength, NULL);
872     }
873 
874     // Set GPU accounting
875     if (RMCFG_MODULE_GPUACCT &&
876         pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON))
877     {
878         GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(SYS_GET_INSTANCE());
879 
880         gpuacctSetProcType(pGpuAcct,
881                            pGpu->gpuInstance,
882                            pRmClient->ProcID,
883                            pRmClient->SubProcessID,
884                            NV_GPUACCT_PROC_TYPE_GPU);
885     }
886 
887     //
888     // RPC to allocate the channel on GSPFW/host.
889     // (Requires a Channel object but only for hPhysChannel.)
890     //
891     if (IS_GSP_CLIENT(pGpu) || bFullSriov)
892     {
893         NV_ASSERT_OK_OR_GOTO(status,
894                              _kchannelSendChannelAllocRpc(pKernelChannel,
895                                                           pChannelGpfifoParams,
896                                                           pKernelChannelGroup,
897                                                           bFullSriov),
898                              cleanup);
899         bRpcAllocated = NV_TRUE;
900     }
901 
902     if (kfifoIsPerRunlistChramEnabled(pKernelFifo) ||
903         (gpuIsCCorApmFeatureEnabled(pGpu) || bMIGInUse))
904     {
905         SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
906         {
907             KernelFifo *pTempKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
908             //
909             // If we have a separate channel RAM for each runlist then we need to set
910             // runlistId as we already picked a chID from channel RAM based on this runlistId.
911             // This will also ensure runlistId is not overridden later to a different value
912             //
913             NV_ASSERT_OK_OR_GOTO(status,
914                 kfifoRunlistSetId_HAL(pGpu, pTempKernelFifo, pKernelChannel, pKernelChannel->runlistId),
915                 cleanup);
916         }
917         SLI_LOOP_END;
918     }
919 
920     //
921     // If we alloced this group, we want to free KernelChannel first,
922     // so we should set KernelChannel as its dependent.
923     //
924     if (bTsgAllocated)
925     {
926         NV_ASSERT_OK_OR_GOTO(status, refAddDependant(pChanGrpRef, pResourceRef), cleanup);
927     }
928 
929     // We depend on VASpace if it was provided
930     if (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT)
931     {
932         NV_ASSERT_OK_OR_GOTO(status,
933             clientGetResourceRef(pRsClient,
934                                  pChannelGpfifoParams->hVASpace,
935                                  &pVASpaceRef),
936             cleanup);
937         NV_ASSERT_TRUE_OR_GOTO(status,
938                                pVASpaceRef != NULL,
939                                NV_ERR_INVALID_OBJECT,
940                                cleanup);
941         NV_ASSERT_OK_OR_GOTO(status,
942                              refAddDependant(pVASpaceRef, pResourceRef),
943                              cleanup);
944     }
945 
946     //
947     // If KernelCtxShare was provided, we depend on it (and if we created it then we
948     // also want KernelChannel to be freed first.)
949     //
950     if (pKernelChannel->pKernelCtxShareApi != NULL)
951     {
952         NV_ASSERT_OK_OR_GOTO(
953             status,
954             refAddDependant(RES_GET_REF(pKernelChannel->pKernelCtxShareApi), pResourceRef),
955             cleanup);
956     }
957 
958     pKernelChannel->hKernelGraphicsContext = pKernelChannelGroupApi->hKernelGraphicsContext;
959     if (pKernelChannel->hKernelGraphicsContext != NV01_NULL_OBJECT)
960     {
961         NV_ASSERT_OK_OR_GOTO(status,
962             kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext),
963             cleanup);
964 
965         NV_ASSERT_OK_OR_GOTO(status,
966             refAddDependant(RES_GET_REF(pKernelGraphicsContext), pResourceRef),
967             cleanup);
968     }
969 
970     if (pChannelGpfifoParams->hObjectError != 0)
971     {
972         NV_ASSERT_OK_OR_GOTO(
973             status,
974             _kchannelNotifyOfChid(pGpu, pKernelChannel, pRsClient),
975             cleanup);
976     }
977 
978     // Cache the hVASpace for this channel in the KernelChannel object
979     pKernelChannel->hVASpace = pKernelChannel->pKernelCtxShareApi->hVASpace;
980 
981     ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
982     if ((pConfCompute != NULL) &&
983         (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_CC_FEATURE_ENABLED)) &&
984         (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED)) &&
985         (pKernelChannel->bCCSecureChannel))
986     {
987         // Create persistent mapping to key rotation notifier
988         NV_ASSERT_OK_OR_GOTO(
989             status,
990             kchannelSetKeyRotationNotifier_HAL(pGpu, pKernelChannel, NV_TRUE),
991             cleanup);
992     }
993 
994 cleanup:
995     if (bLockAcquired)
996         rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
997 
998     // These fields are only needed internally; clear them here
999     pChannelGpfifoParams->hPhysChannelGroup = 0;
1000     pChannelGpfifoParams->internalFlags = 0;
1001     portMemSet(&pChannelGpfifoParams->errorNotifierMem, 0,
1002                sizeof pChannelGpfifoParams->errorNotifierMem);
1003     portMemSet(&pChannelGpfifoParams->eccErrorNotifierMem, 0,
1004                sizeof pChannelGpfifoParams->eccErrorNotifierMem);
1005     pChannelGpfifoParams->ProcessID = 0;
1006     pChannelGpfifoParams->SubProcessID = 0;
1007     portMemSet(pChannelGpfifoParams->encryptIv, 0, sizeof(pChannelGpfifoParams->encryptIv));
1008     portMemSet(pChannelGpfifoParams->decryptIv, 0, sizeof(pChannelGpfifoParams->decryptIv));
1009     portMemSet(pChannelGpfifoParams->hmacNonce, 0, sizeof(pChannelGpfifoParams->hmacNonce));
1010 
1011     // Free the allocated resources if there was an error
1012     if (status != NV_OK)
1013     {
1014         if (bNotifyActionsSetup)
1015         {
1016             _kchannelCleanupNotifyActions(pKernelChannel);
1017         }
1018 
1019         // Remove any dependencies we may have added; we don't want our destructor called when freeing anything below
1020         if (pKernelGraphicsContext != NULL)
1021         {
1022             refRemoveDependant(RES_GET_REF(pKernelGraphicsContext), pResourceRef);
1023         }
1024         if (pKernelChannel->pKernelCtxShareApi != NULL)
1025         {
1026             refRemoveDependant(RES_GET_REF(pKernelChannel->pKernelCtxShareApi), pResourceRef);
1027         }
1028         if (pVASpaceRef != NULL)
1029         {
1030             refRemoveDependant(pVASpaceRef, pResourceRef);
1031         }
1032         if (bTsgAllocated)
1033         {
1034             refRemoveDependant(pChanGrpRef, pResourceRef);
1035         }
1036 
1037         if (bAddedToGroup)
1038         {
1039             kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel);
1040         }
1041 
1042         if (RMCFG_FEATURE_PLATFORM_GSP)
1043         {
1044             // Free memdescs created during construct on GSP path.
1045             memdescFree(pKernelChannel->pErrContextMemDesc);
1046             memdescDestroy(pKernelChannel->pErrContextMemDesc);
1047             memdescFree(pKernelChannel->pEccErrContextMemDesc);
1048             memdescDestroy(pKernelChannel->pEccErrContextMemDesc);
1049         }
1050         pKernelChannel->pErrContextMemDesc = NULL;
1051         pKernelChannel->pEccErrContextMemDesc = NULL;
1052 
1053         if (bRpcAllocated)
1054         {
1055             NV_RM_RPC_FREE_ON_ERROR(pGpu, hClient, hParent, RES_GET_HANDLE(pKernelChannel));
1056         }
1057 
1058         _kchannelFreeHalData(pGpu, pKernelChannel);
1059 
1060         if (pChannelBufPool != NULL)
1061         {
1062             ctxBufPoolRelease(pChannelBufPool);
1063         }
1064 
1065         if (bTsgAllocated)
1066         {
1067             pRmApi->Free(pRmApi, hClient, hChanGrp);
1068         }
1069 
1070         if (bChidAllocated)
1071         {
1072             kchannelFreeHwID_HAL(pGpu, pKernelChannel);
1073         }
1074     }
1075 
1076     return status;
1077 }
1078 
1079 void
kchannelDestruct_IMPL(KernelChannel * pKernelChannel)1080 kchannelDestruct_IMPL
1081 (
1082     KernelChannel *pKernelChannel
1083 )
1084 {
1085     CALL_CONTEXT                *pCallContext;
1086     RS_RES_FREE_PARAMS_INTERNAL *pParams;
1087     NvHandle                     hClient;
1088     RM_API                      *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
1089     OBJGPU                      *pGpu   = GPU_RES_GET_GPU(pKernelChannel);
1090     NV_STATUS                    status = NV_OK;
1091     KernelChannelGroup          *pKernelChannelGroup = NULL;
1092 
1093     NV_ASSERT(pKernelChannel->pKernelChannelGroupApi != NULL);
1094     pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup;
1095     NV_ASSERT(pKernelChannelGroup != NULL);
1096 
1097     resGetFreeParams(staticCast(pKernelChannel, RsResource), &pCallContext, &pParams);
1098     hClient = pCallContext->pClient->hClient;
1099 
1100     ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
1101     NvBool bCheckKeyRotation = NV_FALSE;
1102     NvU32 h2dKey, d2hKey;
1103     if ((pConfCompute != NULL) &&
1104         (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_CC_FEATURE_ENABLED)) &&
1105         (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED)) &&
1106         (pKernelChannel->bCCSecureChannel))
1107     {
1108         NV_ASSERT_OK(confComputeUpdateFreedChannelStats(pGpu, pConfCompute, pKernelChannel));
1109 
1110         // check if we need to trigger key rotation after freeing this channel
1111         KEY_ROTATION_STATUS state;
1112         NV_ASSERT_OK(confComputeGetKeyPairByChannel(pGpu, pConfCompute, pKernelChannel, &h2dKey, &d2hKey));
1113         NV_ASSERT_OK(confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &state));
1114         if ((state == KEY_ROTATION_STATUS_PENDING) ||
1115             (state == KEY_ROTATION_STATUS_PENDING_TIMER_SUSPENDED))
1116         {
1117             bCheckKeyRotation = NV_TRUE;
1118         }
1119 
1120         NV_ASSERT_OK(kchannelSetEncryptionStatsBuffer_HAL(pGpu, pKernelChannel, NULL, NV_FALSE));
1121         NV_ASSERT_OK(kchannelSetKeyRotationNotifier_HAL(pGpu, pKernelChannel, NV_FALSE));
1122     }
1123 
1124     if (RMCFG_FEATURE_PLATFORM_GSP)
1125     {
1126         // Free memdescs created during construct on GSP path.
1127         memdescFree(pKernelChannel->pErrContextMemDesc);
1128         memdescDestroy(pKernelChannel->pErrContextMemDesc);
1129         memdescFree(pKernelChannel->pEccErrContextMemDesc);
1130         memdescDestroy(pKernelChannel->pEccErrContextMemDesc);
1131     }
1132     pKernelChannel->pErrContextMemDesc = NULL;
1133     pKernelChannel->pEccErrContextMemDesc = NULL;
1134 
1135     // GSP and vGPU support
1136     if ((IS_GSP_CLIENT(pGpu) || IS_VIRTUAL(pGpu)))
1137     {
1138         //
1139         // GSP:
1140         //
1141         // Method buffer is allocated by CPU-RM during TSG construct
1142         // but mapped to invisible BAR2 in GSP during channel construct
1143         // During Free, first the BAR2 mapping must be unmapped in GSP
1144         // and then freeing of method buffer should be done on CPU.
1145         // This RPC call is especially required for the internal channel case
1146         // where channelDestruct calls free for its TSG
1147         //
1148         NV_RM_RPC_FREE(pGpu,
1149                        hClient,
1150                        RES_GET_PARENT_HANDLE(pKernelChannel),
1151                        RES_GET_HANDLE(pKernelChannel),
1152                        status);
1153     }
1154 
1155     {
1156         KernelGraphicsContext *pKernelGraphicsContext;
1157 
1158         // Perform GR ctx cleanup tasks on channel destruction
1159         if ((kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext) == NV_OK) &&
1160             kgrctxIsValid(pGpu, pKernelGraphicsContext, pKernelChannel))
1161         {
1162             shrkgrctxDetach(pGpu, kgrctxGetShared(pGpu, pKernelGraphicsContext), pKernelGraphicsContext, pKernelChannel);
1163         }
1164     }
1165 
1166     _kchannelCleanupNotifyActions(pKernelChannel);
1167 
1168     _kchannelFreeHalData(pGpu, pKernelChannel);
1169 
1170     NV_ASSERT(pKernelChannel->pKernelChannelGroupApi != NULL);
1171 
1172     pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup;
1173 
1174     NV_ASSERT(pKernelChannelGroup != NULL);
1175 
1176     // remove channel from the group
1177     kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel);
1178 
1179     // Free the Ctx Buf pool
1180     if (pKernelChannelGroup->pChannelBufPool != NULL)
1181     {
1182         ctxBufPoolRelease(pKernelChannelGroup->pChannelBufPool);
1183     }
1184 
1185     // Free the channel group, if we alloced it
1186     if (pKernelChannelGroup->bAllocatedByRm)
1187     {
1188         pRmApi->Free(pRmApi, hClient,
1189                      RES_GET_HANDLE(pKernelChannel->pKernelChannelGroupApi));
1190         pKernelChannelGroup = NULL;
1191         pKernelChannel->pKernelChannelGroupApi = NULL;
1192     }
1193 
1194     kchannelFreeHwID_HAL(pGpu, pKernelChannel);
1195     kchannelFreeMmuExceptionInfo(pKernelChannel);
1196 
1197     NV_ASSERT(pKernelChannel->refCount == 1);
1198 
1199     if (bCheckKeyRotation)
1200     {
1201         //
1202         // If key rotation is pending on this key because the channel being freed hasn't reported idle yet then,
1203         // we wait until this channel's SW state is cleared out before triggerring key rotation
1204         // so that the key rotation code doesn't try to notify this channel or check its idle state.
1205         //
1206         NV_ASSERT_OK(confComputeCheckAndPerformKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey));
1207     }
1208 }
1209 
1210 NV_STATUS
kchannelMap_IMPL(KernelChannel * pKernelChannel,CALL_CONTEXT * pCallContext,RS_CPU_MAP_PARAMS * pParams,RsCpuMapping * pCpuMapping)1211 kchannelMap_IMPL
1212 (
1213     KernelChannel     *pKernelChannel,
1214     CALL_CONTEXT      *pCallContext,
1215     RS_CPU_MAP_PARAMS *pParams,
1216     RsCpuMapping      *pCpuMapping
1217 )
1218 {
1219     OBJGPU *pGpu;
1220     NV_STATUS rmStatus;
1221     RsClient *pRsClient = pCallContext->pClient;
1222     RmClient *pRmClient = dynamicCast(pRsClient, RmClient);
1223     GpuResource *pGpuResource;
1224 
1225     NV_ASSERT_OR_RETURN(!pKernelChannel->bClientAllocatedUserD, NV_ERR_INVALID_REQUEST);
1226 
1227     rmStatus = gpuresGetByDeviceOrSubdeviceHandle(pRsClient,
1228                                                   pCpuMapping->pContextRef->hResource,
1229                                                   &pGpuResource);
1230     if (rmStatus != NV_OK)
1231         return rmStatus;
1232 
1233     pGpu = GPU_RES_GET_GPU(pGpuResource);
1234     GPU_RES_SET_THREAD_BC_STATE(pGpuResource);
1235 
1236     // If the flags are fifo default then offset/length passed in
1237     if (DRF_VAL(OS33, _FLAGS, _FIFO_MAPPING, pCpuMapping->flags) == NVOS33_FLAGS_FIFO_MAPPING_DEFAULT)
1238     {
1239         // Validate the offset and limit passed in.
1240         if (pCpuMapping->offset >= pKernelChannel->userdLength)
1241             return NV_ERR_INVALID_BASE;
1242         if (pCpuMapping->length == 0)
1243             return NV_ERR_INVALID_LIMIT;
1244         if (pCpuMapping->offset + pCpuMapping->length > pKernelChannel->userdLength)
1245             return NV_ERR_INVALID_LIMIT;
1246     }
1247     else
1248     {
1249         pCpuMapping->offset = 0x0;
1250         pCpuMapping->length = pKernelChannel->userdLength;
1251     }
1252 
1253     rmStatus = kchannelMapUserD(pGpu, pKernelChannel,
1254                                 rmclientGetCachedPrivilege(pRmClient),
1255                                 pCpuMapping->offset,
1256                                 pCpuMapping->pPrivate->protect,
1257                                 &pCpuMapping->pLinearAddress,
1258                                 &(pCpuMapping->pPrivate->pPriv));
1259 
1260     if (rmStatus != NV_OK)
1261         return rmStatus;
1262 
1263     // Save off the mapping
1264     _kchannelUpdateFifoMapping(pKernelChannel,
1265                                pGpu,
1266                                (pRsClient->type == CLIENT_TYPE_KERNEL),
1267                                pCpuMapping->pLinearAddress,
1268                                pCpuMapping->pPrivate->pPriv,
1269                                pCpuMapping->length,
1270                                pCpuMapping->flags,
1271                                pCpuMapping->pContextRef->hResource,
1272                                pCpuMapping);
1273 
1274     return NV_OK;
1275 }
1276 
1277 NV_STATUS
kchannelUnmap_IMPL(KernelChannel * pKernelChannel,CALL_CONTEXT * pCallContext,RsCpuMapping * pCpuMapping)1278 kchannelUnmap_IMPL
1279 (
1280     KernelChannel *pKernelChannel,
1281     CALL_CONTEXT  *pCallContext,
1282     RsCpuMapping  *pCpuMapping
1283 )
1284 {
1285     OBJGPU   *pGpu;
1286     RsClient *pRsClient = pCallContext->pClient;
1287     RmClient *pRmClient = dynamicCast(pRsClient, RmClient);
1288 
1289     if (pKernelChannel->bClientAllocatedUserD)
1290     {
1291         DBG_BREAKPOINT();
1292         return NV_ERR_INVALID_REQUEST;
1293     }
1294 
1295     pGpu = pCpuMapping->pPrivate->pGpu;
1296 
1297     kchannelUnmapUserD(pGpu,
1298                        pKernelChannel,
1299                        rmclientGetCachedPrivilege(pRmClient),
1300                        &pCpuMapping->pLinearAddress,
1301                        &pCpuMapping->pPrivate->pPriv);
1302 
1303     return NV_OK;
1304 }
1305 
1306 NV_STATUS
kchannelGetMapAddrSpace_IMPL(KernelChannel * pKernelChannel,CALL_CONTEXT * pCallContext,NvU32 mapFlags,NV_ADDRESS_SPACE * pAddrSpace)1307 kchannelGetMapAddrSpace_IMPL
1308 (
1309     KernelChannel    *pKernelChannel,
1310     CALL_CONTEXT     *pCallContext,
1311     NvU32             mapFlags,
1312     NV_ADDRESS_SPACE *pAddrSpace
1313 )
1314 {
1315     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1316     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1317     NvU32 userdAperture;
1318     NvU32 userdAttribute;
1319 
1320     NV_ASSERT_OK_OR_RETURN(kfifoGetUserdLocation_HAL(pKernelFifo,
1321                                                      &userdAperture,
1322                                                      &userdAttribute));
1323     if (pAddrSpace)
1324         *pAddrSpace = userdAperture;
1325 
1326     return NV_OK;
1327 }
1328 
1329 NV_STATUS
kchannelGetMemInterMapParams_IMPL(KernelChannel * pKernelChannel,RMRES_MEM_INTER_MAP_PARAMS * pParams)1330 kchannelGetMemInterMapParams_IMPL
1331 (
1332     KernelChannel              *pKernelChannel,
1333     RMRES_MEM_INTER_MAP_PARAMS *pParams
1334 )
1335 {
1336     OBJGPU            *pGpu = pParams->pGpu;
1337     KernelFifo        *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1338     MEMORY_DESCRIPTOR *pSrcMemDesc = NULL;
1339     NV_STATUS          status;
1340 
1341     if (pParams->bSubdeviceHandleProvided)
1342     {
1343         NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings of USERD not supported.\n");
1344         return NV_ERR_NOT_SUPPORTED;
1345     }
1346 
1347     if (!kfifoIsUserdMapDmaSupported(pKernelFifo))
1348         return NV_ERR_INVALID_OBJECT_HANDLE;
1349 
1350     status = _kchannelGetUserMemDesc(pGpu, pKernelChannel, &pSrcMemDesc);
1351     if (status != NV_OK)
1352         return status;
1353 
1354     pParams->pSrcMemDesc = pSrcMemDesc;
1355     pParams->pSrcGpu = pSrcMemDesc->pGpu;
1356 
1357     return NV_OK;
1358 }
1359 
1360 NV_STATUS
kchannelCheckMemInterUnmap_IMPL(KernelChannel * pKernelChannel,NvBool bSubdeviceHandleProvided)1361 kchannelCheckMemInterUnmap_IMPL
1362 (
1363     KernelChannel *pKernelChannel,
1364     NvBool         bSubdeviceHandleProvided
1365 )
1366 {
1367     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1368     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1369 
1370     if (bSubdeviceHandleProvided)
1371     {
1372         NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings of channels not supported.\n");
1373         return NV_ERR_NOT_SUPPORTED;
1374     }
1375 
1376 
1377     if (!kfifoIsUserdMapDmaSupported(pKernelFifo))
1378         return NV_ERR_INVALID_OBJECT_HANDLE;
1379 
1380     return NV_OK;
1381 }
1382 
1383 /**
1384  * @brief Creates an iterator to iterate all channels in a given scope.
1385  *
1386  * Iterates over all channels under a given scope.  For a device it will loop
1387  * through all channels that are descendants of the device (including children
1388  * of channel groups).  For a channel group it will only iterate over the
1389  * channels within that group.  Ordering is ensured for channel group.
1390  * All channels within a channel group will be iterated together before moving to
1391  * another channel group or channel.
1392  *
1393  * @param[in]  pClient
1394  * @param[in]  pScopeRef The resource that defines the scope of iteration
1395  */
1396 RS_ORDERED_ITERATOR
kchannelGetIter(RsClient * pClient,RsResourceRef * pScopeRef)1397 kchannelGetIter
1398 (
1399     RsClient      *pClient,
1400     RsResourceRef *pScopeRef
1401 )
1402 {
1403     return clientRefOrderedIter(pClient, pScopeRef, classId(KernelChannel), NV_TRUE);
1404 }
1405 
1406 /**
1407  * @brief Given a client, parent, and KernelChannel handle retrieves the
1408  * KernelChannel object
1409  *
1410  * @param[in]  hClient
1411  * @param[in]  hParent              Device or Channel Group parent
1412  * @param[in]  hKernelChannel
1413  * @param[out] ppKernelChannel      Valid iff NV_OK is returned.
1414  *
1415  * @return  NV_OK if successful, appropriate error otherwise
1416  */
1417 NV_STATUS
CliGetKernelChannelWithDevice(RsClient * pClient,NvHandle hParent,NvHandle hKernelChannel,KernelChannel ** ppKernelChannel)1418 CliGetKernelChannelWithDevice
1419 (
1420     RsClient       *pClient,
1421     NvHandle        hParent,
1422     NvHandle        hKernelChannel,
1423     KernelChannel **ppKernelChannel
1424 )
1425 {
1426     RsResourceRef *pParentRef;
1427     RsResourceRef *pResourceRef;
1428     KernelChannel *pKernelChannel;
1429 
1430     if (ppKernelChannel == NULL)
1431         return NV_ERR_INVALID_ARGUMENT;
1432 
1433     *ppKernelChannel = NULL;
1434 
1435     NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pClient, hKernelChannel, &pResourceRef));
1436 
1437     pKernelChannel = dynamicCast(pResourceRef->pResource, KernelChannel);
1438     NV_CHECK_OR_RETURN(LEVEL_INFO, pKernelChannel != NULL, NV_ERR_OBJECT_NOT_FOUND);
1439 
1440     pParentRef = pResourceRef->pParentRef;
1441     NV_CHECK_OR_RETURN(LEVEL_INFO, pParentRef != NULL, NV_ERR_OBJECT_NOT_FOUND);
1442 
1443     //
1444     // Check that the parent matches requested handle.  Parent handle can be a
1445     // device or a ChannelGroup.  The first case can match either, the second
1446     // matches a Device when the parent is a ChannelGroup.
1447     //
1448     NV_CHECK_OR_RETURN(LEVEL_INFO, (pParentRef->hResource == hParent) ||
1449                      (RES_GET_HANDLE(GPU_RES_GET_DEVICE(pKernelChannel)) == hParent),
1450                          NV_ERR_OBJECT_NOT_FOUND);
1451 
1452     *ppKernelChannel = pKernelChannel;
1453     return NV_OK;
1454 } // end of CliGetKernelChannelWithDevice()
1455 
1456 
1457 /**
1458  * @brief Given a classNum this routine returns various sdk specific values for
1459  * that class.
1460  *
1461  * @param[in]   classNum
1462  * @param[out]  pClassInfo
1463  */
1464 void
CliGetChannelClassInfo(NvU32 classNum,CLI_CHANNEL_CLASS_INFO * pClassInfo)1465 CliGetChannelClassInfo
1466 (
1467     NvU32 classNum,
1468     CLI_CHANNEL_CLASS_INFO *pClassInfo
1469 )
1470 {
1471     switch (classNum)
1472     {
1473         case GF100_CHANNEL_GPFIFO:
1474         {
1475             pClassInfo->notifiersMaxCount  = NV906F_NOTIFIERS_MAXCOUNT;
1476             pClassInfo->eventActionDisable = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1477             pClassInfo->eventActionSingle  = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1478             pClassInfo->eventActionRepeat  = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1479             pClassInfo->rcNotifierIndex    = NV906F_NOTIFIERS_RC;
1480             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1481             break;
1482         }
1483         case KEPLER_CHANNEL_GPFIFO_A:
1484         {
1485             pClassInfo->notifiersMaxCount  = NVA06F_NOTIFIERS_MAXCOUNT;
1486             pClassInfo->eventActionDisable = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1487             pClassInfo->eventActionSingle  = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1488             pClassInfo->eventActionRepeat  = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1489             pClassInfo->rcNotifierIndex    = NVA06F_NOTIFIERS_RC;
1490             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1491             break;
1492         }
1493         case KEPLER_CHANNEL_GPFIFO_B:
1494         {
1495             pClassInfo->notifiersMaxCount  = NVA16F_NOTIFIERS_MAXCOUNT;
1496             pClassInfo->eventActionDisable = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1497             pClassInfo->eventActionSingle  = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1498             pClassInfo->eventActionRepeat  = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1499             pClassInfo->rcNotifierIndex    = NVA16F_NOTIFIERS_RC;
1500             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1501             break;
1502         }
1503         case MAXWELL_CHANNEL_GPFIFO_A:
1504         {
1505             pClassInfo->notifiersMaxCount  = NVB06F_NOTIFIERS_MAXCOUNT;
1506             pClassInfo->eventActionDisable = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1507             pClassInfo->eventActionSingle  = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1508             pClassInfo->eventActionRepeat  = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1509             pClassInfo->rcNotifierIndex    = NVB06F_NOTIFIERS_RC;
1510             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1511             break;
1512         }
1513         case PASCAL_CHANNEL_GPFIFO_A:
1514         {
1515             pClassInfo->notifiersMaxCount  = NVC06F_NOTIFIERS_MAXCOUNT;
1516             pClassInfo->eventActionDisable = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1517             pClassInfo->eventActionSingle  = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1518             pClassInfo->eventActionRepeat  = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1519             pClassInfo->rcNotifierIndex    = NVC06F_NOTIFIERS_RC;
1520             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1521             break;
1522         }
1523         case VOLTA_CHANNEL_GPFIFO_A:
1524         {
1525             pClassInfo->notifiersMaxCount  = NVC36F_NOTIFIERS_MAXCOUNT;
1526             pClassInfo->eventActionDisable = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1527             pClassInfo->eventActionSingle  = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1528             pClassInfo->eventActionRepeat  = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1529             pClassInfo->rcNotifierIndex    = NVC36F_NOTIFIERS_RC;
1530             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1531             break;
1532         }
1533         case TURING_CHANNEL_GPFIFO_A:
1534         {
1535             pClassInfo->notifiersMaxCount  = NVC46F_NOTIFIERS_MAXCOUNT;
1536             pClassInfo->eventActionDisable = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1537             pClassInfo->eventActionSingle  = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1538             pClassInfo->eventActionRepeat  = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1539             pClassInfo->rcNotifierIndex    = NVC46F_NOTIFIERS_RC;
1540             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1541             break;
1542         }
1543         case AMPERE_CHANNEL_GPFIFO_A:
1544         {
1545             pClassInfo->notifiersMaxCount  = NVC56F_NOTIFIERS_MAXCOUNT;
1546             pClassInfo->eventActionDisable = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1547             pClassInfo->eventActionSingle  = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1548             pClassInfo->eventActionRepeat  = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1549             pClassInfo->rcNotifierIndex    = NVC56F_NOTIFIERS_RC;
1550             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1551             break;
1552         }
1553         case HOPPER_CHANNEL_GPFIFO_A:
1554         {
1555             pClassInfo->notifiersMaxCount  = NVC86F_NOTIFIERS_MAXCOUNT;
1556             pClassInfo->eventActionDisable = NVC86F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
1557             pClassInfo->eventActionSingle  = NVC86F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;
1558             pClassInfo->eventActionRepeat  = NVC86F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1559             pClassInfo->rcNotifierIndex    = NVC86F_NOTIFIERS_RC;
1560             pClassInfo->classType          = CHANNEL_CLASS_TYPE_GPFIFO;
1561             break;
1562         }
1563 
1564         //
1565         // Does not make sense. Call with the class type from the client not the
1566         // internal type
1567         //
1568         case PHYSICAL_CHANNEL_GPFIFO:
1569             NV_PRINTF(LEVEL_ERROR,
1570                       "Invalid class for CliGetChannelClassInfo\n");
1571 
1572         default:
1573         {
1574             pClassInfo->notifiersMaxCount  = 0;
1575             pClassInfo->eventActionDisable = 0;
1576             pClassInfo->eventActionSingle  = 0;
1577             pClassInfo->eventActionRepeat  = 0;
1578             pClassInfo->rcNotifierIndex    = 0;
1579             pClassInfo->classType          = CHANNEL_CLASS_TYPE_DMA;
1580             break;
1581         }
1582     }
1583 }
1584 
1585 
1586 /**
1587  * @brief Returns the next KernelChannel from the iterator.
1588  *
1589  * Iterates over runlist IDs and ChIDs and returns the next KernelChannel found
1590  * on the heap, if any.
1591  *
1592  * (error guaranteed if pointer is NULL; non-NULL pointer guaranteed if NV_OK)
1593  *
1594  * @param[in] pGpu
1595  * @param[in] pIt                   the channel iterator
1596  * @param[out] ppKernelChannel      returns a KernelChannel *
1597  *
1598  * @return NV_OK if the returned pointer is valid or error
1599  */
kchannelGetNextKernelChannel(OBJGPU * pGpu,CHANNEL_ITERATOR * pIt,KernelChannel ** ppKernelChannel)1600 NV_STATUS kchannelGetNextKernelChannel
1601 (
1602     OBJGPU              *pGpu,
1603     CHANNEL_ITERATOR    *pIt,
1604     KernelChannel      **ppKernelChannel
1605 )
1606 {
1607     KernelChannel *pKernelChannel;
1608     KernelFifo    *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
1609 
1610     if (ppKernelChannel == NULL)
1611         return NV_ERR_INVALID_ARGUMENT;
1612 
1613     *ppKernelChannel = NULL;
1614 
1615     while (pIt->runlistId < pIt->numRunlists)
1616     {
1617         CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, pIt->runlistId);
1618 
1619         if (pChidMgr == NULL)
1620         {
1621             pIt->runlistId++;
1622             continue;
1623         }
1624 
1625         pIt->numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr);
1626         while (pIt->physicalChannelID < pIt->numChannels)
1627         {
1628             pKernelChannel = kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo,
1629                 pChidMgr, pIt->physicalChannelID);
1630             pIt->physicalChannelID++;
1631 
1632             //
1633             // This iterator can be used during an interrupt, when a KernelChannel may
1634             // be in the process of being destroyed. Don't return it if so.
1635             //
1636             if (pKernelChannel == NULL)
1637                 continue;
1638             if (!kchannelIsValid_HAL(pKernelChannel))
1639                 continue;
1640 
1641             *ppKernelChannel = pKernelChannel;
1642             return NV_OK;
1643         }
1644 
1645         pIt->runlistId++;
1646         // Reset channel index to 0 for next runlist
1647         pIt->physicalChannelID = 0;
1648     }
1649 
1650     return NV_ERR_OBJECT_NOT_FOUND;
1651 }
1652 
1653 /**
1654  * @brief Finds the corresponding KernelChannel given client object and channel handle
1655  *
1656  * Looks in client object store for the channel handle.  Scales with total
1657  * number of registered objects in the client, not just the number of channels.
1658  *
1659  * @param[in]  pClient
1660  * @param[in]  hKernelChannel a KernelChannel Channel handle
1661  * @param[out] ppKernelChannel
1662  *
1663  * @return NV_STATUS
1664  */
1665 NV_STATUS
CliGetKernelChannel(RsClient * pClient,NvHandle hKernelChannel,KernelChannel ** ppKernelChannel)1666 CliGetKernelChannel
1667 (
1668     RsClient       *pClient,
1669     NvHandle        hKernelChannel,
1670     KernelChannel **ppKernelChannel
1671 )
1672 {
1673     NV_STATUS      status;
1674     RsResourceRef *pResourceRef;
1675 
1676     *ppKernelChannel = NULL;
1677 
1678     status = clientGetResourceRef(pClient, hKernelChannel, &pResourceRef);
1679     if (status != NV_OK)
1680     {
1681         return status;
1682     }
1683 
1684     *ppKernelChannel = dynamicCast(pResourceRef->pResource, KernelChannel);
1685     NV_CHECK_OR_RETURN(LEVEL_INFO,
1686                        *ppKernelChannel != NULL,
1687                        NV_ERR_INVALID_CHANNEL);
1688     return NV_OK;
1689 }
1690 
1691 /*!
1692  * @brief Notify client that channel is stopped.
1693  *
1694  * @param[in] pKernelChannnel
1695  */
1696 NV_STATUS
kchannelNotifyRc_IMPL(KernelChannel * pKernelChannel)1697 kchannelNotifyRc_IMPL
1698 (
1699     KernelChannel *pKernelChannel
1700 )
1701 {
1702     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1703     RM_ENGINE_TYPE rmEngineType = RM_ENGINE_TYPE_NULL;
1704     NV_STATUS rmStatus = NV_OK;
1705 
1706     if (IS_GFID_VF(kchannelGetGfid(pKernelChannel)))
1707     {
1708         NV_PRINTF(LEVEL_INFO, "Notification for channel 0x%x stop is already performed on guest-RM\n",
1709                   kchannelGetDebugTag(pKernelChannel));
1710         return NV_OK;
1711     }
1712 
1713     if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT &&
1714         pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT)
1715     {
1716         NV_PRINTF(LEVEL_WARNING, "Channel 0x%x has no notifier set\n",
1717                   kchannelGetDebugTag(pKernelChannel));
1718         return NV_OK;
1719     }
1720 
1721     if (RM_ENGINE_TYPE_IS_VALID(kchannelGetEngineType(pKernelChannel)))
1722     {
1723         rmEngineType = kchannelGetEngineType(pKernelChannel);
1724     }
1725     rmStatus = krcErrorSetNotifier(pGpu, GPU_GET_KERNEL_RC(pGpu),
1726                                    pKernelChannel,
1727                                    ROBUST_CHANNEL_PREEMPTIVE_REMOVAL,
1728                                    rmEngineType,
1729                                    RC_NOTIFIER_SCOPE_CHANNEL);
1730     if (rmStatus != NV_OK)
1731     {
1732         NV_PRINTF(LEVEL_ERROR,
1733             "Failed to set error notifier for channel 0x%x with error 0x%x.\n",
1734             kchannelGetDebugTag(pKernelChannel), rmStatus);
1735     }
1736     return rmStatus;
1737 }
1738 
1739 /**
1740  * @brief Sends event corresponding to index to notify clients
1741  *
1742  * @param[in] pKernelChannel
1743  * @param[in] notifyIndex
1744  * @param[in] pNotifyParams
1745  * @parms[in] notifyParamsSize
1746  */
kchannelNotifyEvent_IMPL(KernelChannel * pKernelChannel,NvU32 notifyIndex,NvU32 info32,NvU16 info16,void * pNotifyParams,NvU32 notifyParamsSize)1747 void kchannelNotifyEvent_IMPL
1748 (
1749     KernelChannel *pKernelChannel,
1750     NvU32          notifyIndex,
1751     NvU32          info32,
1752     NvU16          info16,
1753     void          *pNotifyParams,
1754     NvU32          notifyParamsSize
1755 )
1756 {
1757     OBJGPU                 *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1758     ContextDma             *pContextDma;
1759     EVENTNOTIFICATION      *pEventNotification;
1760     CLI_CHANNEL_CLASS_INFO  classInfo;
1761 
1762     CliGetChannelClassInfo(RES_GET_EXT_CLASS_ID(pKernelChannel), &classInfo);
1763 
1764     // validate notifyIndex
1765     NV_CHECK_OR_RETURN_VOID(LEVEL_INFO, notifyIndex < classInfo.notifiersMaxCount);
1766 
1767     // Check if we have allocated the channel notifier action table
1768     NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, pKernelChannel->pNotifyActions != NULL);
1769 
1770     // handle notification if client wants it
1771     if (pKernelChannel->pNotifyActions[notifyIndex] != classInfo.eventActionDisable)
1772     {
1773         // get notifier context dma for the channel
1774         if (ctxdmaGetByHandle(RES_GET_CLIENT(pKernelChannel),
1775                               pKernelChannel->hErrorContext,
1776                               &pContextDma) == NV_OK)
1777         {
1778             // make sure it's big enough
1779             if (pContextDma->Limit >=
1780                 ((classInfo.notifiersMaxCount * sizeof (NvNotification)) - 1))
1781             {
1782                 // finally, write out the notifier
1783                 notifyFillNotifierArray(pGpu, pContextDma,
1784                                         0x0, 0x0, 0x0,
1785                                         notifyIndex);
1786             }
1787         }
1788     }
1789 
1790     // handle event if client wants it
1791     pEventNotification = inotifyGetNotificationList(staticCast(pKernelChannel, INotifier));
1792     if (pEventNotification != NULL)
1793     {
1794         NV_PRINTF(LEVEL_INFO, "Posting event on channel = 0x%x with info16 = 0x%x\n",
1795            kchannelGetDebugTag(pKernelChannel), (NvU32)info16);
1796         // ping any events on the list of type notifyIndex
1797         osEventNotificationWithInfo(pGpu, pEventNotification, notifyIndex, info32, info16,
1798                                     pNotifyParams, notifyParamsSize);
1799     }
1800     else
1801     {
1802         NV_PRINTF(LEVEL_INFO, "No event on channel = 0x%x\n", kchannelGetDebugTag(pKernelChannel));
1803     }
1804 
1805     // reset if single shot notify action
1806     if (pKernelChannel->pNotifyActions[notifyIndex] == classInfo.eventActionSingle)
1807         pKernelChannel->pNotifyActions[notifyIndex] = classInfo.eventActionDisable;
1808 
1809     return;
1810 }
1811 
1812 /**
1813  * @brief Writes notifier memory at given index with given info
1814  *
1815  * @param[in] pKernelChannel
1816  * @param[in] notifyIndex
1817  * @param[in] info32
1818  * @param[in] info16
1819  * @parms[in] notifierStatus
1820  */
kchannelUpdateNotifierMem_IMPL(KernelChannel * pKernelChannel,NvU32 notifyIndex,NvU32 info32,NvU16 info16,NvU32 notifierStatus)1821 NV_STATUS kchannelUpdateNotifierMem_IMPL
1822 (
1823     KernelChannel *pKernelChannel,
1824     NvU32 notifyIndex,
1825     NvU32 info32,
1826     NvU16 info16,
1827     NvU32 notifierStatus
1828 )
1829 {
1830     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
1831     MEMORY_DESCRIPTOR *pNotifierMemDesc = pKernelChannel->pErrContextMemDesc;
1832     NV_ADDRESS_SPACE addressSpace;
1833     OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
1834     NvU64 time;
1835     MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
1836     KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
1837     TRANSFER_SURFACE surf = {0};
1838     NvNotification *pNotifier = NULL;
1839     NvBool bMemEndTransfer = NV_FALSE;
1840 
1841     if (pNotifierMemDesc == NULL)
1842         return NV_OK;
1843 
1844     addressSpace = memdescGetAddressSpace(pNotifierMemDesc);
1845     if (RMCFG_FEATURE_PLATFORM_GSP)
1846         NV_ASSERT_OR_RETURN(addressSpace == ADDR_FBMEM, NV_ERR_INVALID_STATE);
1847 
1848     //
1849     // If clients did not allocate enough memory for the doorbell
1850     // notifier, return NV_OK so as not to regress older clients
1851     //
1852     NV_CHECK_OR_RETURN(LEVEL_INFO, memdescGetSize(pNotifierMemDesc) >= (notifyIndex + 1) * sizeof(NvNotification), NV_OK);
1853 
1854     //
1855     // we rely on persistent mapping for key rotation notifier
1856     // since this may be called in top half and mappings are not allowed
1857     // in contexts that can't sleep on KVM or similar HCC systems.
1858     //
1859     ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
1860     if ((pConfCompute != NULL) &&
1861         (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED)) &&
1862         (notifyIndex == NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS))
1863     {
1864         pNotifier = _kchannelGetKeyRotationNotifier(pKernelChannel);
1865         NV_ASSERT_OR_RETURN(pNotifier != NULL, NV_ERR_INVALID_STATE);
1866         bMemEndTransfer = NV_FALSE;
1867     }
1868     else
1869     {
1870         pNotifier = (NvNotification *)memdescGetKernelMapping(pNotifierMemDesc);
1871         if (pNotifier == NULL)
1872         {
1873             surf.pMemDesc = pNotifierMemDesc;
1874             surf.offset = notifyIndex * sizeof(NvNotification);
1875 
1876             pNotifier =
1877                 (NvNotification *) memmgrMemBeginTransfer(pMemoryManager, &surf,
1878                                                           sizeof(NvNotification),
1879                                                           TRANSFER_FLAGS_SHADOW_ALLOC);
1880             NV_ASSERT_OR_RETURN(pNotifier != NULL, NV_ERR_INVALID_STATE);
1881             bMemEndTransfer = NV_TRUE;
1882         }
1883         else
1884         {
1885             //
1886             // If a CPU pointer has been passed by caller ensure that the notifier
1887             // is in sysmem or in case it in vidmem, BAR access to the same is not
1888             // blocked (for HCC)
1889             //
1890             NV_ASSERT_OR_RETURN(
1891                 memdescGetAddressSpace(pNotifierMemDesc) == ADDR_SYSMEM ||
1892                 !kbusIsBarAccessBlocked(pKernelBus), NV_ERR_INVALID_ARGUMENT);
1893             pNotifier = &pNotifier[notifyIndex];
1894         }
1895     }
1896 
1897     tmrGetCurrentTime(pTmr, &time);
1898 
1899     notifyFillNvNotification(pGpu, pNotifier, info32, info16,
1900                              notifierStatus, NV_TRUE, time);
1901 
1902     if (bMemEndTransfer)
1903     {
1904         memmgrMemEndTransfer(pMemoryManager, &surf, sizeof(NvNotification), 0);
1905     }
1906     return NV_OK;
1907 }
1908 
1909 /*!
1910  * @brief Stop channel and notify client
1911  *
1912  * @param[in] pKernelChannnel
1913  * @param[in] pStopChannelParams
1914  */
1915 NV_STATUS
kchannelCtrlCmdStopChannel_IMPL(KernelChannel * pKernelChannel,NVA06F_CTRL_STOP_CHANNEL_PARAMS * pStopChannelParams)1916 kchannelCtrlCmdStopChannel_IMPL
1917 (
1918     KernelChannel *pKernelChannel,
1919     NVA06F_CTRL_STOP_CHANNEL_PARAMS *pStopChannelParams
1920 )
1921 {
1922     NV_STATUS     rmStatus      = NV_OK;
1923     OBJGPU       *pGpu          = GPU_RES_GET_GPU(pKernelChannel);
1924     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
1925     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
1926 
1927     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
1928     {
1929         NV_RM_RPC_CONTROL(pGpu,
1930                           pRmCtrlParams->hClient,
1931                           RES_GET_HANDLE(pKernelChannel),
1932                           pRmCtrlParams->cmd,
1933                           pRmCtrlParams->pParams,
1934                           pRmCtrlParams->paramsSize,
1935                           rmStatus);
1936         if (rmStatus != NV_OK)
1937             return rmStatus;
1938     }
1939     else
1940     {
1941         NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
1942             kchannelFwdToInternalCtrl_HAL(pGpu,
1943                                           pKernelChannel,
1944                                           NVA06F_CTRL_CMD_INTERNAL_STOP_CHANNEL,
1945                                           pRmCtrlParams));
1946     }
1947 
1948     NV_ASSERT_OK_OR_RETURN(kchannelNotifyRc_HAL(pKernelChannel));
1949 
1950     return NV_OK;
1951 }
1952 
1953 /*!
1954  * @brief Helper to get type and memdesc of a channel notifier (memory/ctxdma)
1955  */
1956 NV_STATUS
kchannelGetNotifierInfo(OBJGPU * pGpu,Device * pDevice,NvHandle hErrorContext,MEMORY_DESCRIPTOR ** ppMemDesc,ErrorNotifierType * pNotifierType,NvU64 * pOffset)1957 kchannelGetNotifierInfo
1958 (
1959     OBJGPU             *pGpu,
1960     Device             *pDevice,
1961     NvHandle            hErrorContext,
1962     MEMORY_DESCRIPTOR **ppMemDesc,
1963     ErrorNotifierType  *pNotifierType,
1964     NvU64              *pOffset
1965 )
1966 {
1967     RsClient   *pRsClient   = RES_GET_CLIENT(pDevice);
1968     NvHandle    hDevice     = RES_GET_HANDLE(pDevice);
1969     ContextDma *pContextDma = NULL;
1970     Memory     *pMemory     = NULL;
1971 
1972     NV_ASSERT_OR_RETURN(ppMemDesc != NULL, NV_ERR_INVALID_PARAMETER);
1973     NV_ASSERT_OR_RETURN(pNotifierType != NULL, NV_ERR_INVALID_PARAMETER);
1974 
1975     *ppMemDesc = NULL;
1976     *pNotifierType = ERROR_NOTIFIER_TYPE_UNKNOWN;
1977     *pOffset = 0;
1978 
1979     if (hErrorContext == NV01_NULL_OBJECT)
1980     {
1981         *pNotifierType = ERROR_NOTIFIER_TYPE_NONE;
1982         return NV_OK;
1983     }
1984 
1985     if (memGetByHandleAndDevice(pRsClient, hErrorContext, hDevice, &pMemory) ==
1986         NV_OK)
1987     {
1988         if (memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_VIRTUAL)
1989         {
1990             //
1991             // GPUVA case: Get the underlying DMA mapping in this case. In GSP
1992             // client mode + SLI, GSP won't be able to write to notifiers on
1993             // other GPUs.
1994             //
1995             NvU64 offset;
1996             NvU32 subdeviceInstance;
1997             NvU64 notifyGpuVA = memdescGetPhysAddr(pMemory->pMemDesc,
1998                                                    AT_GPU_VA, 0);
1999             CLI_DMA_MAPPING_INFO *pDmaMappingInfo;
2000             NvBool bFound;
2001 
2002             bFound = CliGetDmaMappingInfo(
2003                 pRsClient,
2004                 RES_GET_HANDLE(pDevice),
2005                 RES_GET_HANDLE(pMemory),
2006                 notifyGpuVA,
2007                 gpumgrGetDeviceGpuMask(pGpu->deviceInstance),
2008                 &pDmaMappingInfo);
2009 
2010             if (!bFound)
2011             {
2012                 NV_PRINTF(LEVEL_ERROR,
2013                           "Cannot find DMA mapping for GPU_VA notifier\n");
2014                 return NV_ERR_INVALID_STATE;
2015             }
2016 
2017             offset = notifyGpuVA - pDmaMappingInfo->DmaOffset;
2018             if (offset + sizeof(NOTIFICATION) > pDmaMappingInfo->pMemDesc->Size)
2019             {
2020                 NV_PRINTF(LEVEL_ERROR,
2021                     "Notifier does not fit within DMA mapping for GPU_VA\n");
2022                 return NV_ERR_INVALID_STATE;
2023             }
2024 
2025             subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(
2026                 gpumgrGetParentGPU(pGpu));
2027             SLI_LOOP_START(SLI_LOOP_FLAGS_NONE)
2028             if (IsSLIEnabled(pGpu) && IS_GSP_CLIENT(pGpu))
2029             {
2030                 NV_PRINTF(LEVEL_ERROR, "GSP does not support SLI\n");
2031                 return NV_ERR_NOT_SUPPORTED;
2032             }
2033             SLI_LOOP_END
2034 
2035             if (!pDmaMappingInfo->KernelVAddr[subdeviceInstance])
2036             {
2037                 NV_PRINTF(LEVEL_ERROR,
2038                           "Kernel VA addr mapping not present for notifier\n");
2039                 return NV_ERR_INVALID_STATE;
2040             }
2041             *ppMemDesc = pDmaMappingInfo->pMemDesc;
2042             // The notifier format here is struct NOTIFICATION, same as ctxdma
2043             *pNotifierType = ERROR_NOTIFIER_TYPE_CTXDMA;
2044             *pOffset = offset;
2045         }
2046         else
2047         {
2048             *ppMemDesc = pMemory->pMemDesc;
2049             *pNotifierType = ERROR_NOTIFIER_TYPE_MEMORY;
2050         }
2051         return NV_OK;
2052     }
2053 
2054     if (ctxdmaGetByHandle(pRsClient, hErrorContext, &pContextDma) == NV_OK)
2055     {
2056         *ppMemDesc = pContextDma->pMemDesc;
2057         *pNotifierType = ERROR_NOTIFIER_TYPE_CTXDMA;
2058         return NV_OK;
2059     }
2060 
2061     return NV_ERR_OBJECT_NOT_FOUND;
2062 }
2063 
2064 /*!
2065  * @brief  Check if the client that owns this channel is in user mode.
2066  *
2067  * This replaces using call context for privilege checking,
2068  * and is callable from both CPU and GSP.
2069  *
2070  * @param[in] pGpu
2071  * @param[in] pKernelChannel
2072  *
2073  * @returns NV_TRUE if owned by user mode or NV_FALSE.
2074  */
2075 NvBool
kchannelCheckIsUserMode_IMPL(KernelChannel * pKernelChannel)2076 kchannelCheckIsUserMode_IMPL
2077 (
2078     KernelChannel *pKernelChannel
2079 )
2080 {
2081     return (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER) ||
2082            (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN);
2083 }
2084 
2085 /*!
2086  * @brief  Check if the client that owns this channel is kernel.
2087  *
2088  * This replaces using call context for privilege checking,
2089  * and is callable from both CPU and GSP.
2090  *
2091  * @param[in] pGpu
2092  * @param[in] pKernelChannel
2093  *
2094  * @returns NV_TRUE if owned by kernel or NV_FALSE.
2095  */
2096 NvBool
kchannelCheckIsKernel_IMPL(KernelChannel * pKernelChannel)2097 kchannelCheckIsKernel_IMPL
2098 (
2099     KernelChannel *pKernelChannel
2100 )
2101 {
2102     return pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL;
2103 }
2104 
2105 /*!
2106  * @brief  Check if the client that owns this channel is admin.
2107  *
2108  * This replaces using call context for admin privilege checking,
2109  * but is callable from both CPU and GSP.
2110  *
2111  * @param[in] pGpu
2112  * @param[in] pKernelChannel
2113  *
2114  * @returns NV_TRUE if owned by admin or NV_FALSE.
2115  */
2116 NvBool
kchannelCheckIsAdmin_IMPL(KernelChannel * pKernelChannel)2117 kchannelCheckIsAdmin_IMPL
2118 (
2119     KernelChannel *pKernelChannel
2120 )
2121 {
2122     return (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL) ||
2123            (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN);
2124 }
2125 
2126 
2127 /*!
2128  * @brief  Check if the channel is bound to its resources.
2129  *
2130  * This is to make sure channel went through the UVM registration step before it can be scheduled.
2131  * This applies only to UVM owned channels.
2132  *
2133  * @param[in] pKernelChannel
2134  * @param[in] pGVAS
2135  *
2136  * @returns NV_TRUE if bound.
2137  */
2138 NvBool
kchannelIsSchedulable_IMPL(OBJGPU * pGpu,KernelChannel * pKernelChannel)2139 kchannelIsSchedulable_IMPL
2140 (
2141     OBJGPU *pGpu,
2142     KernelChannel *pKernelChannel
2143 )
2144 {
2145     OBJGVASPACE *pGVAS = NULL;
2146     NvU32        engineDesc = 0;
2147     NvU32        gfId;
2148 
2149     gfId = kchannelGetGfid(pKernelChannel);
2150     if (IS_GFID_VF(gfId))
2151     {
2152         NV_PRINTF(LEVEL_INFO, "Check for channel schedulability for channel 0x%x is already performed on guest-RM\n",
2153                   kchannelGetDebugTag(pKernelChannel));
2154         return NV_TRUE;
2155     }
2156 
2157     pGVAS = dynamicCast(pKernelChannel->pVAS, OBJGVASPACE);
2158 
2159     //
2160     // It should be an error to have allocated and attempt to schedule a
2161     // channel without having allocated a GVAS. We ignore this check on
2162     // AMODEL, which has its own dummy AVAS.
2163     //
2164     NV_ASSERT_OR_RETURN(pGVAS != NULL || IS_MODS_AMODEL(pGpu), NV_FALSE);
2165 
2166     NV_ASSERT_OR_RETURN(kchannelGetEngine_HAL(pGpu, pKernelChannel, &engineDesc) == NV_OK, NV_FALSE);
2167 
2168     if (pGVAS != NULL && gvaspaceIsExternallyOwned(pGVAS) && IS_GR(engineDesc) && !pKernelChannel->bIsContextBound)
2169     {
2170         NV_PRINTF(LEVEL_ERROR,
2171                   "Cannot schedule externally-owned channel with unbound allocations :0x%x!\n",
2172                   kchannelGetDebugTag(pKernelChannel));
2173         return NV_FALSE;
2174     }
2175     return NV_TRUE;
2176 }
2177 
2178 // Alloc pFifoHalData
2179 static NV_STATUS
_kchannelAllocHalData(OBJGPU * pGpu,KernelChannel * pKernelChannel)2180 _kchannelAllocHalData
2181 (
2182     OBJGPU        *pGpu,
2183     KernelChannel *pKernelChannel
2184 )
2185 {
2186     portMemSet(pKernelChannel->pFifoHalData, 0, sizeof(pKernelChannel->pFifoHalData));
2187 
2188     // Alloc 1 page of instmem per GPU instance
2189     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2190 
2191     pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = portMemAllocNonPaged(sizeof(FIFO_INSTANCE_BLOCK));
2192 
2193     NV_ASSERT_OR_ELSE(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] != NULL,
2194             SLI_LOOP_GOTO(failed));
2195 
2196     portMemSet(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)], 0, sizeof(FIFO_INSTANCE_BLOCK));
2197 
2198     SLI_LOOP_END
2199 
2200     return NV_OK;
2201 
2202 failed:
2203     DBG_BREAKPOINT();
2204     _kchannelFreeHalData(pGpu, pKernelChannel);
2205     return NV_ERR_NO_MEMORY;
2206 }
2207 
2208 // Free memdescs and pFifoHalData, if any
2209 static void
_kchannelFreeHalData(OBJGPU * pGpu,KernelChannel * pKernelChannel)2210 _kchannelFreeHalData
2211 (
2212     OBJGPU        *pGpu,
2213     KernelChannel *pKernelChannel
2214 )
2215 {
2216     // Unmap / delete memdescs
2217     kchannelDestroyMem_HAL(pGpu, pKernelChannel);
2218 
2219     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2220     kchannelDestroyUserdMemDesc(pGpu, pKernelChannel);
2221 
2222     // Free pFifoHalData
2223     portMemFree(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]);
2224     pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = NULL;
2225     SLI_LOOP_END
2226 }
2227 
2228 // Returns the proper VerifFlags for kchannelAllocMem
2229 static NvU32
_kchannelgetVerifFlags(OBJGPU * pGpu,NV_CHANNEL_ALLOC_PARAMS * pChannelGpfifoParams)2230 _kchannelgetVerifFlags
2231 (
2232     OBJGPU                                    *pGpu,
2233     NV_CHANNEL_ALLOC_PARAMS    *pChannelGpfifoParams
2234 )
2235 {
2236     NvU32 verifFlags = 0;
2237 
2238     return verifFlags;
2239 }
2240 
2241 // Allocate and describe instance memory
2242 static NV_STATUS
_kchannelAllocOrDescribeInstMem(KernelChannel * pKernelChannel,NV_CHANNEL_ALLOC_PARAMS * pChannelGpfifoParams)2243 _kchannelAllocOrDescribeInstMem
2244 (
2245     KernelChannel  *pKernelChannel,
2246     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams
2247 )
2248 {
2249     OBJGPU                *pGpu        = GPU_RES_GET_GPU(pKernelChannel);
2250     KernelFifo            *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
2251     KernelChannelGroupApi *pKernelChannelGroupApi = pKernelChannel->pKernelChannelGroupApi;
2252     KernelChannelGroup    *pKernelChannelGroup    = pKernelChannelGroupApi->pKernelChannelGroup;
2253     NvU32                  gfid       = pKernelChannelGroup->gfid;
2254     NV_STATUS              status;
2255     NvHandle               hClient = RES_GET_CLIENT_HANDLE(pKernelChannel);
2256 
2257     // Alloc pFifoHalData
2258     NV_ASSERT_OK_OR_RETURN(_kchannelAllocHalData(pGpu, pKernelChannel));
2259 
2260     //
2261     // GSP RM and host RM on full SRIOV setup will not be aware of the client allocated userd handles,
2262     // translate the handle on client GSP. GSP RM or host RM on full SRIOV setup will get the translated
2263     // addresses which it will later memdescribe.
2264     //
2265     // However it is still client allocated userd from GSP RM or host RM on full SRIOV setup
2266     // perspective so set the flag accordingly.
2267     //
2268     if (!RMCFG_FEATURE_PLATFORM_GSP &&
2269         !(IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
2270     {
2271         pKernelChannel->bClientAllocatedUserD = NV_FALSE;
2272         NV_ASSERT_OK_OR_GOTO(status,
2273                 kchannelCreateUserdMemDescBc_HAL(pGpu, pKernelChannel, hClient,
2274                     pChannelGpfifoParams->hUserdMemory,
2275                     pChannelGpfifoParams->userdOffset),
2276                 failed);
2277     }
2278     else
2279     {
2280         pKernelChannel->bClientAllocatedUserD = NV_TRUE;
2281     }
2282 
2283     // Alloc/describe instmem memdescs depending on platform
2284     if (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
2285     {
2286         // On Heavy SRIOV, describe memdescs using RPC
2287         NV_ASSERT_OK_OR_GOTO(status,
2288                 _kchannelDescribeMemDescsHeavySriov(pGpu, pKernelChannel),
2289                 failed);
2290     }
2291     else if (RMCFG_FEATURE_PLATFORM_GSP ||
2292         (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
2293     {
2294         // On GSPFW or non-heavy SRIOV, describe memdescs from params
2295         NV_ASSERT_OK_OR_GOTO(status,
2296                 _kchannelDescribeMemDescsFromParams(pGpu, pKernelChannel, pChannelGpfifoParams),
2297                 failed);
2298     }
2299     else if (!IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
2300     {
2301         // On baremetal, GSP client, or SRIOV host, alloc mem
2302         NV_ASSERT_OK_OR_GOTO(status,
2303                 kchannelAllocMem_HAL(pGpu,
2304                                      pKernelChannel,
2305                                      pChannelGpfifoParams->flags,
2306                                      _kchannelgetVerifFlags(pGpu, pChannelGpfifoParams)),
2307                 failed);
2308     }
2309 
2310     // Setup USERD
2311     if (IS_VIRTUAL(pGpu))
2312     {
2313         PMEMORY_DESCRIPTOR pUserdSubDeviceMemDesc =
2314                 pKernelChannel->pUserdSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
2315         NvBool bFullSriov = IS_VIRTUAL_WITH_SRIOV(pGpu) &&
2316             !gpuIsWarBug200577889SriovHeavyEnabled(pGpu);
2317 
2318         // Clear Userd if it is in FB for SRIOV environment without BUG 200577889 or if in SYSMEM
2319         if (pUserdSubDeviceMemDesc != NULL &&
2320                 ((memdescGetAddressSpace(pUserdSubDeviceMemDesc) == ADDR_SYSMEM)
2321                 || ((memdescGetAddressSpace(pUserdSubDeviceMemDesc) == ADDR_FBMEM) && bFullSriov)))
2322         {
2323             kfifoSetupUserD_HAL(pGpu, pKernelFifo, pUserdSubDeviceMemDesc);
2324         }
2325     }
2326     return NV_OK;
2327 
2328 failed:
2329     _kchannelFreeHalData(pGpu, pKernelChannel);
2330     return status;
2331 }
2332 
2333 /**
2334  * @brief Create and describe channel instance memory ramfc and userd memdescs
2335  *        Done using info in pChanGpfifoParams
2336  *
2337  * @param pGpu                  : OBJGPU pointer
2338  * @param pKernelChannel        : KernelChannel pointer
2339  * @param pChanGpfifoParams     : Pointer to channel allocation params
2340  */
2341 static NV_STATUS
_kchannelDescribeMemDescsFromParams(OBJGPU * pGpu,KernelChannel * pKernelChannel,NV_CHANNEL_ALLOC_PARAMS * pChannelGpfifoParams)2342 _kchannelDescribeMemDescsFromParams
2343 (
2344     OBJGPU                                 *pGpu,
2345     KernelChannel                          *pKernelChannel,
2346     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams
2347 )
2348 {
2349     NV_STATUS               status         = NV_OK;
2350     FIFO_INSTANCE_BLOCK    *pInstanceBlock = NULL;
2351     NvU32                   subDevInst;
2352     NvU32                   gfid           = GPU_GFID_PF;
2353     NvU32                   runqueue;
2354     KernelChannelGroupApi *pKernelChannelGroupApi =
2355         pKernelChannel->pKernelChannelGroupApi;
2356 
2357     NV_ASSERT_OR_RETURN((pKernelChannelGroupApi != NULL), NV_ERR_INVALID_STATE);
2358     gfid = pKernelChannelGroupApi->pKernelChannelGroup->gfid;
2359 
2360     NV_ASSERT_OR_RETURN(RMCFG_FEATURE_PLATFORM_GSP ||
2361                         (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)),
2362                         NV_ERR_INVALID_STATE);
2363 
2364     NV_ASSERT_OR_RETURN((pChannelGpfifoParams != NULL), NV_ERR_INVALID_ARGUMENT);
2365 
2366     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2367 
2368     subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
2369 
2370     pInstanceBlock = (FIFO_INSTANCE_BLOCK*) pKernelChannel->pFifoHalData[subDevInst];
2371 
2372     // Create memory descriptor for the instance memory
2373     status = memdescCreate(&pInstanceBlock->pInstanceBlockDesc, pGpu,
2374                            pChannelGpfifoParams->instanceMem.size, 1 , NV_TRUE,
2375                            pChannelGpfifoParams->instanceMem.addressSpace,
2376                            pChannelGpfifoParams->instanceMem.cacheAttrib,
2377                            MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2378 
2379     if (status != NV_OK)
2380     {
2381         NV_PRINTF(LEVEL_ERROR,
2382                   "Unable to allocate instance memory descriptor!\n");
2383         SLI_LOOP_RETURN(status);
2384     }
2385 
2386     memdescDescribe(pInstanceBlock->pInstanceBlockDesc, pChannelGpfifoParams->instanceMem.addressSpace,
2387                     pChannelGpfifoParams->instanceMem.base, pChannelGpfifoParams->instanceMem.size);
2388 
2389 
2390     // Create memory descriptor for the ramfc
2391     status = memdescCreate(&pInstanceBlock->pRamfcDesc, pGpu,
2392                            pChannelGpfifoParams->ramfcMem.size, 1 , NV_TRUE,
2393                            pChannelGpfifoParams->ramfcMem.addressSpace,
2394                            pChannelGpfifoParams->ramfcMem.cacheAttrib,
2395                            MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2396 
2397     if (status != NV_OK)
2398     {
2399         NV_PRINTF(LEVEL_ERROR,
2400                   "Unable to allocate instance memory descriptor!\n");
2401         SLI_LOOP_RETURN(status);
2402     }
2403 
2404     memdescDescribe(pInstanceBlock->pRamfcDesc, pChannelGpfifoParams->ramfcMem.addressSpace,
2405                     pChannelGpfifoParams->ramfcMem.base, pChannelGpfifoParams->ramfcMem.size);
2406 
2407     // Create userd memory descriptor
2408     status = memdescCreate(&pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], pGpu,
2409                            pChannelGpfifoParams->userdMem.size, 1 , NV_TRUE,
2410                            pChannelGpfifoParams->userdMem.addressSpace,
2411                            pChannelGpfifoParams->userdMem.cacheAttrib,
2412                            MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2413 
2414     if (status != NV_OK)
2415     {
2416         NV_PRINTF(LEVEL_ERROR,
2417                   "Unable to allocate instance memory descriptor!\n");
2418         SLI_LOOP_RETURN(status);
2419     }
2420 
2421     memdescDescribe(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst],
2422                     pChannelGpfifoParams->userdMem.addressSpace,
2423                     pChannelGpfifoParams->userdMem.base, pChannelGpfifoParams->userdMem.size);
2424 
2425     if (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
2426     {
2427         /*
2428          * For full SRIOV, guest RM allocates and sends istance, ramfc and userd memory.
2429          * Set MEMDESC_FLAGS_GUEST_ALLOCATED flag in memory descriptor
2430          */
2431         memdescSetFlag(pInstanceBlock->pInstanceBlockDesc, MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE);
2432         memdescSetFlag(pInstanceBlock->pRamfcDesc, MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE);
2433         memdescSetFlag(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE);
2434     }
2435 
2436     // Create method buffer memory descriptor
2437     runqueue = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, pChannelGpfifoParams->flags);
2438     if (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
2439     {
2440         pKernelChannelGroupApi->pKernelChannelGroup->pMthdBuffers[runqueue]
2441             .bar2Addr = pChannelGpfifoParams->mthdbufMem.base;
2442     }
2443     else if (pKernelChannelGroupApi->pKernelChannelGroup
2444                  ->pMthdBuffers[runqueue].pMemDesc == NULL)
2445     {
2446         NV_ASSERT(pChannelGpfifoParams->mthdbufMem.size > 0);
2447         NV_ASSERT(pChannelGpfifoParams->mthdbufMem.base != 0);
2448         status = memdescCreate(&pKernelChannelGroupApi->pKernelChannelGroup
2449                                     ->pMthdBuffers[runqueue].pMemDesc,
2450                                pGpu,
2451                                pChannelGpfifoParams->mthdbufMem.size,
2452                                1,
2453                                NV_TRUE,
2454                                pChannelGpfifoParams->mthdbufMem.addressSpace,
2455                                pChannelGpfifoParams->mthdbufMem.cacheAttrib,
2456                                MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2457 
2458         if (status != NV_OK)
2459         {
2460             NV_PRINTF(LEVEL_ERROR,
2461                       "Unable to allocate instance memory descriptor!\n");
2462             SLI_LOOP_RETURN(status);
2463         }
2464         memdescDescribe(pKernelChannelGroupApi->pKernelChannelGroup
2465                             ->pMthdBuffers[runqueue].pMemDesc,
2466                         pChannelGpfifoParams->mthdbufMem.addressSpace,
2467                         pChannelGpfifoParams->mthdbufMem.base,
2468                         pChannelGpfifoParams->mthdbufMem.size);
2469     }
2470 
2471     NV_PRINTF(LEVEL_INFO,
2472               "hChannel 0x%x hClient 0x%x, Class ID 0x%x "
2473               "Instance Block @ 0x%llx (%s %x) "
2474               "USERD @ 0x%llx "
2475               "for subdevice %d\n",
2476               RES_GET_HANDLE(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel),
2477               memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0),
2478               memdescGetApertureString(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2479               (NvU32)(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2480               (pKernelChannel->pUserdSubDeviceMemDesc[subDevInst] == NULL) ? 0x0LL :
2481               memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], AT_GPU, 0LL), subDevInst);
2482 
2483     SLI_LOOP_END
2484 
2485     return status;
2486 }
2487 
2488 /**
2489  * @brief Create and describe channel instance memory ramfc and userd memdescs
2490  *        Done using RPC for Heavy SRIOV guest
2491  *
2492  * @param pGpu                  : OBJGPU pointer
2493  * @param pKernelChannel        : KernelChannel pointer
2494  */
2495 static NV_STATUS
_kchannelDescribeMemDescsHeavySriov(OBJGPU * pGpu,KernelChannel * pKernelChannel)2496 _kchannelDescribeMemDescsHeavySriov
2497 (
2498     OBJGPU               *pGpu,
2499     KernelChannel        *pKernelChannel
2500 )
2501 {
2502     NV_STATUS               status         = NV_OK;
2503     FIFO_INSTANCE_BLOCK    *pInstanceBlock = NULL;
2504     NvU32                   subDevInst;
2505     Subdevice              *pSubDevice;
2506     NvHandle                hSubDevice     = 0;
2507     NvU32                   apert          = ADDR_UNKNOWN;
2508     NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS memInfoParams;
2509     Device                 *pDevice = GPU_RES_GET_DEVICE(pKernelChannel);
2510 
2511     NV_ASSERT_OR_RETURN(IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu),
2512             NV_ERR_INVALID_STATE);
2513 
2514     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
2515 
2516     subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
2517 
2518     pInstanceBlock = (FIFO_INSTANCE_BLOCK*) pKernelChannel->pFifoHalData[subDevInst];
2519 
2520     //
2521     // In SRIOV enabled systems, MMU fault interrupts for guest contexts are received and handled in guests.
2522     // Inorder to correctly find the faulting channel, faulting instance address has be compared with list of allocated channels.
2523     // But since contexts are currently allocated in host during channelConstruct, we need
2524     // context info from host and save it locally for the above channel lookup to pass. This piece of code uses GET_CHANNEL_MEM_INFO
2525     // to fetch the info and update pFifoHalData with the relevant details.
2526     //
2527 
2528     portMemSet(&memInfoParams, 0, sizeof(NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS));
2529     memInfoParams.hChannel = RES_GET_HANDLE(pKernelChannel);
2530 
2531     status = subdeviceGetByInstance(RES_GET_CLIENT(pKernelChannel),
2532                                     RES_GET_HANDLE(pDevice),
2533                                     subDevInst,
2534                                     &pSubDevice);
2535     if (status != NV_OK)
2536     {
2537         NV_PRINTF(LEVEL_ERROR, "Unable to get subdevice object.\n");
2538         DBG_BREAKPOINT();
2539         SLI_LOOP_RETURN(status);
2540     }
2541 
2542     GPU_RES_SET_THREAD_BC_STATE(pSubDevice);
2543 
2544     hSubDevice = RES_GET_HANDLE(pSubDevice);
2545 
2546     NV_RM_RPC_CONTROL(pGpu,
2547                       RES_GET_CLIENT_HANDLE(pKernelChannel),
2548                       hSubDevice,
2549                       NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO,
2550                       &memInfoParams,
2551                       sizeof(NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS),
2552                       status);
2553     if (status != NV_OK)
2554     {
2555         NV_PRINTF(LEVEL_ERROR,
2556                   "RM Control call to fetch channel meminfo failed, hKernelChannel 0x%x\n",
2557                   RES_GET_HANDLE(pKernelChannel));
2558         DBG_BREAKPOINT();
2559         SLI_LOOP_RETURN(status);
2560     }
2561 
2562     // Find the aperture
2563     if (memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_VIDMEM)
2564     {
2565         apert = ADDR_FBMEM;
2566     }
2567     else if ((memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_COH) ||
2568              (memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_NCOH))
2569     {
2570         apert = ADDR_SYSMEM;
2571     }
2572     else
2573     {
2574         NV_PRINTF(LEVEL_ERROR,
2575                   "Unknown aperture, hClient 0x%x, hKernelChannel 0x%x\n",
2576                   RES_GET_CLIENT_HANDLE(pKernelChannel),
2577                   RES_GET_HANDLE(pKernelChannel));
2578         status = NV_ERR_INVALID_ARGUMENT;
2579         DBG_BREAKPOINT();
2580         SLI_LOOP_RETURN(status);
2581     }
2582 
2583     status = memdescCreate(&pInstanceBlock->pInstanceBlockDesc, pGpu,
2584                            memInfoParams.chMemInfo.inst.size, 1 , NV_TRUE,
2585                            apert, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE);
2586 
2587     if (status != NV_OK)
2588     {
2589         NV_PRINTF(LEVEL_ERROR,
2590                   "Unable to allocate instance memory descriptor!\n");
2591         SLI_LOOP_RETURN(status);
2592     }
2593 
2594     memdescDescribe(pInstanceBlock->pInstanceBlockDesc, apert, memInfoParams.chMemInfo.inst.base, memInfoParams.chMemInfo.inst.size);
2595 
2596     NV_PRINTF(LEVEL_INFO,
2597               "hChannel 0x%x hClient 0x%x, Class ID 0x%x "
2598               "Instance Block @ 0x%llx (%s %x) "
2599               "USERD @ 0x%llx "
2600               "for subdevice %d\n",
2601               RES_GET_HANDLE(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel),
2602               memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0),
2603               memdescGetApertureString(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2604               (NvU32)(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)),
2605               (pKernelChannel->pUserdSubDeviceMemDesc[subDevInst] == NULL) ? 0x0LL :
2606               memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], AT_GPU, 0LL), subDevInst);
2607 
2608     SLI_LOOP_END
2609 
2610     return status;
2611 }
2612 
2613 static NV_STATUS
_kchannelSendChannelAllocRpc(KernelChannel * pKernelChannel,NV_CHANNEL_ALLOC_PARAMS * pChannelGpfifoParams,KernelChannelGroup * pKernelChannelGroup,NvBool bFullSriov)2614 _kchannelSendChannelAllocRpc
2615 (
2616     KernelChannel *pKernelChannel,
2617     NV_CHANNEL_ALLOC_PARAMS *pChannelGpfifoParams,
2618     KernelChannelGroup *pKernelChannelGroup,
2619     NvBool bFullSriov
2620 )
2621 {
2622     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
2623     NV_CHANNEL_ALLOC_PARAMS *pRpcParams;
2624     NV_STATUS status = NV_OK;
2625 
2626     pRpcParams = portMemAllocNonPaged(sizeof(*pRpcParams));
2627     NV_ASSERT_OR_RETURN(pRpcParams != NULL, NV_ERR_NO_MEMORY);
2628     portMemSet(pRpcParams, 0, sizeof(*pRpcParams));
2629 
2630     pRpcParams->hObjectError      = pChannelGpfifoParams->hObjectError;
2631     pRpcParams->hObjectBuffer     = 0;
2632     pRpcParams->gpFifoOffset      = pChannelGpfifoParams->gpFifoOffset;
2633     pRpcParams->gpFifoEntries     = pChannelGpfifoParams->gpFifoEntries;
2634     pRpcParams->flags             = pChannelGpfifoParams->flags;
2635     pRpcParams->hContextShare     = pChannelGpfifoParams->hContextShare;
2636     pRpcParams->hVASpace          = pChannelGpfifoParams->hVASpace;
2637     pRpcParams->engineType        = pChannelGpfifoParams->engineType;
2638     pRpcParams->subDeviceId       = pChannelGpfifoParams->subDeviceId;
2639     pRpcParams->hObjectEccError   = pChannelGpfifoParams->hObjectEccError;
2640     pRpcParams->hPhysChannelGroup = pChannelGpfifoParams->hPhysChannelGroup;
2641     pRpcParams->internalFlags     = pChannelGpfifoParams->internalFlags;
2642 
2643     portMemCopy((void*)pRpcParams->hUserdMemory,
2644                 sizeof(NvHandle) * NV2080_MAX_SUBDEVICES,
2645                 (const void*)pChannelGpfifoParams->hUserdMemory,
2646                 sizeof(NvHandle) * NV2080_MAX_SUBDEVICES);
2647 
2648     portMemCopy((void*)pRpcParams->userdOffset,
2649                 sizeof(NvU64) * NV2080_MAX_SUBDEVICES,
2650                 (const void*)pChannelGpfifoParams->userdOffset,
2651                 sizeof(NvU64) * NV2080_MAX_SUBDEVICES);
2652 
2653     if (pKernelChannel->bCCSecureChannel)
2654     {
2655         portMemCopy((void*)pRpcParams->encryptIv,
2656                     sizeof(pRpcParams->encryptIv),
2657                     (const void*)pChannelGpfifoParams->encryptIv,
2658                     sizeof(pChannelGpfifoParams->encryptIv));
2659 
2660         portMemCopy((void*)pRpcParams->decryptIv,
2661                     sizeof(pRpcParams->decryptIv),
2662                     (const void*)pChannelGpfifoParams->decryptIv,
2663                     sizeof(pChannelGpfifoParams->decryptIv));
2664 
2665         portMemCopy((void*)pRpcParams->hmacNonce,
2666                     sizeof(pRpcParams->hmacNonce),
2667                     (const void*)pChannelGpfifoParams->hmacNonce,
2668                     sizeof(pChannelGpfifoParams->hmacNonce));
2669     }
2670 
2671     //
2672     // These fields are only filled out for GSP client or full SRIOV
2673     // i.e. the guest independently allocs ChID and instmem
2674     //
2675     if (IS_GSP_CLIENT(pGpu) || bFullSriov)
2676     {
2677         NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
2678         FIFO_INSTANCE_BLOCK *pInstanceBlock = pKernelChannel->pFifoHalData[subdevInst];
2679         NvU32 runqueue  = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, pChannelGpfifoParams->flags);
2680 
2681         NV_ASSERT_TRUE_OR_GOTO(status,
2682                                pInstanceBlock != NULL,
2683                                NV_ERR_INVALID_STATE,
2684                                cleanup);
2685 
2686         portMemCopy(&pRpcParams->errorNotifierMem,
2687                     sizeof pRpcParams->errorNotifierMem,
2688                     &(pChannelGpfifoParams->errorNotifierMem),
2689                     sizeof pChannelGpfifoParams->errorNotifierMem);
2690         portMemCopy(&pRpcParams->eccErrorNotifierMem,
2691                     sizeof pRpcParams->eccErrorNotifierMem,
2692                     &(pChannelGpfifoParams->eccErrorNotifierMem),
2693                     sizeof pChannelGpfifoParams->eccErrorNotifierMem);
2694 
2695         // Fill the instance block
2696         if (pInstanceBlock)
2697         {
2698             pRpcParams->instanceMem.base =
2699                             memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0);
2700             pRpcParams->instanceMem.size = pInstanceBlock->pInstanceBlockDesc->Size;
2701             pRpcParams->instanceMem.addressSpace =
2702                             memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc);
2703             pRpcParams->instanceMem.cacheAttrib =
2704                             memdescGetCpuCacheAttrib(pInstanceBlock->pInstanceBlockDesc);
2705 
2706             pRpcParams->ramfcMem.base =
2707                             memdescGetPhysAddr(pInstanceBlock->pRamfcDesc,  AT_GPU, 0);
2708             pRpcParams->ramfcMem.size = pInstanceBlock->pRamfcDesc->Size;
2709             pRpcParams->ramfcMem.addressSpace =
2710                             memdescGetAddressSpace(pInstanceBlock->pRamfcDesc);
2711             pRpcParams->ramfcMem.cacheAttrib =
2712                             memdescGetCpuCacheAttrib(pInstanceBlock->pRamfcDesc);
2713         }
2714 
2715         // Fill the userd memory descriptor
2716         if (pKernelChannel->pUserdSubDeviceMemDesc[subdevInst])
2717         {
2718             pRpcParams->userdMem.base =
2719                             memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst], AT_GPU, 0);
2720             pRpcParams->userdMem.size = pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]->Size;
2721             pRpcParams->userdMem.addressSpace =
2722                             memdescGetAddressSpace(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]);
2723             pRpcParams->userdMem.cacheAttrib =
2724                             memdescGetCpuCacheAttrib(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]);
2725         }
2726 
2727         // Fill the method buffer memory descriptor
2728         if (pKernelChannelGroup->pMthdBuffers != NULL &&
2729             pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc != NULL)
2730         {
2731             if (bFullSriov)
2732             {
2733                 pRpcParams->mthdbufMem.base =
2734                     pKernelChannelGroup->pMthdBuffers[runqueue].bar2Addr;
2735                 pRpcParams->mthdbufMem.size =
2736                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc->Size;
2737                 pRpcParams->mthdbufMem.addressSpace = ADDR_VIRTUAL;
2738                 pRpcParams->mthdbufMem.cacheAttrib = 0;
2739             }
2740             else
2741             {
2742                 pRpcParams->mthdbufMem.base = memdescGetPhysAddr(
2743                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc,
2744                     AT_GPU, 0);
2745                 pRpcParams->mthdbufMem.size =
2746                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc->Size;
2747                 pRpcParams->mthdbufMem.addressSpace = memdescGetAddressSpace(
2748                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc);
2749                 pRpcParams->mthdbufMem.cacheAttrib = memdescGetCpuCacheAttrib(
2750                     pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc);
2751             }
2752         }
2753 
2754         if (IS_GSP_CLIENT(pGpu))
2755         {
2756             //
2757             // Setting these param flags will make the Physical RMAPI use our
2758             // ChID (which is already decided)
2759             //
2760 
2761             NvU32 numChannelsPerUserd = NVBIT(DRF_SIZE(NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE)); //  1<<3 -> 4K / 512B
2762 
2763             pRpcParams->flags = FLD_SET_DRF(OS04, _FLAGS,
2764                     _CHANNEL_USERD_INDEX_FIXED, _FALSE, pRpcParams->flags);
2765             pRpcParams->flags = FLD_SET_DRF(OS04, _FLAGS,
2766                     _CHANNEL_USERD_INDEX_PAGE_FIXED, _TRUE, pRpcParams->flags);
2767             pRpcParams->flags = FLD_SET_DRF_NUM(OS04, _FLAGS,
2768                     _CHANNEL_USERD_INDEX_VALUE, pKernelChannel->ChID % numChannelsPerUserd, pRpcParams->flags);
2769             pRpcParams->flags = FLD_SET_DRF_NUM(OS04, _FLAGS,
2770                     _CHANNEL_USERD_INDEX_PAGE_VALUE, pKernelChannel->ChID / numChannelsPerUserd, pRpcParams->flags);
2771 
2772             // GSP client needs to pass in privilege level as an alloc param since GSP-RM cannot check this
2773             pRpcParams->internalFlags =
2774                 FLD_SET_DRF_NUM(_KERNELCHANNEL, _ALLOC_INTERNALFLAGS, _PRIVILEGE,
2775                     pKernelChannel->privilegeLevel, pRpcParams->internalFlags);
2776             pRpcParams->ProcessID = pKernelChannel->ProcessID;
2777             pRpcParams->SubProcessID= pKernelChannel->SubProcessID;
2778         }
2779     }
2780 
2781     NV_RM_RPC_ALLOC_CHANNEL(pGpu,
2782                             RES_GET_CLIENT_HANDLE(pKernelChannel),
2783                             RES_GET_PARENT_HANDLE(pKernelChannel),
2784                             RES_GET_HANDLE(pKernelChannel),
2785                             RES_GET_EXT_CLASS_ID(pKernelChannel),
2786                             pRpcParams,
2787                             &pKernelChannel->ChID,
2788                             status);
2789     NV_ASSERT_OK_OR_GOTO(status, status, cleanup);
2790 
2791     NV_PRINTF(LEVEL_INFO,
2792         "Alloc Channel chid %d, hClient:0x%x, hParent:0x%x, hObject:0x%x, hClass:0x%x\n",
2793         pKernelChannel->ChID,
2794         RES_GET_CLIENT_HANDLE(pKernelChannel),
2795         RES_GET_PARENT_HANDLE(pKernelChannel),
2796         RES_GET_HANDLE(pKernelChannel),
2797         RES_GET_EXT_CLASS_ID(pKernelChannel));
2798 
2799 cleanup:
2800     portMemFree(pRpcParams);
2801 
2802     return status;
2803 }
2804 
2805 /*!
2806  * @brief Bind a single channel to a runlist
2807  *
2808  * This is a helper function for kchannelCtrlCmdBind and kchangrpapiCtrlCmdBind
2809  */
kchannelBindToRunlist_IMPL(KernelChannel * pKernelChannel,RM_ENGINE_TYPE localRmEngineType,ENGDESCRIPTOR engineDesc)2810 NV_STATUS kchannelBindToRunlist_IMPL
2811 (
2812     KernelChannel *pKernelChannel,
2813     RM_ENGINE_TYPE localRmEngineType,
2814     ENGDESCRIPTOR  engineDesc
2815 )
2816 {
2817     OBJGPU    *pGpu;
2818     NV_STATUS  status = NV_OK;
2819 
2820     NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT);
2821     pGpu = GPU_RES_GET_GPU(pKernelChannel);
2822 
2823     // copied from setRunlistIdByEngineType
2824     if ((engineDesc == ENG_SW) || (engineDesc == ENG_BUS))
2825     {
2826         return NV_OK;
2827     }
2828 
2829     //
2830     // vGPU:
2831     //
2832     // Since vGPU does all real hardware management in the
2833     // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true),
2834     // do an RPC to the host to do the hardware update.
2835     //
2836     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
2837     {
2838         NVA06F_CTRL_BIND_PARAMS params;
2839 
2840         params.engineType = gpuGetNv2080EngineType(localRmEngineType);
2841 
2842         NV_RM_RPC_CONTROL(pGpu,
2843                           RES_GET_CLIENT_HANDLE(pKernelChannel),
2844                           RES_GET_HANDLE(pKernelChannel),
2845                           NVA06F_CTRL_CMD_BIND,
2846                           &params,
2847                           sizeof(params),
2848                           status);
2849 
2850         NV_ASSERT_OR_RETURN(status == NV_OK, status);
2851     }
2852 
2853     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
2854 
2855     status = kfifoRunlistSetIdByEngine_HAL(pGpu, GPU_GET_KERNEL_FIFO(pGpu),
2856                                            pKernelChannel, engineDesc);
2857 
2858     if (status != NV_OK)
2859     {
2860         NV_PRINTF(LEVEL_ERROR,
2861                   "Failed to set RunlistID 0x%08x for channel 0x%08x\n",
2862                   engineDesc, kchannelGetDebugTag(pKernelChannel));
2863         SLI_LOOP_BREAK;
2864     }
2865 
2866     SLI_LOOP_END;
2867 
2868     return status;
2869 }
2870 
2871 //
2872 // channelCtrlCmdEventSetNotification
2873 //
2874 // This command handles set notification operations for all tesla,
2875 // fermi, kepler, and maxwell based gpfifo classes:
2876 //
2877 //    NV50_DISPLAY             (Class: NV5070)
2878 //    GF100_CHANNEL_GPFIFO     (Class: NV906F)
2879 //    KEPLER_CHANNEL_GPFIFO_A  (Class: NVA06F)
2880 //    KEPLER_CHANNEL_GPFIFO_B  (Class: NVA16F)
2881 //    KEPLER_CHANNEL_GPFIFO_C  (Class: NVA26F)
2882 //    MAXWELL_CHANNEL_GPFIFO_A (Class: NVB06F)
2883 //    PASCAL_CHANNEL_GPFIFO_A  (Class: NVC06F)
2884 //
2885 NV_STATUS
kchannelCtrlCmdEventSetNotification_IMPL(KernelChannel * pKernelChannel,NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS * pSetEventParams)2886 kchannelCtrlCmdEventSetNotification_IMPL
2887 (
2888     KernelChannel *pKernelChannel,
2889     NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams
2890 )
2891 {
2892     CLI_CHANNEL_CLASS_INFO classInfo;
2893     CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
2894     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
2895 
2896     // NV01_EVENT must have been plugged into this subdevice
2897     if (inotifyGetNotificationList(staticCast(pKernelChannel, INotifier)) == NULL)
2898     {
2899         NV_PRINTF(LEVEL_INFO, "cmd 0x%x: no event list\n", pRmCtrlParams->cmd);
2900         return NV_ERR_INVALID_STATE;
2901     }
2902 
2903     // get channel class-specific properties
2904     CliGetChannelClassInfo(REF_VAL(NVXXXX_CTRL_CMD_CLASS, pRmCtrlParams->cmd),
2905                            &classInfo);
2906 
2907     if (pSetEventParams->event >= classInfo.notifiersMaxCount)
2908     {
2909         NV_PRINTF(LEVEL_INFO, "bad event 0x%x\n", pSetEventParams->event);
2910         return NV_ERR_INVALID_ARGUMENT;
2911     }
2912 
2913     if ((pSetEventParams->action == classInfo.eventActionSingle) ||
2914         (pSetEventParams->action == classInfo.eventActionRepeat))
2915     {
2916         // must be in disabled state to transition to an active state
2917         if (pKernelChannel->pNotifyActions[pSetEventParams->event] != classInfo.eventActionDisable)
2918         {
2919             return NV_ERR_INVALID_STATE;
2920         }
2921 
2922         pKernelChannel->pNotifyActions[pSetEventParams->event] = pSetEventParams->action;
2923     }
2924     else if (pSetEventParams->action == classInfo.eventActionDisable)
2925     {
2926         pKernelChannel->pNotifyActions[pSetEventParams->event] = pSetEventParams->action;
2927     }
2928     else
2929     {
2930         return NV_ERR_INVALID_ARGUMENT;
2931     }
2932 
2933     return NV_OK;
2934 }
2935 
2936 NV_STATUS
kchannelCtrlCmdGetClassEngineid_IMPL(KernelChannel * pKernelChannel,NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS * pParams)2937 kchannelCtrlCmdGetClassEngineid_IMPL
2938 (
2939     KernelChannel *pKernelChannel,
2940     NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams
2941 )
2942 {
2943     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
2944     KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
2945     NV_STATUS status = NV_OK;
2946     RM_ENGINE_TYPE rmEngineType;
2947 
2948     //
2949     // MODS uses hObject 0 to figure out if this call is supported or not.
2950     // In SRIOV VF scenario, plugin asserts if host returns an error code
2951     // for a control call. Adding a temporary work around till MODS submits
2952     // a proper fix.
2953     //
2954     if (pParams->hObject == NV01_NULL_OBJECT)
2955     {
2956         return NV_ERR_OBJECT_NOT_FOUND;
2957     }
2958 
2959     NV_CHECK_OR_RETURN(LEVEL_ERROR,
2960                        pParams->hObject != RES_GET_CLIENT_HANDLE(pKernelChannel),
2961                        NV_ERR_INVALID_ARGUMENT);
2962 
2963     if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) ||
2964         (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))
2965     {
2966         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
2967         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
2968 
2969         NV_RM_RPC_CONTROL(pGpu,
2970                           pRmCtrlParams->hClient,
2971                           RES_GET_HANDLE(pKernelChannel),
2972                           pRmCtrlParams->cmd,
2973                           pRmCtrlParams->pParams,
2974                           pRmCtrlParams->paramsSize,
2975                           status);
2976         return status;
2977     }
2978 
2979     NV_ASSERT_OK_OR_RETURN(
2980         kchannelGetClassEngineID_HAL(pGpu, pKernelChannel, pParams->hObject,
2981                                  &pParams->classEngineID,
2982                                  &pParams->classID,
2983                                  &rmEngineType));
2984 
2985     pParams->engineID = gpuGetNv2080EngineType(rmEngineType);
2986 
2987     if (IS_MIG_IN_USE(pGpu) &&
2988         kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, rmEngineType))
2989     {
2990         MIG_INSTANCE_REF ref;
2991         RM_ENGINE_TYPE localRmEngineType;
2992 
2993         NV_ASSERT_OK_OR_RETURN(
2994             kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
2995                                             GPU_RES_GET_DEVICE(pKernelChannel),
2996                                             &ref));
2997 
2998         NV_ASSERT_OK_OR_RETURN(
2999             kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref,
3000                                               rmEngineType,
3001                                               &localRmEngineType));
3002 
3003         NV_PRINTF(LEVEL_INFO, "Overriding global engine type 0x%x to local engine type 0x%x (0x%x) due to MIG\n",
3004                   pParams->engineID, gpuGetNv2080EngineType(localRmEngineType), localRmEngineType);
3005 
3006         pParams->engineID = gpuGetNv2080EngineType(localRmEngineType);
3007     }
3008 
3009     return status;
3010 }
3011 
3012 NV_STATUS
kchannelCtrlCmdResetIsolatedChannel_IMPL(KernelChannel * pKernelChannel,NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS * pResetIsolatedChannelParams)3013 kchannelCtrlCmdResetIsolatedChannel_IMPL
3014 (
3015     KernelChannel *pKernelChannel,
3016     NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS *pResetIsolatedChannelParams
3017 )
3018 {
3019     NV_STATUS  status    = NV_OK;
3020     OBJGPU    *pGpu      = GPU_RES_GET_GPU(pKernelChannel);
3021     RM_API    *pRmApi    = GPU_GET_PHYSICAL_RMAPI(pGpu);
3022 
3023 
3024     // This ctrl sets bIsRcPending in the KernelChannel object. Because Kernel-RM is
3025     // the source of truth on this, it's important that this ctrl is called from CPU-RM
3026     NV_ASSERT_OR_RETURN(!RMCFG_FEATURE_PLATFORM_GSP, NV_ERR_INVALID_OPERATION);
3027 
3028     // Call internal RMCTRL on physical-RM, kchannelFwdToInternalCtrl() is not
3029     // used because no conversion from KernelChannel to Channel is required
3030     status = pRmApi->Control(pRmApi,
3031                              resservGetTlsCallContext()->pControlParams->hClient,
3032                              RES_GET_HANDLE(pKernelChannel),
3033                              NV506F_CTRL_CMD_INTERNAL_RESET_ISOLATED_CHANNEL,
3034                              pResetIsolatedChannelParams,
3035                              sizeof(NV506F_CTRL_CMD_INTERNAL_RESET_ISOLATED_CHANNEL_PARAMS));
3036 
3037     // If physical RM successfully reset the isolated channel,
3038     // mark that the RC is no longer pending
3039     if (status == NV_OK)
3040         pKernelChannel->bIsRcPending[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = NV_FALSE;
3041 
3042     return status;
3043 }
3044 
3045 // This ctrl accesses bIsRcPending in the KernelChannel object to populate
3046 // information required by physical RM. Because Kernel-RM is the source of
3047 // truth on this, it's important that this ctrl be called originally from CPU-RM.
3048 NV_STATUS
kchannelCtrlCmdResetChannel_IMPL(KernelChannel * pKernelChannel,NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS * pResetChannelParams)3049 kchannelCtrlCmdResetChannel_IMPL
3050 (
3051     KernelChannel *pKernelChannel,
3052     NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams
3053 )
3054 {
3055     NV_STATUS status    = NV_OK;
3056     OBJGPU   *pGpu      = GPU_RES_GET_GPU(pKernelChannel);
3057     CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
3058     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3059 
3060     if (!(pRmCtrlParams->bInternal ||
3061           pResetChannelParams->resetReason <
3062               NV906F_CTRL_CMD_RESET_CHANNEL_REASON_ENUM_MAX))
3063     {
3064         return NV_ERR_INVALID_PARAMETER;
3065     }
3066 
3067     // Send physical RM info on if an RC is pending
3068     pResetChannelParams->bIsRcPending =
3069         pKernelChannel->bIsRcPending[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3070 
3071     //
3072     // All real hardware management is done in the host.
3073     // Do an RPC to the host to do the hardware update and return.
3074     //
3075     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
3076     {
3077         NV_RM_RPC_CONTROL(pGpu,
3078                           pRmCtrlParams->hClient,
3079                           RES_GET_HANDLE(pKernelChannel),
3080                           NV906F_CTRL_CMD_RESET_CHANNEL,
3081                           pResetChannelParams,
3082                           pRmCtrlParams->paramsSize,
3083                           status);
3084         return status;
3085     }
3086 
3087     //
3088     // Do an internal control call to do channel reset
3089     // on Host (Physical) RM
3090     //
3091     return kchannelFwdToInternalCtrl_HAL(pGpu,
3092                                          pKernelChannel,
3093                                          NVA06F_CTRL_CMD_INTERNAL_RESET_CHANNEL,
3094                                          pRmCtrlParams);
3095 }
3096 
3097 //
3098 // channelCtrlCmdEventSetTrigger
3099 //
3100 // This command handles set trigger operations for all kepler and maxwell based
3101 // gpfifo classes:
3102 //
3103 //    KEPLER_CHANNEL_GPFIFO_A  (Class: NVA06F)
3104 //    KEPLER_CHANNEL_GPFIFO_B  (Class: NVA16F)
3105 //    KEPLER_CHANNEL_GPFIFO_C  (Class: NVA26F)
3106 //    MAXWELL_CHANNEL_GPFIFO_A (Class: NVB06F)
3107 //    PASCAL_CHANNEL_GPFIFO_A  (Class: NVC06F)
3108 //
3109 NV_STATUS
kchannelCtrlCmdEventSetTrigger_IMPL(KernelChannel * pKernelChannel)3110 kchannelCtrlCmdEventSetTrigger_IMPL
3111 (
3112     KernelChannel *pKernelChannel
3113 )
3114 {
3115     kchannelNotifyEvent(pKernelChannel, NVA06F_NOTIFIERS_SW, 0, 0, NULL, 0);
3116 
3117     return NV_OK;
3118 }
3119 
3120 NV_STATUS
kchannelCtrlCmdGpFifoSchedule_IMPL(KernelChannel * pKernelChannel,NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS * pSchedParams)3121 kchannelCtrlCmdGpFifoSchedule_IMPL
3122 (
3123     KernelChannel *pKernelChannel,
3124     NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams
3125 )
3126 {
3127     OBJGPU       *pGpu          = GPU_RES_GET_GPU(pKernelChannel);
3128     NV_STATUS     rmStatus      = NV_OK;
3129     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
3130     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3131 
3132     //
3133     // Bug 1737765: Prevent Externally Owned Channels from running unless bound
3134     //  It is possible for clients to allocate and schedule channels while
3135     //  skipping the UVM registration step which binds the appropriate
3136     //  allocations in RM. We need to fail channel scheduling if the channels
3137     //  have not been registered with UVM.
3138     //  This check is performed on baremetal, CPU-RM and guest-RM
3139     //
3140     NV_ASSERT_OR_RETURN(kchannelIsSchedulable_HAL(pGpu, pKernelChannel), NV_ERR_INVALID_STATE);
3141 
3142     //
3143     // If this was a host-only channel we'll have never set the runlist id, so
3144     // force it here to ensure it is immutable now that the channel is scheduled.
3145     //
3146     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
3147     kchannelSetRunlistSet(pGpu, pKernelChannel, NV_TRUE);
3148     SLI_LOOP_END
3149 
3150 
3151     //
3152     // All real hardware management is done in the host.
3153     // Do an RPC to the host to do the hardware update and return.
3154     //
3155     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
3156     {
3157         NV_RM_RPC_CONTROL(pGpu,
3158                           RES_GET_CLIENT_HANDLE(pKernelChannel),
3159                           RES_GET_HANDLE(pKernelChannel),
3160                           NVA06F_CTRL_CMD_GPFIFO_SCHEDULE,
3161                           pRmCtrlParams->pParams,
3162                           pRmCtrlParams->paramsSize,
3163                           rmStatus);
3164 
3165         return rmStatus;
3166     }
3167 
3168     //
3169     // Do an internal control call to do channel reset
3170     // on Host (Physical) RM
3171     //
3172     return kchannelFwdToInternalCtrl_HAL(pGpu,
3173                                          pKernelChannel,
3174                                          NVA06F_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE,
3175                                          pRmCtrlParams);
3176 }
3177 
3178 NV_STATUS
kchannelCtrlCmdGetEngineCtxSize_IMPL(KernelChannel * pKernelChannel,NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS * pCtxSizeParams)3179 kchannelCtrlCmdGetEngineCtxSize_IMPL
3180 (
3181     KernelChannel *pKernelChannel,
3182     NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS *pCtxSizeParams
3183 )
3184 {
3185     return NV_ERR_NOT_SUPPORTED;
3186 }
3187 
3188 NV_STATUS
kchannelCtrlCmdSetErrorNotifier_IMPL(KernelChannel * pKernelChannel,NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS * pSetErrorNotifierParams)3189 kchannelCtrlCmdSetErrorNotifier_IMPL
3190 (
3191     KernelChannel *pKernelChannel,
3192     NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS *pSetErrorNotifierParams
3193 )
3194 {
3195     OBJGPU   *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3196     RC_NOTIFIER_SCOPE scope;
3197     NV_STATUS rmStatus = NV_OK;
3198 
3199     NV_PRINTF(LEVEL_INFO,
3200               "calling setErrorNotifier on channel: 0x%x, broadcast to TSG: %s\n",
3201               kchannelGetDebugTag(pKernelChannel),
3202               pSetErrorNotifierParams->bNotifyEachChannelInTSG ? "true" : "false");
3203 
3204     scope = pSetErrorNotifierParams->bNotifyEachChannelInTSG ?
3205                 RC_NOTIFIER_SCOPE_TSG :
3206                 RC_NOTIFIER_SCOPE_CHANNEL;
3207 
3208     rmStatus = krcErrorSetNotifier(pGpu, GPU_GET_KERNEL_RC(pGpu),
3209                                    pKernelChannel,
3210                                    ROBUST_CHANNEL_GR_EXCEPTION,
3211                                    kchannelGetEngineType(pKernelChannel),
3212                                    scope);
3213     return rmStatus;
3214 }
3215 
3216 NV_STATUS
kchannelCtrlCmdBind_IMPL(KernelChannel * pKernelChannel,NVA06F_CTRL_BIND_PARAMS * pParams)3217 kchannelCtrlCmdBind_IMPL
3218 (
3219     KernelChannel *pKernelChannel,
3220     NVA06F_CTRL_BIND_PARAMS *pParams
3221 )
3222 {
3223     RM_ENGINE_TYPE globalRmEngineType;
3224     RM_ENGINE_TYPE localRmEngineType;
3225     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3226     NvBool bMIGInUse = IS_MIG_IN_USE(pGpu);
3227     NV_STATUS rmStatus = NV_OK;
3228     ENGDESCRIPTOR engineDesc;
3229 
3230     if (!pParams)
3231         return NV_ERR_INVALID_ARGUMENT;
3232 
3233     // Check if channel belongs to TSG that is not internal RM TSG
3234     if (!pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm)
3235     {
3236         // This may be valid request if we added new channel to TSG that is
3237         // already running. In that case we just have to check that it uses
3238         // the same runlist as whole TSG.
3239         // We do that in fifoRunlistSetId()
3240         NV_PRINTF(LEVEL_INFO,
3241                   "Bind requested for channel %d belonging to TSG %d.\n",
3242                   kchannelGetDebugTag(pKernelChannel),
3243                   pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->grpID);
3244     }
3245 
3246     localRmEngineType = globalRmEngineType = gpuGetRmEngineType(pParams->engineType);
3247 
3248     if (bMIGInUse)
3249     {
3250         KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
3251         MIG_INSTANCE_REF ref;
3252 
3253         NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
3254             kmigmgrGetInstanceRefFromDevice(pGpu, pKernelMIGManager,
3255                                             GPU_RES_GET_DEVICE(pKernelChannel),
3256                                             &ref));
3257 
3258         NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
3259             kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, localRmEngineType,
3260                                               &globalRmEngineType));
3261 
3262     }
3263 
3264     NV_PRINTF(LEVEL_INFO, "Binding Channel %d to Engine %d\n",
3265               kchannelGetDebugTag(pKernelChannel), globalRmEngineType);
3266 
3267     // Translate globalRmEngineType -> enginedesc
3268     NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
3269         gpuXlateClientEngineIdToEngDesc(pGpu, globalRmEngineType, &engineDesc));
3270 
3271     if (rmStatus == NV_OK)
3272     {
3273         NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
3274             kchannelBindToRunlist(pKernelChannel, localRmEngineType, engineDesc));
3275     }
3276 
3277     return rmStatus;
3278 }
3279 
3280 NV_STATUS
kchannelCtrlCmdSetInterleaveLevel_IMPL(KernelChannel * pKernelChannel,NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS * pParams)3281 kchannelCtrlCmdSetInterleaveLevel_IMPL
3282 (
3283     KernelChannel *pKernelChannel,
3284     NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams
3285 )
3286 {
3287     OBJGPU          *pGpu         = GPU_RES_GET_GPU(pKernelChannel);
3288     NV_STATUS        status       = NV_OK;
3289 
3290     if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
3291     {
3292         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
3293         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3294 
3295         NV_RM_RPC_CONTROL(pGpu,
3296                           RES_GET_CLIENT_HANDLE(pKernelChannel),
3297                           RES_GET_HANDLE(pKernelChannel),
3298                           pRmCtrlParams->cmd,
3299                           pRmCtrlParams->pParams,
3300                           pRmCtrlParams->paramsSize,
3301                           status);
3302         NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, NV_ERR_NOT_SUPPORTED);
3303     }
3304 
3305     status = kchangrpSetInterleaveLevel(pGpu, pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup, pParams->channelInterleaveLevel);
3306 
3307     return status;
3308 }
3309 
3310 NV_STATUS
kchannelCtrlCmdGetInterleaveLevel_IMPL(KernelChannel * pKernelChannel,NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS * pParams)3311 kchannelCtrlCmdGetInterleaveLevel_IMPL
3312 (
3313     KernelChannel *pKernelChannel,
3314     NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams
3315 )
3316 {
3317     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3318 
3319     pParams->channelInterleaveLevel =
3320         pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pInterleaveLevel[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3321 
3322     return NV_OK;
3323 }
3324 
3325 NV_STATUS
kchannelCtrlCmdGpfifoGetWorkSubmitToken_IMPL(KernelChannel * pKernelChannel,NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS * pTokenParams)3326 kchannelCtrlCmdGpfifoGetWorkSubmitToken_IMPL
3327 (
3328     KernelChannel *pKernelChannel,
3329     NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *pTokenParams
3330 )
3331 {
3332     NV_STATUS     rmStatus      = NV_OK;
3333     OBJGPU       *pGpu          = GPU_RES_GET_GPU(pKernelChannel);
3334     KernelFifo   *pKernelFifo   = GPU_GET_KERNEL_FIFO(pGpu);
3335     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
3336     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3337     NvBool bIsMIGEnabled        = IS_MIG_ENABLED(pGpu);
3338 
3339     NvBool bIsModsVgpu          = NV_FALSE;
3340 
3341     NvBool bIsVgpuRpcNeeded     = (bIsModsVgpu || (IS_VIRTUAL(pGpu) &&
3342                                   !(IS_VIRTUAL_WITH_SRIOV(pGpu) && !bIsMIGEnabled &&
3343                                     kfifoIsPerRunlistChramEnabled(pKernelFifo)))) &&
3344                                     (!pKernelFifo->bGuestGenenratesWorkSubmitToken);
3345     //
3346     // vGPU:
3347     // If required call into the host to get the worksubmit token.
3348     //
3349     if (bIsVgpuRpcNeeded)
3350     {
3351         NV_RM_RPC_CONTROL(pGpu,
3352                           pRmCtrlParams->hClient,
3353                           RES_GET_HANDLE(pKernelChannel),
3354                           pRmCtrlParams->cmd,
3355                           pRmCtrlParams->pParams,
3356                           pRmCtrlParams->paramsSize,
3357                           rmStatus);
3358         //
3359         // All done if error or for non-MODS vGPU guest (host did notification in RPC).
3360         // GSP FW is not able to perform the notification, nor is MODS vGPU host,
3361         // so it still needs to be handled by the client/guest outside the RPC.
3362         //
3363         if (rmStatus != NV_OK)
3364         {
3365             return rmStatus;
3366         }
3367 
3368         if (IS_VIRTUAL(pGpu))
3369         {
3370             return rmStatus;
3371         }
3372     }
3373 
3374     if (!bIsModsVgpu || pKernelFifo->bGuestGenenratesWorkSubmitToken)
3375     {
3376         NV_ASSERT_OR_RETURN(pKernelChannel->pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE);
3377         NV_ASSERT_OR_RETURN(pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup != NULL, NV_ERR_INVALID_STATE);
3378         rmStatus = kfifoGenerateWorkSubmitToken_HAL(pGpu, pKernelFifo, pKernelChannel,
3379                                                     &pTokenParams->workSubmitToken,
3380                                                     pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bIsCallingContextVgpuPlugin);
3381         NV_CHECK_OR_RETURN(LEVEL_INFO, rmStatus == NV_OK, rmStatus);
3382     }
3383 
3384     rmStatus = kchannelNotifyWorkSubmitToken(pGpu, pKernelChannel, pTokenParams->workSubmitToken);
3385     return rmStatus;
3386 }
3387 
3388 NV_STATUS
kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex_IMPL(KernelChannel * pKernelChannel,NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS * pParams)3389 kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex_IMPL
3390 (
3391     KernelChannel *pKernelChannel,
3392     NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *pParams
3393 )
3394 {
3395     NV_STATUS   rmStatus    = NV_OK;
3396     OBJGPU     *pGpu        = GPU_RES_GET_GPU(pKernelChannel);
3397 
3398     //
3399     // vGPU:
3400     //
3401     // Since vgpu plugin is required to update notifier for guest, send an RPC
3402     // to host RM for the plugin to hook.
3403     // RPC not needed for SR-IOV vGpu.
3404     //
3405     // GSP-RM:
3406     //
3407     // Notification is done in CPU-RM, so RPC is not made to FW-RM.
3408     //
3409     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
3410     NvBool bIsMIGEnabled    = IS_MIG_ENABLED(pGpu);
3411     NvBool bIsVgpuRpcNeeded = IS_VIRTUAL(pGpu) &&
3412                               !(IS_VIRTUAL_WITH_SRIOV(pGpu) && !bIsMIGEnabled &&
3413                                 kfifoIsPerRunlistChramEnabled(pKernelFifo));
3414     if (bIsVgpuRpcNeeded)
3415     {
3416         CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
3417         RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
3418 
3419         NV_RM_RPC_CONTROL(pGpu,
3420                           pRmCtrlParams->hClient,
3421                           RES_GET_HANDLE(pKernelChannel),
3422                           pRmCtrlParams->cmd,
3423                           pRmCtrlParams->pParams,
3424                           pRmCtrlParams->paramsSize,
3425                           rmStatus);
3426         return rmStatus;
3427     }
3428 
3429     rmStatus = kchannelUpdateWorkSubmitTokenNotifIndex(pGpu, pKernelChannel, pParams->index);
3430     return rmStatus;
3431 }
3432 
3433 NV_STATUS
kchannelRegisterChild_IMPL(KernelChannel * pKernelChannel,ChannelDescendant * pObject)3434 kchannelRegisterChild_IMPL
3435 (
3436     KernelChannel     *pKernelChannel,
3437     ChannelDescendant *pObject
3438 )
3439 {
3440     NvU16 firstObjectClassID;
3441     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3442     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
3443 
3444     //
3445     // On recent GPU architectures such as FERMI, SetObject operations
3446     // require an EngineID:ClassID tuple as an argument, rather than
3447     // an object handle. In order to be able to differentiate between
3448     // different instances of any given software class, the ClassID
3449     // field needs to be unique within the FIFO context. The code below
3450     // attempts to find a qualifying 16-bit ClassID.
3451     //
3452     if (pObject->resourceDesc.engDesc == ENG_SW)
3453     {
3454         RS_ORDERED_ITERATOR it;
3455         RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3456         ChannelDescendant *pMatchingObject = NULL;
3457 
3458         firstObjectClassID = pKernelChannel->nextObjectClassID;
3459 
3460         do
3461         {
3462             if (++pKernelChannel->nextObjectClassID == firstObjectClassID)
3463             {
3464                 NV_PRINTF(LEVEL_ERROR, "channel %08x:%08x: out of handles!\n",
3465                           RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_HANDLE(pKernelChannel));
3466                 return NV_ERR_INSUFFICIENT_RESOURCES;
3467             }
3468             if (pKernelChannel->nextObjectClassID == 0)
3469                 continue;
3470 
3471             it = clientRefOrderedIter(pClient, RES_GET_REF(pKernelChannel), classId(ChannelDescendant), NV_FALSE);
3472 
3473             while (clientRefOrderedIterNext(pClient, &it))
3474             {
3475                 pMatchingObject = dynamicCast(it.pResourceRef->pResource, ChannelDescendant);
3476                 NV_ASSERT_OR_ELSE(pMatchingObject != NULL, continue);
3477 
3478                 if ((pMatchingObject->resourceDesc.engDesc == ENG_SW) &&
3479                     (pMatchingObject->classID == pKernelChannel->nextObjectClassID))
3480                 {
3481                     break;
3482                 }
3483 
3484                 pMatchingObject = NULL;
3485             }
3486         }
3487         while (pMatchingObject != NULL);
3488 
3489         pObject->classID = pKernelChannel->nextObjectClassID;
3490     }
3491 
3492     return kfifoAddObject_HAL(pGpu, pKernelFifo, pObject);
3493 }
3494 
3495 NV_STATUS
kchannelDeregisterChild_IMPL(KernelChannel * pKernelChannel,ChannelDescendant * pObject)3496 kchannelDeregisterChild_IMPL
3497 (
3498     KernelChannel     *pKernelChannel,
3499     ChannelDescendant *pObject
3500 )
3501 {
3502     NV_STATUS status = NV_OK;
3503     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
3504     KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
3505 
3506     status = kfifoDeleteObject_HAL(pGpu, pKernelFifo, pObject);
3507     if (status != NV_OK)
3508     {
3509         NV_PRINTF(LEVEL_ERROR, "Could not delete hal resources with object\n");
3510         DBG_BREAKPOINT();
3511     }
3512 
3513     return status;
3514 }
3515 
3516 void
kchannelGetChildIterator(KernelChannel * pKernelChannel,NvU32 classID,RM_ENGINE_TYPE engineID,KernelChannelChildIterator * pIter)3517 kchannelGetChildIterator
3518 (
3519     KernelChannel *pKernelChannel,
3520     NvU32 classID,
3521     RM_ENGINE_TYPE engineID,
3522     KernelChannelChildIterator *pIter
3523 )
3524 {
3525     RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3526     NV_ASSERT_OR_RETURN_VOID(pIter != NULL);
3527 
3528     portMemSet(pIter, 0, sizeof(*pIter));
3529     pIter->classID = classID;
3530     pIter->engineID = engineID;
3531     pIter->rsIter = clientRefOrderedIter(pClient, RES_GET_REF(pKernelChannel), classId(ChannelDescendant), NV_FALSE);
3532 }
3533 
3534 ChannelDescendant *
kchannelGetNextChild(KernelChannelChildIterator * pIter)3535 kchannelGetNextChild
3536 (
3537     KernelChannelChildIterator *pIter
3538 )
3539 {
3540     ChannelDescendant *pChild;
3541 
3542     NV_ASSERT_OR_RETURN(pIter != NULL, NULL);
3543 
3544     while (clientRefOrderedIterNext(pIter->rsIter.pClient, &pIter->rsIter))
3545     {
3546         pChild = dynamicCast(pIter->rsIter.pResourceRef->pResource, ChannelDescendant);
3547         NV_ASSERT_OR_RETURN(pChild != NULL, NULL);
3548 
3549         // Continue to the next child if it doesn't match these filters:
3550         if (pIter->engineID != pChild->resourceDesc.engDesc)
3551             continue;
3552         if (pIter->classID != 0)
3553         {
3554             if ((RES_GET_EXT_CLASS_ID(pChild) != pIter->classID) &&
3555                 (pChild->classID != pIter->classID))
3556                 continue;
3557         }
3558 
3559         // Yield this matching child
3560         return pChild;
3561     }
3562 
3563     return NULL;
3564 }
3565 
3566 ChannelDescendant *
kchannelGetOneChild(KernelChannel * pKernelChannel,NvU32 classID,NvU32 engineID)3567 kchannelGetOneChild
3568 (
3569     KernelChannel *pKernelChannel,
3570     NvU32          classID,
3571     NvU32          engineID
3572 )
3573 {
3574     KernelChannelChildIterator iter;
3575 
3576     kchannelGetChildIterator(pKernelChannel, classID, engineID, &iter);
3577     return kchannelGetNextChild(&iter);
3578 }
3579 
3580 /**
3581  * @brief Gets object iterator for a channel or channel group
3582  *
3583  * @param[in] pKernelChannel
3584  * @param[in] classNum
3585  * @param[in] engDesc
3586  * @param[out] pIt
3587  *
3588  */
3589 void
kchannelGetChildIterOverGroup(KernelChannel * pKernelChannel,NvU32 classNum,NvU32 engDesc,KernelChannelChildIterOverGroup * pIt)3590 kchannelGetChildIterOverGroup
3591 (
3592     KernelChannel                   *pKernelChannel,
3593     NvU32                            classNum,
3594     NvU32                            engDesc,
3595     KernelChannelChildIterOverGroup *pIt
3596 )
3597 {
3598     NV_ASSERT_OR_RETURN_VOID(pIt != NULL);
3599     portMemSet(pIt, 0, sizeof(*pIt));
3600 
3601     NV_ASSERT_OR_RETURN_VOID(pKernelChannel != NULL);
3602 
3603     pIt->classNum = classNum;
3604     pIt->engDesc = engDesc;
3605 
3606     pIt->channelNode.pKernelChannel =
3607         pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pKernelChannel;
3608     pIt->channelNode.pNext =
3609         pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pNext;
3610 
3611     kchannelGetChildIterator(pIt->channelNode.pKernelChannel, pIt->classNum, pIt->engDesc, &pIt->kchannelIter);
3612 }
3613 
3614 /**
3615  * @brief Get the next object based on given class/engine tag.
3616  * When the class number is 0, it is ignored.
3617  *
3618  * @param[in] pIt
3619  *
3620  * Returns: found child or NULL
3621  *
3622  */
3623 ChannelDescendant *
kchannelGetNextChildOverGroup(KernelChannelChildIterOverGroup * pIt)3624 kchannelGetNextChildOverGroup
3625 (
3626     KernelChannelChildIterOverGroup *pIt
3627 )
3628 {
3629     PCHANNEL_NODE pHead = NULL;
3630     ChannelDescendant *pObject = NULL;
3631 
3632     NV_ASSERT_OR_RETURN(pIt != NULL, NULL);
3633 
3634     // Start iterating from the given object (if any) of the given channel.
3635     pHead = &pIt->channelNode;
3636 
3637     while ((pHead != NULL) && (pHead->pKernelChannel != NULL))
3638     {
3639         pObject = kchannelGetNextChild(&pIt->kchannelIter);
3640 
3641         if (pObject != NULL)
3642             break;
3643 
3644         //
3645         // If there are no more objects to inspect in the given channel,
3646         // move to the next channel (if any, for TSGs).
3647         //
3648         pHead = pHead->pNext;
3649         if (pHead != NULL)
3650         {
3651             NV_ASSERT_OR_ELSE(pHead->pKernelChannel != NULL, break);
3652             // Re-initialize the channeldescendant iterator based on this channel
3653             kchannelGetChildIterator(pHead->pKernelChannel, pIt->classNum, pIt->engDesc, &pIt->kchannelIter);
3654         }
3655     }
3656 
3657     // Cache off the next channel to start searching from in future iterations.
3658     pIt->channelNode.pKernelChannel = pHead ? pHead->pKernelChannel : NULL;
3659     pIt->channelNode.pNext = pHead ? pHead->pNext : NULL;
3660 
3661     return pObject;
3662 }
3663 
3664 NV_STATUS
kchannelFindChildByHandle(KernelChannel * pKernelChannel,NvHandle hResource,ChannelDescendant ** ppObject)3665 kchannelFindChildByHandle
3666 (
3667     KernelChannel *pKernelChannel,
3668     NvHandle hResource,
3669     ChannelDescendant **ppObject
3670 )
3671 {
3672     RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
3673     RsResourceRef *pResourceRef = NULL;
3674 
3675     NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, clientGetResourceRef(pClient, hResource, &pResourceRef));
3676 
3677     NV_CHECK_OR_RETURN(LEVEL_ERROR, pResourceRef->pParentRef->hResource == RES_GET_HANDLE(pKernelChannel), NV_ERR_OBJECT_NOT_FOUND);
3678 
3679     *ppObject = dynamicCast(pResourceRef->pResource, ChannelDescendant);
3680     NV_CHECK_OR_RETURN(LEVEL_ERROR, *ppObject != NULL, NV_ERR_OBJECT_NOT_FOUND);
3681 
3682     return NV_OK;
3683 }
3684 
3685 static NV_STATUS
_kchannelClearVAList(OBJGPU * pGpu,VA_LIST * pVaList,NvBool bUnmap)3686 _kchannelClearVAList
3687 (
3688     OBJGPU          *pGpu,
3689     VA_LIST         *pVaList,
3690     NvBool           bUnmap
3691 )
3692 {
3693     //
3694     // Subcontext handling
3695     // We need to unmap the mappings on all the subcontext, since the this call will be made only on one of the TSG channels.
3696     //
3697     if (bUnmap)
3698     {
3699         OBJVASPACE *pVas;
3700         NvU64 vAddr;
3701 
3702         FOR_EACH_IN_VADDR_LIST(pVaList, pVas, vAddr)
3703         {
3704             dmaUnmapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVas, vAddr);
3705         }
3706         FOR_EACH_IN_VADDR_LIST_END(pVaList, pVas, vAddr);
3707     }
3708 
3709     vaListClear(pVaList);
3710 
3711     return NV_OK;
3712 }
3713 
3714 /**
3715  * @brief Set or clear the Engine Context Memdesc.
3716  *
3717  * Should be committed to hardware after this using channelCommitEngineContext().
3718  * Should be unmapped before cleared/changed using kchannelUnmapEngineCtxBuf()
3719  *
3720  * @param[in] pGpu
3721  * @param[in] pKernelChannel
3722  * @param[in] engDesc
3723  * @param[in] pMemDesc                the new memdesc to assign, or NULL to clear
3724  *
3725  * Returns: status
3726  */
3727 NV_STATUS
kchannelSetEngineContextMemDesc_IMPL(OBJGPU * pGpu,KernelChannel * pKernelChannel,NvU32 engDesc,MEMORY_DESCRIPTOR * pMemDesc)3728 kchannelSetEngineContextMemDesc_IMPL
3729 (
3730     OBJGPU             *pGpu,
3731     KernelChannel      *pKernelChannel,
3732     NvU32               engDesc,
3733     MEMORY_DESCRIPTOR  *pMemDesc
3734 )
3735 {
3736     NV_STATUS status = NV_OK;
3737     ENGINE_CTX_DESCRIPTOR *pEngCtxDesc;
3738     KernelChannelGroup *pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup;
3739 
3740     NV_PRINTF(LEVEL_INFO,
3741               "ChID %x engDesc 0x%x pMemDesc %p\n",
3742               kchannelGetDebugTag(pKernelChannel), engDesc, pMemDesc);
3743 
3744     NV_ASSERT_OR_RETURN(engDesc != ENG_FIFO, NV_ERR_INVALID_PARAMETER);
3745 
3746     if (IS_GR(engDesc))
3747     {
3748         NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel));
3749     }
3750 
3751     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
3752 
3753     // Get or allocate the EngCtxDesc
3754     pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3755 
3756     if (pEngCtxDesc == NULL && pMemDesc == NULL)
3757     {
3758         // There is no need to clean up or alloc anything.
3759         SLI_LOOP_CONTINUE;
3760     }
3761 
3762     if (pEngCtxDesc != NULL)
3763     {
3764         // Cleanup for the engDesc context that existed before
3765         if (pEngCtxDesc->pMemDesc != NULL)
3766         {
3767             memdescFree(pEngCtxDesc->pMemDesc);
3768             memdescDestroy(pEngCtxDesc->pMemDesc);
3769         }
3770 
3771         //
3772     }
3773     else
3774     {
3775         NV_ASSERT_OK_OR_ELSE(status,
3776             kchangrpAllocEngineContextDescriptor(pGpu, pKernelChannelGroup),
3777             SLI_LOOP_GOTO(fail));
3778         pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3779         NV_ASSERT_OR_ELSE(pEngCtxDesc != NULL, status = NV_ERR_NO_MEMORY; SLI_LOOP_GOTO(fail));
3780     }
3781 
3782     if (pMemDesc != NULL)
3783     {
3784         // We are setting a memdesc
3785         if (pMemDesc->Allocated > 0)
3786             pMemDesc->Allocated++;
3787         memdescAddRef(pMemDesc);
3788 
3789         if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL)
3790         {
3791             NvU64 virtAddr;
3792 
3793             // Since the memdesc is already virtual, we do not manage it
3794             status = vaListSetManaged(&pEngCtxDesc->vaList, NV_FALSE);
3795             NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail));
3796 
3797             // memdescGetPhysAddr of a virtual memdesc is a virtual addr
3798             virtAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0);
3799             status = vaListAddVa(&pEngCtxDesc->vaList, pKernelChannel->pVAS, virtAddr);
3800             NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail));
3801         }
3802     }
3803 
3804     // Assign the memdesc (or NULL)
3805     pEngCtxDesc->pMemDesc = pMemDesc;
3806     pEngCtxDesc->engDesc = engDesc;
3807 
3808     SLI_LOOP_END
3809 
3810 fail:
3811     return status;
3812 }
3813 
3814 /**
3815  * @brief Unmaps everything from the Engine Context Memdesc.
3816  *
3817  * @param[in] pGpu
3818  * @param[in] pKernelChannel
3819  * @param[in] engDesc
3820  *
3821  * Returns: status
3822  */
3823 NV_STATUS
kchannelUnmapEngineCtxBuf_IMPL(OBJGPU * pGpu,KernelChannel * pKernelChannel,NvU32 engDesc)3824 kchannelUnmapEngineCtxBuf_IMPL
3825 (
3826     OBJGPU             *pGpu,
3827     KernelChannel      *pKernelChannel,
3828     NvU32               engDesc
3829 )
3830 {
3831     NV_STATUS status = NV_OK;
3832     ENGINE_CTX_DESCRIPTOR *pEngCtxDesc;
3833 
3834     NV_PRINTF(LEVEL_INFO,
3835               "ChID %x engDesc 0x%x\n",
3836               kchannelGetDebugTag(pKernelChannel), engDesc);
3837 
3838     NV_ASSERT_OR_RETURN(engDesc != ENG_FIFO, NV_ERR_INVALID_PARAMETER);
3839 
3840     if (IS_GR(engDesc))
3841     {
3842         NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel));
3843     }
3844 
3845     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
3846     pEngCtxDesc = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3847 
3848     // EngCtxDesc and MemDesc will be here, or else nothing can be mapped
3849     if ((pEngCtxDesc == NULL) || (pEngCtxDesc->pMemDesc == NULL))
3850     {
3851         SLI_LOOP_CONTINUE;
3852     }
3853 
3854     // Clear VA list, including unmap if managed
3855     status = _kchannelClearVAList(pGpu, &pEngCtxDesc->vaList, vaListGetManaged(&pEngCtxDesc->vaList));
3856     NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail));
3857 
3858     SLI_LOOP_END
3859 
3860 fail:
3861     return status;
3862 }
3863 
3864 // Check that BcState stays consistent for GR channel engine context
3865 NV_STATUS
kchannelCheckBcStateCurrent_IMPL(OBJGPU * pGpu,KernelChannel * pKernelChannel)3866 kchannelCheckBcStateCurrent_IMPL
3867 (
3868     OBJGPU        *pGpu,
3869     KernelChannel *pKernelChannel
3870 )
3871 {
3872 #define KERNEL_CHANNEL_BCSTATE_UNINITIALIZED (0)
3873 #define KERNEL_CHANNEL_BCSTATE_DISABLED (1)
3874 #define KERNEL_CHANNEL_BCSTATE_ENABLED (2)
3875 
3876     NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu);
3877     NvU8   channelBcStateEnum = bBcState ? KERNEL_CHANNEL_BCSTATE_ENABLED : KERNEL_CHANNEL_BCSTATE_DISABLED;
3878 
3879     NV_PRINTF(
3880         LEVEL_INFO,
3881         "GPU = %d, ChID = %d, bcStateCurrent = %d, channelBcStateEnum = %d\n",
3882         pGpu->gpuInstance,
3883         kchannelGetDebugTag(pKernelChannel),
3884         pKernelChannel->bcStateCurrent,
3885         channelBcStateEnum);
3886 
3887     // Check that the BC status did not change - 0 = first call, 1 = disable, 2 = enable.
3888     if (pKernelChannel->bcStateCurrent == KERNEL_CHANNEL_BCSTATE_UNINITIALIZED)
3889     {
3890         pKernelChannel->bcStateCurrent = channelBcStateEnum;
3891     }
3892     NV_ASSERT_OR_RETURN(pKernelChannel->bcStateCurrent == channelBcStateEnum, NV_ERR_INVALID_STATE);
3893 
3894     return NV_OK;
3895 }
3896 
3897 // Map the Engine Context Memdesc and add it's VAddr
3898 NV_STATUS
kchannelMapEngineCtxBuf_IMPL(OBJGPU * pGpu,KernelChannel * pKernelChannel,NvU32 engDesc)3899 kchannelMapEngineCtxBuf_IMPL
3900 (
3901     OBJGPU      *pGpu,
3902     KernelChannel *pKernelChannel,
3903     NvU32        engDesc
3904 )
3905 {
3906     OBJVASPACE            *pVAS           = NULL;
3907     NV_STATUS              status         = NV_OK;
3908     ENGINE_CTX_DESCRIPTOR *pEngCtx;
3909     NvU64                  addr;
3910     MEMORY_DESCRIPTOR     *pTempMemDesc;
3911     OBJGVASPACE           *pGVAS;
3912     KernelFifo            *pKernelFifo    = GPU_GET_KERNEL_FIFO(pGpu);
3913 
3914     NV_ASSERT_OR_RETURN(engDesc != ENG_FIFO, NV_ERR_INVALID_ARGUMENT);
3915 
3916     if (IS_GR(engDesc))
3917     {
3918         NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel));
3919     }
3920 
3921     NV_PRINTF(LEVEL_INFO, "ChID %d engDesc %s (0x%x) \n",
3922               kchannelGetDebugTag(pKernelChannel),
3923               kfifoGetEngineName_HAL(GPU_GET_KERNEL_FIFO(pGpu), ENGINE_INFO_TYPE_ENG_DESC, engDesc),
3924               engDesc);
3925 
3926     pVAS = pKernelChannel->pVAS;
3927     pGVAS = dynamicCast(pVAS, OBJGVASPACE);
3928     NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_INVALID_STATE);
3929 
3930     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
3931 
3932     pEngCtx = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
3933     NV_ASSERT_OR_ELSE(pEngCtx != NULL, status = NV_ERR_INVALID_STATE; goto fail);
3934 
3935     pTempMemDesc = pEngCtx->pMemDesc;
3936     NV_ASSERT_OR_ELSE(pTempMemDesc != NULL, status = NV_ERR_INVALID_STATE; goto fail);
3937 
3938     //
3939     // For virtual context, UMD has already alloced/mapped the engine context.
3940     // So simply get the vaddr
3941     //
3942 
3943     status = vaListFindVa(&pEngCtx->vaList, pVAS, &addr);
3944     if (status == NV_OK)
3945     {
3946         // VAddr already exists and needs no action
3947         SLI_LOOP_CONTINUE;
3948     }
3949     else if (status == NV_ERR_OBJECT_NOT_FOUND)
3950     {
3951         NvU32 flags = DMA_ALLOC_VASPACE_NONE;
3952         if (gvaspaceIsExternallyOwned(pGVAS))
3953         {
3954             // We should never land up here if VA space is externally owned!
3955             NV_ASSERT_FAILED("Externally owned object not found");
3956             status = NV_ERR_INVALID_OPERATION;
3957             goto fail;
3958         }
3959 
3960         kfifoGetCtxBufferMapFlags_HAL(pGpu, pKernelFifo, engDesc, &flags);
3961 
3962         status = dmaMapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, pTempMemDesc, &addr,
3963             flags, DMA_UPDATE_VASPACE_FLAGS_NONE);
3964         if (status != NV_OK)
3965         {
3966             NV_PRINTF(LEVEL_ERROR,
3967                       "Could not map context buffer for engDesc 0x%x\n",
3968                       engDesc);
3969             goto fail;
3970         }
3971         else
3972         {
3973             status = vaListAddVa(&pEngCtx->vaList, pVAS, addr);
3974             NV_ASSERT(status == NV_OK);
3975         }
3976     }
3977     else
3978     {
3979         NV_ASSERT_OK_FAILED("vaListFindVa", status);
3980         goto fail;
3981     }
3982 
3983 fail:
3984     if (status != NV_OK)
3985     {
3986         SLI_LOOP_BREAK;
3987     }
3988     SLI_LOOP_END
3989 
3990     return status;
3991 }
3992 
3993 /**
3994  * @brief Updates the notifier index with which to update the work submit
3995  *        notifier on request.
3996  *
3997  * @param[IN] pGpu              OBJGPU
3998  * @param[in] pKernelChannel    KernelChannel
3999  * @param[in] index             Updated notifier index
4000  *
4001  * @return NV_OK
4002  *         NV_ERR_OUT_OF_RANGE if index is beyond the bounds of the notifier
4003  */
4004 NV_STATUS
kchannelUpdateWorkSubmitTokenNotifIndex_IMPL(OBJGPU * pGpu,KernelChannel * pKernelChannel,NvU32 index)4005 kchannelUpdateWorkSubmitTokenNotifIndex_IMPL
4006 (
4007     OBJGPU *pGpu,
4008     KernelChannel *pKernelChannel,
4009     NvU32 index
4010 )
4011 {
4012     NvHandle hNotifier;
4013     RsClient *pClient = RES_GET_CLIENT(pKernelChannel);
4014     Memory *pMemory;
4015     ContextDma *pContextDma;
4016     NvU32 addressSpace;
4017     NvU64 notificationBufferSize;
4018     Device *pDevice;
4019 
4020     hNotifier = pKernelChannel->hErrorContext;
4021 
4022     // Clobbering error notifier index is illegal
4023     NV_CHECK_OR_RETURN(LEVEL_INFO, index != NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR,
4024                      NV_ERR_INVALID_ARGUMENT);
4025 
4026     // If key rotation is enabled then clobbering key rotation notifier is disallowed
4027     ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
4028     if ((pConfCompute != NULL) &&
4029         (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED)))
4030     {
4031         NV_CHECK_OR_RETURN(LEVEL_ERROR, index != NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS,
4032                            NV_ERR_INVALID_ARGUMENT);
4033     }
4034 
4035     // Check for integer overflows
4036     if (((index + 1) < index) ||
4037         !portSafeMulU64(index + 1, sizeof(NvNotification), &notificationBufferSize))
4038     {
4039         return NV_ERR_OUT_OF_RANGE;
4040     }
4041 
4042     pDevice = GPU_RES_GET_DEVICE(pKernelChannel);
4043 
4044     if (NV_OK == memGetByHandleAndDevice(pClient, hNotifier, RES_GET_HANDLE(pDevice), &pMemory))
4045     {
4046         addressSpace = memdescGetAddressSpace(pMemory->pMemDesc);
4047 
4048         NV_CHECK_OR_RETURN(LEVEL_INFO, pMemory->Length >= notificationBufferSize,
4049                          NV_ERR_OUT_OF_RANGE);
4050         switch (addressSpace)
4051         {
4052             case ADDR_VIRTUAL:
4053             {
4054                 NvU64 physAddr = memdescGetPhysAddr(pMemory->pMemDesc, AT_GPU_VA, 0);
4055                 PCLI_DMA_MAPPING_INFO pDmaMappingInfo;
4056 
4057                 NV_CHECK_OR_RETURN(LEVEL_INFO,
4058                     CliGetDmaMappingInfo(pClient,
4059                                          RES_GET_HANDLE(pDevice),
4060                                          RES_GET_HANDLE(pMemory),
4061                                          physAddr,
4062                                          gpumgrGetDeviceGpuMask(pGpu->deviceInstance),
4063                                          &pDmaMappingInfo),
4064                     NV_ERR_GENERIC);
4065 
4066                 NV_CHECK_OR_RETURN(LEVEL_INFO, pDmaMappingInfo->pMemDesc->Size >= notificationBufferSize,
4067                                  NV_ERR_OUT_OF_RANGE);
4068                 break;
4069             }
4070             case ADDR_FBMEM:
4071                 // fall through
4072             case ADDR_SYSMEM:
4073                 // Covered by check prior to switch/case
4074                 break;
4075             default:
4076                 return NV_ERR_NOT_SUPPORTED;
4077         }
4078     }
4079     else if (NV_OK == ctxdmaGetByHandle(pClient, hNotifier, &pContextDma))
4080     {
4081         NV_CHECK_OR_RETURN(LEVEL_INFO, pContextDma->Limit >= (notificationBufferSize - 1),
4082                          NV_ERR_OUT_OF_RANGE);
4083     }
4084     else
4085     {
4086         return NV_ERR_OBJECT_NOT_FOUND;
4087     }
4088 
4089     pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN]
4090         = index;
4091 
4092     return NV_OK;
4093 }
4094 
4095 /**
4096  * @brief Updates the work submit notifier passed to the channel during channel
4097  *        creation with the new work submit token.
4098  *
4099  * @param[IN] pGpu              OBJGPU
4100  * @param[in] pKernelChannel    KernelChannel
4101  * @param[in] token             Work submit token to notify clients of
4102  *
4103  * @return NV_OK on successful notify
4104  *         NV_OK if client has not set up the doorbell notifier. This should
4105  *         be an error once all clients have been updated.
4106  */
4107 NV_STATUS
kchannelNotifyWorkSubmitToken_IMPL(OBJGPU * pGpu,KernelChannel * pKernelChannel,NvU32 token)4108 kchannelNotifyWorkSubmitToken_IMPL
4109 (
4110     OBJGPU *pGpu,
4111     KernelChannel *pKernelChannel,
4112     NvU32 token
4113 )
4114 {
4115     NvU16 notifyStatus = 0x0;
4116     NvU32 index = pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN];
4117 
4118     notifyStatus =
4119         FLD_SET_DRF(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _IN_PROGRESS, _TRUE, notifyStatus);
4120     notifyStatus =
4121         FLD_SET_DRF_NUM(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _VALUE, 0xFFFF, notifyStatus);
4122 
4123     return kchannelUpdateNotifierMem(pKernelChannel, index, token, 0, notifyStatus);
4124 }
4125 
4126 /**
4127  * @brief Alloc and set up pNotifyActions
4128  *
4129  * @param[in]  pKernelChannel
4130  * @param[in]  classNuml           Channel class
4131  *
4132  * @return  NV_OK or error code
4133  */
4134 static NV_STATUS
_kchannelSetupNotifyActions(KernelChannel * pKernelChannel,NvU32 classNum)4135 _kchannelSetupNotifyActions
4136 (
4137     KernelChannel *pKernelChannel,
4138     NvU32          classNum
4139 )
4140 {
4141     CLI_CHANNEL_CLASS_INFO classInfo;
4142 
4143     // Allocate notifier action table for the maximum supported by this class
4144     CliGetChannelClassInfo(classNum, &classInfo);
4145     if (classInfo.notifiersMaxCount > 0)
4146     {
4147         pKernelChannel->pNotifyActions = portMemAllocNonPaged(
4148                                    classInfo.notifiersMaxCount * sizeof(*pKernelChannel->pNotifyActions));
4149         if (pKernelChannel->pNotifyActions == NULL)
4150             return NV_ERR_NO_MEMORY;
4151 
4152         portMemSet(pKernelChannel->pNotifyActions, 0,
4153                  classInfo.notifiersMaxCount * sizeof(*pKernelChannel->pNotifyActions));
4154     }
4155 
4156     return NV_OK;
4157 } // end of _kchannelSetupNotifyActions()
4158 
4159 /**
4160  * @brief Cleans up pNotifyActions
4161  *
4162  * @param[in] pKernelChannel
4163  */
4164 static void
_kchannelCleanupNotifyActions(KernelChannel * pKernelChannel)4165 _kchannelCleanupNotifyActions
4166 (
4167     KernelChannel *pKernelChannel
4168 )
4169 {
4170     // free memory associated with notify actions table
4171     portMemFree(pKernelChannel->pNotifyActions);
4172     pKernelChannel->pNotifyActions = NULL;
4173 } // end of _kchannelCleanupNotifyActions()
4174 
4175 static NV_STATUS
_kchannelNotifyOfChid(OBJGPU * pGpu,KernelChannel * pKernelChannel,RsClient * pRsClient)4176 _kchannelNotifyOfChid
4177 (
4178     OBJGPU *pGpu,
4179     KernelChannel *pKernelChannel,
4180     RsClient *pRsClient
4181 )
4182 {
4183     ContextDma *pContextDma;
4184 
4185     //
4186     // Return the chid to the drivers in the error context DMA
4187     //
4188     // We need to update this when virtual channel gets mapped in.
4189     //
4190 
4191     if ((ctxdmaGetByHandle(pRsClient, pKernelChannel->hErrorContext, &pContextDma)) == NV_OK)
4192     {
4193         NV_CHECK_OR_RETURN(LEVEL_INFO, pContextDma->Limit >= sizeof(NvNotification) - 1, NV_ERR_INVALID_ARGUMENT);
4194         notifyFillNotifier(pGpu, pContextDma, pKernelChannel->ChID, 0, NV_OK);
4195     }
4196 
4197     return NV_OK;
4198 }
4199 
4200 NvU32
kchannelGetGfid_IMPL(KernelChannel * pKernelChannel)4201 kchannelGetGfid_IMPL
4202 (
4203     KernelChannel *pKernelChannel
4204 )
4205 {
4206     return pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->gfid;
4207 }
4208 
4209 NvBool
kchannelIsCpuMapped(OBJGPU * pGpu,KernelChannel * pKernelChannel)4210 kchannelIsCpuMapped
4211 (
4212     OBJGPU *pGpu,
4213     KernelChannel *pKernelChannel
4214 )
4215 {
4216     return !!(pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &
4217               KERNEL_CHANNEL_SW_STATE_CPU_MAP);
4218 }
4219 
4220 void
kchannelSetCpuMapped(OBJGPU * pGpu,KernelChannel * pKernelChannel,NvBool bCpuMapped)4221 kchannelSetCpuMapped
4222 (
4223     OBJGPU *pGpu,
4224     KernelChannel *pKernelChannel,
4225     NvBool bCpuMapped
4226 )
4227 {
4228     if (bCpuMapped)
4229     {
4230         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] |=
4231               KERNEL_CHANNEL_SW_STATE_CPU_MAP;
4232     }
4233     else
4234     {
4235         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &=
4236               ~(KERNEL_CHANNEL_SW_STATE_CPU_MAP);
4237     }
4238 }
4239 
4240 NvBool
kchannelIsRunlistSet(OBJGPU * pGpu,KernelChannel * pKernelChannel)4241 kchannelIsRunlistSet
4242 (
4243     OBJGPU *pGpu,
4244     KernelChannel *pKernelChannel
4245 )
4246 {
4247     return !!(pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &
4248               KERNEL_CHANNEL_SW_STATE_RUNLIST_SET);
4249 }
4250 
4251 void
kchannelSetRunlistSet(OBJGPU * pGpu,KernelChannel * pKernelChannel,NvBool bRunlistSet)4252 kchannelSetRunlistSet
4253 (
4254     OBJGPU *pGpu,
4255     KernelChannel *pKernelChannel,
4256     NvBool bRunlistSet
4257 )
4258 {
4259     if (bRunlistSet)
4260     {
4261         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] |=
4262               KERNEL_CHANNEL_SW_STATE_RUNLIST_SET;
4263     }
4264     else
4265     {
4266         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &=
4267               ~(KERNEL_CHANNEL_SW_STATE_RUNLIST_SET);
4268     }
4269 }
4270 
4271 NV_STATUS
kchannelGetChannelPhysicalState_KERNEL(OBJGPU * pGpu,KernelChannel * pKernelChannel,NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS * pChannelStateParams)4272 kchannelGetChannelPhysicalState_KERNEL
4273 (
4274     OBJGPU *pGpu,
4275     KernelChannel *pKernelChannel,
4276     NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams
4277 )
4278 {
4279     CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
4280     RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams;
4281     NV_STATUS status = NV_OK;
4282 
4283     // Get the physical state from GSP
4284     NV_RM_RPC_CONTROL(pGpu,
4285                       pRmCtrlParams->hClient,
4286                       pRmCtrlParams->hObject,
4287                       pRmCtrlParams->cmd,
4288                       pRmCtrlParams->pParams,
4289                       pRmCtrlParams->paramsSize,
4290                       status);
4291     NV_ASSERT_OK_OR_RETURN(status);
4292 
4293     return NV_OK;
4294 }
4295 
4296 NV_STATUS
kchannelMapUserD_IMPL(OBJGPU * pGpu,KernelChannel * pKernelChannel,RS_PRIV_LEVEL privLevel,NvU64 offset,NvU32 protect,NvP64 * ppCpuVirtAddr,NvP64 * ppPriv)4297 kchannelMapUserD_IMPL
4298 (
4299     OBJGPU         *pGpu,
4300     KernelChannel  *pKernelChannel,
4301     RS_PRIV_LEVEL   privLevel,
4302     NvU64           offset,
4303     NvU32           protect,
4304     NvP64          *ppCpuVirtAddr,
4305     NvP64          *ppPriv
4306 )
4307 {
4308     NV_STATUS status      = NV_OK;
4309     NvU64     userBase;
4310     NvU64     userOffset;
4311     NvU64     userSize;
4312     NvU32     cachingMode = NV_MEMORY_UNCACHED;
4313 
4314     // if USERD is allocated by client
4315     if (pKernelChannel->bClientAllocatedUserD)
4316     {
4317         return NV_OK;
4318     }
4319 
4320     status = kchannelGetUserdInfo_HAL(pGpu, pKernelChannel,
4321                                       &userBase, &userOffset, &userSize);
4322 
4323     if (status != NV_OK)
4324         return status;
4325 
4326 
4327     if (userBase == pGpu->busInfo.gpuPhysAddr)
4328     {
4329         // Create a mapping of BAR0
4330         status = osMapGPU(pGpu, privLevel, NvU64_LO32(userOffset+offset),
4331                  NvU64_LO32(userSize), protect, ppCpuVirtAddr, ppPriv);
4332         goto done;
4333     }
4334 
4335     if (pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING))
4336     {
4337         cachingMode = NV_MEMORY_CACHED;
4338     }
4339 
4340     //
4341     // If userBase is not bar0, then it is bar1 and we create a regular memory
4342     // mapping.
4343     //
4344     if (privLevel >= RS_PRIV_LEVEL_KERNEL)
4345     {
4346         status = osMapPciMemoryKernel64(pGpu, userBase + userOffset + offset,
4347                                         userSize, protect, ppCpuVirtAddr, cachingMode);
4348     }
4349     else
4350     {
4351         status = osMapPciMemoryUser(pGpu->pOsGpuInfo,
4352                                     userBase + userOffset + offset,
4353                                     userSize, protect, ppCpuVirtAddr,
4354                                     ppPriv, cachingMode);
4355     }
4356     if (!((status == NV_OK) && *ppCpuVirtAddr))
4357     {
4358         NV_PRINTF(LEVEL_ERROR,
4359                   "BAR1 offset 0x%llx for USERD of channel %x could not be cpu mapped\n",
4360                   userOffset, kchannelGetDebugTag(pKernelChannel));
4361     }
4362 
4363 done:
4364 
4365     // Indicate channel is mapped
4366     if (status == NV_OK)
4367     {
4368             SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
4369             kchannelSetCpuMapped(pGpu, pKernelChannel, NV_TRUE);
4370             SLI_LOOP_END
4371     }
4372 
4373     return status;
4374 }
4375 
4376 void
kchannelUnmapUserD_IMPL(OBJGPU * pGpu,KernelChannel * pKernelChannel,RS_PRIV_LEVEL privLevel,NvP64 * ppCpuVirtAddr,NvP64 * ppPriv)4377 kchannelUnmapUserD_IMPL
4378 (
4379     OBJGPU         *pGpu,
4380     KernelChannel  *pKernelChannel,
4381     RS_PRIV_LEVEL   privLevel,
4382     NvP64          *ppCpuVirtAddr,
4383     NvP64          *ppPriv
4384 )
4385 {
4386     NV_STATUS status;
4387     NvU64     userBase;
4388     NvU64     userOffset;
4389     NvU64     userSize;
4390 
4391     if (pKernelChannel->bClientAllocatedUserD)
4392     {
4393         return;
4394     }
4395 
4396     status = kchannelGetUserdInfo_HAL(pGpu, pKernelChannel,
4397                                       &userBase, &userOffset, &userSize);
4398 
4399     NV_ASSERT_OR_RETURN_VOID(status == NV_OK);
4400 
4401     if (userBase == pGpu->busInfo.gpuPhysAddr)
4402     {
4403         osUnmapGPU(pGpu->pOsGpuInfo, privLevel, *ppCpuVirtAddr,
4404                    NvU64_LO32(userSize), *ppPriv);
4405     }
4406     else
4407     {
4408         // GF100+
4409         // Unmap Cpu virt mapping
4410         if (privLevel >= RS_PRIV_LEVEL_KERNEL)
4411         {
4412             osUnmapPciMemoryKernel64(pGpu, *ppCpuVirtAddr);
4413         }
4414         else
4415         {
4416             osUnmapPciMemoryUser(pGpu->pOsGpuInfo, *ppCpuVirtAddr,
4417                                  userSize, *ppPriv);
4418         }
4419     }
4420 
4421     // Indicate channel is !mapped
4422     kchannelSetCpuMapped(pGpu, pKernelChannel, NV_FALSE);
4423     return;
4424 }
4425 
4426 static NV_STATUS
_kchannelGetUserMemDesc(OBJGPU * pGpu,KernelChannel * pKernelChannel,PMEMORY_DESCRIPTOR * ppMemDesc)4427 _kchannelGetUserMemDesc
4428 (
4429     OBJGPU             *pGpu,
4430     KernelChannel      *pKernelChannel,
4431     PMEMORY_DESCRIPTOR *ppMemDesc
4432 )
4433 {
4434     NV_ASSERT_OR_RETURN(ppMemDesc != NULL, NV_ERR_INVALID_STATE);
4435     *ppMemDesc = NULL;
4436 
4437     NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_STATE);
4438 
4439     *ppMemDesc = pKernelChannel->pInstSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
4440 
4441     return *ppMemDesc ? NV_OK : NV_ERR_INVALID_STATE;
4442 }
4443 
4444 /*!
4445  * @brief Retrieve a KernelChannel from either a KernelChannel or TSG handle. KernelChannel is
4446  * checked first. If TSG is provided, the head of the TSG is returned.
4447  *
4448  * @param[in]  pClient            Client object
4449  * @param[in]  hDual              NvHandle either to TSG or to KernelChannel
4450  * @param[out] ppKernelChannel    Referenced KernelChannel
4451  */
4452 NV_STATUS
kchannelGetFromDualHandle_IMPL(RsClient * pClient,NvHandle hDual,KernelChannel ** ppKernelChannel)4453 kchannelGetFromDualHandle_IMPL
4454 (
4455     RsClient        *pClient,
4456     NvHandle         hDual,
4457     KernelChannel  **ppKernelChannel
4458 )
4459 {
4460     KernelChannel *pKernelChannel;
4461     RsResourceRef *pChanGrpRef;
4462 
4463     NV_ASSERT_OR_RETURN(ppKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT);
4464 
4465     *ppKernelChannel = NULL;
4466 
4467     if (CliGetKernelChannel(pClient, hDual, &pKernelChannel) == NV_OK)
4468     {
4469         *ppKernelChannel = pKernelChannel;
4470         return NV_OK;
4471     }
4472 
4473     if (CliGetChannelGroup(pClient->hClient, hDual, &pChanGrpRef, NULL) == NV_OK)
4474     {
4475         KernelChannelGroupApi *pKernelChannelGroupApi = dynamicCast(
4476             pChanGrpRef->pResource,
4477             KernelChannelGroupApi);
4478 
4479         NV_ASSERT_OR_RETURN(
4480             (pKernelChannelGroupApi != NULL) &&
4481                 (pKernelChannelGroupApi->pKernelChannelGroup != NULL),
4482             NV_ERR_INVALID_ARGUMENT);
4483 
4484         if (pKernelChannelGroupApi->pKernelChannelGroup->chanCount == 0)
4485             return NV_ERR_INVALID_ARGUMENT;
4486 
4487         *ppKernelChannel =
4488             pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pKernelChannel;
4489         NV_ASSERT_OR_RETURN(*ppKernelChannel != NULL, NV_ERR_INVALID_STATE);
4490 
4491         return NV_OK;
4492     }
4493 
4494     return NV_ERR_OBJECT_NOT_FOUND;
4495 }
4496 
4497 /*!
4498  * @brief Retrieve a KernelChannel from either a KernelChannel or TSG handle. KernelChannel is
4499  * checked first. If TSG is provided, the head of the TSG is returned. If
4500  * KernelChannel handle is provided, it must not be part of a client-allocated TSG.
4501  *
4502  * @param[in]  pClient            Client object
4503  * @param[in]  hDual              NvHandle either to TSG or to bare Channel
4504  * @param[out] ppKernelChannel    Referenced KernelChannel
4505  */
4506 NV_STATUS
kchannelGetFromDualHandleRestricted_IMPL(RsClient * pClient,NvHandle hDual,KernelChannel ** ppKernelChannel)4507 kchannelGetFromDualHandleRestricted_IMPL
4508 (
4509     RsClient        *pClient,
4510     NvHandle         hDual,
4511     KernelChannel  **ppKernelChannel
4512 )
4513 {
4514     NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
4515         kchannelGetFromDualHandle(pClient, hDual, ppKernelChannel));
4516     if ((RES_GET_HANDLE(*ppKernelChannel) == hDual) &&
4517         (((*ppKernelChannel)->pKernelChannelGroupApi->pKernelChannelGroup != NULL) &&
4518          !(*ppKernelChannel)->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm))
4519     {
4520         NV_PRINTF(LEVEL_ERROR, "channel handle 0x%08x is part of a channel group, not allowed!\n",
4521                   RES_GET_HANDLE(*ppKernelChannel));
4522         return NV_ERR_INVALID_ARGUMENT;
4523     }
4524     return NV_OK;
4525 }
4526 
4527 static void
_kchannelUpdateFifoMapping(KernelChannel * pKernelChannel,OBJGPU * pGpu,NvBool bKernel,NvP64 cpuAddress,NvP64 priv,NvU64 cpuMapLength,NvU32 flags,NvHandle hSubdevice,RsCpuMapping * pMapping)4528 _kchannelUpdateFifoMapping
4529 (
4530     KernelChannel    *pKernelChannel,
4531     OBJGPU           *pGpu,
4532     NvBool            bKernel,
4533     NvP64             cpuAddress,
4534     NvP64             priv,
4535     NvU64             cpuMapLength,
4536     NvU32             flags,
4537     NvHandle          hSubdevice,
4538     RsCpuMapping     *pMapping
4539 )
4540 {
4541     pMapping->pPrivate->pGpu      = pGpu;
4542     pMapping->pPrivate->bKernel   = bKernel;
4543     pMapping->processId = osGetCurrentProcess();
4544     pMapping->pLinearAddress      = cpuAddress;
4545     pMapping->pPrivate->pPriv     = priv;
4546     pMapping->length              = cpuMapLength;
4547     pMapping->flags               = flags;
4548     pMapping->pContext            = (void*)(NvUPtr)pKernelChannel->ChID;
4549 }
4550 
kchannelRetrieveKmb_KERNEL(OBJGPU * pGpu,KernelChannel * pKernelChannel,ROTATE_IV_TYPE rotateOperation,NvBool bIncludeIvOrNonce,CC_KMB * keyMaterialBundle)4551 NV_STATUS kchannelRetrieveKmb_KERNEL
4552 (
4553     OBJGPU *pGpu,
4554     KernelChannel *pKernelChannel,
4555     ROTATE_IV_TYPE rotateOperation,
4556     NvBool bIncludeIvOrNonce,
4557     CC_KMB *keyMaterialBundle
4558 )
4559 {
4560     ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu);
4561 
4562     NV_ASSERT(pCC != NULL);
4563 
4564     return (confComputeKeyStoreRetrieveViaChannel_HAL(pCC, pKernelChannel, rotateOperation,
4565                                                       bIncludeIvOrNonce, keyMaterialBundle));
4566 }
4567 
4568 /*!
4569  * @brief Get KMB for secure channel
4570  *
4571  * @param[in] pKernelChannnel
4572  * @param[out] pGetKmbParams
4573  */
4574 NV_STATUS
kchannelCtrlCmdGetKmb_KERNEL(KernelChannel * pKernelChannel,NVC56F_CTRL_CMD_GET_KMB_PARAMS * pGetKmbParams)4575 kchannelCtrlCmdGetKmb_KERNEL
4576 (
4577     KernelChannel *pKernelChannel,
4578     NVC56F_CTRL_CMD_GET_KMB_PARAMS *pGetKmbParams
4579 )
4580 {
4581     if (!pKernelChannel->bCCSecureChannel)
4582     {
4583         return NV_ERR_NOT_SUPPORTED;
4584     }
4585 
4586     OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
4587     ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
4588     if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
4589     {
4590         KEY_ROTATION_STATUS state;
4591         NvU32 h2dKey;
4592         NV_ASSERT_OK_OR_RETURN(confComputeGetKeyPairByChannel(pGpu, pConfCompute, pKernelChannel, &h2dKey, NULL));
4593         NV_ASSERT_OK_OR_RETURN(confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &state));
4594         if ((state != KEY_ROTATION_STATUS_IDLE) ||
4595             (kchannelIsDisabledForKeyRotation(pGpu, pKernelChannel)))
4596         {
4597             return NV_ERR_KEY_ROTATION_IN_PROGRESS;
4598         }
4599     }
4600 
4601     portMemCopy((void*)(&pGetKmbParams->kmb), sizeof(CC_KMB),
4602                 (const void*)(&pKernelChannel->clientKmb), sizeof(CC_KMB));
4603 
4604     if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
4605     {
4606         RsClient          *pRsClient      = NULL;
4607         RsResourceRef     *pResourceRef   = NULL;
4608         NvHandle           hClient        = RES_GET_CLIENT_HANDLE(pKernelChannel);
4609 
4610         NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient));
4611         if (clientGetResourceRef(pRsClient, pGetKmbParams->hMemory, &pResourceRef) == NV_OK)
4612         {
4613             // If a buffer already exists then replace it with new buffer
4614             if (pKernelChannel->pEncStatsBuf != NULL)
4615             {
4616                 NV_ASSERT_OK_OR_RETURN(kchannelSetEncryptionStatsBuffer_HAL(pGpu, pKernelChannel, NULL, NV_FALSE));
4617             }
4618             Memory *pMemory = dynamicCast(pResourceRef->pResource, Memory);
4619             MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc;
4620             NV_ASSERT_OR_RETURN(pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT);
4621             NV_ASSERT_OK_OR_RETURN(kchannelSetEncryptionStatsBuffer_HAL(pGpu, pKernelChannel, pMemDesc, NV_TRUE));
4622         }
4623 
4624         //
4625         // Reset statistics every time GET_KMB is called
4626         // TODO CONFCOMP-984: Make this fatal if this ptr is NULL
4627         //
4628         if (pKernelChannel->pEncStatsBuf != NULL)
4629             portMemSet(pKernelChannel->pEncStatsBuf, 0, sizeof(CC_CRYPTOBUNDLE_STATS));
4630     }
4631 
4632     return NV_OK;
4633     return NV_ERR_NOT_SUPPORTED;
4634 }
4635 
4636 /*!
4637  * @brief      Rotate the IVs for the given secure channel
4638  *
4639  * @param[in]  pKernelChannel
4640  * @param[out] pRotateIvParams
4641  *
4642  * @return     NV_OK on success
4643  * @return     NV_ERR_NOT_SUPPORTED if channel is not a secure channel.
4644  */
4645 NV_STATUS
kchannelCtrlRotateSecureChannelIv_KERNEL(KernelChannel * pKernelChannel,NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV_PARAMS * pRotateIvParams)4646 kchannelCtrlRotateSecureChannelIv_KERNEL
4647 (
4648     KernelChannel *pKernelChannel,
4649     NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV_PARAMS *pRotateIvParams
4650 )
4651 {
4652     NV_STATUS            status            = NV_OK;
4653     OBJGPU              *pGpu              = GPU_RES_GET_GPU(pKernelChannel);
4654     ConfidentialCompute *pCC               = GPU_GET_CONF_COMPUTE(pGpu);
4655     ROTATE_IV_TYPE       rotateIvOperation = pRotateIvParams->rotateIvType;
4656 
4657     if (!pKernelChannel->bCCSecureChannel)
4658     {
4659         return NV_ERR_NOT_SUPPORTED;
4660     }
4661 
4662     NV_PRINTF(LEVEL_INFO, "Rotating IV in CPU-RM.\n");
4663 
4664     status = confComputeKeyStoreRetrieveViaChannel_HAL(
4665         pCC, pKernelChannel, rotateIvOperation, NV_TRUE, &pKernelChannel->clientKmb);
4666 
4667     if (status != NV_OK)
4668     {
4669         return status;
4670     }
4671 
4672     portMemSet(pRotateIvParams, 0, sizeof(*pRotateIvParams));
4673 
4674     portMemCopy(pRotateIvParams->updatedKmb.encryptBundle.iv,
4675                 sizeof(pRotateIvParams->updatedKmb.encryptBundle.iv),
4676                 pKernelChannel->clientKmb.encryptBundle.iv,
4677                 sizeof(pKernelChannel->clientKmb.encryptBundle.iv));
4678 
4679     portMemCopy(pRotateIvParams->updatedKmb.decryptBundle.iv,
4680                 sizeof(pRotateIvParams->updatedKmb.decryptBundle.iv),
4681                 pKernelChannel->clientKmb.decryptBundle.iv,
4682                 sizeof(pKernelChannel->clientKmb.decryptBundle.iv));
4683 
4684     pRotateIvParams->rotateIvType = rotateIvOperation;
4685 
4686     NV_RM_RPC_CONTROL(pGpu,
4687                       RES_GET_CLIENT_HANDLE(pKernelChannel),
4688                       RES_GET_HANDLE(pKernelChannel),
4689                       NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV,
4690                       pRotateIvParams,
4691                       sizeof(*pRotateIvParams),
4692                       status);
4693 
4694     if (status != NV_OK)
4695     {
4696         return status;
4697     }
4698 
4699     if ((rotateIvOperation == ROTATE_IV_ALL_VALID) || (rotateIvOperation == ROTATE_IV_ENCRYPT))
4700     {
4701         portMemCopy(&pRotateIvParams->updatedKmb.encryptBundle,
4702                     sizeof(pRotateIvParams->updatedKmb.encryptBundle),
4703                     &pKernelChannel->clientKmb.encryptBundle,
4704                     sizeof(pKernelChannel->clientKmb.encryptBundle));
4705     }
4706 
4707     if ((rotateIvOperation == ROTATE_IV_ALL_VALID) || (rotateIvOperation == ROTATE_IV_DECRYPT))
4708     {
4709         portMemCopy(&pRotateIvParams->updatedKmb.decryptBundle,
4710                     sizeof(pRotateIvParams->updatedKmb.decryptBundle),
4711                     &pKernelChannel->clientKmb.decryptBundle,
4712                     sizeof(pKernelChannel->clientKmb.decryptBundle));
4713     }
4714 
4715     return NV_OK;
4716     return NV_ERR_NOT_SUPPORTED;
4717 }
4718 
4719 NV_STATUS
kchannelCtrlRotateSecureChannelIv_PHYSICAL(KernelChannel * pKernelChannel,NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV_PARAMS * pRotateIvParams)4720 kchannelCtrlRotateSecureChannelIv_PHYSICAL
4721 (
4722     KernelChannel *pKernelChannel,
4723     NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV_PARAMS *pRotateIvParams
4724 )
4725 {
4726     NV_STATUS status;
4727 
4728     NV_PRINTF(LEVEL_INFO, "Rotating IV in GSP-RM.\n");
4729 
4730     // CPU-side encrypt IV corresponds to GPU-side decrypt IV.
4731     // CPU-side decrypt IV corresponds to GPU-side encrypt IV.
4732     status =
4733         kchannelRotateSecureChannelIv_HAL(pKernelChannel,
4734                                           pRotateIvParams->rotateIvType,
4735                                           pRotateIvParams->updatedKmb.decryptBundle.iv,
4736                                           pRotateIvParams->updatedKmb.encryptBundle.iv);
4737     if (status != NV_OK)
4738     {
4739         return status;
4740     }
4741 
4742     return NV_OK;
4743 }
4744 
4745 /*!
4746  * Fill in per-channel MMU exception data and allocate memory for this data if
4747  * necessary
4748  *
4749  * @param[inout]    pKernelChannel
4750  * @param[in]       pMmuExceptionData MMU exception data to be copied
4751  */
4752 void
kchannelFillMmuExceptionInfo_IMPL(KernelChannel * pKernelChannel,FIFO_MMU_EXCEPTION_DATA * pMmuExceptionData)4753 kchannelFillMmuExceptionInfo_IMPL
4754 (
4755     KernelChannel           *pKernelChannel,
4756     FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData
4757 )
4758 {
4759     NV_STATUS status = NV_OK;
4760 
4761     NV_ASSERT_OR_RETURN_VOID(pKernelChannel);
4762 
4763     if (pKernelChannel->pMmuExceptionData == NULL)
4764     {
4765         pKernelChannel->pMmuExceptionData = portMemAllocNonPaged(sizeof(FIFO_MMU_EXCEPTION_DATA));
4766         if (pKernelChannel->pMmuExceptionData == NULL)
4767             status = NV_ERR_NO_MEMORY;
4768     }
4769 
4770     if (status == NV_OK)
4771     {
4772         portMemCopy(pKernelChannel->pMmuExceptionData,
4773                     sizeof(FIFO_MMU_EXCEPTION_DATA),
4774                     pMmuExceptionData,
4775                     sizeof(FIFO_MMU_EXCEPTION_DATA));
4776     }
4777 }
4778 
4779 /*!
4780  * Free per-channel MMU exception data if it exists
4781  *
4782  * @param[inout]    pKernelChannel
4783  */
4784 void
kchannelFreeMmuExceptionInfo_IMPL(KernelChannel * pKernelChannel)4785 kchannelFreeMmuExceptionInfo_IMPL
4786 (
4787     KernelChannel           *pKernelChannel
4788 )
4789 {
4790     portMemFree(pKernelChannel->pMmuExceptionData);
4791     pKernelChannel->pMmuExceptionData = NULL;
4792 }
4793 
4794 /*!
4795  * Check if channel is disabled for key rotation
4796  */
kchannelIsDisabledForKeyRotation(OBJGPU * pGpu,KernelChannel * pKernelChannel)4797 NvBool kchannelIsDisabledForKeyRotation
4798 (
4799     OBJGPU *pGpu,
4800     KernelChannel *pKernelChannel
4801 )
4802 {
4803     return !!(pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &
4804               KERNEL_CHANNEL_SW_STATE_DISABLED_FOR_KEY_ROTATION);
4805 }
4806 
4807 /*!
4808  * Mark channel disabled for key rotation
4809  */
kchannelDisableForKeyRotation(OBJGPU * pGpu,KernelChannel * pKernelChannel,NvBool bDisable)4810 void kchannelDisableForKeyRotation
4811 (
4812     OBJGPU *pGpu,
4813     KernelChannel *pKernelChannel,
4814     NvBool bDisable
4815 )
4816 {
4817     if (bDisable)
4818     {
4819         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] |=
4820         KERNEL_CHANNEL_SW_STATE_DISABLED_FOR_KEY_ROTATION;
4821     }
4822     else
4823     {
4824         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &=
4825         ~KERNEL_CHANNEL_SW_STATE_DISABLED_FOR_KEY_ROTATION;
4826     }
4827 }
4828 
4829 /*!
4830  * Check if channel needs to be enabled after key rotation
4831  */
kchannelIsEnableAfterKeyRotation(OBJGPU * pGpu,KernelChannel * pKernelChannel)4832 NvBool kchannelIsEnableAfterKeyRotation
4833 (
4834     OBJGPU *pGpu,
4835     KernelChannel *pKernelChannel
4836 )
4837 {
4838     return !!(pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &
4839               KERNEL_CHANNEL_SW_STATE_ENABLE_AFTER_KEY_ROTATION);
4840 }
4841 
4842 /*!
4843  * Mark channel to be re-enabled after key rotation completes
4844  */
kchannelEnableAfterKeyRotation(OBJGPU * pGpu,KernelChannel * pKernelChannel,NvBool bEnable)4845 void kchannelEnableAfterKeyRotation
4846 (
4847     OBJGPU *pGpu,
4848     KernelChannel *pKernelChannel,
4849     NvBool bEnable
4850 )
4851 {
4852     if (bEnable)
4853     {
4854         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] |=
4855         KERNEL_CHANNEL_SW_STATE_ENABLE_AFTER_KEY_ROTATION;
4856     }
4857     else
4858     {
4859         pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &=
4860         ~KERNEL_CHANNEL_SW_STATE_ENABLE_AFTER_KEY_ROTATION;
4861     }
4862 }
4863 
4864 /*!
4865  * Creates/destroys persistent mappings for key rotation notifier
4866  */
4867 NV_STATUS
kchannelSetKeyRotationNotifier_KERNEL(OBJGPU * pGpu,KernelChannel * pKernelChannel,NvBool bSet)4868 kchannelSetKeyRotationNotifier_KERNEL
4869 (
4870     OBJGPU *pGpu,
4871     KernelChannel *pKernelChannel,
4872     NvBool bSet
4873 )
4874 {
4875     NV_STATUS status = NV_OK;
4876     MEMORY_DESCRIPTOR *pNotifierMemDesc = pKernelChannel->pErrContextMemDesc;
4877     MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
4878     TRANSFER_SURFACE surf = {0};
4879     NV_ASSERT_OR_RETURN(pNotifierMemDesc != NULL, NV_ERR_INVALID_STATE);
4880     NV_ADDRESS_SPACE addressSpace = memdescGetAddressSpace(pNotifierMemDesc);
4881     NvU32 notifyIndex = NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS;
4882     if (bSet)
4883     {
4884         NV_ASSERT_OR_RETURN(memdescGetSize(pNotifierMemDesc) >= ((notifyIndex + 1) * sizeof(NvNotification)),
4885                             NV_ERR_INVALID_ARGUMENT);
4886 
4887         NV_ASSERT_OR_RETURN(addressSpace == ADDR_SYSMEM, NV_ERR_NOT_SUPPORTED);
4888         if (pKernelChannel->pKeyRotationNotifierMemDesc == NULL)
4889         {
4890             NV_ASSERT_OK_OR_RETURN(memdescCreateSubMem(&pKernelChannel->pKeyRotationNotifierMemDesc,
4891                                                        pNotifierMemDesc, pGpu, notifyIndex * sizeof(NvNotification),
4892                                                        sizeof(NvNotification)));
4893             surf.pMemDesc = pKernelChannel->pKeyRotationNotifierMemDesc;
4894             surf.offset = 0;
4895 
4896             pKernelChannel->pKeyRotationNotifier =
4897                 (NvNotification *) memmgrMemBeginTransfer(pMemoryManager, &surf,
4898                                                           sizeof(NvNotification),
4899                                                           TRANSFER_FLAGS_SHADOW_ALLOC);
4900             NV_ASSERT_OR_ELSE(pKernelChannel->pKeyRotationNotifier != NULL, status = NV_ERR_INVALID_STATE; goto done;);
4901 
4902             portMemSet((void*)pKernelChannel->pKeyRotationNotifier, 0, sizeof(NvNotification));
4903         }
4904     }
4905     else
4906     {
4907         if (pKernelChannel->pKeyRotationNotifierMemDesc != NULL)
4908         {
4909             if (pKernelChannel->pKeyRotationNotifier != NULL)
4910             {
4911                 surf.pMemDesc = pKernelChannel->pKeyRotationNotifierMemDesc;
4912                 surf.offset = 0;
4913                 memmgrMemEndTransfer(pMemoryManager, &surf, sizeof(NvNotification), 0);
4914                 pKernelChannel->pKeyRotationNotifier = NULL;
4915             }
4916             memdescDestroy(pKernelChannel->pKeyRotationNotifierMemDesc);
4917             pKernelChannel->pKeyRotationNotifierMemDesc = NULL;
4918         }
4919     }
4920 
4921 done:
4922     if (status != NV_OK)
4923     {
4924         if (pKernelChannel->pKeyRotationNotifierMemDesc != NULL)
4925         {
4926             memdescDestroy(pKernelChannel->pKeyRotationNotifierMemDesc);
4927             pKernelChannel->pKeyRotationNotifierMemDesc = NULL;
4928         }
4929     }
4930     return status;
4931 }
4932 
4933 /*!
4934  * Creates/destroys persistent mappings for encryption stats buffer
4935  */
4936 NV_STATUS
kchannelSetEncryptionStatsBuffer_KERNEL(OBJGPU * pGpu,KernelChannel * pKernelChannel,MEMORY_DESCRIPTOR * pMemDesc,NvBool bSet)4937 kchannelSetEncryptionStatsBuffer_KERNEL
4938 (
4939     OBJGPU *pGpu,
4940     KernelChannel *pKernelChannel,
4941     MEMORY_DESCRIPTOR *pMemDesc,
4942     NvBool bSet
4943 )
4944 {
4945     MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
4946     TRANSFER_SURFACE surf = {0};
4947     if (bSet)
4948     {
4949         NV_ASSERT_OR_RETURN(pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT);
4950         NV_ASSERT_OR_RETURN(pKernelChannel->pEncStatsBuf == NULL, NV_ERR_INVALID_STATE);
4951         NV_ASSERT_OK_OR_RETURN(memdescCreateSubMem(&pKernelChannel->pEncStatsBufMemDesc, pMemDesc, pGpu,
4952                                                    0, memdescGetSize(pMemDesc)));
4953         //
4954         // we rely on persistent mapping for encryption statistics buffer
4955         // since these will be used in top half and mappings are not allowed
4956         // in contexts that can't sleep on KVM or similar HCC systems.
4957         //
4958         surf.pMemDesc = pKernelChannel->pEncStatsBufMemDesc;
4959         surf.offset = 0;
4960         pKernelChannel->pEncStatsBuf = (CC_CRYPTOBUNDLE_STATS*)memmgrMemBeginTransfer(pMemoryManager, &surf,
4961                                                                                       sizeof(CC_CRYPTOBUNDLE_STATS),
4962                                                                                       TRANSFER_FLAGS_SHADOW_ALLOC);
4963         if (pKernelChannel->pEncStatsBuf == NULL)
4964         {
4965             memdescDestroy(pKernelChannel->pEncStatsBufMemDesc);
4966             pKernelChannel->pEncStatsBufMemDesc = NULL;
4967             return NV_ERR_INVALID_STATE;
4968         }
4969         portMemSet(pKernelChannel->pEncStatsBuf, 0, sizeof(CC_CRYPTOBUNDLE_STATS));
4970     }
4971     else
4972     {
4973         //
4974         // Free persistent mappings for encryption stats buffer
4975         // TODO CONFCOMP-984: Make this fatal if this ptr is NULL
4976         //
4977         if (pKernelChannel->pEncStatsBufMemDesc != NULL)
4978         {
4979             surf.pMemDesc = pKernelChannel->pEncStatsBufMemDesc;
4980             surf.offset = 0;
4981             memmgrMemEndTransfer(pMemoryManager, &surf, sizeof(CC_CRYPTOBUNDLE_STATS), 0);
4982             pKernelChannel->pEncStatsBuf = NULL;
4983             memdescDestroy(pKernelChannel->pEncStatsBufMemDesc);
4984             pKernelChannel->pEncStatsBufMemDesc = NULL;
4985         }
4986     }
4987     return NV_OK;
4988 }
4989 
4990 static NvNotification*
_kchannelGetKeyRotationNotifier(KernelChannel * pKernelChannel)4991 _kchannelGetKeyRotationNotifier(KernelChannel *pKernelChannel)
4992 {
4993     return pKernelChannel->pKeyRotationNotifier;
4994 }
4995