/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "kernel/gpu/fifo/kernel_fifo.h"
#include "kernel/gpu/fifo/kernel_channel.h"
#include "kernel/gpu/fifo/kernel_channel_group.h"
#include "kernel/gpu/device/device.h"
#include "kernel/gpu/subdevice/subdevice.h"
#include "kernel/gpu/subdevice/subdevice_diag.h"
#include "kernel/gpu/mem_mgr/mem_mgr.h"
#include "kernel/virtualization/hypervisor/hypervisor.h"
#include "kernel/core/locks.h"
#include "lib/base_utils.h"
#include "platform/sli/sli.h"

#include "vgpu/rpc.h"
#include "vgpu/vgpu_events.h"

#include "class/cl0080.h"
#include "class/cl2080.h"
#include "class/cl208f.h"

#include "ctrl/ctrl0080/ctrl0080fifo.h"

#include "kernel/gpu/conf_compute/conf_compute.h"

static NV_STATUS _kfifoGetCaps(OBJGPU *pGpu, NvU8 *pKfifoCaps);

/*!
 * @brief deviceCtrlCmdFifoGetChannelList
 *
 * Translates a list of channel handles into their channel IDs.
 */
NV_STATUS
deviceCtrlCmdFifoGetChannelList_IMPL
(
    Device *pDevice,
    NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *pChannelParams
)
{
    OBJGPU  *pGpu               = GPU_RES_GET_GPU(pDevice);
    NvU32   *pChannelHandleList = NvP64_VALUE(pChannelParams->pChannelHandleList);
    NvU32   *pChannelList       = NvP64_VALUE(pChannelParams->pChannelList);
    NvU32    counter;

    // Validate input: an empty channel list is invalid
    if (pChannelParams->numChannels == 0)
    {
        NV_PRINTF(LEVEL_ERROR,
                  "Invalid Params for command NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST\n");
        return NV_ERR_INVALID_ARGUMENT;
    }

    for (counter = 0; counter < pChannelParams->numChannels; counter++)
    {
        KernelChannel *pKernelChannel;
        NvU32 chid = NV0080_CTRL_FIFO_GET_CHANNELLIST_INVALID_CHANNEL;
        NV_STATUS status;

        // Search the RM client database for this channel handle.
        status = CliGetKernelChannel(RES_GET_CLIENT(pDevice),
            pChannelHandleList[counter], &pKernelChannel);

        if (status == NV_OK)
        {
            chid = pKernelChannel->ChID;

            // Amodel-specific: encode the runlist ID in the upper 16 bits
            if (pGpu && (IS_MODS_AMODEL(pGpu)))
            {
                chid |= ((kchannelGetRunlistId(pKernelChannel) & 0xffff) << 16);
            }
        }

        pChannelList[counter] = chid;
    }

    return NV_OK;
}
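
/*
 * Illustrative sketch (not part of the driver): how a kernel-RM caller
 * might issue NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST. The handles below are
 * hypothetical; the pRmApi->Control() pattern mirrors other internal call
 * sites in this codebase.
 *
 *     NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS params = {0};
 *     NvU32 handles[2] = { hChannelA, hChannelB };   // hypothetical handles
 *     NvU32 chids[2];
 *     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
 *
 *     params.numChannels        = 2;
 *     params.pChannelHandleList = NV_PTR_TO_NvP64(handles);
 *     params.pChannelList       = NV_PTR_TO_NvP64(chids);
 *
 *     // On success, chids[i] holds the channel ID for handles[i], or
 *     // NV0080_CTRL_FIFO_GET_CHANNELLIST_INVALID_CHANNEL if lookup failed.
 *     NV_STATUS status = pRmApi->Control(pRmApi, hClient, hDevice,
 *                                        NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST,
 *                                        &params, sizeof(params));
 */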

NV_STATUS
deviceCtrlCmdFifoIdleChannels_IMPL
(
    Device *pDevice,
    NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS *pParams
)
{
    NvBool        isGpuLockAcquired = NV_FALSE;
    NV_STATUS     status = NV_OK;
    OBJGPU       *pGpu = GPU_RES_GET_GPU(pDevice);
    CALL_CONTEXT *pCallContext  = resservGetTlsCallContext();
    RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams;

    // Check buffer size against maximum
    if (pParams->numChannels > NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS_MAX_CHANNELS)
        return NV_ERR_INVALID_ARGUMENT;

    //
    // Acquire GPU lock manually in control call body instead of letting Resource
    // Server do it to ensure that RM_LOCK_MODULES_FIFO is used.
    //
    if (!rmGpuLockIsOwner())
    {
        status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO);

        if (status != NV_OK)
            goto done;

        isGpuLockAcquired = NV_TRUE;
    }

    //
    // Send RPC if running in Guest/CPU-RM. Do this manually instead of ROUTE_TO_PHYSICAL
    // so that we can acquire the GPU lock in CPU-RM first.
    //
    if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
    {
        NV_RM_RPC_CONTROL(pGpu,
                          pRmCtrlParams->hClient,
                          pRmCtrlParams->hObject,
                          pRmCtrlParams->cmd,
                          pRmCtrlParams->pParams,
                          pRmCtrlParams->paramsSize,
                          status);
    }
    else
    {
        status = NV_ERR_NOT_SUPPORTED;
    }

done:

    if (isGpuLockAcquired)
        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);

    return status;
}

NV_STATUS
subdeviceCtrlCmdGetPhysicalChannelCount_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS *pParams
)
{
    OBJGPU   *pGpu          = GPU_RES_GET_GPU(pSubdevice);
    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
    NvU32     numChannelsInUse = 0;
    NvU32     numChannels;
    NvU32     i;
    NvU32     chGrpID;

    pParams->physChannelCount      = NV_U32_MAX;
    pParams->physChannelCountInUse = 0;

    // TODO: Follow up with clients before turning on per esched chidmgr
    for (i = 0; i < pKernelFifo->numChidMgrs; i++)
    {
        if (pKernelFifo->ppChidMgr[i] != NULL)
        {
            // Get the max number of HW channels on the runlist
            numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pKernelFifo->ppChidMgr[i]);

            // Get the number of channels already in use
            for (chGrpID = 0; chGrpID < numChannels; chGrpID++)
            {
                if (nvBitFieldTest(pKernelFifo->ppChidMgr[i]->channelGrpMgr.pHwIdInUse,
                                   pKernelFifo->ppChidMgr[i]->channelGrpMgr.hwIdInUseSz,
                                   chGrpID))
                {
                    numChannelsInUse++;
                }
            }

            pParams->physChannelCount      = NV_MIN(pParams->physChannelCount, numChannels);
            pParams->physChannelCountInUse = NV_MAX(pParams->physChannelCountInUse, numChannelsInUse);
        }
    }
    return NV_OK;
}

/*!
 * @brief subdeviceCtrlCmdFifoGetInfo
 *
 * Returns the FIFO property selected by the index of each entry in the
 * caller's info table.
 *
 * Lock Requirements:
 *      Assert that both the GPUs lock and API lock are held on entry.
 */
NV_STATUS
subdeviceCtrlCmdFifoGetInfo_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_FIFO_GET_INFO_PARAMS *pFifoInfoParams
)
{
    OBJGPU        *pGpu           = GPU_RES_GET_GPU(pSubdevice);
    KernelFifo    *pKernelFifo    = GPU_GET_KERNEL_FIFO(pGpu);
    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
    NV_STATUS      status         = NV_OK;
    NvU32          runlistId;
    CHID_MGR      *pChidMgr;
    NvU32          i;
    NvU32          data;

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    // Error check: bound the info table size
    if (pFifoInfoParams->fifoInfoTblSize > NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES)
        return NV_ERR_INVALID_PARAM_STRUCT;

    // Step through the info table
    for (i = 0; i < pFifoInfoParams->fifoInfoTblSize; i++)
    {
        switch (pFifoInfoParams->fifoInfoTbl[i].index)
        {
            case NV2080_CTRL_FIFO_INFO_INDEX_INSTANCE_TOTAL:
                data = memmgrGetRsvdMemorySize(pMemoryManager);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS:
                //
                // TODO: Follow up with clients using this control call before
                // turning on per esched chidmgr
                //
                data = kfifoGetMaxChannelGroupsInSystem(pGpu, pKernelFifo);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP:
                data = kfifoGetMaxChannelGroupSize_HAL(pKernelFifo);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE:
                //
                // TODO: Follow up with clients using this control call before
                // turning on per esched chidmgr
                //
                data = kfifoGetChannelGroupsInUse(pGpu, pKernelFifo);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP:
                //
                // RM-SMC AMPERE-TODO This data is incompatible with SMC, where
                // different engines can have different max VEID counts
                //
                data = kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, NV_FALSE);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_BAR1_USERD_START_OFFSET:
            {
                NvU64 userdAddr;
                NvU32 userdSize;
                NvU32 gfid;

                NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid));
                if (hypervisorIsVgxHyper() && IS_GFID_PF(gfid))
                {
                    status = kfifoGetUserdBar1MapInfo_HAL(pGpu, pKernelFifo, &userdAddr, &userdSize);
                    if (status == NV_OK)
                        data = (NvU32)(userdAddr >> NV2080_CTRL_FIFO_GET_INFO_USERD_OFFSET_SHIFT);
                }
                else
                {
                    data = 0;
                    status = NV_ERR_INVALID_REQUEST;
                }
                break;
            }
            case NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE:
                {
                    NvU64 timeslice = kfifoChannelGroupGetDefaultTimeslice_HAL(pKernelFifo);
                    data = NvU64_LO32(timeslice);
                    NV_ASSERT_OR_RETURN((NvU64_HI32(timeslice) == 0), NV_ERR_INVALID_PARAM_STRUCT);
                }
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_IS_PER_RUNLIST_CHANNEL_RAM_SUPPORTED:
                data = (NvU32) kfifoIsPerRunlistChramEnabled(pKernelFifo);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS_PER_ENGINE:
                // Get runlist ID for Engine type.
                NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo,
                                                                ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
                                                                gpuGetRmEngineType(pFifoInfoParams->engineType),
                                                                ENGINE_INFO_TYPE_RUNLIST,
                                                                &runlistId));
                pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, runlistId);
                data = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr);
                break;
            case NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE_PER_ENGINE:
                // Get runlist ID for Engine type.
                NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo,
                                                                ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
                                                                gpuGetRmEngineType(pFifoInfoParams->engineType),
                                                                ENGINE_INFO_TYPE_RUNLIST, &runlistId));
                data = kfifoGetRunlistChannelGroupsInUse(pGpu, pKernelFifo, runlistId);
                break;
            default:
                data = 0;
                status = NV_ERR_INVALID_ARGUMENT;
                break;
        }

        if (status != NV_OK)
            break;

        // save off data value
        pFifoInfoParams->fifoInfoTbl[i].data = data;
    }

    return status;
}
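
/*
 * Illustrative sketch (not part of the driver): querying two FIFO info
 * indices in a single NV2080_CTRL_CMD_FIFO_GET_INFO call. hClient and
 * hSubdevice are hypothetical; the field names match the handler above.
 *
 *     NV2080_CTRL_FIFO_GET_INFO_PARAMS params = {0};
 *     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
 *
 *     params.fifoInfoTblSize = 2;
 *     params.fifoInfoTbl[0].index = NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS;
 *     params.fifoInfoTbl[1].index = NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP;
 *
 *     NV_STATUS status = pRmApi->Control(pRmApi, hClient, hSubdevice,
 *                                        NV2080_CTRL_CMD_FIFO_GET_INFO,
 *                                        &params, sizeof(params));
 *     // On success, params.fifoInfoTbl[i].data holds each queried value.
 */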


/*!
 * @brief Get bitmask of allocated channels
 */
NV_STATUS subdeviceCtrlCmdFifoGetAllocatedChannels_IMPL
(
    Subdevice                                      *pSubdevice,
    NV2080_CTRL_FIFO_GET_ALLOCATED_CHANNELS_PARAMS *pParams
)
{
    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(GPU_RES_GET_GPU(pSubdevice));
    NV_STATUS status;

    status = kfifoGetAllocatedChannelMask(GPU_RES_GET_GPU(pSubdevice),
                                          pKernelFifo,
                                          pParams->runlistId,
                                          pParams->bitMask,
                                          sizeof pParams->bitMask);
    switch(status)
    {
    case NV_ERR_BUFFER_TOO_SMALL:
    case NV_ERR_INVALID_ARGUMENT:
        //
        // Update the ctrl call structure to have sufficient space for 1 bit
        // per possible channel in a runlist. This is a driver bug.
        //
        NV_ASSERT_OK(status);
        return NV_ERR_NOT_SUPPORTED;
    default:
        return status;
    }
}
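
/*
 * Illustrative sketch (not part of the driver): walking the bitmask
 * returned by NV2080_CTRL_CMD_FIFO_GET_ALLOCATED_CHANNELS. This assumes
 * bitMask is an NvU32 array, as implied by the byte size passed above;
 * hClient and hSubdevice are hypothetical.
 *
 *     NV2080_CTRL_FIFO_GET_ALLOCATED_CHANNELS_PARAMS params = {0};
 *     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
 *     NvU32 word, bit;
 *
 *     params.runlistId = 0;
 *     if (pRmApi->Control(pRmApi, hClient, hSubdevice,
 *                         NV2080_CTRL_CMD_FIFO_GET_ALLOCATED_CHANNELS,
 *                         &params, sizeof(params)) == NV_OK)
 *     {
 *         for (word = 0; word < NV_ARRAY_ELEMENTS(params.bitMask); word++)
 *             for (bit = 0; bit < 32; bit++)
 *                 if (params.bitMask[word] & NVBIT(bit))
 *                     ;   // channel ID (word * 32 + bit) is allocated
 *     }
 */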


/*!
 * @brief subdeviceCtrlCmdFifoGetUserdLocation
 *
 * Reports the aperture and caching attribute of the USERD allocation.
 *
 * Lock Requirements:
 *      Assert that both the API lock and GPUs lock are held on entry.
 */
NV_STATUS
subdeviceCtrlCmdFifoGetUserdLocation_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS *pUserdLocationParams
)
{
    NvU32      userdAperture;
    NvU32      userdAttribute;
    NV_STATUS  rmStatus = NV_OK;
    OBJGPU    *pGpu  = GPU_RES_GET_GPU(pSubdevice);
    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    rmStatus = kfifoGetUserdLocation_HAL(pKernelFifo,
                                         &userdAperture,
                                         &userdAttribute);

    if (rmStatus != NV_OK)
        return rmStatus;

    // Support for NVLINK coherent memory is not yet available in RM

    if (userdAperture == ADDR_FBMEM)
    {
        pUserdLocationParams->aperture = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_VIDMEM;
    }
    else if (userdAperture == ADDR_SYSMEM)
    {
        pUserdLocationParams->aperture = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_SYSMEM;
    }
    else
    {
        NV_PRINTF(LEVEL_ERROR, "Invalid userdAperture value = 0x%08x\n",
                  userdAperture);
        return NV_ERR_INVALID_STATE;
    }

    if (userdAttribute == NV_MEMORY_CACHED)
    {
        pUserdLocationParams->attribute = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_CACHED;
    }
    else if (userdAttribute == NV_MEMORY_UNCACHED)
    {
        pUserdLocationParams->attribute = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_UNCACHED;
    }
    else if (userdAttribute == NV_MEMORY_WRITECOMBINED)
    {
        pUserdLocationParams->attribute = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_WRITECOMBINED;
    }
    else
    {
        NV_PRINTF(LEVEL_ERROR, "Invalid userdAttribute value = 0x%08x\n",
                  userdAttribute);
        return NV_ERR_INVALID_STATE;
    }

    return rmStatus;
}
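
/*
 * Illustrative sketch (not part of the driver): reading back the USERD
 * location. hClient and hSubdevice are hypothetical, and the command
 * define is inferred from the params struct name; the aperture/attribute
 * values match those written by the handler above.
 *
 *     NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS params = {0};
 *     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
 *
 *     if ((pRmApi->Control(pRmApi, hClient, hSubdevice,
 *                          NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION,
 *                          &params, sizeof(params)) == NV_OK) &&
 *         (params.aperture == NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_VIDMEM))
 *     {
 *         // USERD lives in video memory; params.attribute reports the
 *         // caching attribute (CACHED / UNCACHED / WRITECOMBINED).
 *     }
 */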

/*!
 * @brief subdeviceCtrlCmdFifoGetChannelMemInfo
 *
 * Reports the memory descriptors backing a channel: instance block, RAMFC,
 * and per-runqueue method buffers.
 *
 * Lock Requirements:
 *      Assert that both the API lock and GPUs lock are held on entry.
 */
NV_STATUS
subdeviceCtrlCmdFifoGetChannelMemInfo_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS *pChannelMemParams
)
{
    OBJGPU    *pGpu     = GPU_RES_GET_GPU(pSubdevice);
    NvHandle   hDevice  = RES_GET_PARENT_HANDLE(pSubdevice);
    RsClient  *pClient  = RES_GET_CLIENT(pSubdevice);
    NV_STATUS  rmStatus = NV_OK;
    NvU32      index;
    NvU32      runqueues;
    KernelFifo *pKernelFifo     = GPU_GET_KERNEL_FIFO(pGpu);
    KernelChannel *pKernelChannel;
    MEMORY_DESCRIPTOR *pMemDesc = NULL;
    NV2080_CTRL_FIFO_CHANNEL_MEM_INFO chMemInfo;

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    rmStatus = CliGetKernelChannelWithDevice(pClient,
                                             hDevice,
                                             pChannelMemParams->hChannel,
                                             &pKernelChannel);
    if (rmStatus != NV_OK)
    {
        return NV_ERR_INVALID_CHANNEL;
    }

    portMemSet((void *)&chMemInfo, 0, sizeof(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO));

    // Get Inst Block Mem Info
    rmStatus = kfifoChannelGetFifoContextMemDesc_HAL(pGpu,
                                                     pKernelFifo,
                                                     pKernelChannel,
                                                     FIFO_CTX_INST_BLOCK,
                                                     &pMemDesc);
    if (rmStatus != NV_OK)
        return rmStatus;

    kfifoFillMemInfo(pKernelFifo, pMemDesc, &chMemInfo.inst);

    // Get RAMFC mem Info
    pMemDesc = NULL;
    rmStatus = kfifoChannelGetFifoContextMemDesc_HAL(pGpu,
                                          pKernelFifo,
                                          pKernelChannel,
                                          FIFO_CTX_RAMFC,
                                          &pMemDesc);

    if (rmStatus != NV_OK)
        return rmStatus;

    kfifoFillMemInfo(pKernelFifo, pMemDesc, &chMemInfo.ramfc);

    // Get Method buffer mem info
    runqueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo);
    NV_ASSERT((runqueues <= NV2080_CTRL_FIFO_GET_CHANNEL_MEM_INFO_MAX_COUNT));
    for (index = 0; index < runqueues; index++)
    {
        pMemDesc = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pMthdBuffers[index].pMemDesc;
        if (pMemDesc != NULL)
        {
            kfifoFillMemInfo(pKernelFifo, pMemDesc, &chMemInfo.methodBuf[index]);
            chMemInfo.methodBufCount++;
        }
    }

    // Copy into the caller's kernel structure; there is no userland pointer
    // here. The structure may be copied out to userland later.
    portMemCopy(&pChannelMemParams->chMemInfo,
                sizeof(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO),
                &chMemInfo,
                sizeof(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO));

    return rmStatus;
}

NV_STATUS
diagapiCtrlCmdFifoEnableVirtualContext_IMPL
(
    DiagApi *pDiagApi,
    NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS *pEnableVCParams
)
{
    NV_STATUS      rmStatus = NV_OK;
    KernelChannel *pKernelChannel = NULL;
    RsClient      *pClient = RES_GET_CLIENT(pDiagApi);
    Device        *pDevice = GPU_RES_GET_DEVICE(pDiagApi);

    NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
        CliGetKernelChannelWithDevice(pClient,
                                      RES_GET_HANDLE(pDevice),
                                      pEnableVCParams->hChannel,
                                      &pKernelChannel));

    rmStatus = kchannelEnableVirtualContext_HAL(pKernelChannel);
    return rmStatus;
}

/*!
 * @brief subdeviceCtrlCmdFifoUpdateChannelInfo
 *
 * This function is broken for SLI; it will be fixed once the instance
 * block and USERD are made unicast.
 *
 * Lock Requirements:
 *      Assert that both the API lock and GPUs lock are held on entry.
 */
NV_STATUS
subdeviceCtrlCmdFifoUpdateChannelInfo_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS *pChannelInfo
)
{
    CALL_CONTEXT             *pCallContext  = resservGetTlsCallContext();
    RmCtrlParams             *pRmCtrlParams = pCallContext->pControlParams;
    OBJGPU                   *pGpu           = GPU_RES_GET_GPU(pSubdevice);
    RsClient                 *pChannelClient;
    NvHandle                  hClient        = RES_GET_CLIENT_HANDLE(pSubdevice);
    KernelChannel            *pKernelChannel = NULL;
    NV_STATUS                 status         = NV_OK;
    NvU64                     userdAddr      = 0;
    NvU32                     userdAper      = 0;

    // Bug 724186 -- Skip this check for deferred API
    LOCK_ASSERT_AND_RETURN(pRmCtrlParams->bDeferredApi || rmGpuLockIsOwner());

    NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
                          serverGetClientUnderLock(&g_resServ,
                                                   pChannelInfo->hClient,
                                                   &pChannelClient));

    NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
                          CliGetKernelChannel(pChannelClient,
                                              pChannelInfo->hChannel,
                                              &pKernelChannel));
    NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL);

    if (!pChannelInfo->hUserdMemory)
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    if (!pKernelChannel->bClientAllocatedUserD)
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
    {
        NV_RM_RPC_CONTROL(pGpu,
                          pRmCtrlParams->hClient,
                          pRmCtrlParams->hObject,
                          pRmCtrlParams->cmd,
                          pRmCtrlParams->pParams,
                          pRmCtrlParams->paramsSize,
                          status);
        if (status != NV_OK)
            return status;

        // Destroy the submemdescriptor of the previous USERD
        kchannelDestroyUserdMemDesc_HAL(pGpu, pKernelChannel);

        // Get the USERD hMemory, create a submemdescriptor for it, and
        // store that descriptor in pKernelChannel.
        status = kchannelCreateUserdMemDesc_HAL(pGpu, pKernelChannel, hClient,
                                   pChannelInfo->hUserdMemory,
                                   pChannelInfo->userdOffset,
                                   &userdAddr, &userdAper);
        if (status != NV_OK)
        {
            NV_PRINTF(LEVEL_ERROR,
                      "kchannelCreateUserdMemDesc_HAL failed for hClient 0x%x "
                      "and channel 0x%x status 0x%x\n",
                      hClient, kchannelGetDebugTag(pKernelChannel), status);
        }
    }
    else
    {
        status = NV_ERR_NOT_SUPPORTED;
    }

    return status;
}

NV_STATUS
diagapiCtrlCmdFifoGetChannelState_IMPL
(
    DiagApi *pDiagApi,
    NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDiagApi);
    RsClient *pChannelClient;
    KernelChannel *pKernelChannel;

    NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
        serverGetClientUnderLock(&g_resServ, pChannelStateParams->hClient,
            &pChannelClient));

    NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
        CliGetKernelChannel(pChannelClient, pChannelStateParams->hChannel, &pKernelChannel));
    NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
        kchannelGetChannelPhysicalState(pGpu, pKernelChannel, pChannelStateParams));

    // Fill out kernel state here
    pChannelStateParams->bCpuMap     = kchannelIsCpuMapped(pGpu, pKernelChannel);
    pChannelStateParams->bRunlistSet = kchannelIsRunlistSet(pGpu, pKernelChannel);

    return NV_OK;
}
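
/*
 * Illustrative sketch (not part of the driver): a diag client querying
 * channel state through the NV208F class. All handles are hypothetical,
 * and the command define is inferred from the params struct name; the
 * output fields match those filled in above.
 *
 *     NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS params = {0};
 *     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
 *
 *     params.hClient  = hTargetClient;     // client owning the channel
 *     params.hChannel = hTargetChannel;    // channel to query
 *
 *     NV_STATUS status = pRmApi->Control(pRmApi, hClient, hDiag,
 *                                        NV208F_CTRL_CMD_FIFO_GET_CHANNEL_STATE,
 *                                        &params, sizeof(params));
 *     // On success, params.bCpuMap and params.bRunlistSet report the
 *     // kernel-side state alongside the channel's physical state.
 */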

static NV_STATUS
_kfifoGetCaps
(
    OBJGPU *pGpu,
    NvU8   *pKfifoCaps
)
{
    NV_STATUS   rmStatus         = NV_OK;
    NvBool      bCapsInitialized = NV_FALSE;
    KernelFifo *pKernelFifo      = GPU_GET_KERNEL_FIFO(pGpu);

    VERIFY_OBJ_PTR(pKernelFifo);

    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
    {
        pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
        if (pKernelFifo == NULL)
        {
            rmStatus = NV_ERR_INVALID_POINTER;
            SLI_LOOP_BREAK;
        }
        kfifoGetDeviceCaps(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized);
        bCapsInitialized = NV_TRUE;
    }
    SLI_LOOP_END

    return rmStatus;
}

/*!
 * @brief deviceCtrlCmdFifoGetCaps
 *
 * Lock Requirements:
 *      Assert that both the API lock and GPUs lock are held on entry.
 */
NV_STATUS
deviceCtrlCmdFifoGetCaps_IMPL
(
    Device *pDevice,
    NV0080_CTRL_FIFO_GET_CAPS_PARAMS *pKfifoCapsParams
)
{
    OBJGPU  *pGpu      = GPU_RES_GET_GPU(pDevice);
    NvU8    *pKfifoCaps = NvP64_VALUE(pKfifoCapsParams->capsTbl);

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    // sanity check array size
    if (pKfifoCapsParams->capsTblSize != NV0080_CTRL_FIFO_CAPS_TBL_SIZE)
    {
        NV_PRINTF(LEVEL_ERROR, "size mismatch: client 0x%x rm 0x%x\n",
                  pKfifoCapsParams->capsTblSize,
                  NV0080_CTRL_FIFO_CAPS_TBL_SIZE);
        return NV_ERR_INVALID_ARGUMENT;
    }

    // now accumulate caps for entire device
    return _kfifoGetCaps(pGpu, pKfifoCaps);
}

/*!
 * @brief deviceCtrlCmdFifoGetCapsV2
 *
 * Lock Requirements:
 *      Assert that both the API lock and GPUs lock are held on entry.
 */
NV_STATUS
deviceCtrlCmdFifoGetCapsV2_IMPL
(
    Device *pDevice,
    NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS *pKfifoCapsParams
)
{
    OBJGPU    *pGpu      = GPU_RES_GET_GPU(pDevice);
    NvU8      *pKfifoCaps = pKfifoCapsParams->capsTbl;

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    // now accumulate caps for entire device
    return _kfifoGetCaps(pGpu, pKfifoCaps);
}
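
/*
 * Illustrative sketch (not part of the driver): testing a capability bit
 * from the V2 caps table. This assumes the usual GET_CAP convention from
 * the ctrl headers; NV0080_CTRL_FIFO_CAPS_xxx stands in for any cap define
 * published in ctrl0080fifo.h, and hClient/hDevice are hypothetical.
 *
 *     NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS params = {0};
 *     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
 *
 *     if ((pRmApi->Control(pRmApi, hClient, hDevice,
 *                          NV0080_CTRL_CMD_FIFO_GET_CAPS_V2,
 *                          &params, sizeof(params)) == NV_OK) &&
 *         NV0080_CTRL_FIFO_GET_CAP(params.capsTbl, NV0080_CTRL_FIFO_CAPS_xxx))
 *     {
 *         // The cap is set for every GPU in the device (caps are
 *         // accumulated across the broadcast SLI loop above).
 *     }
 */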

/**
 * @brief Disables or enables the given channels.
 */
NV_STATUS
subdeviceCtrlCmdFifoDisableChannels_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS *pDisableChannelParams
)
{
    NV_STATUS       status        = NV_OK;
    OBJGPU         *pGpu          = GPU_RES_GET_GPU(pSubdevice);
    CALL_CONTEXT   *pCallContext  = resservGetTlsCallContext();
    RmCtrlParams   *pRmCtrlParams = pCallContext->pControlParams;

    // Validate pRunlistPreemptEvent: it may be used by kernel clients only
    if ((pDisableChannelParams->pRunlistPreemptEvent != NULL) &&
        (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL))
    {
        return NV_ERR_INSUFFICIENT_PERMISSIONS;
    }

    // Send RPC to handle message on Host-RM
    if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
    {
        NV_RM_RPC_CONTROL(pGpu,
                          pRmCtrlParams->hClient,
                          pRmCtrlParams->hObject,
                          pRmCtrlParams->cmd,
                          pRmCtrlParams->pParams,
                          pRmCtrlParams->paramsSize,
                          status);
    }
    // Send internal control call to actually disable channels
    else
    {
        status = NV_ERR_NOT_SUPPORTED;
    }

    return status;
}

/**
 * @brief Disables and preempts the given channels and marks
 *        them disabled for key rotation. Conditionally also marks
 *        them for re-enablement.
 */
NV_STATUS
subdeviceCtrlCmdFifoDisableChannelsForKeyRotation_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_PARAMS *pDisableChannelParams
)
{
    NV_STATUS       status        = NV_OK;
    NV_STATUS       tmpStatus     = NV_OK;
    OBJGPU         *pGpu          = GPU_RES_GET_GPU(pSubdevice);
    CALL_CONTEXT   *pCallContext  = resservGetTlsCallContext();
    RmCtrlParams   *pRmCtrlParams = pCallContext->pControlParams;
    NvU32           i;
    KernelChannel  *pKernelChannel = NULL;

    NV_CHECK_OR_RETURN(LEVEL_INFO,
        pDisableChannelParams->numChannels <= NV_ARRAY_ELEMENTS(pDisableChannelParams->hChannelList),
        NV_ERR_INVALID_ARGUMENT);
    ct_assert(NV_ARRAY_ELEMENTS(pDisableChannelParams->hClientList) == \
              NV_ARRAY_ELEMENTS(pDisableChannelParams->hChannelList));

    // Send RPC to handle message on Host-RM
    if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))
    {
        NV_RM_RPC_CONTROL(pGpu,
                          pRmCtrlParams->hClient,
                          pRmCtrlParams->hObject,
                          pRmCtrlParams->cmd,
                          pRmCtrlParams->pParams,
                          pRmCtrlParams->paramsSize,
                          status);
    }
    // Send internal control call to actually disable and preempt the channels
    else
    {
        status = NV_ERR_NOT_SUPPORTED;
        NV_ASSERT_OR_RETURN(status == NV_OK, status);
    }

    // Loop through all the channels and mark them disabled
    for (i = 0; i < pDisableChannelParams->numChannels; i++)
    {
        RsClient              *pClient = NULL;
        tmpStatus = serverGetClientUnderLock(&g_resServ,
                                             pDisableChannelParams->hClientList[i],
                                             &pClient);
        if (tmpStatus != NV_OK)
        {
            status = tmpStatus;
            NV_PRINTF(LEVEL_ERROR,
                      "Failed to get client with hClient = 0x%x status = 0x%x\n",
                      pDisableChannelParams->hClientList[i], status);
            continue;
        }
        tmpStatus = CliGetKernelChannel(pClient,
                                        pDisableChannelParams->hChannelList[i],
                                        &pKernelChannel);
        if (tmpStatus != NV_OK)
        {
            status = tmpStatus;
            NV_PRINTF(LEVEL_ERROR,
                      "Failed to get channel with hClient = 0x%x hChannel = 0x%x status = 0x%x\n",
                      pDisableChannelParams->hClientList[i],
                      pDisableChannelParams->hChannelList[i], status);
            continue;
        }
        kchannelDisableForKeyRotation(pGpu, pKernelChannel, NV_TRUE);
        kchannelEnableAfterKeyRotation(pGpu, pKernelChannel, pDisableChannelParams->bEnableAfterKeyRotation);
    }

    if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) &&
        (pKernelChannel != NULL))
    {
        NvU32 h2dKey, d2hKey;
        KEY_ROTATION_STATUS state;
        ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);

        NV_ASSERT_OK_OR_RETURN(confComputeGetKeyPairByChannel_HAL(pGpu, pConfCompute, pKernelChannel, &h2dKey, &d2hKey));
        NV_ASSERT_OK_OR_RETURN(confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &state));
        if (state == KEY_ROTATION_STATUS_PENDING)
        {
            NV_ASSERT_OK_OR_RETURN(confComputeCheckAndScheduleKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey));
        }
    }
    return status;
}