1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "rmapi/rmapi.h"
25 #include "rmapi/control.h"
26 #include "rmapi/client.h"
27 #include "rmapi/rs_utils.h"
28 #include "diagnostics/tracer.h"
29 #include "core/locks.h"
30 #include "core/thread_state.h"
31 #include "virtualization/hypervisor/hypervisor.h"
32 #include "gpu/device/device.h"
33 
34 #include "entry_points.h"
35 #include "resserv/rs_access_map.h"
36 #include "gpu/gpu.h"
37 #include "gpu/subdevice/subdevice.h"
38 #include "rmapi/rmapi_utils.h"
39 
40 #include "ctrl/ctrl0000/ctrl0000client.h" // NV0000_CTRL_CMD_CLIENT_*
41 #include "ctrl/ctrl0000/ctrl0000gpu.h" // NV0000_CTRL_CMD_GPU_*
42 #include "ctrl/ctrl0000/ctrl0000system.h" // NV0000_CTRL_CMD_SYSTEM_*
43 #include "ctrl/ctrl0000/ctrl0000syncgpuboost.h" // NV0000_CTRL_CMD_SYNC_GPU_BOOST_*
44 #include "ctrl/ctrl0000/ctrl0000nvd.h" // NV0000_CTRL_CMD_NVD_*
45 #include "ctrl/ctrl2080/ctrl2080rc.h" // NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM
#include "ctrl/ctrl0002.h" // NV0002_CTRL_CMD_*_CONTEXTDMA
47 #include "ctrl/ctrl906f.h" // NV906F_CTRL_CMD_GET_MMU_FAULT_INFO
48 #include "ctrl/ctrlc370/ctrlc370chnc.h" // NVC370_CTRL_CMD_*
#include "ctrl/ctrl9010.h" // NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION
50 #include "ctrl/ctrl2080/ctrl2080tmr.h" // NV2080_CTRL_CMD_TIMER_*
51 #include "ctrl/ctrl0000/ctrl0000gpuacct.h" // NV0000_CTRL_CMD_GPUACCT_*
52 
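//
// Clear a deferred-command slot and mark it free for reuse by
// _rmControlDeferred. The param buffer is wiped before the slot is marked
// RMCTRL_DEFERRED_FREE.
//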
53 static NV_STATUS
54 releaseDeferRmCtrlBuffer(RmCtrlDeferredCmd* pRmCtrlDeferredCmd)
55 {
56     portMemSet(&pRmCtrlDeferredCmd->paramBuffer, 0, RMCTRL_DEFERRED_MAX_PARAM_SIZE);
57 
58     portAtomicSetS32(&pRmCtrlDeferredCmd->pending, RMCTRL_DEFERRED_FREE);
59 
60     return NV_OK;
61 }
62 
//
// rmControl internal handler for deferred calls: replays a command that was
// queued by _rmControlDeferred because it could not be serviced immediately
// at raised IRQL.
//
67 
68 NV_STATUS
69 rmControl_Deferred(RmCtrlDeferredCmd* pRmCtrlDeferredCmd)
70 {
71     RmCtrlParams rmCtrlParams;
72     NvU8 paramBuffer[RMCTRL_DEFERRED_MAX_PARAM_SIZE];
73     NV_STATUS status;
74     RS_LOCK_INFO lockInfo = {0};
75     RS_CONTROL_COOKIE rmCtrlExecuteCookie = {0};
76 
77     // init RmCtrlParams
78     portMemCopy(&rmCtrlParams, sizeof(RmCtrlParams), &pRmCtrlDeferredCmd->rmCtrlDeferredParams, sizeof(RmCtrlParams));
79     rmCtrlParams.hParent    = NV01_NULL_OBJECT;
80     rmCtrlParams.pGpu       = NULL;
81     rmCtrlParams.pLockInfo = &lockInfo;
82     rmCtrlParams.pCookie = &rmCtrlExecuteCookie;
83 
84     // Temporary: tell ResServ not to take any locks
85     lockInfo.flags = RM_LOCK_FLAGS_NO_GPUS_LOCK |
86                      RM_LOCK_FLAGS_NO_CLIENT_LOCK;
87 
88     if (rmapiLockIsOwner())
89     {
90         lockInfo.state = RM_LOCK_STATES_API_LOCK_ACQUIRED;
91     }
92     else
93     {
94         lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK;
95     }
96 
    // _rmControlDeferred has already verified that paramsSize does not exceed RMCTRL_DEFERRED_MAX_PARAM_SIZE
98     if ((rmCtrlParams.pParams != NvP64_NULL) && (rmCtrlParams.paramsSize != 0))
99     {
100         // copy param to a local buffer so that pRmCtrlDeferredCmd can be released
101         portMemSet(paramBuffer, 0, RMCTRL_DEFERRED_MAX_PARAM_SIZE);
102         portMemCopy(paramBuffer, rmCtrlParams.paramsSize, rmCtrlParams.pParams, rmCtrlParams.paramsSize);
103         rmCtrlParams.pParams = paramBuffer;
104     }
105 
106     releaseDeferRmCtrlBuffer(pRmCtrlDeferredCmd);
107 
    // The client was validated when the request first came in through rmControl(),
    // but check again in case it has since been freed.
110     if (serverutilGetClientUnderLock(rmCtrlParams.hClient) == NULL)
111     {
112         status = NV_ERR_INVALID_CLIENT;
113         goto exit;
114     }
115 
116     status = serverControl(&g_resServ, &rmCtrlParams);
117 
118 exit:
119 
120     if (status != NV_OK)
121     {
122         NV_PRINTF(LEVEL_WARNING, "deferred rmctrl %x failed %x!\n",
123                   rmCtrlParams.cmd, status);
124     }
125 
126     return status;
127 }
128 
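//
// Queue a raised-IRQL rmctrl for deferred execution: copy the request and its
// params into a free pGpu->pRmCtrlDeferredCmd slot and mark it READY, to be
// replayed later by rmControl_Deferred.
//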
129 static NV_STATUS
130 _rmControlDeferred(RmCtrlParams *pRmCtrlParams, NvP64 pUserParams, NvU32 paramsSize)
131 {
132     // Schedule a deferred rmctrl call
133     OBJGPU     *pGpu;
134     NvBool      bBcResource;
135     NV_STATUS   rmStatus;
136     RsClient   *pClient;
137 
    // We can't allocate memory at DIRQL, so use a pre-allocated buffer to store the rmctrl params.
    // If the params are larger than RMCTRL_DEFERRED_MAX_PARAM_SIZE, fail the call.
141     if (paramsSize > RMCTRL_DEFERRED_MAX_PARAM_SIZE)
142     {
143         NV_PRINTF(LEVEL_WARNING,
144                   "rmctrl param size (%d) larger than limit (%d).\n",
145                   paramsSize, RMCTRL_DEFERRED_MAX_PARAM_SIZE);
146         rmStatus = NV_ERR_INSUFFICIENT_RESOURCES;
147         goto done;
148     }
149 
150     rmStatus = serverGetClientUnderLock(&g_resServ, pRmCtrlParams->hClient, &pClient);
151     if (rmStatus != NV_OK)
152         return rmStatus;
153 
154     rmStatus = gpuGetByHandle(pClient, pRmCtrlParams->hObject, &bBcResource, &pGpu);
155     if (rmStatus != NV_OK)
156         return rmStatus;
157 
158     // Set SLI BC state for thread
159     gpuSetThreadBcState(pGpu, bBcResource);
160 
161     pRmCtrlParams->pGpu = pGpu;
162     pRmCtrlParams->pLockInfo = NULL;
163 
164     switch (pRmCtrlParams->cmd)
165     {
        // There is no bit available in RmCtrlParams.cmd to mark a rmctrl as
        // deferrable, so use an explicit command list here.
168         case NV2080_CTRL_CMD_TIMER_SCHEDULE:
169         {
170             if (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED)
171             {
172                 OBJSYS    *pSys = SYS_GET_INSTANCE();
173                 NvU32 idx;
174 
175                 for ( idx = 0; idx < MAX_DEFERRED_CMDS; idx++)
176                 {
177                     if (portAtomicCompareAndSwapS32(&pGpu->pRmCtrlDeferredCmd[idx].pending,
178                                                     RMCTRL_DEFERRED_ACQUIRED,
179                                                     RMCTRL_DEFERRED_FREE))
180                     {
181                         portMemCopy(&pGpu->pRmCtrlDeferredCmd[idx].rmCtrlDeferredParams,
182                                     sizeof(RmCtrlParams), pRmCtrlParams, sizeof(RmCtrlParams));
183 
184                         // copyin param to kernel buffer for deferred rmctrl
185                         if (paramsSize != 0 && pUserParams != 0)
186                         {
187                             portMemCopy(pGpu->pRmCtrlDeferredCmd[idx].paramBuffer, paramsSize,
188                                         NvP64_VALUE(pUserParams), paramsSize);
189 
190                             if (paramsSize < RMCTRL_DEFERRED_MAX_PARAM_SIZE)
191                             {
192                                 portMemSet(pGpu->pRmCtrlDeferredCmd[idx].paramBuffer +
193                                            paramsSize,
194                                            0, RMCTRL_DEFERRED_MAX_PARAM_SIZE - paramsSize);
195                             }
196 
197                             pGpu->pRmCtrlDeferredCmd[idx].rmCtrlDeferredParams.pParams =
198                                 pGpu->pRmCtrlDeferredCmd[idx].paramBuffer;
199                         }
200 
201                         portAtomicSetS32(&pGpu->pRmCtrlDeferredCmd[idx].pending,
202                                          RMCTRL_DEFERRED_READY);
203 
                        // Make sure there's a release call to trigger the deferred rmctrl.
                        // The previous rmctrl holding the lock may have already finished
                        // (and released its lock) in the window between this rmctrl failing
                        // to acquire the lock and the pending flag being set above.
208 
209                         // LOCK: try to acquire GPUs lock
210                         if (rmGpuLocksAcquire(GPU_LOCK_FLAGS_COND_ACQUIRE,
211                                               RM_LOCK_MODULES_CLIENT) == NV_OK)
212                         {
213                             if (osCondAcquireRmSema(pSys->pSema) == NV_OK)
214                             {
215                                 // In case this is called from device interrupt, use pGpu to queue DPC.
216                                 osReleaseRmSema(pSys->pSema, pGpu);
217                             }
218                             // In case this is called from device interrupt, use pGpu to queue DPC.
219                             // UNLOCK: release GPUs lock
220                             rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, pGpu);
221                         }
222 
223                         rmStatus = NV_OK;
224                         goto done;
225                     }
226                 }
227             }
228 
229             rmStatus = NV_ERR_STATE_IN_USE;
230             break;
231         }
232 
233         default:
234             rmStatus = NV_ERR_BUSY_RETRY;
235             break;
236     }
237 
238 done:
239     return rmStatus;
240 }
241 
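//
// Copy control-call parameters into kernel space before command execution.
// Acquires the top-level param copy plus any embedded param copies, and
// records in the cookie which copies serverControlApiCopyOut must release.
//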
242 NV_STATUS
243 serverControlApiCopyIn
244 (
245     RsServer                        *pServer,
246     RS_RES_CONTROL_PARAMS_INTERNAL  *pRmCtrlParams,
247     RS_CONTROL_COOKIE               *pCookie
248 )
249 {
250     NV_STATUS rmStatus;
251     API_STATE *pParamCopy;
252     API_STATE *pEmbeddedParamCopies;
253     NvP64 pUserParams;
254     NvU32 paramsSize;
255 
256     NV_ASSERT_OR_RETURN(pCookie != NULL, NV_ERR_INVALID_ARGUMENT);
257     pParamCopy = &pCookie->paramCopy;
258     pEmbeddedParamCopies = pCookie->embeddedParamCopies;
259     pUserParams = NV_PTR_TO_NvP64(pRmCtrlParams->pParams);
260     paramsSize = pRmCtrlParams->paramsSize;
261 
262     RMAPI_PARAM_COPY_INIT(*pParamCopy, pRmCtrlParams->pParams, pUserParams, 1, paramsSize);
263 
264     if (pCookie->apiCopyFlags & RMCTRL_API_COPY_FLAGS_SKIP_COPYIN_ZERO_BUFFER)
265     {
266         pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN;
267         pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER;
268     }
269 
270     rmStatus = rmapiParamsAcquire(pParamCopy, (pRmCtrlParams->secInfo.paramLocation == PARAM_LOCATION_USER));
271     if (rmStatus != NV_OK)
272         return rmStatus;
273     pCookie->bFreeParamCopy = NV_TRUE;
274 
275     rmStatus = embeddedParamCopyIn(pEmbeddedParamCopies, pRmCtrlParams);
276     if (rmStatus != NV_OK)
277         return rmStatus;
278     pCookie->bFreeEmbeddedCopy = NV_TRUE;
279 
280     return NV_OK;
281 }
282 
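//
// Copy control-call results back to the caller and release the copies acquired
// by serverControlApiCopyIn. Copy-out is skipped on error unless the control is
// flagged RMCTRL_FLAGS_COPYOUT_ON_ERROR; on success, results are written to the
// control cache when RMCTRL_API_COPY_FLAGS_SET_CONTROL_CACHE is set.
//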
283 NV_STATUS
284 serverControlApiCopyOut
285 (
286     RsServer                       *pServer,
287     RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams,
288     RS_CONTROL_COOKIE              *pCookie,
289     NV_STATUS                       rmStatus
290 )
291 {
292     NV_STATUS  cpStatus;
293     API_STATE *pParamCopy;
294     API_STATE *pEmbeddedParamCopies;
295     NvP64      pUserParams;
296     NvBool     bFreeEmbeddedCopy;
297     NvBool     bFreeParamCopy;
298 
299     NV_ASSERT_OR_RETURN(pCookie != NULL, NV_ERR_INVALID_ARGUMENT);
300     NV_ASSERT_OR_RETURN(pRmCtrlParams != NULL, NV_ERR_INVALID_ARGUMENT);
301 
302     if ((pCookie->apiCopyFlags & RMCTRL_API_COPY_FLAGS_SET_CONTROL_CACHE) && rmStatus == NV_OK)
303     {
304         rmapiControlCacheSet(pRmCtrlParams->hClient,
305                              pRmCtrlParams->hObject,
306                              pRmCtrlParams->cmd,
307                              pRmCtrlParams->pParams,
308                              pRmCtrlParams->paramsSize);
309     }
310 
311     pParamCopy = &pCookie->paramCopy;
312     pEmbeddedParamCopies = pCookie->embeddedParamCopies;
313     pUserParams = pCookie->paramCopy.pUserParams;
314     bFreeParamCopy = pCookie->bFreeParamCopy;
315     bFreeEmbeddedCopy = pCookie->bFreeEmbeddedCopy;
316 
317     if ((rmStatus != NV_OK) &&
318         (!(pCookie->ctrlFlags & RMCTRL_FLAGS_COPYOUT_ON_ERROR) ||
319         (pCookie->apiCopyFlags & RMCTRL_API_COPY_FLAGS_FORCE_SKIP_COPYOUT_ON_ERROR)))
320     {
321         pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT;
322 
323         if (bFreeEmbeddedCopy)
324         {
325             pEmbeddedParamCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT;
326             pEmbeddedParamCopies[1].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT;
327             pEmbeddedParamCopies[2].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT;
328             pEmbeddedParamCopies[3].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT;
329         }
330     }
331 
332     if (bFreeEmbeddedCopy)
333     {
334         cpStatus = embeddedParamCopyOut(pEmbeddedParamCopies, pRmCtrlParams);
335         if (rmStatus == NV_OK)
336             rmStatus = cpStatus;
337         pCookie->bFreeEmbeddedCopy = NV_FALSE;
338     }
339 
340     if (bFreeParamCopy)
341     {
342         cpStatus = rmapiParamsRelease(pParamCopy);
343         if (rmStatus == NV_OK)
344             rmStatus = cpStatus;
345         pRmCtrlParams->pParams = NvP64_VALUE(pUserParams);
346         pCookie->bFreeParamCopy = NV_FALSE;
347     }
348 
349     return rmStatus;
350 }
351 
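// Commands that may be issued with NVOS54_FLAGS_IRQL_RAISED, i.e. while running
// at raised IRQ level.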
352 static NvBool _rmapiRmControlCanBeRaisedIrql(NvU32 cmd)
353 {
354     switch (cmd)
355     {
356         case NV2080_CTRL_CMD_TIMER_SCHEDULE:
357         case NV2080_CTRL_CMD_TIMER_GET_TIME:
        // The two NVC370 control calls below are used for flip canceling (HW Flip Queue).
        // We use TRASH/ABORT mode to discard queued HW commands in the push buffer (bug 200644346).
360         case NVC370_CTRL_CMD_SET_ACCL:
361         case NVC370_CTRL_CMD_GET_CHANNEL_INFO:
362         case NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION:
363             return NV_TRUE;
364         default:
365             return NV_FALSE;
366     }
367 }
368 
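// Commands that external clients may issue with NVOS54_FLAGS_LOCK_BYPASS, i.e.
// without taking the RM locks. Internal requests are not restricted to this list.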
369 static NvBool _rmapiRmControlCanBeBypassLock(NvU32 cmd)
370 {
371     switch (cmd)
372     {
373         case NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM:
374         case NV2080_CTRL_CMD_TIMER_GET_TIME:
375         case NV906F_CTRL_CMD_GET_MMU_FAULT_INFO:
        // The two NVC370 control calls below are used for flip canceling (HW Flip Queue).
        // We use TRASH/ABORT mode to discard queued HW commands in the push buffer (bug 200644346).
378         case NVC370_CTRL_CMD_SET_ACCL:
379         case NVC370_CTRL_CMD_GET_CHANNEL_INFO:
380         case NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS:
381         case NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION:
382         case NV2080_CTRL_CMD_NVD_SET_NOCAT_JOURNAL_DATA:
383             return NV_TRUE;
384         default:
385             return NV_FALSE;
386     }
387 }
388 
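//
// Common entry point for rmctrl requests issued through the RM_API layer.
// Validates the client and parameters, then dispatches the command in one of
// three modes: lock bypass, raised IRQL (serviced immediately if the locks can
// be acquired, otherwise deferred), or a normal locked request through resserv.
//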
389 static NV_STATUS
390 _rmapiRmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd, NvP64 pUserParams, NvU32 paramsSize, NvU32 flags, RM_API *pRmApi, API_SECURITY_INFO *pSecInfo)
391 {
392     OBJSYS    *pSys = SYS_GET_INSTANCE();
393     RmCtrlParams rmCtrlParams;
394     RS_CONTROL_COOKIE rmCtrlExecuteCookie = {0};
395     NvBool bIsRaisedIrqlCmd;
396     NvBool bIsLockBypassCmd;
397     NvBool bInternalRequest;
398     NV_STATUS  rmStatus = NV_OK;
399     RS_LOCK_INFO        lockInfo = {0};
400     NvU32 ctrlFlags = 0;
401     NvU32 ctrlAccessRight = 0;
402     NV_STATUS getCtrlInfoStatus;
403 
404     RMTRACE_RMAPI(_RMCTRL_ENTRY, cmd);
405 
406     // Check first for the NULL command.
407     // Return NV_OK immediately for NVXXXX_CTRL_CMD_NULL (0x00000000)
408     // as well as the per-class NULL cmd ( _CATEGORY==0x00 and _INDEX==0x00 )
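    // (For example, NV2080_CTRL_CMD_NULL (0x20800000) has both fields zero and
    // is accepted here.)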
409     if ((cmd == NVXXXX_CTRL_CMD_NULL) ||
410         (FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _CATEGORY, 0x00, cmd) &&
411          FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _INDEX,    0x00, cmd)))
412     {
413         return NV_OK;
414     }
415 
416     NV_PRINTF(LEVEL_INFO,
417               "rmControl: hClient 0x%x hObject 0x%x cmd 0x%x\n", hClient,
418               hObject, cmd);
419 
420     NV_PRINTF(LEVEL_INFO, "rmControl: pUserParams 0x%p paramSize 0x%x\n",
421               NvP64_VALUE(pUserParams), paramsSize);
422 
    // If we're behind either the API lock or the GPU lock, treat this as an internal request.
424     bInternalRequest = pRmApi->bApiLockInternal || pRmApi->bGpuLockInternal;
425 
426     // is this a raised IRQL cmd?
427     bIsRaisedIrqlCmd = (flags & NVOS54_FLAGS_IRQL_RAISED);
428 
429     // is this a lock bypass cmd?
430     bIsLockBypassCmd = ((flags & NVOS54_FLAGS_LOCK_BYPASS) || pRmApi->bGpuLockInternal);
431 
    // NVOS54_FLAGS_IRQL_RAISED cmds may only be called at raised IRQ level.
433     if (bIsRaisedIrqlCmd)
434     {
435         // Check that we support this control call at raised IRQL
436         if (!_rmapiRmControlCanBeRaisedIrql(cmd))
437         {
438             NV_PRINTF(LEVEL_WARNING,
439                       "rmControl:  cmd 0x%x cannot be called at raised irq level\n", cmd);
440             rmStatus = NV_ERR_INVALID_ARGUMENT;
441             goto done;
442         }
443 
444         if (!osIsRaisedIRQL())
445         {
446             NV_PRINTF(LEVEL_WARNING,
447                       "rmControl:  raised cmd 0x%x at normal irq level\n", cmd);
448             rmStatus = NV_ERR_INVALID_ARGUMENT;
449             goto done;
450         }
451     }
452 
453     if (bIsLockBypassCmd)
454     {
455         flags |= NVOS54_FLAGS_LOCK_BYPASS;
456 
457         if (!bInternalRequest)
458         {
459             // Check that we support bypassing locks with this control call
460             if (!_rmapiRmControlCanBeBypassLock(cmd))
461             {
462                 NV_PRINTF(LEVEL_WARNING,
463                           "rmControl:  cmd 0x%x cannot bypass locks\n", cmd);
464                 rmStatus = NV_ERR_INVALID_ARGUMENT;
465                 goto done;
466             }
467         }
468     }
469 
470     // Potential race condition if run lockless?
471     if (serverutilGetClientUnderLock(hClient) == NULL)
472     {
473         rmStatus = NV_ERR_INVALID_CLIENT;
474         goto done;
475     }
476 
    // Only kernel clients can issue raised-IRQL or lock-bypass cmds.
    // Skip the client privilege check for internal calls done on behalf of
    // lower-privilege clients.
480     if ((bIsRaisedIrqlCmd || bIsLockBypassCmd) && !bInternalRequest)
481     {
482         if (pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL)
483         {
484             rmStatus = NV_ERR_INVALID_CLIENT;
485             goto done;
486         }
487     }
488 
489     // error check parameters
490     if (((paramsSize != 0) && (pUserParams == (NvP64) 0))   ||
491         ((paramsSize == 0) && (pUserParams != (NvP64) 0)))
492     {
493         NV_PRINTF(LEVEL_WARNING, "bad params: ptr " NvP64_fmt " size: 0x%x\n",
494                   pUserParams, paramsSize);
495         rmStatus = NV_ERR_INVALID_ARGUMENT;
496         goto done;
497     }
498 
499     // init RmCtrlParams
500     portMemSet(&rmCtrlParams, 0, sizeof(rmCtrlParams));
501     rmCtrlParams.hClient = hClient;
502     rmCtrlParams.hObject = hObject;
503     rmCtrlParams.cmd = cmd;
504     rmCtrlParams.flags = flags;
505     rmCtrlParams.pParams = NvP64_VALUE(pUserParams);
506     rmCtrlParams.paramsSize = paramsSize;
507     rmCtrlParams.hParent = NV01_NULL_OBJECT;
508     rmCtrlParams.pGpu = NULL;
509     rmCtrlParams.pResourceRef = NULL;
510     rmCtrlParams.secInfo = *pSecInfo;
511     rmCtrlParams.pLockInfo = &lockInfo;
512     rmCtrlParams.pCookie = &rmCtrlExecuteCookie;
513     rmCtrlParams.bInternal = bInternalRequest;
514 
515     if (pRmApi->bApiLockInternal)
516     {
517         lockInfo.state |= RM_LOCK_STATES_API_LOCK_ACQUIRED;
518         lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK;
519     }
520 
521     getCtrlInfoStatus = rmapiutilGetControlInfo(cmd, &ctrlFlags, &ctrlAccessRight);
522     if (getCtrlInfoStatus == NV_OK)
523     {
        //
        // The output of a CACHEABLE RMCTRL does not depend on the input.
        // Skip the param copy-in and zero the buffer so that an uninitialized
        // buffer cannot leak information to clients.
        //
529         if (ctrlFlags & RMCTRL_FLAGS_CACHEABLE)
530             rmCtrlParams.pCookie->apiCopyFlags |= RMCTRL_API_COPY_FLAGS_SKIP_COPYIN_ZERO_BUFFER;
531     }
532 
533     //
534     // Three separate rmctrl command modes:
535     //
536     //  mode#1: lock bypass rmctrl request
537     //  mode#2: raised-irql rmctrl request
538     //  mode#3: normal rmctrl request
539     //
540     if (bIsLockBypassCmd)
541     {
542         lockInfo.state |= RM_LOCK_STATES_API_LOCK_ACQUIRED;
543         lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK |
544                           RM_LOCK_FLAGS_NO_GPUS_LOCK |
545                           RM_LOCK_FLAGS_NO_CLIENT_LOCK;
546 
547         //
548         // Lock bypass rmctrl request.
549         //
550         rmStatus = serverControl(&g_resServ, &rmCtrlParams);
551     }
552     else if (bIsRaisedIrqlCmd)
553     {
554         //
555         // Raised IRQL rmctrl request.
556         //
        // Try to get the locks; if we cannot, queue the request for deferred execution.
558         //
559 
        // LOCK: try to acquire the RM semaphore, then the GPUs lock
561         if (osCondAcquireRmSema(pSys->pSema) == NV_OK)
562         {
563             if (rmGpuLocksAcquire(GPU_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_CLIENT) == NV_OK)
564             {
565                 lockInfo.state |= RM_LOCK_STATES_GPUS_LOCK_ACQUIRED;
566                 lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK |
567                                   RM_LOCK_FLAGS_NO_GPUS_LOCK |
568                                   RM_LOCK_FLAGS_NO_CLIENT_LOCK;
569                 rmStatus = serverControl(&g_resServ, &rmCtrlParams);
570 
571                 // UNLOCK: release GPUs lock
572                 rmGpuLocksRelease(GPU_LOCK_FLAGS_COND_ACQUIRE, osIsISR() ? rmCtrlParams.pGpu : NULL);
573             }
574             else
575             {
576                 rmStatus = _rmControlDeferred(&rmCtrlParams, pUserParams, paramsSize);
577             }
578             // we must have a pGpu here for queuing of a DPC.
579             NV_ASSERT(!osIsISR() || rmCtrlParams.pGpu);
580             osReleaseRmSema(pSys->pSema, osIsISR() ? rmCtrlParams.pGpu : NULL);
581         }
582         else
583         {
584             rmStatus = _rmControlDeferred(&rmCtrlParams, pUserParams, paramsSize);
585         }
586     }
587     else
588     {
589         //
590         // Normal rmctrl request.
591         //
592 
593         if (getCtrlInfoStatus == NV_OK)
594         {
595             if (rmapiControlIsCacheable(ctrlFlags, ctrlAccessRight, NV_FALSE))
596             {
597                 rmCtrlParams.pCookie->apiCopyFlags |= RMCTRL_API_COPY_FLAGS_FORCE_SKIP_COPYOUT_ON_ERROR;
598 
599                 serverControlApiCopyIn(&g_resServ, &rmCtrlParams, rmCtrlParams.pCookie);
600                 rmStatus = rmapiControlCacheGet(hClient, hObject, cmd, rmCtrlParams.pParams, paramsSize);
601                 serverControlApiCopyOut(&g_resServ, &rmCtrlParams, rmCtrlParams.pCookie, rmStatus);
602 
603                 if (rmStatus == NV_OK)
604                 {
605                     goto done;
606                 }
607                 else
608                 {
609                     // reset cookie if cache get failed
610                     portMemSet(rmCtrlParams.pCookie, 0, sizeof(RS_CONTROL_COOKIE));
611                     rmCtrlParams.pCookie->apiCopyFlags |= RMCTRL_API_COPY_FLAGS_SET_CONTROL_CACHE;
612 
                    // re-initialize the flag since the cookie was just cleared
614                     if (ctrlFlags & RMCTRL_FLAGS_CACHEABLE)
615                         rmCtrlParams.pCookie->apiCopyFlags |= RMCTRL_API_COPY_FLAGS_SKIP_COPYIN_ZERO_BUFFER;
616                 }
617             }
618         }
619 
620         RM_API_CONTEXT rmApiContext = {0};
621         rmStatus = rmapiPrologue(pRmApi, &rmApiContext);
622         if (rmStatus != NV_OK)
623             goto epilogue;
624 
625         //
626         // If this is an internal request within the same RM instance, make
627         // sure we don't double lock clients and preserve previous lock state.
628         //
629         if (bInternalRequest && resservGetTlsCallContext() != NULL)
630         {
631             rmStatus = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo);
632             if (rmStatus != NV_OK)
633                 goto epilogue;
634 
635             //
636             // rmapiInitLockInfo overwrites lockInfo.flags, re-add
637             // RM_LOCK_FLAGS_NO_API_LOCK if it was originally added.
638             //
639             if (pRmApi->bApiLockInternal)
640                 lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK;
641         }
642 
643         lockInfo.flags |= RM_LOCK_FLAGS_RM_SEMA;
644         rmStatus = serverControl(&g_resServ, &rmCtrlParams);
645 epilogue:
646         rmapiEpilogue(pRmApi, &rmApiContext);
647     }
648 done:
649 
650     RMTRACE_RMAPI(_RMCTRL_EXIT, cmd);
651     return rmStatus;
652 }
653 
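//
// Check whether a privileged control call is permitted for the current
// vGPU/hypervisor configuration: grant access if the context is already
// sufficiently privileged, or if the control is explicitly flagged as allowed
// for the detected hypervisor environment.
//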
654 static NvBool
655 serverControl_ValidateVgpu
656 (
657     OBJGPU *pGpu,
658     NvU32 cmd,
659     RS_PRIV_LEVEL privLevel,
660     const NvU32 cookieFlags
661 )
662 {
663     NvBool bPermissionGranted = NV_FALSE;
664 
665     // Check if context is already sufficiently admin privileged
666     if (cookieFlags & RMCTRL_FLAGS_PRIVILEGED)
667     {
668         if (privLevel >= RS_PRIV_LEVEL_USER_ROOT)
669         {
670             bPermissionGranted = NV_TRUE;
671         }
672     }
673 
    //
    // If the context is not privileged enough, check whether this control call
    // is allowed in the current hypervisor environment.
    //
678     if (!bPermissionGranted)
679     {
680         {
681             // For non-NV0000, identify current hypervisor environment and check for allow flag
682             if
683             (
684                 (IS_SRIOV_WITH_VGPU_GSP_ENABLED(pGpu) && (cookieFlags & RMCTRL_FLAGS_CPU_PLUGIN_FOR_VGPU_GSP))
685             )
686             {
687                 bPermissionGranted = NV_TRUE;
688             }
689         }
690     }
691 
692     return bPermissionGranted;
693 }
694 
//
// Validate the rmctrl flags and rights recorded in the execute cookie against
// the calling context: resource access rights, privilege level, hypervisor
// permissions, GPU readiness, and raised-IRQL parameter location.
//
696 NV_STATUS serverControl_ValidateCookie
697 (
698     RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams,
699     RS_CONTROL_COOKIE *pRmCtrlExecuteCookie
700 )
701 {
702     NV_STATUS status;
703     OBJGPU *pGpu;
704     CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
705 
706     if (pCallContext == NULL)
707     {
708         NV_PRINTF(LEVEL_ERROR, "Calling context is NULL!\n");
709         return NV_ERR_INVALID_PARAMETER;
710     }
711 
712     if (RMCFG_FEATURE_PLATFORM_GSP)
713     {
714         pGpu = gpumgrGetSomeGpu();
715         if (pGpu == NULL)
716         {
717             NV_PRINTF(LEVEL_ERROR, "GPU is not found\n");
718             return NV_ERR_INVALID_STATE;
719         }
720     }
721 
722     if (g_resServ.bRsAccessEnabled)
723     {
724         if (pRmCtrlParams->pResourceRef != NULL)
725         {
726             //
727             // Check that the invoking client has appropriate access rights
728             // For control calls, the invoking client is the owner of the ref
729             //
730             status = rsAccessCheckRights(pRmCtrlParams->pResourceRef,
731                                          pRmCtrlParams->pResourceRef->pClient,
732                                          &pRmCtrlExecuteCookie->rightsRequired);
733             if (status != NV_OK)
734                 return status;
735         }
736         else
737         {
738             // pResourceRef can be NULL when rmControlCmdExecute is manually
739             // invoked from the deferred API path (see class5080DeferredApiV2).
740             // For now, we skip performing any access right checks in this case.
741         }
742     }
743     else
744     {
745         //
746         // When access rights are disabled, any control calls that have the
747         // *_IF_RS_ACCESS_DISABLED flags should be treated as if they were declared
748         // with the corresponding flags
749         //
750         if ((pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_PRIVILEGED_IF_RS_ACCESS_DISABLED) != 0)
751         {
752             pRmCtrlExecuteCookie->ctrlFlags |= RMCTRL_FLAGS_PRIVILEGED;
753         }
754     }
755 
756     if ((pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_INTERNAL))
757     {
758         NvBool bInternalCall = pRmCtrlParams->bInternal;
759         if (!bInternalCall)
760             return NV_ERR_NOT_SUPPORTED;
761     }
762 
    //
    // Narrow the use case down as much as possible to the CPU plugin:
    // must be running in a hypervisor, at least cached-admin privileged, not a
    // kernel context, and accessing a privileged or kernel-privileged control call.
    //
768     if (hypervisorIsVgxHyper() &&
769         clientIsAdmin(pCallContext->pClient, clientGetCachedPrivilege(pCallContext->pClient)) &&
770         (pRmCtrlParams->secInfo.privLevel != RS_PRIV_LEVEL_KERNEL) &&
771         !(pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_NON_PRIVILEGED))
772     {
773         // VGPU CPU-Plugin (Legacy Non-SRIOV, SRIOV-HYPERV, SRIOV-LEGACY, SRIOV-Offload), and Admin or kernel clients running in hypervisor
774         NvBool bPermissionGranted = serverControl_ValidateVgpu(pRmCtrlParams->pGpu,
775                                                                pRmCtrlParams->cmd,
776                                                                pRmCtrlParams->secInfo.privLevel,
777                                                                pRmCtrlExecuteCookie->ctrlFlags);
778         if (!bPermissionGranted)
779         {
780             NV_PRINTF(LEVEL_WARNING,
781                       "hClient: 0x%08x, hObject 0x%08x, cmd 0x%08x: non-privileged hypervisor context issued privileged cmd\n",
782                       pRmCtrlParams->hClient, pRmCtrlParams->hObject,
783                       pRmCtrlParams->cmd);
784             return NV_ERR_INSUFFICIENT_PERMISSIONS;
785         }
786     }
787     else
788     {
789         //
790         // Non-Hypervisor clients
791         // PF clients
792         // Unprivileged processes running in Hypervisor
793         // Privileged processes running in Hypervisor, executing an unprivileged control call.
794         // Kernel privileged processes running in Hypervisor
795         //
796 
797         // permissions check for PRIVILEGED controls
798         if (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_PRIVILEGED)
799         {
800             //
801             // Calls originating from usermode require admin perms while calls
802             // originating from other kernel drivers are always allowed.
803             //
804             if (pRmCtrlParams->secInfo.privLevel < RS_PRIV_LEVEL_USER_ROOT)
805             {
806                 NV_PRINTF(LEVEL_WARNING,
807                           "hClient: 0x%08x, hObject 0x%08x, cmd 0x%08x: non-privileged context issued privileged cmd\n",
808                           pRmCtrlParams->hClient, pRmCtrlParams->hObject,
809                           pRmCtrlParams->cmd);
810                 return NV_ERR_INSUFFICIENT_PERMISSIONS;
811             }
812         }
813 
814         // permissions check for KERNEL_PRIVILEGED (default) unless NON_PRIVILEGED, PRIVILEGED or INTERNAL is specified
815         if (!(pRmCtrlExecuteCookie->ctrlFlags & (RMCTRL_FLAGS_NON_PRIVILEGED | RMCTRL_FLAGS_PRIVILEGED | RMCTRL_FLAGS_INTERNAL)))
816         {
817             if (pRmCtrlParams->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL)
818             {
819                 NV_PRINTF(LEVEL_WARNING,
820                           "hClient: 0x%08x, hObject 0x%08x, cmd 0x%08x: non-kernel client issued kernel-only cmd\n",
821                           pRmCtrlParams->hClient, pRmCtrlParams->hObject,
822                           pRmCtrlParams->cmd);
823                 return NV_ERR_INSUFFICIENT_PERMISSIONS;
824             }
825         }
826     }
827 
828     // fail if GPU isn't ready
829     if ((!(pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_NO_GPUS_ACCESS)) && (pRmCtrlParams->pGpu != NULL))
830     {
831         API_GPU_FULL_POWER_SANITY_CHECK(pRmCtrlParams->pGpu, NV_FALSE,
832             pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_ALLOW_WITHOUT_SYSMEM_ACCESS);
833 
        if (!API_GPU_ATTACHED_SANITY_CHECK(pRmCtrlParams->pGpu))
835             return NV_ERR_GPU_IS_LOST;
836     }
837 
838     if ((pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) &&
839         (pRmCtrlParams->secInfo.paramLocation != PARAM_LOCATION_KERNEL))
840     {
841         return NV_ERR_INVALID_PARAMETER;
842     }
843 
844     return NV_OK;
845 }
846 
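//
// Determine which locks, and which access mode (READ or WRITE), a control call
// needs at a given resserv lock level: RS_LOCK_TOP selects the API lock access
// mode, while RS_LOCK_RESOURCE selects between the all-GPUs lock, the per-device
// GPU group lock, or no GPU lock at all.
//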
847 NV_STATUS
848 serverControlLookupLockFlags
849 (
850     RsServer *pServer,
851     RS_LOCK_ENUM lock,
852     RmCtrlParams *pRmCtrlParams,
853     RmCtrlExecuteCookie *pRmCtrlExecuteCookie,
854     LOCK_ACCESS_TYPE *pAccess
855 )
856 {
    //
    // Calls with RS_LOCK_TOP don't fill in the cookie param correctly.
    // This is just a WAR for that.
    //
861     NvU32 controlFlags = pRmCtrlExecuteCookie->ctrlFlags;
862     if (controlFlags == 0 && !RMCFG_FEATURE_PLATFORM_GSP)
863     {
864         NV_STATUS status = rmapiutilGetControlInfo(pRmCtrlParams->cmd, &controlFlags, NULL);
865         if (status != NV_OK)
866         {
867             NV_PRINTF(LEVEL_INFO,
868                       "rmapiutilGetControlInfo(cmd=0x%x, out flags=0x%x, NULL) = status=0x%x\n",
869                       pRmCtrlParams->cmd, controlFlags, status);
870         }
871     }
872 
873     NvBool areAllGpusInOffloadMode = gpumgrAreAllGpusInOffloadMode();
874 
875     //
876     // If the control is ROUTE_TO_PHYSICAL, and we're in GSP offload mode,
877     // we can use a more relaxed locking mode:
878     //    1. Only lock the single device and not all GPUs
879     //    2. Take the API lock for READ instead of WRITE.
880     // Unfortunately, at this point we don't have the pGpu yet to check if it
881     // is in offload mode or not. So, instead, these optimizations are only
882     // done if *all* GPUs in the system are in offload mode.
883     //
884     NvBool bUseGspLockingMode = areAllGpusInOffloadMode &&
885                                 (controlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL);
886 
887     if (pAccess == NULL)
888         return NV_ERR_INVALID_ARGUMENT;
889 
890     *pAccess = LOCK_ACCESS_WRITE;
891 
892     if (lock == RS_LOCK_TOP)
893     {
894         if (!serverSupportsReadOnlyLock(&g_resServ, RS_LOCK_TOP, RS_API_CTRL))
895         {
896             *pAccess = LOCK_ACCESS_WRITE;
897             return NV_OK;
898         }
899 
900         if (controlFlags & RMCTRL_FLAGS_API_LOCK_READONLY)
901         {
902             *pAccess = LOCK_ACCESS_READ;
903         }
904 
        //
        // ROUTE_TO_PHYSICAL controls take the API lock for READ when route-to-physical
        // lock bypass is enabled. This only applies to GSP clients: only there can we
        // guarantee per-GPU execution of commands.
        //
909         if (g_resServ.bRouteToPhysicalLockBypass && bUseGspLockingMode)
910         {
911             *pAccess = LOCK_ACCESS_READ;
912         }
913 
914         return NV_OK;
915     }
916 
917     if (lock == RS_LOCK_RESOURCE)
918     {
919         RS_LOCK_INFO *pLockInfo = pRmCtrlParams->pLockInfo;
920 
        //
        // Do not acquire the GPUs lock if we were explicitly told not to, or if
        // this is an internal call, meaning we already own the GPUs lock.
        //
926         if  ((pLockInfo->state & RM_LOCK_STATES_GPUS_LOCK_ACQUIRED) ||
927              (controlFlags & RMCTRL_FLAGS_NO_GPUS_LOCK) ||
928              (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) ||
929              (pRmCtrlParams->flags & NVOS54_FLAGS_LOCK_BYPASS))
930         {
931             pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK;
932             pLockInfo->flags &= ~RM_LOCK_FLAGS_GPU_GROUP_LOCK;
933         }
934         else
935         {
936             if ((controlFlags & RMCTRL_FLAGS_GPU_LOCK_DEVICE_ONLY) ||
937                 (g_resServ.bRouteToPhysicalLockBypass && bUseGspLockingMode))
938             {
939                 pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK;
940                 pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK;
941             }
942             else
943             {
944                 pLockInfo->flags &= ~RM_LOCK_FLAGS_NO_GPUS_LOCK;
945                 pLockInfo->flags &= ~RM_LOCK_FLAGS_GPU_GROUP_LOCK;
946             }
947 
948             if (controlFlags & RMCTRL_FLAGS_GPU_LOCK_READONLY)
949                 *pAccess = LOCK_ACCESS_READ;
950         }
951 
952         return NV_OK;
953     }
954 
955     return NV_ERR_NOT_SUPPORTED;
956 }
957 
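//
// Callers normally reach this through the RM_API object rather than by calling
// it directly. A minimal sketch (handles and command chosen for illustration;
// hSubdevice is assumed to be a valid subdevice handle owned by hClient):
//
//     NV2080_CTRL_TIMER_GET_TIME_PARAMS params = {0};
//     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
//
//     NV_STATUS status = pRmApi->Control(pRmApi, hClient, hSubdevice,
//                                        NV2080_CTRL_CMD_TIMER_GET_TIME,
//                                        &params, sizeof(params));
//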
958 NV_STATUS
959 rmapiControl
960 (
961     RM_API   *pRmApi,
962     NvHandle  hClient,
963     NvHandle  hObject,
964     NvU32     cmd,
965     void     *pParams,
966     NvU32     paramsSize
967 )
968 {
969     if (!pRmApi->bHasDefaultSecInfo)
970         return NV_ERR_NOT_SUPPORTED;
971 
972     return pRmApi->ControlWithSecInfo(pRmApi, hClient, hObject, cmd, NV_PTR_TO_NvP64(pParams),
973                                       paramsSize, 0, &pRmApi->defaultSecInfo);
974 }
975 
976 NV_STATUS
977 rmapiControlWithSecInfo
978 (
979     RM_API            *pRmApi,
980     NvHandle           hClient,
981     NvHandle           hObject,
982     NvU32              cmd,
983     NvP64              pParams,
984     NvU32              paramsSize,
985     NvU32              flags,
986     API_SECURITY_INFO *pSecInfo
987 )
988 {
989     NV_STATUS status;
990 
991     NV_PRINTF(LEVEL_INFO,
992               "Nv04Control: hClient:0x%x hObject:0x%x cmd:0x%x params:" NvP64_fmt " paramSize:0x%x flags:0x%x\n",
993               hClient, hObject, cmd, pParams, paramsSize, flags);
994 
995     status = _rmapiRmControl(hClient, hObject, cmd, pParams, paramsSize, flags, pRmApi, pSecInfo);
996 
997     if (status == NV_OK)
998     {
999         NV_PRINTF(LEVEL_INFO, "Nv04Control: control complete\n");
1000     }
1001     else
1002     {
1003         NV_PRINTF(LEVEL_INFO,
1004                   "Nv04Control: control failed; status: %s (0x%08x)\n",
1005                   nvstatusToString(status), status);
1006         NV_PRINTF(LEVEL_INFO,
1007                   "Nv04Control:  hClient:0x%x hObject:0x%x cmd:0x%x params:" NvP64_fmt " paramSize:0x%x flags:0x%x\n",
1008                   hClient, hObject, cmd, pParams, paramsSize, flags);
1009     }
1010 
1011     return status;
1012 }
1013 
1014 
//
// Called at DIRQL, where we can't do memory allocations.
// Do not inline this function, in order to save stack space.
//
1019 static NV_NOINLINE NV_STATUS
1020 _rmapiControlWithSecInfoTlsIRQL
1021 (
1022     RM_API* pRmApi,
1023     NvHandle           hClient,
1024     NvHandle           hObject,
1025     NvU32              cmd,
1026     NvP64              pParams,
1027     NvU32              paramsSize,
1028     NvU32              flags,
1029     API_SECURITY_INFO* pSecInfo
1030 )
1031 {
1032     NV_STATUS           status;
1033     THREAD_STATE_NODE   threadState;
1034 
1035     NvU8                stackAllocator[2*TLS_ISR_ALLOCATOR_SIZE];
1036     PORT_MEM_ALLOCATOR* pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator));
1037     tlsIsrInit(pIsrAllocator);
1038 
1039     //
1040     // SMP synchronization for Nv04Control is handled lower in the
1041     // call sequence to accommodate the various operation-specific
1042     // lock requirements (e.g. some operations can run locklessly).
1043     //
1044     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1045 
1046     status = rmapiControlWithSecInfo(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo);
1047 
1048     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1049 
1050     tlsIsrDestroy(pIsrAllocator);
1051     portMemAllocatorRelease(pIsrAllocator);
1052 
1053     return status;
1054 }
1055 
1056 
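//
// TLS/thread-state wrapper around rmapiControlWithSecInfo. If non-paged memory
// allocation is not safe at the current IRQL, fall back to the variant that
// builds its TLS allocator on the stack.
//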
1057 NV_STATUS
1058 rmapiControlWithSecInfoTls
1059 (
1060     RM_API            *pRmApi,
1061     NvHandle           hClient,
1062     NvHandle           hObject,
1063     NvU32              cmd,
1064     NvP64              pParams,
1065     NvU32              paramsSize,
1066     NvU32              flags,
1067     API_SECURITY_INFO *pSecInfo
1068 )
1069 {
1070     NV_STATUS           status;
1071     THREAD_STATE_NODE   threadState;
1072 
1073     if (!portMemExSafeForNonPagedAlloc())
1074     {
1075         return _rmapiControlWithSecInfoTlsIRQL(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo);
1076     }
1077 
1078     //
1079     // SMP synchronization for Nv04Control is handled lower in the
1080     // call sequence to accommodate the various operation-specific
1081     // lock requirements (e.g. some operations can run locklessly).
1082     //
1083     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1084 
1085     status = rmapiControlWithSecInfo(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo);
1086 
1087     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1088 
1089     return status;
1090 }
1091 
1092