/*
 * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*!
 * @file
 * @brief This module contains the GPU control interfaces for the
 *        device (NV01_DEVICE_0) class. Device-level control calls
 *        are broadcast to all GPUs within the device.
 */

#include "gpu/device/device.h"
#include "gpu/subdevice/subdevice.h"
#include "core/system.h"
#include "core/locks.h"
#include "gpu/gpu.h"
#include "gpu_mgr/gpu_mgr.h"
#include "kernel/gpu/rc/kernel_rc.h"
#include "virtualization/hypervisor/hypervisor.h"


//
// This rmctrl MUST NOT touch hw since it's tagged as NO_GPUS_ACCESS in ctrl0080.def.
// RM allows this type of rmctrl to go through when the GPU is not available.
//
// Lock Requirements:
//      Assert that the API lock is held on entry
//
NV_STATUS
deviceCtrlCmdGpuGetClasslist_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner());

    return gpuGetClassList(pGpu, &pClassListParams->numClasses,
                           NvP64_VALUE(pClassListParams->classList), ENG_INVALID);
}

//
// This rmctrl MUST NOT touch hw since it's tagged with the NO_GPUS_ACCESS flag in device.h.
// RM allows this type of rmctrl to go through when the GPU is not available.
//
// Lock Requirements:
//      Assert that the API lock is held on entry
//
NV_STATUS
deviceCtrlCmdGpuGetClasslistV2_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pClassListParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner());

    pClassListParams->numClasses = NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE;

    return gpuGetClassList(pGpu, &pClassListParams->numClasses,
                           pClassListParams->classList, ENG_INVALID);
}
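
//
// Hedged usage sketch (comment only, not built as part of this file): a
// kernel-side RM client could reach this handler through the RM control
// path roughly as below. The hClient/hDevice handles and the RMAPI
// interface choice are illustrative placeholders; the command ID and the
// params layout are assumed to come from ctrl0080gpu.h.
//
//     NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS params = { 0 };
//     RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL);
//     NV_STATUS st = pRmApi->Control(pRmApi, hClient, hDevice,
//                                    NV0080_CTRL_CMD_GPU_GET_CLASSLIST_V2,
//                                    &params, sizeof(params));
//     // On NV_OK, the first params.numClasses entries of params.classList
//     // hold the exported class IDs.
//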

//
// Lock Requirements:
//      Assert that the API lock is held on entry
//
NV_STATUS
deviceCtrlCmdGpuGetNumSubdevices_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams
)
{
    pSubDeviceCountParams->numSubDevices = 1;

    return NV_OK;
}

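/*!
 * @brief Enable or disable SW state persistence for every GPU in this device.
 *
 * Walks all GPUs in the device's gpuMask, updates the GPU manager's
 * persistentSwStateGpuMask and each GPU's PDB_PROP_GPU_PERSISTENT_SW_STATE
 * property, and lets the OS layer set or clear its persistence flags.
 *
 * @returns NV_OK                   Success
 *          NV_ERR_INVALID_ARGUMENT newState is neither ENABLED nor DISABLED
 */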
NV_STATUS
deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams
)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
    NvU32 gpuMask, index;
    NvBool bEnable;
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);
    OBJGPU *pTmpGpu;

    if (NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED ==
            pParams->newState)
    {
        bEnable = NV_TRUE;
    }
    else if (NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED ==
            pParams->newState)
    {
        bEnable = NV_FALSE;
    }
    else
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    // Get the gpuMask for the device that pGpu belongs to
    gpuMask = gpumgrGetGpuMask(pGpu);

    index = 0;
    while ((pTmpGpu = gpumgrGetNextGpu(gpuMask, &index)) != NULL)
    {
        if (bEnable)
        {
            pGpuMgr->persistentSwStateGpuMask |= NVBIT(pTmpGpu->gpuInstance);
            pTmpGpu->setProperty(pTmpGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE,
                    NV_TRUE);
        }
        else
        {
            pGpuMgr->persistentSwStateGpuMask &= ~NVBIT(pTmpGpu->gpuInstance);
            pTmpGpu->setProperty(pTmpGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE,
                    NV_FALSE);
        }

        // Set/Clear OS-specific persistence flags
        osModifyGpuSwStatePersistence(pTmpGpu->pOsGpuInfo, bEnable);
    }

    return NV_OK;
}

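/*!
 * @brief Report whether SW state persistence is currently enabled for the
 *        GPU, based on the PDB_PROP_GPU_PERSISTENT_SW_STATE property.
 *
 * @returns NV_OK
 */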
NV_STATUS
deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);

    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE))
    {
        pParams->swStatePersistence =
            NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED;
    }
    else
    {
        pParams->swStatePersistence =
            NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED;
    }

    return NV_OK;
}

/*!
 * @brief   This command is used to get the virtualization mode of the GPU.
 *          The GPU can be in NMOS/VGX/host-vGPU/host-vSGA mode.
 *
 * @return  Returns NV_STATUS
 *          NV_OK                     If the GPU is present.
 *          NV_ERR_INVALID_ARGUMENT   If the GPU is not present.
 */
NV_STATUS
deviceCtrlCmdGpuGetVirtualizationMode_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);

    if (pGpu == NULL)
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    if (IS_VIRTUAL(pGpu))
    {
        pParams->virtualizationMode =
            NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX;
    }
    else if (IS_PASSTHRU(pGpu))
    {
        pParams->virtualizationMode =
            NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS;
    }
    else if (hypervisorIsVgxHyper() && gpuIsSriovEnabled(pGpu))
    {
        if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU))
        {
            pParams->virtualizationMode =
                NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VGPU;
        }
        else if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA))
        {
            pParams->virtualizationMode =
                NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VSGA;
        }
        else
        {
            NV_PRINTF(LEVEL_ERROR,
                      "invalid virtualization Mode: %x. Returning NONE!\n",
                      pParams->virtualizationMode);

            pParams->virtualizationMode =
                NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE;
        }
    }
    else
    {
        pParams->virtualizationMode =
            NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE;
    }

    NV_PRINTF(LEVEL_INFO, "Virtualization Mode: %x\n",
              pParams->virtualizationMode);

    return NV_OK;
}

/*!
 * @brief   This command is used to get the GPU's SR-IOV capabilities.
 *
 * @return NV_OK
 */
NV_STATUS
deviceCtrlCmdGpuGetSriovCaps_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS *pParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    return gpuGetSriovCaps_HAL(pGpu, pParams);
}

/*!
 * @brief   This command is used to find a subdevice handle by subdevice instance.
 */
NV_STATUS
deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams
)
{
    NV_STATUS       status;
    Subdevice      *pSubdevice;

    status = subdeviceGetByInstance(RES_GET_CLIENT(pDevice),
                                    RES_GET_HANDLE(pDevice),
                                    pParams->subDeviceInst,
                                    &pSubdevice);

    if (status == NV_OK)
    {
        pParams->hSubDevice = RES_GET_HANDLE(pSubdevice);
    }

    return status;
}
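
//
// Hedged usage sketch (comment only, not built as part of this file): a
// caller holding a valid device handle could look up the subdevice handle
// for instance 0 via this control. pRmApi/hClient/hDevice are hypothetical
// placeholders; the command ID is assumed to be
// NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE from ctrl0080gpu.h.
//
//     NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM params = { 0 };
//     params.subDeviceInst = 0;
//     NV_STATUS st = pRmApi->Control(pRmApi, hClient, hDevice,
//                                    NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE,
//                                    &params, sizeof(params));
//     // On NV_OK, params.hSubDevice holds the subdevice's handle.
//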

/*!
 * @brief Get the GPU's sparse texture compute mode setting information.
 *
 * This setting indicates how the RM should set the large page size for the
 * GPU, based on which use case it should optimize for.
 *
 * @param[in, out] pModeParams      Pointer to struct of user params.
 *                 defaultSetting:  The default use case to optimize for on this
 *                                  GPU.
 *                 currentSetting:  The use case that the large page size was
 *                                  optimized for on this GPU, on the last driver
 *                                  load.
 *                 pendingSetting:  The use case that the large page size will
 *                                  be optimized for on this GPU, on the next
 *                                  driver reload.
 *
 * @returns NV_STATUS
 *          NV_OK                   Success
 */
NV_STATUS
deviceCtrlCmdGpuGetSparseTextureComputeMode_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS *pModeParams
)
{
    NV_STATUS status;
    OBJGPU   *pGpu = GPU_RES_GET_GPU(pDevice);

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner());

    status = gpuGetSparseTextureComputeMode(pGpu,
                                           &pModeParams->defaultSetting,
                                           &pModeParams->currentSetting,
                                           &pModeParams->pendingSetting);

    return status;
}

/*!
 * @brief Set the GPU's sparse texture compute mode setting to apply on the
 *        next driver load.
 *
 * This setting indicates how the RM should set the large page size for the
 * GPU, based on which use case it should optimize for.
 *
 * @param[in, out] pModeParams      Pointer to struct of user params.
 *                 setting:         The use case that the large page size should
 *                                  be optimized for on this GPU, on the next
 *                                  driver reload.
 *
 * @returns NV_STATUS
 *          NV_ERR_INVALID_ARGUMENT The specified setting is invalid
 *          NV_OK                   Success
 */
NV_STATUS
deviceCtrlCmdGpuSetSparseTextureComputeMode_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS *pModeParams
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
    OBJGPU   *pGpu = GPU_RES_GET_GPU(pDevice);

    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner());

    //
    // In SLI, both GPUs will have the same setting for sparse texture/compute
    // mode. Individual toggling is not allowed.
    //
    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
    {
        status = gpuSetSparseTextureComputeMode(pGpu, pModeParams->setting);
    }
    SLI_LOOP_END

    return status;
}

/*!
 * @brief Get the GPU's VGX capability depending upon the state of the VGX hardware fuse.
 *
 * @returns NV_STATUS
 *          NV_OK                   Success
 */
NV_STATUS
deviceCtrlCmdGpuGetVgxCaps_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS *pParams
)
{
    pParams->isVgx = NV_FALSE;

    return NV_OK;
}

/*
 * @brief Request per-VF BAR1 resizing and, subsequently, the number
 *        of VFs that can be created. The request takes a per-VF
 *        BAR1 size in MB and calculates the number of possible VFs.
 *
 * @param[in] pParams  NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS
 *                     pointer detailing the per-VF BAR1 size and
 *                     number of VFs
 */

NV_STATUS
deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL
(
    Device *pDevice,
    NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);
    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    return gpuSetVFBarSizes_HAL(pGpu, pParams);
}