/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "core/core.h"
#include "objtmr.h"
#include "os/os.h"
#include "gpu/gpu.h"
#include "class/cl0080.h"
#include "vgpu/vgpu_util.h"

/*!
 * Generate GID data for vGPU.
 * For SHA-1, we return the UUID cached in the vGPU object.
 * SHA-256 is not supported.
 *
 * @param  [in]  pGpu      OBJGPU pointer
 * @param  [out] pGidData  data array into which the GID should be written
 * @param  [in]  gidSize   size of the data array
 * @param  [in]  gidFlags  selects either the SHA-1 or SHA-256 GID
 *
 * @return NV_OK if the SHA-1 GID is requested, NV_ERR_NOT_SUPPORTED otherwise
 */
NV_STATUS
gpuGenGidData_VGPUSTUB
(
    OBJGPU *pGpu,
    NvU8   *pGidData,
    NvU32   gidSize,
    NvU32   gidFlags
)
{
    VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);

    if (pVSI != NULL && FLD_TEST_DRF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1, gidFlags))
    {
        portMemCopy(pGidData, RM_SHA1_GID_SIZE, pVSI->gidInfo.data, RM_SHA1_GID_SIZE);
        return NV_OK;
    }
    return NV_ERR_NOT_SUPPORTED;
}

/*!
 * @brief       Returns the FBIO floorsweeping mask
 *
 * @param[in]   pGpu            OBJGPU pointer
 * @returns     FBIO floorsweeping mask; a set bit indicates an active FBIO
 *
 */
NvU32 gpuGetActiveFBIOs_VGPUSTUB
(
    OBJGPU *pGpu
)
{
    // Cache is not valid.
    if (pGpu->activeFBIOs == 0)
    {
        VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);
        if (pVSI != NULL)
            pGpu->activeFBIOs = pVSI->fbioMask;
    }
    // Return the cached map of available FBIOs
    return pGpu->activeFBIOs;
}

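/*!
 * @brief Allocates the default client share for the vGPU: an internal RM
 *        client (NV01_ROOT) with a device (NV01_DEVICE_0) and subdevice
 *        (NV20_SUBDEVICE_0) under it, caching the resulting handles on the
 *        OBJGPU.
 *
 * @param[in]   pGpu    OBJGPU pointer
 *
 * @return NV_OK on success; on failure the client is freed, the cached
 *         handle is cleared, and the failing allocation status is returned.
 */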
NV_STATUS
gpuCreateDefaultClientShare_VGPUSTUB
(
    OBJGPU *pGpu
)
{
    NvHandle                hClient    = NV01_NULL_OBJECT;
    NvHandle                hDevice    = NV01_NULL_OBJECT;
    NvHandle                hSubDevice = NV01_NULL_OBJECT;
    NV_STATUS               status;
    NV0080_ALLOC_PARAMETERS deviceAllocParams;
    NV2080_ALLOC_PARAMETERS subdeviceAllocParams;
    NvU32                   deviceInstance;
    RM_API                 *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);

    NV_ASSERT_OK_OR_RETURN(pRmApi->AllocWithHandle(pRmApi,
                                                   hClient,
                                                   hClient,
                                                   hClient,
                                                   NV01_ROOT,
                                                   &hClient,
                                                   sizeof(hClient)));

    pGpu->hDefaultClientShare = hClient;

    // Which device are we?
    deviceInstance = gpuGetDeviceInstance(pGpu);

    portMemSet(&deviceAllocParams, 0, sizeof(NV0080_ALLOC_PARAMETERS));
    deviceAllocParams.deviceId = deviceInstance;
    deviceAllocParams.hClientShare = pGpu->hDefaultClientShare;

    // Add a device.
    NV_ASSERT_OK_OR_GOTO(status, pRmApi->Alloc(pRmApi,
                                               hClient,
                                               hClient,
                                               &hDevice,
                                               NV01_DEVICE_0,
                                               &deviceAllocParams,
                                               sizeof(deviceAllocParams)), failed);

    pGpu->hDefaultClientShareDevice = hDevice;

    portMemSet(&subdeviceAllocParams, 0, sizeof(NV2080_ALLOC_PARAMETERS));
    subdeviceAllocParams.subDeviceId = 0;

    NV_ASSERT_OK_OR_GOTO(status, pRmApi->Alloc(pRmApi,
                                               hClient,
                                               hDevice,
                                               &hSubDevice,
                                               NV20_SUBDEVICE_0,
                                               &subdeviceAllocParams,
                                               sizeof(subdeviceAllocParams)), failed);

    pGpu->hDefaultClientShareSubDevice = hSubDevice;

    return NV_OK;

failed:
    pRmApi->Free(pRmApi, hClient, hClient);
    pGpu->hDefaultClientShare = NV01_NULL_OBJECT;

    return status;
}

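/*!
 * @brief Frees the default client share allocated by
 *        gpuCreateDefaultClientShare_VGPUSTUB(); freeing the client also
 *        frees the device and subdevice allocated under it.
 *
 * @param[in]   pGpu    OBJGPU pointer
 */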
void
gpuDestroyDefaultClientShare_VGPUSTUB
(
    OBJGPU *pGpu
)
{
    RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);

    if (pGpu->hDefaultClientShare != NV01_NULL_OBJECT)
    {
        // Frees everything allocated under this client.
        pRmApi->Free(pRmApi, pGpu->hDefaultClientShare,
                     pGpu->hDefaultClientShare);
    }
}

/*!
 *  Determines whether the board/GPU supports page retirement, i.e. removal of
 *  blacklisted pages from the FB heap. For a vGPU guest, it is supported only
 *  for vGPU types that have ECC enabled.
 *
 * @param[in]   pGpu        GPU object pointer
 *
 * @return NV_TRUE
 *      If page retirement is supported.
 * @return NV_FALSE
 *      If page retirement is not supported.
 */

NvBool
gpuCheckPageRetirementSupport_VGPUSTUB
(
    OBJGPU *pGpu
)
{
    NV_ASSERT_OR_RETURN(pGpu != NULL, NV_FALSE);

    OBJVGPU *pVGpu = GPU_GET_VGPU(pGpu);

    return pVGpu->page_retirement_enabled;
}

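/*!
 * @brief Initializes SR-IOV related state for the vGPU guest. When running
 *        with SR-IOV, picks up the server/client RM split-VAS management
 *        setting from the vGPU static info.
 *
 * @param[in]   pGpu    OBJGPU pointer
 *
 * @return NV_OK
 */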
NV_STATUS
gpuInitSriov_VGPUSTUB
(
    OBJGPU *pGpu
)
{
    if (IS_VIRTUAL_WITH_SRIOV(pGpu))
    {
        VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);
        pGpu->bSplitVasManagementServerClientRm = pVSI->bSplitVasBetweenServerClientRm;
    }

    return NV_OK;
}

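/*!
 * @brief Returns the GPU adapter name string cached in the vGPU static info,
 *        either as ASCII or Unicode depending on the requested type.
 *
 * @param[in]   pGpu                OBJGPU pointer
 * @param[in]   type                NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_*
 * @param[out]  nameStringBuffer    buffer into which the name is copied
 *
 * @return NV_OK on success, NV_ERR_INVALID_STATE if static info is unavailable
 */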
NV_STATUS
gpuGetNameString_VGPUSTUB
(
    OBJGPU *pGpu,
    NvU32 type,
    void *nameStringBuffer
)
{
    VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);

    NV_ASSERT_OR_RETURN(pVSI != NULL, NV_ERR_INVALID_STATE);

    if (type == NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII)
    {
        portMemCopy(nameStringBuffer, sizeof(pVSI->adapterName),
                    pVSI->adapterName, sizeof(pVSI->adapterName));
    }
    else
    {
        portMemCopy(nameStringBuffer, sizeof(pVSI->adapterName_Unicode),
                    pVSI->adapterName_Unicode, sizeof(pVSI->adapterName_Unicode));
    }

    return NV_OK;
}

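/*!
 * @brief Returns the short GPU name string cached in the vGPU static info.
 *
 * @param[in]   pGpu                OBJGPU pointer
 * @param[out]  nameStringBuffer    buffer into which the name is copied
 *
 * @return NV_OK on success, NV_ERR_INVALID_STATE if static info is unavailable
 */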
NV_STATUS
gpuGetShortNameString_VGPUSTUB
(
    OBJGPU *pGpu,
    NvU8 *nameStringBuffer
)
{
    VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);

    NV_ASSERT_OR_RETURN(pVSI != NULL, NV_ERR_INVALID_STATE);

    portMemCopy(nameStringBuffer, sizeof(pVSI->shortGpuNameString),
                pVSI->shortGpuNameString, sizeof(pVSI->shortGpuNameString));

    return NV_OK;
}

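/*!
 * @brief Reports whether the global poison fuse is enabled, as cached in the
 *        vGPU static info.
 *
 * @param[in]   pGpu    OBJGPU pointer
 *
 * @return NV_TRUE if the poison fuse is enabled, NV_FALSE otherwise
 */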
NvBool
gpuIsGlobalPoisonFuseEnabled_VGPUSTUB
(
    OBJGPU *pGpu
)
{
    VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);
    return pVSI->poisonFuseEnabled;
}

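/*!
 * @brief Builds the GPU device info table from the copy provided in the vGPU
 *        static info. A no-op if the table is already initialized or the
 *        static info table is empty.
 *
 * @param[in]   pGpu    OBJGPU pointer
 *
 * @return NV_OK on success, NV_ERR_NO_MEMORY or NV_ERR_INVALID_STATE on failure
 */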
NV_STATUS
gpuConstructDeviceInfoTable_VGPUSTUB
(
    OBJGPU *pGpu
)
{
    VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);

    NV_ASSERT_OR_RETURN(pVSI != NULL, NV_ERR_INVALID_STATE);

    if (pGpu->pDeviceInfoTable) // already initialized
        return NV_OK;

    if (pVSI->deviceInfoTable.numEntries == 0)
        return NV_OK;

    NV_ASSERT_OR_RETURN(pVSI->deviceInfoTable.numEntries <= NV2080_CTRL_CMD_INTERNAL_DEVICE_INFO_MAX_ENTRIES,
                        NV_ERR_INVALID_STATE);

    pGpu->pDeviceInfoTable = portMemAllocNonPaged(
        pVSI->deviceInfoTable.numEntries * (sizeof *pGpu->pDeviceInfoTable));

    NV_ASSERT_OR_RETURN(pGpu->pDeviceInfoTable != NULL, NV_ERR_NO_MEMORY);

    pGpu->numDeviceInfoEntries = pVSI->deviceInfoTable.numEntries;
    portMemCopy(pGpu->pDeviceInfoTable,
                pGpu->numDeviceInfoEntries * (sizeof *pGpu->pDeviceInfoTable),
                pVSI->deviceInfoTable.deviceInfoTable,
                pVSI->deviceInfoTable.numEntries * (sizeof pVSI->deviceInfoTable.deviceInfoTable[0]));

    return NV_OK;
}

/*!
 * @brief Initialize GPU branding properties
 */
NV_STATUS gpuInitBranding_VGPUSTUB(OBJGPU *pGpu)
{
    OBJVGPU *pVGpu = GPU_GET_VGPU(pGpu);

    NV_ASSERT_OR_RETURN(pVGpu != NULL, NV_ERR_INVALID_STATE);

    pGpu->bIsQuadro    = (pVGpu->vgpuConfigUsmType == NV_VGPU_CONFIG_USM_TYPE_QUADRO);
    pGpu->bIsQuadroAD  = NV_FALSE;
    pGpu->bIsNvidiaNvs = (pVGpu->vgpuConfigUsmType == NV_VGPU_CONFIG_USM_TYPE_NVS);
    pGpu->bIsVgx       = NV_TRUE;
    pGpu->bGeforceSmb  = NV_FALSE;
    pGpu->bIsTitan     = NV_FALSE;
    pGpu->bIsTesla     = (pVGpu->vgpuConfigUsmType == NV_VGPU_CONFIG_USM_TYPE_COMPUTE);
    pGpu->bIsGeforce   = !(pGpu->bIsQuadro || pGpu->bIsTesla || pGpu->bIsNvidiaNvs);

    return NV_OK;
}

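/*!
 * @brief Copies the VBIOS SKU information cached in the vGPU static info into
 *        the caller-provided parameter structure.
 *
 * @param[in]   pGpu        OBJGPU pointer
 * @param[out]  pParams     NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS to fill
 *
 * @return NV_OK on success, NV_ERR_INVALID_STATE if static info is unavailable
 */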
NV_STATUS
gpuGetSkuInfo_VGPUSTUB
(
    OBJGPU *pGpu,
    NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *pParams
)
{
    VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);

    NV_ASSERT_OR_RETURN(pVSI != NULL, NV_ERR_INVALID_STATE);

    portMemCopy(pParams,
                sizeof(NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS),
                &pVSI->SKUInfo,
                sizeof(NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS));

    return NV_OK;
}