1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 */
#include "resserv/rs_client.h"
#include "resserv/rs_server.h"
#include "rmapi/client.h"
#include "rmapi/resource.h"
#include "rmapi/rmapi.h"
#include "rmapi/control.h"
#include "ctrl/ctrlxxxx.h"
#include "gpu/gpu_resource.h"
#include "gpu/gpu.h"
#include "vgpu/rpc.h"
#include "core/locks.h"

//
// Constructor for RmResourceCommon. There is no common state to set up, so
// this simply reports success.
//
NV_STATUS
rmrescmnConstruct_IMPL
(
    RmResourceCommon *pResourceCommmon
)
{
    return NV_OK;
}

//
// Constructor for RmResource.
//
// On a copy construction (RS_IS_COPY_CTOR), the RPC-routing state is cloned
// from the source resource. Otherwise the resource starts with no associated
// GPU instance (~0 sentinel; the Prologue/Epilogue routines below pass this
// to gpumgrGetGpu() and check for a NULL result) and with RPC-free disabled.
//
NV_STATUS
rmresConstruct_IMPL
(
    RmResource *pResource,
    CALL_CONTEXT *pCallContext,
    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
)
{
    if (RS_IS_COPY_CTOR(pParams))
    {
        // Copy ctor: inherit RPC routing info from the resource being duped
        RmResource *pSrcResource = dynamicCast(pParams->pSrcRef->pResource, RmResource);

        pResource->rpcGpuInstance = pSrcResource->rpcGpuInstance;
        pResource->bRpcFree = pSrcResource->bRpcFree;
    }
    else
    {
        // ~0 = no GPU instance associated yet
        pResource->rpcGpuInstance = ~0;
        pResource->bRpcFree = NV_FALSE;
    }

    return NV_OK;
}

//
// Access-rights callback for RmResource.
//
// First consults the owning client's root resource (looked up via the client
// handle): if the owner would be granted the access right, so is this
// resource. Otherwise falls back to the base-class RsResource policy.
//
NvBool
rmresAccessCallback_IMPL
(
    RmResource *pResource,
    RsClient *pInvokingClient,
    void *pAllocParams,
    RsAccessRight accessRight
)
{
    NV_STATUS status;
    RsResourceRef *pCliResRef;

    // Look up the client's own resource ref (handle == client handle)
    status = clientGetResourceRef(RES_GET_CLIENT(pResource),
                                  RES_GET_CLIENT_HANDLE(pResource),
                                  &pCliResRef);

    if (status == NV_OK)
    {
        // Allow access if the resource's owner would get the access right
        if(resAccessCallback(pCliResRef->pResource, pInvokingClient, pAllocParams, accessRight))
            return NV_TRUE;
    }

    // Delegate to superclass
    return resAccessCallback_IMPL(staticCast(pResource, RsResource), pInvokingClient, pAllocParams, accessRight);
}

//
// Share-policy callback for RmResource.
//
// Handles a few share types specially before consulting the owning client's
// resource and, finally, the base-class implementation.
//
NvBool
rmresShareCallback_IMPL
(
    RmResource *pResource,
    RsClient *pInvokingClient,
    RsResourceRef *pParentRef,
    RS_SHARE_POLICY *pSharePolicy
)
{
    NV_STATUS status;
    RsResourceRef *pCliResRef;

    //
    // cliresShareCallback contains some require exceptions for non-GpuResource,
    // which we don't want to hit. ClientResource doesn't normally implement these
    // share types anyway, so we're fine with skipping them.
    //
    switch (pSharePolicy->type)
    {
        case RS_SHARE_TYPE_SMC_PARTITION:
        case RS_SHARE_TYPE_GPU:
        {
            //
            // We do not want to lock down these GpuResource-specific require policies
            // when the check cannot be applied for other resources, so add these checks
            // as an alternative bypass for those policies
            //
            if ((pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) &&
                (NULL == dynamicCast(pResource, GpuResource)))
            {
                return NV_TRUE;
            }
            break;
        }
        case RS_SHARE_TYPE_FM_CLIENT:
        {
            // Share with a Fabric Manager client: allowed when the invoking
            // client holds the external fabric-management capability and the
            // owning client is NOT kernel-privileged.
            RmClient *pSrcClient = dynamicCast(RES_GET_CLIENT(pResource), RmClient);
            NvBool bSrcIsKernel = (pSrcClient != NULL) && (rmclientGetCachedPrivilege(pSrcClient) >= RS_PRIV_LEVEL_KERNEL);

            if (rmclientIsCapable(dynamicCast(pInvokingClient, RmClient),
                                  NV_RM_CAP_EXT_FABRIC_MGMT) && !bSrcIsKernel)
            {
                return NV_TRUE;
            }
            break;
        }
        default:
        {
            // For all other share types, defer to the owning client's
            // resource ref (handle == client handle).
            status = clientGetResourceRef(RES_GET_CLIENT(pResource),
                                          RES_GET_CLIENT_HANDLE(pResource),
                                          &pCliResRef);
            if (status == NV_OK)
            {
                // Allow sharing if the resource's owner would be shared with
                if (resShareCallback(pCliResRef->pResource, pInvokingClient,
                                     pParentRef, pSharePolicy))
                    return NV_TRUE;
            }
            break;
        }
    }

    // Delegate to superclass
    return resShareCallback_IMPL(staticCast(pResource, RsResource),
                                 pInvokingClient, pParentRef, pSharePolicy);
}

//
// Populate an RMCTRL execute cookie from a resource's NVOC exported-method
// table entry (command id, control flags, required access rights).
//
void serverControl_InitCookie
(
    const struct NVOC_EXPORTED_METHOD_DEF *exportedEntry,
    RmCtrlExecuteCookie *pRmCtrlExecuteCookie
)
{
    // Copy from NVOC exportedEntry
    pRmCtrlExecuteCookie->cmd = exportedEntry->methodId;
    pRmCtrlExecuteCookie->ctrlFlags = exportedEntry->flags;
    // One time initialization of a const variable
    // (cast strips const for this single write into the access-rights mask)
    *(NvU32 *)&pRmCtrlExecuteCookie->rightsRequired.limbs[0]
        = exportedEntry->accessRight;
}

//
// This routine searches through the Resource's NVOC exported methods for an entry
// that matches the specified command.
//
// Same logic as rmControlCmdLookup() in legacy RMCTRL path
//
NV_STATUS rmresControlLookup_IMPL
(
    RmResource *pResource,
    RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams,
    const struct NVOC_EXPORTED_METHOD_DEF **ppEntry
)
{
    NvU32 cmd = pRsParams->cmd;

    // NULL command: nothing to dispatch, report "nothing to do" (not an error)
    if (RMCTRL_IS_NULL_CMD(cmd))
        return NV_WARN_NOTHING_TO_DO;

    return resControlLookup_IMPL(staticCast(pResource, RsResource), pRsParams, ppEntry);
}

//
// Default implementation: a plain RmResource cannot be inter-mapped.
// Subclasses that support it override this.
//
NV_STATUS
rmresGetMemInterMapParams_IMPL
(
    RmResource *pRmResource,
    RMRES_MEM_INTER_MAP_PARAMS *pParams
)
{
    return NV_ERR_INVALID_OBJECT_HANDLE;
}

//
// Default implementation: a plain RmResource has no inter-mapping to unmap.
//
NV_STATUS
rmresCheckMemInterUnmap_IMPL
(
    RmResource *pRmResource,
    NvBool bSubdeviceHandleProvided
)
{
    return NV_ERR_INVALID_OBJECT_HANDLE;
}

//
// Default implementation: a plain RmResource has no memory descriptor.
//
NV_STATUS
rmresGetMemoryMappingDescriptor_IMPL
(
    RmResource *pRmResource,
    struct MEMORY_DESCRIPTOR **ppMemDesc
)
{
    return NV_ERR_NOT_SUPPORTED;
}

//
// Control-serialization prologue.
//
// If this control is routed over RPC (vGPU guest with ROUTE_TO_VGPU_HOST, or
// GSP client with ROUTE_TO_PHYSICAL), serialize the parameters for the
// downward RPC. Otherwise deserialize any already-serialized parameters so
// the local control body sees a plain structure.
//
NV_STATUS
rmresControlSerialization_Prologue_IMPL
(
    RmResource *pResource,
    CALL_CONTEXT *pCallContext,
    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
)
{
    OBJGPU *pGpu = gpumgrGetGpu(pResource->rpcGpuInstance);

    if (pGpu != NULL &&
        ((IS_VIRTUAL(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST)) ||
         (IS_GSP_CLIENT(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL))))
    {
        return serverSerializeCtrlDown(pCallContext, pParams->cmd, &pParams->pParams, &pParams->paramsSize, &pParams->flags);
    }
    else
    {
        NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, serverDeserializeCtrlDown(pCallContext, pParams->cmd, &pParams->pParams, &pParams->paramsSize, &pParams->flags));
    }

    return NV_OK;
}

//
// Control-serialization epilogue: mirror of the prologue above.
//
// For the RPC-routed case, deserialize the result that came back up; in all
// cases serialize upward for the caller and release any serialization
// scratch structures held by the call context.
//
void
rmresControlSerialization_Epilogue_IMPL
(
    RmResource *pResource,
    CALL_CONTEXT *pCallContext,
    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
)
{
    OBJGPU *pGpu = gpumgrGetGpu(pResource->rpcGpuInstance);

    // Same routing condition as rmresControlSerialization_Prologue_IMPL
    if (pGpu != NULL &&
        ((IS_VIRTUAL(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST)) ||
         (IS_GSP_CLIENT(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL))))
    {
        NV_ASSERT_OK(serverDeserializeCtrlUp(pCallContext, pParams->cmd, &pParams->pParams, &pParams->paramsSize, &pParams->flags));
    }

    NV_ASSERT_OK(serverSerializeCtrlUp(pCallContext, pParams->cmd, &pParams->pParams, &pParams->paramsSize, &pParams->flags));
    serverFreeSerializeStructures(pCallContext, pParams->pParams);
}

//
// Control prologue: issue the control as an RPC when it is routed to the
// vGPU host / GSP (same condition as the serialization prologue).
//
// Returns NV_WARN_NOTHING_TO_DO after a successful RPC — NOTE(review): the
// caller presumably treats this as "already serviced, skip the local control
// body"; confirm against the resserv control dispatch path.
//
NV_STATUS
rmresControl_Prologue_IMPL
(
    RmResource *pResource,
    CALL_CONTEXT *pCallContext,
    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
)
{
    NV_STATUS status = NV_OK;
    OBJGPU *pGpu = gpumgrGetGpu(pResource->rpcGpuInstance);

    if (pGpu != NULL &&
        ((IS_VIRTUAL(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST)) ||
         (IS_GSP_CLIENT(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL))))
    {
        //
        // GPU lock is required to protect the RPC buffers.
        // However, some controls have ROUTE_TO_PHYSICAL + NO_GPUS_LOCK flags set.
        // This is not valid in offload mode, but is in monolithic.
        // In those cases, just acquire the lock for the RPC
        //
        GPU_MASK gpuMaskRelease = 0;
        if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance))
        {
            //
            // Log any case where the above assumption is not true, but continue
            // anyway. Use SAFE_LOCK_UPGRADE to try and recover in these cases.
            //
            NV_ASSERT(pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_NO_GPUS_LOCK);
            NV_ASSERT_OK_OR_RETURN(rmGpuGroupLockAcquire(pGpu->gpuInstance,
                                                         GPU_LOCK_GRP_SUBDEVICE,
                                                         GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE,
                                                         RM_LOCK_MODULES_RPC,
                                                         &gpuMaskRelease));
        }

        NV_RM_RPC_CONTROL(pGpu, pParams->hClient, pParams->hObject, pParams->cmd,
                          pParams->pParams, pParams->paramsSize, status);

        // Release only what we acquired here (zero if we already owned the lock)
        if (gpuMaskRelease != 0)
        {
            rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE);
        }

        return (status == NV_OK) ? NV_WARN_NOTHING_TO_DO : status;
    }

    return NV_OK;
}

//
// Control epilogue: no-op for the base RmResource class.
//
void
rmresControl_Epilogue_IMPL
(
    RmResource *pResource,
    CALL_CONTEXT *pCallContext,
    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
)
{
}