/*
 * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


/*
 * nv_gpu_ops.h
 *
 * This file defines the interface between the common RM layer
 * and the OS specific platform layers. (Currently supported
 * are Linux and KMD)
 *
 */

#ifndef _NV_GPU_OPS_H_
#define _NV_GPU_OPS_H_
#include "nvgputypes.h"
#include "nv_uvm_types.h"

// Opaque handle typedefs for the objects managed through this interface.
// The struct definitions live in the platform-specific implementation;
// callers only ever hold pointers.
typedef struct gpuSession      *gpuSessionHandle;
typedef struct gpuDevice       *gpuDeviceHandle;
typedef struct gpuAddressSpace *gpuAddressSpaceHandle;
typedef struct gpuChannel      *gpuChannelHandle;
typedef struct gpuObject       *gpuObjectHandle;

typedef struct gpuRetainedChannel_struct gpuRetainedChannel;

//
// Session lifetime. A session is the top-level container from which
// devices (and everything below them) are created.
//
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);

NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);

//
// Device lifetime. bCreateSmcPartition selects whether the device is
// created against an SMC (MIG) partition.
//
NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session,
                               const gpuInfo *pGpuInfo,
                               const NvProcessorUuid *gpuGuid,
                               struct gpuDevice **device,
                               NvBool bCreateSmcPartition);

NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device);

//
// GPU virtual address space management.
//
NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device,
                                     NvU64 vaBase,
                                     NvU64 vaSize,
                                     gpuAddressSpaceHandle *vaSpace,
                                     UvmGpuAddressSpaceInfo *vaSpaceInfo);

NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1,
                             gpuDeviceHandle device2,
                             getP2PCapsParams *p2pCaps);

void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace);

//
// GPU memory allocation: video memory (Fb) vs. system memory (Sys).
// On success *gpuOffset receives the GPU VA of the allocation.
//
NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace,
    NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);

NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace,
    NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);

//
// PMA (Physical Memory Allocator) page-level operations. pPma is the
// opaque PMA object obtained via nvGpuOpsGetPmaObject().
//
NV_STATUS nvGpuOpsPmaAllocPages(void *pPma,
                                NvLength pageCount,
                                NvU64 pageSize,
                                gpuPmaAllocationOptions *pPmaAllocOptions,
                                NvU64 *pPages);

void nvGpuOpsPmaFreePages(void *pPma,
                          NvU64 *pPages,
                          NvLength pageCount,
                          NvU64 pageSize,
                          NvU32 flags);

NV_STATUS nvGpuOpsPmaPinPages(void *pPma,
                              NvU64 *pPages,
                              NvLength pageCount,
                              NvU64 pageSize,
                              NvU32 flags);

NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma,
                                NvU64 *pPages,
                                NvLength pageCount,
                                NvU64 pageSize);

//
// Channel (GPU work queue) allocation and teardown.
//
NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace,
                                  const gpuChannelAllocParams *params,
                                  gpuChannelHandle *channelHandle,
                                  gpuChannelInfo *channelInfo);

// Maps an allocation owned by another RM client (hSrcClient/hSrcAllocation)
// into the given VA space.
NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace,
    NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset);

void nvGpuOpsChannelDestroy(struct gpuChannel *channel);

void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
    NvU64 pointer);

//
// CPU mappings of GPU allocations.
//
NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace,
                               NvU64 memory, NvLength length,
                               void **cpuPtr, NvU64 pageSize);

void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace,
                            void* cpuPtr);

//
// Capability queries.
//
NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device,
                            gpuCaps *caps);

NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device,
                               gpuCesCaps *caps);

//
// Duplication of allocations/handles across VA spaces and clients.
//
NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace,
                                NvU64 srcAddress,
                                struct gpuAddressSpace *dstVaSpace,
                                NvU64 dstVaAlignment,
                                NvU64 *dstAddress);

NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device,
                            NvHandle hClient,
                            NvHandle hPhysMemory,
                            NvHandle *hDupMemory,
                            gpuMemoryInfo *pGpuMemoryInfo);

NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice,
                          NvHandle hSubDevice, NvU8 *gpuGuid,
                          unsigned guidLength);

NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid,
                                       const NvU8 *gpuUuid,
                                       NvHandle *hClient,
                                       NvHandle *hDevice,
                                       NvHandle *hSubDevice);

NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device,
                                  NvHandle hPhysHandle);

//
// GPU enumeration / identification.
//
NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus);

NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid,
                             const gpuClientInfo *pGpuClientInfo,
                             gpuInfo *pGpuInfo);

NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId,
                            NvU32 *pSubdeviceId);

//
// Interrupt and fault handling.
//
NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts);

NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device);

NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet);

//
// Page directory / TLB management for externally-managed VA spaces.
//
NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace,
                                   NvU64 physAddress, unsigned numEntries,
                                   NvBool bVidMemAperture, NvU32 pasid);

NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace);

NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt);

NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace);

NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo);

NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo);

//
// Replayable/non-replayable fault buffer management.
//
NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo);

NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device,
                                   gpuFaultInfo *pFaultInfo);

NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults);

NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults);

// Duplicates a user-owned VA space (hUserClient/hUserVASpace) into this
// device, returning a new gpuAddressSpace referring to the same GMMU state.
NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device,
                                  NvHandle hUserClient,
                                  NvHandle hUserVASpace,
                                  struct gpuAddressSpace **vaSpace,
                                  UvmGpuAddressSpaceInfo *vaSpaceInfo);

NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device,
                               void **pPma,
                               const UvmPmaStatistics **pPmaPubStats);

//
// Access counter notification buffer management.
//
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);

NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device,
                                        gpuAccessCntrInfo *pAccessCntrInfo);

NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session,
                                    gpuAccessCntrInfo *pAccessCntrInfo,
                                    NvBool bOwnInterrupts);

NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device,
                                   gpuAccessCntrInfo *pAccessCntrInfo,
                                   gpuAccessCntrConfig *pAccessCntrConfig);

NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);

//
// Peer-to-peer object lifetime between two devices.
//
NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1,
                                  struct gpuDevice *device2,
                                  NvHandle *hP2pObject);

NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session,
                                   NvHandle hP2pObject);

// Retrieves PTEs describing a range of a previously duped external allocation.
NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
                                       NvHandle hDupedMemory,
                                       NvU64 offset,
                                       NvU64 size,
                                       gpuExternalMappingInfo *pGpuExternalMappingInfo);

//
// Channel retain/release for fault servicing: keeps a user channel's
// instance state alive while it is being inspected or stopped.
//
NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace,
                                NvHandle hClient,
                                NvHandle hChannel,
                                gpuRetainedChannel **retainedChannel,
                                gpuChannelInstanceInfo *channelInstanceInfo);

void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel);

NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel,
                                       gpuChannelResourceBindParams *channelResourceBindParams);

void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate);

NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace,
                                         NvP64 resourceDescriptor,
                                         NvU64 offset,
                                         NvU64 size,
                                         gpuExternalMappingInfo *pGpuExternalMappingInfo);

NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device,
                                           const void *pFaultPacket);

// Private interface used for windows only

#if defined(NV_WINDOWS)
NV_STATUS nvGpuOpsGetRmHandleForSession(gpuSessionHandle hSession, NvHandle *hRmClient);

NV_STATUS nvGpuOpsGetRmHandleForChannel(gpuChannelHandle hChannel, NvHandle *hRmChannel);
#endif // NV_WINDOWS

// Interface used for SR-IOV heavy

NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device,
                                        const gpuPagingChannelAllocParams *params,
                                        gpuPagingChannelHandle *channelHandle,
                                        gpuPagingChannelInfo *channelinfo);

void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel);

NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace,
                                    NvU64 srcAddress,
                                    struct gpuDevice *device,
                                    NvU64 *dstAddress);

void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace,
                                 NvU64 srcAddress,
                                 struct gpuDevice *device);

NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
                                          char *methodStream,
                                          NvU32 methodStreamSize);

NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(struct gpuDevice *device);

#endif /* _NV_GPU_OPS_H_*/