1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 
25 /*
26  * nv_gpu_ops.h
27  *
28  * This file defines the interface between the common RM layer
29  * and the OS specific platform layers. (Currently supported
30  * are Linux and KMD)
31  *
32  */
33 
34 #ifndef _NV_GPU_OPS_H_
35 #define _NV_GPU_OPS_H_
36 #include "nvgputypes.h"
37 #include "nv_uvm_types.h"
38 
//
// Opaque handle types. The underlying struct definitions live in the
// OS-specific implementation; clients of this interface only pass these
// pointers around and never dereference them.
//
typedef struct gpuSession       *gpuSessionHandle;
typedef struct gpuDevice        *gpuDeviceHandle;
typedef struct gpuAddressSpace  *gpuAddressSpaceHandle;
typedef struct gpuTsg           *gpuTsgHandle;
typedef struct gpuChannel       *gpuChannelHandle;
typedef struct gpuObject        *gpuObjectHandle;

// Opaque type for a channel retained via nvGpuOpsRetainChannel(); the struct
// definition is private to the implementation.
typedef struct gpuRetainedChannel_struct gpuRetainedChannel;
47 
// Creates a new session object. On success *session receives the handle;
// release it with nvGpuOpsDestroySession().
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);

// Destroys a session created by nvGpuOpsCreateSession().
NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);

// Creates a device object under the given session for the GPU identified by
// gpuGuid. On success *device receives the handle; release it with
// nvGpuOpsDeviceDestroy(). bCreateSmcPartition presumably requests a device
// scoped to an SMC (MIG) partition described by pGpuInfo — confirm against
// the implementation.
NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session,
                               const gpuInfo *pGpuInfo,
                               const NvProcessorUuid *gpuGuid,
                               struct gpuDevice **device,
                               NvBool bCreateSmcPartition);

// Destroys a device created by nvGpuOpsDeviceCreate().
NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device);
59 
// Creates a GPU VA space on the device covering [vaBase, vaBase + vaSize).
// On success *vaSpace receives the handle and *vaSpaceInfo is filled with
// its properties; release with nvGpuOpsAddressSpaceDestroy().
NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device,
                                     NvU64 vaBase,
                                     NvU64 vaSize,
                                     gpuAddressSpaceHandle *vaSpace,
                                     UvmGpuAddressSpaceInfo *vaSpaceInfo);

// Queries peer-to-peer capabilities between two devices; results are
// returned in *p2pCaps.
NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1,
                             gpuDeviceHandle device2,
                             getP2PCapsParams *p2pCaps);

// Destroys a VA space created by nvGpuOpsAddressSpaceCreate() or duped via
// nvGpuOpsDupAddressSpace().
void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace);
71 
// Allocates framebuffer (video) memory of the given length and maps it into
// vaSpace. On success *gpuOffset receives the GPU VA. allocInfo carries
// allocation attributes (see gpuAllocInfo). Free with nvGpuOpsMemoryFree().
NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace,
    NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);

// Same as nvGpuOpsMemoryAllocFb(), but allocates system memory instead of
// framebuffer memory.
NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace,
    NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);

// Allocates pageCount pages of pageSize bytes from the PMA (Physical Memory
// Allocator) object pPma. Physical page addresses are returned in pPages
// (caller-provided array of at least pageCount entries — TODO confirm).
NV_STATUS nvGpuOpsPmaAllocPages(void *pPma,
                                NvLength pageCount,
                                NvU64 pageSize,
                                gpuPmaAllocationOptions *pPmaAllocOptions,
                                NvU64 *pPages);

// Frees pages previously allocated by nvGpuOpsPmaAllocPages(). flags
// semantics are defined by the PMA implementation.
void nvGpuOpsPmaFreePages(void *pPma,
                          NvU64 *pPages,
                          NvLength pageCount,
                          NvU64 pageSize,
                          NvU32 flags);

// Pins the given PMA pages; presumably prevents PMA from evicting or
// migrating them until unpinned — confirm against the implementation.
NV_STATUS nvGpuOpsPmaPinPages(void *pPma,
                              NvU64 *pPages,
                              NvLength pageCount,
                              NvU64 pageSize,
                              NvU32 flags);

// Reverses nvGpuOpsPmaPinPages() for the given pages.
NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma,
                                NvU64 *pPages,
                                NvLength pageCount,
                                NvU64 pageSize);
100 
// Allocates a TSG (time-slice group) in the given VA space. On success
// *tsgHandle receives the handle; release with nvGpuOpsTsgDestroy().
NV_STATUS nvGpuOpsTsgAllocate(gpuAddressSpaceHandle vaSpace,
                              const gpuTsgAllocParams *params,
                              gpuTsgHandle *tsgHandle);

// Allocates a channel within the given TSG. On success *channelHandle
// receives the handle and *channelInfo is filled with channel properties;
// release with nvGpuOpsChannelDestroy().
NV_STATUS nvGpuOpsChannelAllocate(const gpuTsgHandle tsgHandle,
                                  const gpuChannelAllocParams *params,
                                  gpuChannelHandle *channelHandle,
                                  gpuChannelInfo *channelInfo);

// Maps an existing allocation (hSrcClient/hSrcAllocation) into vaSpace.
// On success *gpuOffset receives the GPU VA of the mapping.
NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace,
     NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset);

// Destroys a TSG allocated by nvGpuOpsTsgAllocate().
void nvGpuOpsTsgDestroy(struct gpuTsg *tsg);

// Destroys a channel allocated by nvGpuOpsChannelAllocate().
void nvGpuOpsChannelDestroy(struct gpuChannel *channel);

// Frees a GPU VA allocation (from nvGpuOpsMemoryAllocFb/Sys) identified by
// its GPU VA.
void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
     NvU64 pointer);

// Creates a CPU mapping of length bytes over the GPU allocation at "memory".
// On success *cpuPtr receives the CPU pointer; unmap with
// nvGpuOpsMemoryCpuUnMap().
NV_STATUS  nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace,
                                NvU64 memory, NvLength length,
                                void **cpuPtr, NvU64 pageSize);

// Removes a CPU mapping created by nvGpuOpsMemoryCpuMap().
void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace,
     void* cpuPtr);
126 
// Queries general capabilities of the device into *caps.
NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device,
                            gpuCaps *caps);

// Queries copy-engine (CE) capabilities of the device into *caps.
NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device,
                               gpuCesCaps *caps);

// Duplicates the allocation at srcAddress in srcVaSpace into dstVaSpace.
// On success *dstAddress receives the GPU VA in the destination space,
// aligned per dstVaAlignment (0 presumably means default alignment — TODO
// confirm).
NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace,
                                NvU64 srcAddress,
                                struct gpuAddressSpace *dstVaSpace,
                                NvU64 dstVaAlignment,
                                NvU64 *dstAddress);

// Duplicates the physical memory handle hPhysMemory owned by hClient into
// the device's client. On success *hDupMemory receives the duped handle and
// *pGpuMemoryInfo describes the memory; release the handle with
// nvGpuOpsFreeDupedHandle().
NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device,
                            NvHandle hClient,
                            NvHandle hPhysMemory,
                            NvHandle *hDupMemory,
                            gpuMemoryInfo *pGpuMemoryInfo);

// Retrieves the GUID of the GPU identified by the client/device/subdevice
// handles into gpuGuid (buffer of guidLength bytes).
NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice,
                          NvHandle hSubDevice, NvU8 *gpuGuid,
                          unsigned guidLength);
148 
// Looks up the RM client/device/subdevice handles associated with process
// "pid" for the GPU identified by gpuUuid.
NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid,
                                       const NvU8 *gpuUuid,
                                       NvHandle *hClient,
                                       NvHandle *hDevice,
                                       NvHandle *hSubDevice);

// Releases a handle previously duped via nvGpuOpsDupMemory().
NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device,
                                  NvHandle hPhysHandle);

// Enumerates attached GPUs. guidList receives the GPU GUIDs and *numGpus the
// count; caller provides the buffer (required size not visible here — see
// implementation).
NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus);

// Fills *pGpuInfo with information about the GPU identified by gpuUuid, on
// behalf of the client described by pGpuClientInfo.
NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid,
                             const gpuClientInfo *pGpuClientInfo,
                             gpuInfo *pGpuInfo);

// Translates a GPU UUID (uuidLength bytes at pUuid) into its RM device and
// subdevice instance IDs.
NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId,
                            NvU32 *pSubdeviceId);
166 
// Takes (bOwnInterrupts == NV_TRUE) or returns (NV_FALSE) ownership of the
// device's replayable page-fault interrupt.
NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts);

// Asks RM to service pending interrupts for the device.
NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device);

// Checks whether a double-bit ECC error has been recorded for the channel;
// result returned in *bEccDbeSet.
NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet);

// Points the VA space's page directory at physAddress with numEntries
// entries. bVidMemAperture selects vidmem vs sysmem for the directory;
// pasid is presumably used for ATS/IOMMU binding — confirm against the
// implementation.
NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace,
                                   NvU64 physAddress, unsigned numEntries,
                                   NvBool bVidMemAperture, NvU32 pasid);

// Reverses nvGpuOpsSetPageDirectory() for the VA space.
NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace);

// Returns (in *pFmt) the GMMU format descriptor used by the VA space; the
// pointee type is private to the implementation.
NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt);

// Invalidates the GPU TLB entries for the VA space.
NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace);

// Fills *fbInfo with framebuffer information for the device.
NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo);

// Fills *eccInfo with ECC state/capability information for the device.
NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo);

// Initializes the fault-buffer state in *pFaultInfo for the device; tear
// down with nvGpuOpsDestroyFaultInfo().
NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo);

// Tears down fault-buffer state initialized by nvGpuOpsInitFaultInfo().
NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device,
                                   gpuFaultInfo *pFaultInfo);

// Sets *hasPendingFaults if the non-replayable fault buffer has unconsumed
// entries.
NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults);

// Copies pending non-replayable faults into faultBuffer and returns the
// number of faults copied in *numFaults.
NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults);
195 
// Duplicates a user client's VA space (hUserClient/hUserVASpace) onto the
// device. On success *vaSpace receives the handle and *vaSpaceInfo its
// properties; release with nvGpuOpsAddressSpaceDestroy().
NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device,
                                  NvHandle hUserClient,
                                  NvHandle hUserVASpace,
                                  struct gpuAddressSpace **vaSpace,
                                  UvmGpuAddressSpaceInfo *vaSpaceInfo);

// Returns the device's PMA object in *pPma and a pointer to its public
// statistics in *pPmaPubStats.
NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device,
                               void **pPma,
                               const UvmPmaStatistics **pPmaPubStats);

// Initializes access-counter state in *pAccessCntrInfo for the notification
// buffer selected by accessCntrIndex; tear down with
// nvGpuOpsDestroyAccessCntrInfo().
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo, NvU32 accessCntrIndex);

// Tears down access-counter state initialized by
// nvGpuOpsInitAccessCntrInfo().
NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device,
                                        gpuAccessCntrInfo *pAccessCntrInfo);

// Takes (bOwnInterrupts == NV_TRUE) or returns (NV_FALSE) ownership of the
// access-counter interrupt for the given session.
NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session,
                                    gpuAccessCntrInfo *pAccessCntrInfo,
                                    NvBool bOwnInterrupts);

// Enables access counters with the configuration in *pAccessCntrConfig.
NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device,
                                   gpuAccessCntrInfo *pAccessCntrInfo,
                                   gpuAccessCntrConfig *pAccessCntrConfig);

// Disables access counters previously enabled by
// nvGpuOpsEnableAccessCntr().
NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);

// Creates a P2P object between the two devices; *hP2pObject receives its
// handle. Destroy with nvGpuOpsP2pObjectDestroy().
NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1,
                                  struct gpuDevice *device2,
                                  NvHandle *hP2pObject);

// Destroys a P2P object created by nvGpuOpsP2pObjectCreate().
NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session,
                              NvHandle hP2pObject);

// Retrieves PTE information for [offset, offset + size) of a duped external
// allocation, returned via *pGpuExternalMappingInfo.
NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
                                       NvHandle hDupedMemory,
                                       NvU64 offset,
                                       NvU64 size,
                                       gpuExternalMappingInfo *pGpuExternalMappingInfo);
233 
// Retains a user channel (hClient/hChannel) so it can be inspected and
// managed through this interface. On success *retainedChannel receives the
// retained object and *channelInstanceInfo its instance details; release
// with nvGpuOpsReleaseChannel().
NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace,
                                NvHandle hClient,
                                NvHandle hChannel,
                                gpuRetainedChannel **retainedChannel,
                                gpuChannelInstanceInfo *channelInstanceInfo);

// Releases a channel retained by nvGpuOpsRetainChannel().
void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel);

// Binds the retained channel's resources as described by
// *channelResourceBindParams.
NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel,
                                       gpuChannelResourceBindParams *channelResourceBindParams);

// Stops the retained channel; bImmediate presumably skips a graceful
// preemption — confirm against the implementation.
void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate);

// Retrieves PTE information for [offset, offset + size) of a channel
// resource identified by resourceDescriptor, returned via
// *pGpuExternalMappingInfo.
NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace,
                                         NvP64 resourceDescriptor,
                                         NvU64 offset,
                                         NvU64 size,
                                         gpuExternalMappingInfo *pGpuExternalMappingInfo);

// Reports a non-replayable fault packet (pFaultPacket) to RM for handling.
NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device,
                                           const void *pFaultPacket);
255 
256 // Private interface used for windows only
257 
258 // Interface used for SR-IOV heavy
259 
// Allocates a paging channel on the device (SR-IOV heavy mode). On success
// *channelHandle receives the handle and *channelinfo its properties;
// release with nvGpuOpsPagingChannelDestroy().
NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device,
                                        const gpuPagingChannelAllocParams *params,
                                        gpuPagingChannelHandle *channelHandle,
                                        gpuPagingChannelInfo *channelinfo);

// Destroys a paging channel allocated by nvGpuOpsPagingChannelAllocate().
void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel);

// Maps the allocation at srcAddress in srcVaSpace for use by the device's
// paging channels; *dstAddress receives the resulting address. Undo with
// nvGpuOpsPagingChannelsUnmap().
NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace,
                                    NvU64 srcAddress,
                                    struct gpuDevice *device,
                                    NvU64 *dstAddress);

// Removes a mapping created by nvGpuOpsPagingChannelsMap().
void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace,
                                 NvU64 srcAddress,
                                 struct gpuDevice *device);

// Submits methodStreamSize bytes of GPU methods (methodStream) to the
// paging channel for execution.
NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
                                          char *methodStream,
                                          NvU32 methodStreamSize);

// Flushes the device's replayable fault buffer.
NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(struct gpuDevice *device);
281 
282 // Interface used for CCSL
283 
// Initializes a CCSL crypto context bound to the given channel. On success
// *ctx receives the context; release with nvGpuOpsCcslContextClear().
NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
                                  gpuChannelHandle channel);
// Clears and frees a context created by nvGpuOpsCcslContextInit().
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
// Rotates the context's IV for the given direction (encrypt/decrypt
// channel; exact direction encoding is defined by the implementation).
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
                               NvU8 direction);
// Encrypts bufferSize bytes from inputBuffer into outputBuffer and writes
// the authentication tag to authTagBuffer (AEAD-style; buffer size
// requirements are defined by the implementation).
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
                              NvU32 bufferSize,
                              NvU8 const *inputBuffer,
                              NvU8 *outputBuffer,
                              NvU8 *authTagBuffer);
// Same as nvGpuOpsCcslEncrypt(), but uses the caller-supplied IV in
// encryptIv rather than the context's internal IV.
NV_STATUS nvGpuOpsCcslEncryptWithIv(struct ccslContext_t *ctx,
                                    NvU32 bufferSize,
                                    NvU8 const *inputBuffer,
                                    NvU8 *encryptIv,
                                    NvU8 *outputBuffer,
                                    NvU8 *authTagBuffer);
// Decrypts bufferSize bytes from inputBuffer into outputBuffer using
// decryptIv, verifying authTagBuffer over the ciphertext plus the optional
// additional authenticated data (addAuthData, addAuthDataSize bytes).
NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
                              NvU32 bufferSize,
                              NvU8 const *inputBuffer,
                              NvU8 const *decryptIv,
                              NvU8 *outputBuffer,
                              NvU8 const *addAuthData,
                              NvU32 addAuthDataSize,
                              NvU8 const *authTagBuffer);
// Computes an authentication tag over bufferSize bytes of inputBuffer
// without encrypting; the tag is written to authTagBuffer.
NV_STATUS nvGpuOpsCcslSign(struct ccslContext_t *ctx,
                           NvU32 bufferSize,
                           NvU8 const *inputBuffer,
                           NvU8 *authTagBuffer);
// Returns in *messageNum how many messages can still be processed in the
// given direction before the context's counter/IV space is exhausted —
// presumably; confirm against the implementation.
NV_STATUS nvGpuOpsQueryMessagePool(struct ccslContext_t *ctx,
                                   NvU8 direction,
                                   NvU64 *messageNum);
// Advances the context's IV for the given direction by "increment" and, if
// iv is non-NULL, presumably copies the resulting IV out — confirm against
// the implementation.
NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx,
                              NvU8 direction,
                              NvU64 increment,
                              NvU8 *iv);
319 
320 #endif /* _NV_GPU_OPS_H_*/
321