1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 
25 /*
26  * nv_gpu_ops.h
27  *
28  * This file defines the interface between the common RM layer
29  * and the OS specific platform layers. (Currently supported
30  * are Linux and KMD)
31  *
32  */
33 
34 #ifndef _NV_GPU_OPS_H_
35 #define _NV_GPU_OPS_H_
36 #include "nvgputypes.h"
37 #include "nv_uvm_types.h"
38 
// Opaque handle types for objects managed by the nv_gpu_ops layer.
// The underlying struct definitions are private to the implementation;
// callers only ever hold and pass these pointers.
typedef struct gpuSession       *gpuSessionHandle;
typedef struct gpuDevice        *gpuDeviceHandle;
typedef struct gpuAddressSpace  *gpuAddressSpaceHandle;
typedef struct gpuTsg           *gpuTsgHandle;
typedef struct gpuChannel       *gpuChannelHandle;
typedef struct gpuObject        *gpuObjectHandle;

// Opaque object representing a channel retained via nvGpuOpsRetainChannel().
typedef struct gpuRetainedChannel_struct gpuRetainedChannel;
47 
// Creates a new nvGpuOps session. On success *session holds the new
// session object; destroy it with nvGpuOpsDestroySession().
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);

// Destroys a session created by nvGpuOpsCreateSession().
NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);

// Creates a device object under the given session for the GPU identified
// by gpuGuid; pGpuInfo supplies the GPU's properties. bCreateSmcPartition
// presumably selects an SMC partition target (per the parameter name --
// confirm against the implementation). On success *device holds the new
// device; destroy it with nvGpuOpsDeviceDestroy().
NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session,
                               const gpuInfo *pGpuInfo,
                               const NvProcessorUuid *gpuGuid,
                               struct gpuDevice **device,
                               NvBool bCreateSmcPartition);

// Destroys a device created by nvGpuOpsDeviceCreate().
NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device);
59 
// Creates a GPU virtual address space spanning [vaBase, vaBase + vaSize)
// on the given device; enableAts requests ATS support. Outputs the handle
// in *vaSpace and its properties in *vaSpaceInfo. Destroy with
// nvGpuOpsAddressSpaceDestroy().
NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device,
                                     NvU64 vaBase,
                                     NvU64 vaSize,
                                     NvBool enableAts,
                                     gpuAddressSpaceHandle *vaSpace,
                                     UvmGpuAddressSpaceInfo *vaSpaceInfo);

// Queries peer-to-peer capabilities between device1 and device2 into *p2pCaps.
NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1,
                             gpuDeviceHandle device2,
                             getP2PCapsParams *p2pCaps);

// Destroys an address space created by nvGpuOpsAddressSpaceCreate() or
// obtained from nvGpuOpsDupAddressSpace().
void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace);
72 
// Allocates `length` bytes of GPU framebuffer (video) memory in vaSpace.
// On success *gpuOffset holds the GPU virtual address of the allocation.
// Free with nvGpuOpsMemoryFree().
NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace,
    NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);

// Allocates `length` bytes of system memory mapped into vaSpace.
// On success *gpuOffset holds the GPU virtual address of the allocation.
// Free with nvGpuOpsMemoryFree().
NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace,
    NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);

// Allocates pageCount pages of pageSize bytes from the PMA object pPma
// (see nvGpuOpsGetPmaObject); pPages receives the allocated pages.
// NOTE(review): whether pPages holds physical addresses or frame numbers
// is not visible from this header -- confirm in the implementation.
NV_STATUS nvGpuOpsPmaAllocPages(void *pPma,
                                NvLength pageCount,
                                NvU64 pageSize,
                                gpuPmaAllocationOptions *pPmaAllocOptions,
                                NvU64 *pPages);

// Returns pageCount pages of pageSize bytes in pPages to the PMA.
void nvGpuOpsPmaFreePages(void *pPma,
                          NvU64 *pPages,
                          NvLength pageCount,
                          NvU64 pageSize,
                          NvU32 flags);

// Pins pageCount pages of pageSize bytes listed in pPages in the PMA.
NV_STATUS nvGpuOpsPmaPinPages(void *pPma,
                              NvU64 *pPages,
                              NvLength pageCount,
                              NvU64 pageSize,
                              NvU32 flags);
96 
// Allocates a TSG (time-slice group) in the given VA space; the new
// handle is returned in *tsgHandle. Destroy with nvGpuOpsTsgDestroy().
NV_STATUS nvGpuOpsTsgAllocate(gpuAddressSpaceHandle vaSpace,
                              const gpuTsgAllocParams *params,
                              gpuTsgHandle *tsgHandle);

// Allocates a channel inside the given TSG. Outputs the handle in
// *channelHandle and channel properties in *channelInfo. Destroy with
// nvGpuOpsChannelDestroy().
NV_STATUS nvGpuOpsChannelAllocate(const gpuTsgHandle tsgHandle,
                                  const gpuChannelAllocParams *params,
                                  gpuChannelHandle *channelHandle,
                                  gpuChannelInfo *channelInfo);

// Maps an existing allocation (hSrcClient/hSrcAllocation, `length` bytes)
// into vaSpace; *gpuOffset receives the resulting GPU virtual address.
NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace,
     NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset);

// Destroys a TSG allocated by nvGpuOpsTsgAllocate().
void nvGpuOpsTsgDestroy(struct gpuTsg *tsg);

// Destroys a channel allocated by nvGpuOpsChannelAllocate().
void nvGpuOpsChannelDestroy(struct gpuChannel *channel);

// Frees the allocation at GPU virtual address `pointer` in vaSpace.
void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
     NvU64 pointer);
115 
// Maps `length` bytes of the GPU allocation at `memory` for CPU access;
// *cpuPtr receives the CPU pointer. Unmap with nvGpuOpsMemoryCpuUnMap().
NV_STATUS  nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace,
                                NvU64 memory, NvLength length,
                                void **cpuPtr, NvU64 pageSize);

// Releases a CPU mapping created by nvGpuOpsMemoryCpuMap().
void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace,
     void* cpuPtr);

// Queries general device capabilities into *caps.
NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device,
                            gpuCaps *caps);

// Queries copy-engine (CE) capabilities into *caps.
NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device,
                               gpuCesCaps *caps);
128 
// Duplicates the allocation at srcAddress in srcVaSpace into dstVaSpace,
// aligned to dstVaAlignment; *dstAddress receives the new virtual address.
NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace,
                                NvU64 srcAddress,
                                struct gpuAddressSpace *dstVaSpace,
                                NvU64 dstVaAlignment,
                                NvU64 *dstAddress);

// Dups the physical memory handle hPhysMemory owned by hClient under this
// device; *hDupMemory receives the duped handle and *pGpuMemoryInfo the
// memory's properties. Release with nvGpuOpsFreeDupedHandle().
NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device,
                            NvHandle hClient,
                            NvHandle hPhysMemory,
                            NvHandle *hDupMemory,
                            gpuMemoryInfo *pGpuMemoryInfo);

// Retrieves the GPU GUID for the given client/device/subdevice into
// gpuGuid (a buffer of guidLength bytes).
NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice,
                          NvHandle hSubDevice, NvU8 *gpuGuid,
                          unsigned guidLength);

// Looks up the RM client/device/subdevice handles owned by process `pid`
// for the GPU identified by gpuUuid.
NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid,
                                       const NvU8 *gpuUuid,
                                       NvHandle *hClient,
                                       NvHandle *hDevice,
                                       NvHandle *hSubDevice);

// Frees a handle duped via nvGpuOpsDupMemory().
NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device,
                                  NvHandle hPhysHandle);

// Fills guidList with the GUIDs of attached GPUs; *numGpus receives the
// count. NOTE(review): required guidList capacity not visible here.
NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus);

// Queries properties of the GPU identified by gpuUuid, on behalf of the
// client described by pGpuClientInfo, into *pGpuInfo.
NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid,
                             const gpuClientInfo *pGpuClientInfo,
                             gpuInfo *pGpuInfo);
159 
// Translates a GPU UUID (uuidLength bytes at pUuid) into RM device and
// subdevice IDs.
NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId,
                            NvU32 *pSubdeviceId);

// Transfers ownership of the device's page-fault interrupt between RM and
// the caller; bOwnInterrupts selects the direction of the transfer.
NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts);

// Asks RM to service pending interrupts for the device.
NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device);

// Checks for a double-bit ECC error affecting the channel; the result is
// returned in *bEccDbeSet.
NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet);

// Points the VA space's page directory at physAddress with numEntries
// entries; bVidMemAperture selects video memory as the aperture. pasid is
// presumably the process address space ID used with ATS -- TODO confirm.
NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace,
                                   NvU64 physAddress, unsigned numEntries,
                                   NvBool bVidMemAperture, NvU32 pasid);

// Reverts a page directory configured by nvGpuOpsSetPageDirectory().
NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace);

// Returns the GMMU page-table format descriptor for the VA space in *pFmt.
NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt);

// Invalidates the TLB for the given VA space.
NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace);

// Queries framebuffer information for the device into *fbInfo.
NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo);

// Queries ECC information for the device into *eccInfo.
NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo);
182 
// Initializes fault-buffer state for the device in *pFaultInfo.
// Tear down with nvGpuOpsDestroyFaultInfo().
NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo);

// Tears down fault state initialized by nvGpuOpsInitFaultInfo().
NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device,
                                   gpuFaultInfo *pFaultInfo);

// Sets *hasPendingFaults when non-replayable faults are pending.
NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults);

// Copies pending non-replayable fault packets into faultBuffer;
// *numFaults receives the number of faults copied.
NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults);

// Dups the user VA space identified by hUserClient/hUserVASpace into a
// new gpuAddressSpace on this device; its properties are returned in
// *vaSpaceInfo. Destroy with nvGpuOpsAddressSpaceDestroy().
NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device,
                                  NvHandle hUserClient,
                                  NvHandle hUserVASpace,
                                  struct gpuAddressSpace **vaSpace,
                                  UvmGpuAddressSpaceInfo *vaSpaceInfo);

// Returns the device's PMA object in *pPma and a pointer to its
// read-only public statistics in *pPmaPubStats.
NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device,
                               void **pPma,
                               const UvmPmaStatistics **pPmaPubStats);
201 
// Initializes access-counter state for the notification buffer selected
// by accessCntrIndex into *pAccessCntrInfo.
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo, NvU32 accessCntrIndex);

// Tears down state initialized by nvGpuOpsInitAccessCntrInfo().
NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device,
                                        gpuAccessCntrInfo *pAccessCntrInfo);

// Transfers ownership of the access-counter interrupt between RM and the
// caller; bOwnInterrupts selects the direction of the transfer.
NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session,
                                    gpuAccessCntrInfo *pAccessCntrInfo,
                                    NvBool bOwnInterrupts);

// Enables access counters with the supplied configuration.
// Disable with nvGpuOpsDisableAccessCntr().
NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device,
                                   gpuAccessCntrInfo *pAccessCntrInfo,
                                   gpuAccessCntrConfig *pAccessCntrConfig);

// Disables access counters enabled by nvGpuOpsEnableAccessCntr().
NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);

// Creates a P2P object between device1 and device2; *hP2pObject receives
// its handle. Destroy with nvGpuOpsP2pObjectDestroy().
NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1,
                                  struct gpuDevice *device2,
                                  NvHandle *hP2pObject);

// Destroys a P2P object created by nvGpuOpsP2pObjectCreate().
NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session,
                              NvHandle hP2pObject);

// Fetches the PTEs covering [offset, offset + size) of the duped external
// allocation hDupedMemory into *pGpuExternalMappingInfo.
NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
                                       NvHandle hDupedMemory,
                                       NvU64 offset,
                                       NvU64 size,
                                       gpuExternalMappingInfo *pGpuExternalMappingInfo);
229 
// Retains the user channel identified by hClient/hChannel so that it can
// be inspected and its resources bound; outputs the retained-channel
// object in *retainedChannel and instance details in *channelInstanceInfo.
// Release with nvGpuOpsReleaseChannel().
NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace,
                                NvHandle hClient,
                                NvHandle hChannel,
                                gpuRetainedChannel **retainedChannel,
                                gpuChannelInstanceInfo *channelInstanceInfo);

// Releases a channel retained by nvGpuOpsRetainChannel().
void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel);

// Binds the retained channel's resources as described by
// channelResourceBindParams.
NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel,
                                       gpuChannelResourceBindParams *channelResourceBindParams);

// Stops the retained channel. bImmediate presumably requests an immediate
// (non-graceful) stop -- per the parameter name; confirm in implementation.
void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate);

// Fetches the PTEs covering [offset, offset + size) of the channel
// resource named by resourceDescriptor into *pGpuExternalMappingInfo.
NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace,
                                         NvP64 resourceDescriptor,
                                         NvU64 offset,
                                         NvU64 size,
                                         gpuExternalMappingInfo *pGpuExternalMappingInfo);

// Hands a non-replayable fault packet to RM for processing.
NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device,
                                           const void *pFaultPacket);
251 
// Private interface used for Windows only

#if defined(NV_WINDOWS)
// Returns the RM client handle backing the given session in *hRmClient.
NV_STATUS nvGpuOpsGetRmHandleForSession(gpuSessionHandle hSession, NvHandle *hRmClient);

// Returns the RM channel handle backing the given channel in *hRmChannel.
NV_STATUS nvGpuOpsGetRmHandleForChannel(gpuChannelHandle hChannel, NvHandle *hRmChannel);
#endif // NV_WINDOWS
259 
// Interface used for SR-IOV heavy

// Allocates a paging channel on the device. Outputs the handle in
// *channelHandle and its properties in *channelinfo. Destroy with
// nvGpuOpsPagingChannelDestroy().
NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device,
                                        const gpuPagingChannelAllocParams *params,
                                        gpuPagingChannelHandle *channelHandle,
                                        gpuPagingChannelInfo *channelinfo);

// Destroys a paging channel allocated by nvGpuOpsPagingChannelAllocate().
void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel);

// Maps the allocation at srcAddress in srcVaSpace for use by the device's
// paging channels; *dstAddress receives the resulting address.
// Unmap with nvGpuOpsPagingChannelsUnmap().
NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace,
                                    NvU64 srcAddress,
                                    struct gpuDevice *device,
                                    NvU64 *dstAddress);

// Reverses a mapping established by nvGpuOpsPagingChannelsMap().
void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace,
                                 NvU64 srcAddress,
                                 struct gpuDevice *device);

// Submits a method stream of methodStreamSize bytes to the paging channel.
NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
                                          char *methodStream,
                                          NvU32 methodStreamSize);

// Flushes the device's replayable fault buffer.
NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(struct gpuDevice *device);

// Enables (bEnable true) or disables reporting of prefetch faults.
NV_STATUS nvGpuOpsTogglePrefetchFaults(gpuFaultInfo *pFaultInfo, NvBool bEnable);
285 
// Interface used for CCSL

// Creates a CCSL encryption/decryption context bound to the given channel;
// *ctx receives the new context. Destroy with nvGpuOpsCcslContextClear().
NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
                                  gpuChannelHandle channel);
// Destroys a context created by nvGpuOpsCcslContextInit().
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
// Refreshes the context's state (e.g. after a key change).
// NOTE(review): exact semantics not visible from this header -- confirm.
NV_STATUS nvGpuOpsCcslContextUpdate(struct ccslContext_t *ctx);
// Rotates the IV for the given direction.
// NOTE(review): direction encoding not visible from this header.
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
                               NvU8 direction);
// Encrypts bufferSize bytes of inputBuffer into outputBuffer and writes
// the authentication tag to authTagBuffer.
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
                              NvU32 bufferSize,
                              NvU8 const *inputBuffer,
                              NvU8 *outputBuffer,
                              NvU8 *authTagBuffer);
// Encrypts like nvGpuOpsCcslEncrypt(), but using the caller-supplied IV
// in encryptIv rather than the context's current IV.
NV_STATUS nvGpuOpsCcslEncryptWithIv(struct ccslContext_t *ctx,
                                    NvU32 bufferSize,
                                    NvU8 const *inputBuffer,
                                    NvU8 *encryptIv,
                                    NvU8 *outputBuffer,
                                    NvU8 *authTagBuffer);
// Decrypts bufferSize bytes of inputBuffer into outputBuffer using
// decryptIv, covering addAuthData (addAuthDataSize bytes) as additional
// authenticated data and verifying against authTagBuffer.
NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
                              NvU32 bufferSize,
                              NvU8 const *inputBuffer,
                              NvU8 const *decryptIv,
                              NvU8 *outputBuffer,
                              NvU8 const *addAuthData,
                              NvU32 addAuthDataSize,
                              NvU8 const *authTagBuffer);
// Computes an authentication tag over bufferSize bytes of inputBuffer
// into authTagBuffer (no encryption).
NV_STATUS nvGpuOpsCcslSign(struct ccslContext_t *ctx,
                           NvU32 bufferSize,
                           NvU8 const *inputBuffer,
                           NvU8 *authTagBuffer);
// Queries the number of messages available in the given direction;
// the result is returned in *messageNum.
NV_STATUS nvGpuOpsQueryMessagePool(struct ccslContext_t *ctx,
                                   NvU8 direction,
                                   NvU64 *messageNum);
// Advances the IV for the given direction by `increment`; iv presumably
// receives the resulting IV -- TODO confirm output contract.
NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx,
                              NvU8 direction,
                              NvU64 increment,
                              NvU8 *iv);
// Records a device-side encryption of bufferSize bytes against the
// context. NOTE(review): semantics inferred from name -- confirm.
NV_STATUS nvGpuOpsLogDeviceEncryption(struct ccslContext_t *ctx,
                                      NvU32 bufferSize);
326 
327 #endif /* _NV_GPU_OPS_H_*/
328