/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nv.h>
#include <os/os.h>
#include <osapi.h>
#include <core/thread_state.h>
#include "rmapi/nv_gpu_ops.h"
#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h"

NV_STATUS NV_API_CALL rm_gpu_ops_create_session(
    nvidia_stack_t *sp,
    struct gpuSession **session)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCreateSession(session);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_destroy_session (
    nvidia_stack_t *sp, gpuSessionHandle session)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDestroySession(session);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_device_create (
    nvidia_stack_t *sp,
    nvgpuSessionHandle_t session,
    const gpuInfo *pGpuInfo,
    const NvProcessorUuid *gpuUuid,
    nvgpuDeviceHandle_t *device,
    NvBool bCreateSmcPartition)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDeviceCreate(session, pGpuInfo, gpuUuid, device, bCreateSmcPartition);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_device_destroy (
    nvidia_stack_t *sp,
    gpuDeviceHandle device)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDeviceDestroy(device);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_address_space_create (
    nvidia_stack_t *sp,
    gpuDeviceHandle device,
    NvU64 vaBase,
    NvU64 vaSize,
    NvU32 enableAts,
    gpuAddressSpaceHandle *vaSpace,
    gpuAddressSpaceInfo *vaSpaceInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsAddressSpaceCreate(device, vaBase, vaSize, enableAts,
                                          vaSpace, vaSpaceInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_dup_address_space(
    nvidia_stack_t *sp,
    gpuDeviceHandle device,
    NvHandle hUserClient,
    NvHandle hUserVASpace,
    gpuAddressSpaceHandle *dupedVaspace,
    gpuAddressSpaceInfo *vaSpaceInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDupAddressSpace(device, hUserClient, hUserVASpace,
                                       dupedVaspace, vaSpaceInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *sp,
    gpuAddressSpaceHandle vaspace)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsAddressSpaceDestroy(vaspace);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_memory_alloc_fb(
    nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
    NvLength size, NvU64 *gpuOffset, gpuAllocInfo *allocInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsMemoryAllocFb(vaspace, size, gpuOffset, allocInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_get_p2p_caps(nvidia_stack_t *sp,
                                                gpuDeviceHandle device1,
                                                gpuDeviceHandle device2,
                                                getP2PCapsParams *pP2pCapsParams)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetP2PCaps(device1, device2, pP2pCapsParams);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_memory_alloc_sys(
    nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
    NvLength size, NvU64 *gpuOffset, gpuAllocInfo *allocInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsMemoryAllocSys(vaspace, size, gpuOffset, allocInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_pma_register_callbacks(
    nvidia_stack_t *sp,
    void *pPma,
    pmaEvictPagesCb_t evictPages,
    pmaEvictRangeCb_t evictRange,
    void *callbackData)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus;
    void *fp;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    rmStatus = pmaRegisterEvictionCb(pPma, evictPages, evictRange, callbackData);

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

void  NV_API_CALL  rm_gpu_ops_pma_unregister_callbacks(
    nvidia_stack_t *sp,
    void *pPma)
{
    THREAD_STATE_NODE threadState;
    void *fp;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    pmaUnregisterEvictionCb(pPma);

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_get_pma_object(
    nvidia_stack_t *sp,
    gpuDeviceHandle device,
    void **pPma,
    const nvgpuPmaStatistics_t *pPmaPubStats)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetPmaObject(device, pPma,
                                    (const UvmPmaStatistics **)pPmaPubStats);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_pma_alloc_pages(
    nvidia_stack_t *sp, void *pPma,
    NvLength pageCount, NvU64 pageSize,
    nvgpuPmaAllocationOptions_t pPmaAllocOptions,
    NvU64 *pPages)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPmaAllocPages(pPma, pageCount, pageSize,
                                     pPmaAllocOptions, pPages);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_pma_pin_pages(
    nvidia_stack_t *sp, void *pPma,
    NvU64 *pPages, NvLength pageCount, NvU64 pageSize, NvU32 flags)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPmaPinPages(pPma, pPages, pageCount, pageSize, flags);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_memory_cpu_map(
    nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
    NvU64 gpuOffset, NvLength length, void **cpuPtr, NvU64 pageSize)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsMemoryCpuMap(vaspace, gpuOffset, length, cpuPtr,
                                    pageSize);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_memory_cpu_ummap(
    nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, void* cpuPtr)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsMemoryCpuUnMap(vaspace, cpuPtr);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_tsg_allocate(nvidia_stack_t *sp,
                                                gpuAddressSpaceHandle vaspace,
                                                const gpuTsgAllocParams *allocParams,
                                                gpuTsgHandle *tsg)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsTsgAllocate(vaspace, allocParams, tsg);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_tsg_destroy(nvidia_stack_t * sp,
                                             nvgpuTsgHandle_t tsg)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsTsgDestroy(tsg);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_channel_allocate(nvidia_stack_t *sp,
                                                    const gpuTsgHandle tsg,
                                                    const gpuChannelAllocParams *allocParams,
                                                    gpuChannelHandle *channel,
                                                    gpuChannelInfo *channelInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsChannelAllocate(tsg, allocParams, channel,
                                       channelInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t * sp,
                                                 nvgpuChannelHandle_t channel)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsChannelDestroy(channel);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_pma_free_pages(nvidia_stack_t *sp,
    void *pPma, NvU64 *pPages, NvLength pageCount, NvU64 pageSize, NvU32 flags)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsPmaFreePages(pPma, pPages, pageCount, pageSize, flags);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS  NV_API_CALL rm_gpu_ops_memory_free(
    nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, NvU64 gpuOffset)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsMemoryFree(vaspace, gpuOffset);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *sp,
                                            gpuDeviceHandle device,
                                            gpuCaps * caps)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsQueryCaps(device, caps);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_query_ces_caps(nvidia_stack_t *sp,
                                                gpuDeviceHandle device,
                                                gpuCesCaps *caps)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsQueryCesCaps(device, caps);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL rm_gpu_ops_get_gpu_info(nvidia_stack_t *sp,
                                               const NvProcessorUuid *pUuid,
                                               const gpuClientInfo *pGpuClientInfo,
                                               gpuInfo *pGpuInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetGpuInfo(pUuid, pGpuClientInfo, pGpuInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_service_device_interrupts_rm(nvidia_stack_t *sp,
                                                              gpuDeviceHandle device)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsServiceDeviceInterruptsRM(device);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_set_page_directory (nvidia_stack_t *sp,
                                         gpuAddressSpaceHandle vaSpace,
                                         NvU64 physAddress, unsigned numEntries,
                                         NvBool bVidMemAperture, NvU32 pasid)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsSetPageDirectory(vaSpace, physAddress, numEntries,
                                        bVidMemAperture, pasid);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_unset_page_directory (nvidia_stack_t *sp,
                                                 gpuAddressSpaceHandle vaSpace)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsUnsetPageDirectory(vaSpace);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_dup_allocation(nvidia_stack_t *sp,
                                                  gpuAddressSpaceHandle srcVaSpace,
                                                  NvU64 srcAddress,
                                                  gpuAddressSpaceHandle dstVaSpace,
                                                  NvU64 dstVaAlignment,
                                                  NvU64 *dstAddress)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDupAllocation(srcVaSpace, srcAddress, dstVaSpace, dstVaAlignment, dstAddress);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_dup_memory (nvidia_stack_t *sp,
                                               gpuDeviceHandle device,
                                               NvHandle hClient,
                                               NvHandle hPhysMemory,
                                               NvHandle *hDupMemory,
                                               nvgpuMemoryInfo_t gpuMemoryInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDupMemory(device, hClient, hPhysMemory, hDupMemory, gpuMemoryInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_free_duped_handle (nvidia_stack_t *sp,
                                                gpuDeviceHandle device,
                                                NvHandle hPhysHandle)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsFreeDupedHandle(device, hPhysHandle);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_get_fb_info (nvidia_stack_t *sp,
                                                gpuDeviceHandle device,
                                                gpuFbInfo * fbInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetFbInfo(device, fbInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_get_ecc_info (nvidia_stack_t *sp,
                                                 gpuDeviceHandle device,
                                                 gpuEccInfo * eccInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetEccInfo(device, eccInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

//
// Please see the comments for nvUvmInterfaceOwnPageFaultIntr(), in
// nv_uvm_interface.h, for the recommended way to use this routine.
//
// How it works:
//
// The rmGpuLocksAcquire call generally saves the current GPU interrupt
// state, then disables interrupt generation for one (or all) GPUs.
// Likewise, the rmGpuLocksRelease call restores (re-enables) those
// interrupts to their previous state. However, the rmGpuLocksRelease
// call does NOT restore interrupts that RM does not own.
//
// This is rather hard to find in the code, so: very approximately, the
// following sequence happens: rmGpuLocksRelease, osEnableInterrupts,
// intrRestoreNonStall_HAL, intrEncodeIntrEn_HAL, and that last one skips
// over any interrupts that RM does not own.
//
// This means that things are a bit asymmetric, because this routine
// actually changes that ownership in between the rmGpuLocksAcquire and
// rmGpuLocksRelease calls. So:
//
// -- If you call this routine with bOwnInterrupts == NV_TRUE (UVM is
//    taking ownership from the RM), then rmGpuLocksAcquire disables all
//    GPU interrupts. Then the ownership is taken away from RM, so the
//    rmGpuLocksRelease call leaves the replayable page fault interrupts
//    disabled. It is then up to UVM (the caller) to enable replayable
//    page fault interrupts when it is ready.
//
// -- If you call this routine with bOwnInterrupts == NV_FALSE (UVM is
//    returning ownership to the RM), then rmGpuLocksAcquire disables
//    all GPU interrupts that RM owns. Then the ownership is returned to
//    RM, so the rmGpuLocksRelease call re-enables replayable page fault
//    interrupts. So, that implies that you need to disable replayable page
//    fault interrupts before calling this routine, in order to hand
//    over a GPU to RM that is not generating interrupts, until RM is
//    ready to handle the interrupts.
//
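// Illustrative caller-side sketch of the ownership handoff described above.
// This is hypothetical pseudocode, not actual UVM source; the call sites and
// the "enable/disable replayable fault interrupts" steps are assumptions made
// only to illustrate the ordering:
//
//     // Take ownership: after this call, RM leaves replayable page fault
//     // interrupts disabled.
//     rm_gpu_ops_own_page_fault_intr(sp, device, NV_TRUE);
//     // ... caller enables replayable page fault interrupts when ready ...
//
//     // Return ownership: disable replayable page fault interrupts first so
//     // the GPU stays quiet until RM re-enables them in rmGpuLocksRelease.
//     // ... caller disables replayable page fault interrupts ...
//     rm_gpu_ops_own_page_fault_intr(sp, device, NV_FALSE);
//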
NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *sp,
                                                     struct gpuDevice *device,
                                                     NvBool bOwnInterrupts)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsOwnPageFaultIntr(device, bOwnInterrupts);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_init_fault_info (nvidia_stack_t *sp,
                                                    gpuDeviceHandle device,
                                                    gpuFaultInfo *pFaultInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsInitFaultInfo(device, pFaultInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_destroy_fault_info (nvidia_stack_t *sp,
                                                       gpuDeviceHandle device,
                                                       gpuFaultInfo *pFaultInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDestroyFaultInfo(device, pFaultInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

// The functions
//
// - rm_gpu_ops_has_pending_non_replayable_faults
// - rm_gpu_ops_get_non_replayable_faults
//
// cannot take the GPU/RM lock because they are called during fault servicing.
// That could produce deadlocks if the UVM bottom half gets stuck behind a
// stalling interrupt that cannot be serviced while UVM is holding the lock.
//
// However, these functions can be safely called with no locks held because
// they only access the given client shadow fault buffer, which is implemented
// as a lock-free queue. There is a separate client shadow fault buffer per
// GPU: the RM top half is the producer, the UVM top/bottom half the consumer.
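//
// Illustrative consumer-side sketch of how the two calls compose. This is
// hypothetical pseudocode, not actual UVM code; the faultBuffer storage and
// its sizing are assumptions made only for illustration:
//
//     NvBool pending = NV_FALSE;
//     rm_gpu_ops_has_pending_non_replayable_faults(sp, pFaultInfo, &pending);
//     while (pending)
//     {
//         NvU32 numFaults = 0;
//         rm_gpu_ops_get_non_replayable_faults(sp, pFaultInfo, faultBuffer,
//                                              &numFaults);
//         // ... service numFaults packets from faultBuffer ...
//         rm_gpu_ops_has_pending_non_replayable_faults(sp, pFaultInfo,
//                                                      &pending);
//     }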

NV_STATUS  NV_API_CALL  rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *sp,
                                                                     gpuFaultInfo *pFaultInfo,
                                                                     NvBool *hasPendingFaults)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsHasPendingNonReplayableFaults(pFaultInfo, hasPendingFaults);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *sp,
                                                             gpuFaultInfo *pFaultInfo,
                                                             void *faultBuffer,
                                                             NvU32 *numFaults)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetNonReplayableFaults(pFaultInfo, faultBuffer, numFaults);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *sp,
                                                                gpuFaultInfo *pFaultInfo,
                                                                NvBool bCopyAndFlush)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsFlushReplayableFaultBuffer(pFaultInfo, bCopyAndFlush);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL rm_gpu_ops_toggle_prefetch_faults(nvidia_stack_t *sp,
                                                         gpuFaultInfo *pFaultInfo,
                                                         NvBool bEnable)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsTogglePrefetchFaults(pFaultInfo, bEnable);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *sp,
                                                         gpuDeviceHandle device,
                                                         gpuAccessCntrInfo *accessCntrInfo,
                                                         NvU32 accessCntrIndex)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsInitAccessCntrInfo(device, accessCntrInfo, accessCntrIndex);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *sp,
                                                            gpuDeviceHandle device,
                                                            gpuAccessCntrInfo *accessCntrInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDestroyAccessCntrInfo(device, accessCntrInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_enable_access_cntr(nvidia_stack_t *sp,
                                                      gpuDeviceHandle device,
                                                      gpuAccessCntrInfo *accessCntrInfo,
                                                      gpuAccessCntrConfig *accessCntrConfig)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsEnableAccessCntr(device, accessCntrInfo, accessCntrConfig);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_disable_access_cntr(nvidia_stack_t *sp,
                                                       gpuDeviceHandle device,
                                                       gpuAccessCntrInfo *accessCntrInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDisableAccessCntr(device, accessCntrInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL
rm_gpu_ops_p2p_object_create(nvidia_stack_t *sp,
                             gpuDeviceHandle device1,
                             gpuDeviceHandle device2,
                             NvHandle *hP2pObject)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    rmStatus = nvGpuOpsP2pObjectCreate(device1, device2, hP2pObject);
    NV_EXIT_RM_RUNTIME(sp, fp);
    return rmStatus;
}

void NV_API_CALL
rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *sp,
                              nvgpuSessionHandle_t session,
                              NvHandle hP2pObject)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    nvGpuOpsP2pObjectDestroy(session, hP2pObject);
    NV_EXIT_RM_RUNTIME(sp, fp);
}

NV_STATUS  NV_API_CALL
rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t* sp,
                                   nvgpuAddressSpaceHandle_t vaSpace,
                                   NvHandle hDupedMemory,
                                   NvU64 offset,
                                   NvU64 size,
                                   nvgpuExternalMappingInfo_t gpuExternalMappingInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    rmStatus = nvGpuOpsGetExternalAllocPtes(vaSpace, hDupedMemory, offset, size,
                                            gpuExternalMappingInfo);
    NV_EXIT_RM_RUNTIME(sp, fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL
rm_gpu_ops_retain_channel(nvidia_stack_t* sp,
                          nvgpuAddressSpaceHandle_t vaSpace,
                          NvHandle hClient,
                          NvHandle hChannel,
                          void **retainedChannel,
                          nvgpuChannelInstanceInfo_t channelInstanceInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    rmStatus = nvGpuOpsRetainChannel(vaSpace, hClient, hChannel,
                                     (gpuRetainedChannel **)retainedChannel,
                                     channelInstanceInfo);
    NV_EXIT_RM_RUNTIME(sp, fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL
rm_gpu_ops_bind_channel_resources(nvidia_stack_t* sp,
                                  void *retainedChannel,
                                  nvgpuChannelResourceBindParams_t channelResourceBindParams)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    rmStatus = nvGpuOpsBindChannelResources(retainedChannel,
                                            channelResourceBindParams);
    NV_EXIT_RM_RUNTIME(sp, fp);
    return rmStatus;
}

void NV_API_CALL
rm_gpu_ops_release_channel(nvidia_stack_t *sp, void *retainedChannel)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    nvGpuOpsReleaseChannel(retainedChannel);
    NV_EXIT_RM_RUNTIME(sp, fp);
}

void NV_API_CALL
rm_gpu_ops_stop_channel(nvidia_stack_t * sp,
                        void *retainedChannel,
                        NvBool bImmediate)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsStopChannel(retainedChannel, bImmediate);
    NV_EXIT_RM_RUNTIME(sp, fp);
}

NV_STATUS  NV_API_CALL
rm_gpu_ops_get_channel_resource_ptes(nvidia_stack_t* sp,
                                     nvgpuAddressSpaceHandle_t vaSpace,
                                     NvP64 resourceDescriptor,
                                     NvU64 offset,
                                     NvU64 size,
                                     nvgpuExternalMappingInfo_t gpuExternalMappingInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    rmStatus = nvGpuOpsGetChannelResourcePtes(vaSpace, resourceDescriptor,
                                              offset, size,
                                              gpuExternalMappingInfo);
    NV_EXIT_RM_RUNTIME(sp, fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL
rm_gpu_ops_report_non_replayable_fault(nvidia_stack_t *sp,
                                       nvgpuDeviceHandle_t device,
                                       const void *pFaultPacket)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsReportNonReplayableFault(device, pFaultPacket);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channel_allocate(nvidia_stack_t *sp,
                                   gpuDeviceHandle device,
                                   const gpuPagingChannelAllocParams *allocParams,
                                   gpuPagingChannelHandle *channel,
                                   gpuPagingChannelInfo *channelInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPagingChannelAllocate(device, allocParams, channel,
                                             channelInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

void NV_API_CALL
rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *sp,
                                  gpuPagingChannelHandle channel)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsPagingChannelDestroy(channel);
    NV_EXIT_RM_RUNTIME(sp,fp);
}

NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channels_map(nvidia_stack_t *sp,
                               gpuAddressSpaceHandle srcVaSpace,
                               NvU64 srcAddress,
                               gpuDeviceHandle device,
                               NvU64 *dstAddress)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPagingChannelsMap(srcVaSpace, srcAddress, device, dstAddress);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

void NV_API_CALL
rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *sp,
                                 gpuAddressSpaceHandle srcVaSpace,
                                 NvU64 srcAddress,
                                 gpuDeviceHandle device)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsPagingChannelsUnmap(srcVaSpace, srcAddress, device);
    NV_EXIT_RM_RUNTIME(sp,fp);
}

NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *sp,
                                      gpuPagingChannelHandle channel,
                                      char *methodStream,
                                      NvU32 methodStreamSize)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPagingChannelPushStream(channel, methodStream, methodStreamSize);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *sp,
                                                   struct ccslContext_t **ctx,
                                                   gpuChannelHandle channel)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCcslContextInit(ctx, channel);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *sp,
                                                    struct ccslContext_t *ctx)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCcslContextClear(ctx);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_key(nvidia_stack_t *sp,
                                                 UvmCslContext *contextList[],
                                                 NvU32 contextListCount)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCcslRotateKey(contextList, contextListCount);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *sp,
                                                struct ccslContext_t *ctx,
                                                NvU8 direction)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCcslRotateIv(ctx, direction);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *sp,
                                                      struct ccslContext_t *ctx,
                                                      NvU32 bufferSize,
                                                      NvU8 const *inputBuffer,
                                                      NvU8 *encryptIv,
                                                      NvU8 *outputBuffer,
                                                      NvU8 *authTagData)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCcslEncryptWithIv(ctx, bufferSize, inputBuffer, encryptIv, outputBuffer, authTagData);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *sp,
                                              struct ccslContext_t *ctx,
                                              NvU32 bufferSize,
                                              NvU8 const *inputBuffer,
                                              NvU8 *outputBuffer,
                                              NvU8 *authTagData)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCcslEncrypt(ctx, bufferSize, inputBuffer, outputBuffer, authTagData);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *sp,
                                              struct ccslContext_t *ctx,
                                              NvU32 bufferSize,
                                              NvU8 const *inputBuffer,
                                              NvU8 const *decryptIv,
                                              NvU32 keyRotationId,
                                              NvU8 *outputBuffer,
                                              NvU8 const *addAuthData,
                                              NvU32 addAuthDataSize,
                                              NvU8 const *authTagData)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCcslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, keyRotationId, outputBuffer,
                                   addAuthData, addAuthDataSize, authTagData);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *sp,
                                           struct ccslContext_t *ctx,
                                           NvU32 bufferSize,
                                           NvU8 const *inputBuffer,
                                           NvU8 *authTagData)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCcslSign(ctx, bufferSize, inputBuffer, authTagData);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *sp,
                                                          struct ccslContext_t *ctx,
                                                          NvU8 direction,
                                                          NvU64 *messageNum)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsQueryMessagePool(ctx, direction, messageNum);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *sp,
                                                    struct ccslContext_t *ctx,
                                                    NvU8 direction,
                                                    NvU64 increment,
                                                    NvU8 *iv)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsIncrementIv(ctx, direction, increment, iv);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL rm_gpu_ops_ccsl_log_encryption(nvidia_stack_t *sp,
                                                      struct ccslContext_t *ctx,
                                                      NvU8 direction,
                                                      NvU32 bufferSize)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsLogEncryption(ctx, direction, bufferSize);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}