/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nv.h>
#include <os/os.h>
#include <osapi.h>
#include <core/thread_state.h>
#include "rmapi/nv_gpu_ops.h"
#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h"

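//
// Each wrapper in this file follows the same pattern: bracket the call with
// NV_ENTER_RM_RUNTIME/NV_EXIT_RM_RUNTIME (which switch to and from the RM
// runtime environment on the given nvidia_stack_t), forward the call to the
// corresponding nvGpuOps* entry point, and return the resulting status.
//
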
NV_STATUS NV_API_CALL rm_gpu_ops_create_session(
    nvidia_stack_t *sp,
    struct gpuSession **session)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsCreateSession(session);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_destroy_session (
    nvidia_stack_t *sp, gpuSessionHandle session)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDestroySession(session);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_device_create (
    nvidia_stack_t *sp,
    nvgpuSessionHandle_t session,
    const gpuInfo *pGpuInfo,
    const NvProcessorUuid *gpuUuid,
    nvgpuDeviceHandle_t *device,
    NvBool bCreateSmcPartition)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDeviceCreate(session, pGpuInfo, gpuUuid, device, bCreateSmcPartition);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_device_destroy (
    nvidia_stack_t *sp,
    gpuDeviceHandle device)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDeviceDestroy(device);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_address_space_create (
    nvidia_stack_t *sp,
    gpuDeviceHandle device,
    NvU64 vaBase,
    NvU64 vaSize,
    gpuAddressSpaceHandle *vaSpace,
    gpuAddressSpaceInfo *vaSpaceInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsAddressSpaceCreate(device, vaBase, vaSize, vaSpace,
                                          vaSpaceInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_dup_address_space(
    nvidia_stack_t *sp,
    gpuDeviceHandle device,
    NvHandle hUserClient,
    NvHandle hUserVASpace,
    gpuAddressSpaceHandle *dupedVaspace,
    gpuAddressSpaceInfo *vaSpaceInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDupAddressSpace(device, hUserClient, hUserVASpace,
                                       dupedVaspace, vaSpaceInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *sp,
    gpuAddressSpaceHandle vaspace)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsAddressSpaceDestroy(vaspace);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_memory_alloc_fb(
    nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
    NvLength size, NvU64 *gpuOffset, gpuAllocInfo *allocInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsMemoryAllocFb(vaspace, size, gpuOffset, allocInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_get_p2p_caps(nvidia_stack_t *sp,
                                                gpuDeviceHandle device1,
                                                gpuDeviceHandle device2,
                                                getP2PCapsParams *pP2pCapsParams)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetP2PCaps(device1, device2, pP2pCapsParams);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_memory_alloc_sys(
    nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
    NvLength size, NvU64 *gpuOffset, gpuAllocInfo *allocInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsMemoryAllocSys(vaspace, size, gpuOffset, allocInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

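//
// Unlike most wrappers in this file, the PMA eviction callback registration
// paths below call into the PMA directly rather than through an nvGpuOps*
// entry point, and therefore set up their own THREAD_STATE_NODE around the
// call.
//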
NV_STATUS  NV_API_CALL  rm_gpu_ops_pma_register_callbacks(
    nvidia_stack_t *sp,
    void *pPma,
    pmaEvictPagesCb_t evictPages,
    pmaEvictRangeCb_t evictRange,
    void *callbackData)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus;
    void *fp;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    rmStatus = pmaRegisterEvictionCb(pPma, evictPages, evictRange, callbackData);

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

void  NV_API_CALL  rm_gpu_ops_pma_unregister_callbacks(
    nvidia_stack_t *sp,
    void *pPma)
{
    THREAD_STATE_NODE threadState;
    void *fp;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    pmaUnregisterEvictionCb(pPma);

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
    NV_EXIT_RM_RUNTIME(sp,fp);
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_get_pma_object(
    nvidia_stack_t *sp,
    gpuDeviceHandle device,
    void **pPma,
    const nvgpuPmaStatistics_t *pPmaPubStats)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetPmaObject(device, pPma,
                                    (const UvmPmaStatistics **)pPmaPubStats);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_pma_alloc_pages(
    nvidia_stack_t *sp, void *pPma,
    NvLength pageCount, NvU32 pageSize,
    nvgpuPmaAllocationOptions_t pPmaAllocOptions,
    NvU64 *pPages)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPmaAllocPages(pPma, pageCount, pageSize,
                                     pPmaAllocOptions, pPages);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_pma_pin_pages(
    nvidia_stack_t *sp, void *pPma,
    NvU64 *pPages, NvLength pageCount, NvU32 pageSize, NvU32 flags)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPmaPinPages(pPma, pPages, pageCount, pageSize, flags);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_pma_unpin_pages(
    nvidia_stack_t *sp, void *pPma,
    NvU64 *pPages, NvLength pageCount, NvU32 pageSize)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPmaUnpinPages(pPma, pPages, pageCount, pageSize);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_memory_cpu_map(
    nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace,
    NvU64 gpuOffset, NvLength length, void **cpuPtr, NvU32 pageSize)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsMemoryCpuMap(vaspace, gpuOffset, length, cpuPtr,
                                    pageSize);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_memory_cpu_ummap(
    nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, void* cpuPtr)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsMemoryCpuUnMap(vaspace, cpuPtr);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_channel_allocate(nvidia_stack_t *sp,
                                                    gpuAddressSpaceHandle vaspace,
                                                    const gpuChannelAllocParams *allocParams,
                                                    gpuChannelHandle *channel,
                                                    gpuChannelInfo *channelInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsChannelAllocate(vaspace, allocParams, channel,
                                       channelInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t * sp,
                                                 nvgpuChannelHandle_t channel)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsChannelDestroy(channel);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_pma_free_pages(nvidia_stack_t *sp,
    void *pPma, NvU64 *pPages, NvLength pageCount, NvU32 pageSize, NvU32 flags)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsPmaFreePages(pPma, pPages, pageCount, pageSize, flags);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS  NV_API_CALL rm_gpu_ops_memory_free(
    nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, NvU64 gpuOffset)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsMemoryFree(vaspace, gpuOffset);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return NV_OK;
}

NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *sp,
                                            gpuDeviceHandle device,
                                            gpuCaps * caps)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsQueryCaps(device, caps);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_query_ces_caps(nvidia_stack_t *sp,
                                                gpuDeviceHandle device,
                                                gpuCesCaps *caps)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsQueryCesCaps(device, caps);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL rm_gpu_ops_get_gpu_info(nvidia_stack_t *sp,
                                               const NvProcessorUuid *pUuid,
                                               const gpuClientInfo *pGpuClientInfo,
                                               gpuInfo *pGpuInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetGpuInfo(pUuid, pGpuClientInfo, pGpuInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL rm_gpu_ops_service_device_interrupts_rm(nvidia_stack_t *sp,
                                                              gpuDeviceHandle device)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsServiceDeviceInterruptsRM(device);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_set_page_directory (nvidia_stack_t *sp,
                                         gpuAddressSpaceHandle vaSpace,
                                         NvU64 physAddress, unsigned numEntries,
                                         NvBool bVidMemAperture, NvU32 pasid)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsSetPageDirectory(vaSpace, physAddress, numEntries,
                                        bVidMemAperture, pasid);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_unset_page_directory (nvidia_stack_t *sp,
                                                 gpuAddressSpaceHandle vaSpace)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsUnsetPageDirectory(vaSpace);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_dup_allocation(nvidia_stack_t *sp,
                                                  gpuAddressSpaceHandle srcVaSpace,
                                                  NvU64 srcAddress,
                                                  gpuAddressSpaceHandle dstVaSpace,
                                                  NvU64 dstVaAlignment,
                                                  NvU64 *dstAddress)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDupAllocation(srcVaSpace, srcAddress, dstVaSpace, dstVaAlignment, dstAddress);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_dup_memory (nvidia_stack_t *sp,
                                               gpuDeviceHandle device,
                                               NvHandle hClient,
                                               NvHandle hPhysMemory,
                                               NvHandle *hDupMemory,
                                               nvgpuMemoryInfo_t gpuMemoryInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDupMemory(device, hClient, hPhysMemory, hDupMemory, gpuMemoryInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_free_duped_handle (nvidia_stack_t *sp,
                                                gpuDeviceHandle device,
                                                NvHandle hPhysHandle)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsFreeDupedHandle(device, hPhysHandle);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_get_fb_info (nvidia_stack_t *sp,
                                                gpuDeviceHandle device,
                                                gpuFbInfo * fbInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetFbInfo(device, fbInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_get_ecc_info (nvidia_stack_t *sp,
                                                 gpuDeviceHandle device,
                                                 gpuEccInfo * eccInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetEccInfo(device, eccInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

//
// Please see the comments for nvUvmInterfaceOwnPageFaultIntr(), in
// nv_uvm_interface.h, for the recommended way to use this routine.
//
// How it works:
//
// The rmGpuLocksAcquire call generally saves the current GPU interrupt
// state, then disables interrupt generation for one (or all) GPUs.
// Likewise, the rmGpuLocksRelease call restores (re-enables) those
// interrupts to their previous state. However, the rmGpuLocksRelease
// call does NOT restore interrupts that RM does not own.
//
// This is rather hard to find in the code. Very approximately, the
// following sequence happens: rmGpuLocksRelease, osEnableInterrupts,
// intrRestoreNonStall_HAL, intrEncodeIntrEn_HAL, and the last of these
// skips over any interrupts that RM does not own.
//
// This makes things a bit asymmetric, because this routine changes that
// ownership in between the rmGpuLocksAcquire and rmGpuLocksRelease calls.
// So:
//
// -- If you call this routine with bOwnInterrupts == NV_TRUE (UVM is
//    taking ownership from the RM), then rmGpuLocksAcquire disables all
//    GPU interrupts. Ownership is then taken away from RM, so the
//    rmGpuLocksRelease call leaves the replayable page fault interrupts
//    disabled. It is then up to UVM (the caller) to enable replayable
//    page fault interrupts when it is ready.
//
// -- If you call this routine with bOwnInterrupts == NV_FALSE (UVM is
//    returning ownership to the RM), then rmGpuLocksAcquire disables
//    all GPU interrupts that RM owns. Ownership is then returned to RM,
//    so the rmGpuLocksRelease call re-enables replayable page fault
//    interrupts. This implies that you must disable replayable page
//    fault interrupts before calling this routine, so that the GPU
//    handed back to RM is not generating interrupts until RM is ready
//    to handle them.
//
NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *sp,
                                                     struct gpuDevice *device,
                                                     NvBool bOwnInterrupts)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsOwnPageFaultIntr(device, bOwnInterrupts);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}
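
//
// A minimal sketch of the call order implied above (illustrative only; the
// uvm_*_replayable_fault_intr helpers are hypothetical UVM-side functions,
// not part of this file):
//
//   // UVM takes ownership: RM leaves replayable fault interrupts disabled,
//   // so UVM must enable them itself once it is ready to service faults.
//   rm_gpu_ops_own_page_fault_intr(sp, device, NV_TRUE);
//   uvm_enable_replayable_fault_intr(gpu);           // hypothetical
//
//   // UVM returns ownership: disable the interrupts first so RM receives a
//   // quiesced GPU, then hand ownership back and let RM re-enable them.
//   uvm_disable_replayable_fault_intr(gpu);          // hypothetical
//   rm_gpu_ops_own_page_fault_intr(sp, device, NV_FALSE);
//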

NV_STATUS  NV_API_CALL  rm_gpu_ops_init_fault_info (nvidia_stack_t *sp,
                                                    gpuDeviceHandle device,
                                                    gpuFaultInfo *pFaultInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsInitFaultInfo(device, pFaultInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_destroy_fault_info (nvidia_stack_t *sp,
                                                       gpuDeviceHandle device,
                                                       gpuFaultInfo *pFaultInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDestroyFaultInfo(device, pFaultInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

// The following functions
//
// - rm_gpu_ops_has_pending_non_replayable_faults
// - rm_gpu_ops_get_non_replayable_faults
//
// cannot take the GPU/RM lock, because they are called during fault
// servicing. Doing so could deadlock if the UVM bottom half got stuck
// behind a stalling interrupt that cannot be serviced while UVM holds
// the lock.
//
// However, they can safely be called without locks, because they only
// access the given client shadow fault buffer, which is implemented as a
// lock-free queue. There is a separate client shadow fault buffer per GPU:
// the RM top half is the producer, and the UVM top/bottom half is the
// consumer.

NV_STATUS  NV_API_CALL  rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *sp,
                                                                     gpuFaultInfo *pFaultInfo,
                                                                     NvBool *hasPendingFaults)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsHasPendingNonReplayableFaults(pFaultInfo, hasPendingFaults);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *sp,
                                                             gpuFaultInfo *pFaultInfo,
                                                             void *faultBuffer,
                                                             NvU32 *numFaults)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsGetNonReplayableFaults(pFaultInfo, faultBuffer, numFaults);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}
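
//
// A minimal usage sketch of the lock-free consumer side described above
// (illustrative only; the faultBuffer sizing and the surrounding UVM
// bottom-half context are assumptions, not part of this file):
//
//   NvBool pending = NV_FALSE;
//   NvU32 numFaults = 0;
//
//   // No GPU/RM locks are held here; both calls only touch the per-GPU
//   // client shadow fault buffer.
//   if (rm_gpu_ops_has_pending_non_replayable_faults(sp, pFaultInfo,
//                                                    &pending) == NV_OK &&
//       pending)
//   {
//       // faultBuffer must be large enough for the packets being drained.
//       rm_gpu_ops_get_non_replayable_faults(sp, pFaultInfo, faultBuffer,
//                                            &numFaults);
//   }
//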

NV_STATUS  NV_API_CALL  rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *sp,
                                                                 gpuDeviceHandle device)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsFlushReplayableFaultBuffer(device);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *sp,
                                                         gpuDeviceHandle device,
                                                         gpuAccessCntrInfo *accessCntrInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsInitAccessCntrInfo(device, accessCntrInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *sp,
                                                            gpuDeviceHandle device,
                                                            gpuAccessCntrInfo *accessCntrInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDestroyAccessCntrInfo(device, accessCntrInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_enable_access_cntr(nvidia_stack_t *sp,
                                                      gpuDeviceHandle device,
                                                      gpuAccessCntrInfo *accessCntrInfo,
                                                      gpuAccessCntrConfig *accessCntrConfig)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsEnableAccessCntr(device, accessCntrInfo, accessCntrConfig);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL  rm_gpu_ops_disable_access_cntr(nvidia_stack_t *sp,
                                                       gpuDeviceHandle device,
                                                       gpuAccessCntrInfo *accessCntrInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsDisableAccessCntr(device, accessCntrInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL
rm_gpu_ops_p2p_object_create(nvidia_stack_t *sp,
                             gpuDeviceHandle device1,
                             gpuDeviceHandle device2,
                             NvHandle *hP2pObject)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    rmStatus = nvGpuOpsP2pObjectCreate(device1, device2, hP2pObject);
    NV_EXIT_RM_RUNTIME(sp, fp);
    return rmStatus;
}

void NV_API_CALL
rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *sp,
                              nvgpuSessionHandle_t session,
                              NvHandle hP2pObject)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    nvGpuOpsP2pObjectDestroy(session, hP2pObject);
    NV_EXIT_RM_RUNTIME(sp, fp);
}

NV_STATUS  NV_API_CALL
rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t* sp,
                                   nvgpuAddressSpaceHandle_t vaSpace,
                                   NvHandle hDupedMemory,
                                   NvU64 offset,
                                   NvU64 size,
                                   nvgpuExternalMappingInfo_t gpuExternalMappingInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    rmStatus = nvGpuOpsGetExternalAllocPtes(vaSpace, hDupedMemory, offset, size,
                                            gpuExternalMappingInfo);
    NV_EXIT_RM_RUNTIME(sp, fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL
rm_gpu_ops_retain_channel(nvidia_stack_t* sp,
                          nvgpuAddressSpaceHandle_t vaSpace,
                          NvHandle hClient,
                          NvHandle hChannel,
                          void **retainedChannel,
                          nvgpuChannelInstanceInfo_t channelInstanceInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    rmStatus = nvGpuOpsRetainChannel(vaSpace, hClient, hChannel,
                                     (gpuRetainedChannel **)retainedChannel,
                                     channelInstanceInfo);
    NV_EXIT_RM_RUNTIME(sp, fp);
    return rmStatus;
}

NV_STATUS  NV_API_CALL
rm_gpu_ops_bind_channel_resources(nvidia_stack_t* sp,
                                  void *retainedChannel,
                                  nvgpuChannelResourceBindParams_t channelResourceBindParams)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    rmStatus = nvGpuOpsBindChannelResources(retainedChannel,
                                            channelResourceBindParams);
    NV_EXIT_RM_RUNTIME(sp, fp);
    return rmStatus;
}

void NV_API_CALL
rm_gpu_ops_release_channel(nvidia_stack_t *sp, void *retainedChannel)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    nvGpuOpsReleaseChannel(retainedChannel);
    NV_EXIT_RM_RUNTIME(sp, fp);
}

void NV_API_CALL
rm_gpu_ops_stop_channel(nvidia_stack_t * sp,
                        void *retainedChannel,
                        NvBool bImmediate)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsStopChannel(retainedChannel, bImmediate);
    NV_EXIT_RM_RUNTIME(sp, fp);
}

NV_STATUS  NV_API_CALL
rm_gpu_ops_get_channel_resource_ptes(nvidia_stack_t* sp,
                                     nvgpuAddressSpaceHandle_t vaSpace,
                                     NvP64 resourceDescriptor,
                                     NvU64 offset,
                                     NvU64 size,
                                     nvgpuExternalMappingInfo_t gpuExternalMappingInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp, fp);
    rmStatus = nvGpuOpsGetChannelResourcePtes(vaSpace, resourceDescriptor,
                                              offset, size,
                                              gpuExternalMappingInfo);
    NV_EXIT_RM_RUNTIME(sp, fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL
rm_gpu_ops_report_non_replayable_fault(nvidia_stack_t *sp,
                                       nvgpuDeviceHandle_t device,
                                       const void *pFaultPacket)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsReportNonReplayableFault(device, pFaultPacket);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channel_allocate(nvidia_stack_t *sp,
                                   gpuDeviceHandle device,
                                   const gpuPagingChannelAllocParams *allocParams,
                                   gpuPagingChannelHandle *channel,
                                   gpuPagingChannelInfo *channelInfo)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPagingChannelAllocate(device, allocParams, channel,
                                             channelInfo);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

void NV_API_CALL
rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *sp,
                                  gpuPagingChannelHandle channel)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsPagingChannelDestroy(channel);
    NV_EXIT_RM_RUNTIME(sp,fp);
}

NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channels_map(nvidia_stack_t *sp,
                               gpuAddressSpaceHandle srcVaSpace,
                               NvU64 srcAddress,
                               gpuDeviceHandle device,
                               NvU64 *dstAddress)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPagingChannelsMap(srcVaSpace, srcAddress, device, dstAddress);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}

void NV_API_CALL
rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *sp,
                                 gpuAddressSpaceHandle srcVaSpace,
                                 NvU64 srcAddress,
                                 gpuDeviceHandle device)
{
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    nvGpuOpsPagingChannelsUnmap(srcVaSpace, srcAddress, device);
    NV_EXIT_RM_RUNTIME(sp,fp);
}

NV_STATUS NV_API_CALL
rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *sp,
                                      gpuPagingChannelHandle channel,
                                      char *methodStream,
                                      NvU32 methodStreamSize)
{
    NV_STATUS rmStatus;
    void *fp;
    NV_ENTER_RM_RUNTIME(sp,fp);
    rmStatus = nvGpuOpsPagingChannelPushStream(channel, methodStream, methodStreamSize);
    NV_EXIT_RM_RUNTIME(sp,fp);
    return rmStatus;
}