/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "core/core.h"
#include "core/locks.h"
#include "core/thread_state.h"
#include "gpu/subdevice/subdevice.h"
#include "gpu/device/device.h"
#include "kernel/mem_mgr/virtual_mem.h"
#include "class/cl0000.h" // NV01_NULL_OBJECT

#include "rmapi/rs_utils.h"

#include "entry_points.h"
#include "gpu/gpu.h"
#include "gpu/mem_mgr/mem_desc.h"
#include "gpu/mem_mgr/mem_mgr.h"

static NvU64
_getMappingPageSize
(
    RsResourceRef *pMappableRef
)
{
    Memory *pMemory = dynamicCast(pMappableRef->pResource, Memory);
    if (pMemory != NULL)
    {
        return memdescGetPageSize(pMemory->pMemDesc, AT_GPU);
    }
    return RM_PAGE_SIZE;
}
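
//
// Illustration (a hedged sketch; the values are hypothetical): for a video
// memory allocation backed by 64KB big pages, memdescGetPageSize() above
// returns 0x10000, letting the mempool reservation in
// serverInterMap_Prologue() be sized for 64KB PTEs rather than the 4KB
// RM_PAGE_SIZE fallback used for non-Memory mappables.
//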

NV_STATUS
serverInterMap_Prologue
(
    RsServer *pServer,
    RsResourceRef *pMapperRef,
    RsResourceRef *pMappableRef,
    RS_INTER_MAP_PARAMS *pParams,
    NvU32 *pReleaseFlags
)
{
    OBJGPU     *pGpu;
    Device     *pDevice;
    Subdevice  *pSubdevice;
    NV_STATUS   rmStatus = NV_OK;
    NvU64       offset = pParams->offset;
    NvU64       length = pParams->length;

    MEMORY_DESCRIPTOR *pSrcMemDesc = NULL;
    NvHandle    hBroadcastDevice;
    NvBool      bSubdeviceHandleProvided;

    CALL_CONTEXT  *pCallContext = resservGetTlsCallContext();
    RsResourceRef *pDeviceRef = pCallContext->pContextRef;
    RS_INTER_MAP_PRIVATE *pPrivate = pParams->pPrivate;

    NV_ASSERT_OR_RETURN(pPrivate != NULL, NV_ERR_INVALID_ARGUMENT);

    // Get pGpu, assuming user passed in either a device or subdevice handle.
    pDevice = dynamicCast(pDeviceRef->pResource, Device);
    if (pDevice == NULL)
    {
        pSubdevice = dynamicCast(pDeviceRef->pResource, Subdevice);
        if (pSubdevice == NULL)
            return NV_ERR_INVALID_OBJECT;

        pGpu = GPU_RES_GET_GPU(pSubdevice);
        pDevice = GPU_RES_GET_DEVICE(pSubdevice);
        GPU_RES_SET_THREAD_BC_STATE(pSubdevice);

        hBroadcastDevice = RES_GET_HANDLE(pSubdevice->pDevice);
        bSubdeviceHandleProvided = NV_TRUE;
        pPrivate->gpuMask = NVBIT(gpuGetInstance(pGpu));
    }
    else
    {
        pGpu = GPU_RES_GET_GPU(pDevice);
        GPU_RES_SET_THREAD_BC_STATE(pDevice);

        hBroadcastDevice = pParams->hDevice;
        bSubdeviceHandleProvided = NV_FALSE;
        pPrivate->gpuMask = gpumgrGetGpuMask(pGpu);
    }

    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
    // For non-VirtualMemory mapper objects, the mempool reservation below is skipped.
    if (memmgrIsPmaInitialized(pMemoryManager) &&
            memmgrAreClientPageTablesPmaManaged(pMemoryManager))
    {
        VirtualMemory *pVirtualMemory;

        pVirtualMemory = dynamicCast(pMapperRef->pResource, VirtualMemory);

        if (pVirtualMemory != NULL)
        {
            NvU64 pageSize = RM_PAGE_SIZE;

            if (pVirtualMemory->bOptimizePageTableMempoolUsage)
            {
                pageSize = _getMappingPageSize(pMappableRef);
            }

            NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
               virtmemReserveMempool(pVirtualMemory, pGpu, pDevice,
                                     pParams->length, pageSize));
        }
    }

    rmStatus = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, pReleaseFlags);
    if (rmStatus != NV_OK)
        return rmStatus;

    pPrivate->pGpu = pGpu;

    API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE);

    // Use virtual GetMemInterMapParams to get information needed for mapping from pMappableRef->pResource
    RMRES_MEM_INTER_MAP_PARAMS memInterMapParams;
    portMemSet(&memInterMapParams, 0, sizeof(memInterMapParams));

    memInterMapParams.pGpu = pGpu;
    memInterMapParams.pMemoryRef = pMappableRef;
    memInterMapParams.bSubdeviceHandleProvided = bSubdeviceHandleProvided;

    rmStatus = rmresGetMemInterMapParams(dynamicCast(pMappableRef->pResource, RmResource), &memInterMapParams);
    if (rmStatus != NV_OK)
        return rmStatus;

    pSrcMemDesc = memInterMapParams.pSrcMemDesc;
    NV_ASSERT_OR_RETURN(pSrcMemDesc != NULL, NV_ERR_INVALID_OBJECT_HANDLE);

    pPrivate->pSrcGpu = memInterMapParams.pSrcGpu;
    pPrivate->hMemoryDevice = memInterMapParams.hMemoryDevice;
    pPrivate->bDmaMapNeeded = memInterMapParams.bDmaMapNeeded;
    pPrivate->bFlaMapping   = memInterMapParams.bFlaMapping;

    // Check length for overflow and against the physical memory size.
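    //
    // Hypothetical example of the wraparound the first clause catches:
    // offset 0xFFFFFFFFFFFFF000 with length 0x2000 sums (mod 2^64) to
    // 0x1000, which is less than the offset, so the request is rejected
    // before it ever reaches the size comparison.
    //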
    if (((offset + length) < offset) ||
        ((offset + length) > pSrcMemDesc->Size))
    {
        NV_PRINTF(LEVEL_ERROR,
                  "Mapping offset 0x%llX or length 0x%llX out of bounds!\n",
                  offset, length);
        DBG_BREAKPOINT();
        return NV_ERR_INVALID_LIMIT;
    }

    if (memdescGetFlag(memdescGetMemDescFromGpu(pSrcMemDesc, pGpu), MEMDESC_FLAGS_DEVICE_READ_ONLY) &&
        !FLD_TEST_DRF(OS46, _FLAGS, _ACCESS, _READ_ONLY, pParams->flags))
    {
        NV_PRINTF(LEVEL_ERROR, "Attempting to map READ_ONLY surface as READ_WRITE / WRITE_ONLY!\n");
        return NV_ERR_INVALID_ARGUMENT;
    }

    pPrivate->hBroadcastDevice = hBroadcastDevice;
    pPrivate->pSrcMemDesc = pSrcMemDesc;
    pPrivate->bSubdeviceHandleProvided = bSubdeviceHandleProvided;

    return NV_OK;
}

void
serverInterMap_Epilogue
(
    RsServer *pServer,
    RS_INTER_MAP_PARAMS *pParams,
    NvU32 *pReleaseFlags
)
{
    serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, pReleaseFlags);
}
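
//
// A minimal sketch of how resserv is assumed to sequence these hooks (the
// exact call order is owned by serverInterMap(), not shown in this file):
//
//   NvU32 releaseFlags = 0;
//   status = serverInterMap_Prologue(pServer, pMapperRef, pMappableRef,
//                                    pParams, &releaseFlags);
//   if (status == NV_OK)
//   {
//       // ... perform the inter-mapping ...
//       serverInterMap_Epilogue(pServer, pParams, &releaseFlags);
//   }
//
// The prologue resolves the GPU context, reserves page-table mempool, and
// takes the resource locks; the epilogue releases those locks.
//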

NV_STATUS
serverInterUnmap_Prologue
(
    RsServer *pServer,
    RS_INTER_UNMAP_PARAMS *pParams
)
{
    OBJGPU       *pGpu        = NULL;
    Device       *pDevice     = NULL;
    Subdevice    *pSubdevice  = NULL;

    CALL_CONTEXT  *pCallContext = resservGetTlsCallContext();
    RsResourceRef *pDeviceRef = pCallContext->pContextRef;

    RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate;

    // Allocate pPrivate if not already set; unmap requires no caller input to the prologue.
    if (pPrivate == NULL)
    {
        pPrivate = portMemAllocNonPaged(sizeof(*pPrivate));
        if (pPrivate == NULL)
            return NV_ERR_NO_MEMORY;

        portMemSet(pPrivate, 0, sizeof(*pPrivate));
        pParams->pPrivate = pPrivate;
        pPrivate->bAllocated = NV_TRUE;
    }
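    //
    // When allocated here, bAllocated marks the struct as heap-owned so that
    // serverInterUnmap_Epilogue() knows to free it; callers that pass a
    // stack-allocated private (see _rmapiRmUnmapMemoryDma below) leave it
    // NV_FALSE and the epilogue leaves the memory alone.
    //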

    // Set subdevice or device context.
    pDevice = dynamicCast(pDeviceRef->pResource, Device);
    if (pDevice == NULL)
    {
        pSubdevice = dynamicCast(pDeviceRef->pResource, Subdevice);
        if (pSubdevice == NULL)
            return NV_ERR_INVALID_OBJECT;

        pGpu = GPU_RES_GET_GPU(pSubdevice);
        pPrivate->bcState = gpumgrGetBcEnabledStatus(pGpu);
        GPU_RES_SET_THREAD_BC_STATE(pSubdevice);
        pPrivate->hBroadcastDevice = RES_GET_HANDLE(pSubdevice->pDevice);
        pPrivate->bSubdeviceHandleProvided = NV_TRUE;
        pPrivate->gpuMask = NVBIT(gpuGetInstance(pGpu));
    }
    else
    {
        pGpu = GPU_RES_GET_GPU(pDevice);
        pPrivate->bcState = gpumgrGetBcEnabledStatus(pGpu);
        GPU_RES_SET_THREAD_BC_STATE(pDevice);
        pPrivate->hBroadcastDevice = RES_GET_HANDLE(pDevice);
        pPrivate->bSubdeviceHandleProvided = NV_FALSE;
        pPrivate->gpuMask = gpumgrGetGpuMask(pGpu);
    }

    pPrivate->pGpu = pGpu;

    API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_FALSE, NV_FALSE);

    return NV_OK;
}

void
serverInterUnmap_Epilogue
(
    RsServer *pServer,
    RS_INTER_UNMAP_PARAMS *pParams
)
{
    RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate;
    OBJGPU *pGpu;

    if (pPrivate == NULL)
        return;

    pGpu = pPrivate->pGpu;

    if (pGpu != NULL)
    {
        gpumgrSetBcEnabledStatus(pGpu, pPrivate->bcState);
    }

    if (pPrivate->bAllocated)
    {
        portMemFree(pPrivate);
        pParams->pPrivate = NULL;
    }
}

static NV_STATUS
_rmapiRmUnmapMemoryDma
(
    NvHandle            hClient,
    NvHandle            hDevice,
    NvHandle            hMemCtx,
    NvHandle            hMemory,
    NvU32               flags,
    NvU64               dmaOffset,
    RS_LOCK_INFO       *pLockInfo,
    API_SECURITY_INFO  *pSecInfo
)
{
    RsClient           *pRsClient   = NULL;
    MEMORY_DESCRIPTOR  *pMemDesc    = NULL;
    Memory             *pMemory     = NULL;

    RS_INTER_UNMAP_PARAMS params;
    RS_INTER_UNMAP_PRIVATE private;

    NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient));

    // Translate hMemory to pMemDesc
    if (memGetByHandle(pRsClient, hMemory, &pMemory) == NV_OK)
    {
        pMemDesc = pMemory->pMemDesc;
    }

    portMemSet(&params, 0, sizeof(params));
    params.hClient = hClient;
    params.hMapper = hMemCtx;
    params.hDevice = hDevice;
    params.hMappable = hMemory;
    params.flags = flags;
    params.dmaOffset = dmaOffset;
    params.pMemDesc = pMemDesc;
    params.pLockInfo = pLockInfo;
    params.pSecInfo = pSecInfo;

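    // The private struct lives on this function's stack; bAllocated stays
    // NV_FALSE after the memset, so serverInterUnmap_Epilogue() restores the
    // broadcast state but does not attempt to free it.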
    portMemSet(&private, 0, sizeof(private));
    params.pPrivate = &private;

    return serverInterUnmap(&g_resServ, &params);
}

NV_STATUS
rmapiMap
(
    RM_API   *pRmApi,
    NvHandle  hClient,
    NvHandle  hDevice,
    NvHandle  hMemCtx,
    NvHandle  hMemory,
    NvU64     offset,
    NvU64     length,
    NvU32     flags,
    NvU64    *pDmaOffset
)
{
    if (!pRmApi->bHasDefaultSecInfo)
        return NV_ERR_NOT_SUPPORTED;

    return pRmApi->MapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, offset,
                                  length, flags, pDmaOffset, &pRmApi->defaultSecInfo);
}
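
//
// Usage sketch (hedged; the handles and size are hypothetical, and the
// RM_API instance is assumed to come from rmapiGetInterface()):
//
//   RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
//   NvU64   dmaOffset = 0;
//
//   // Map the physical allocation hPhysMem into the virtual allocation
//   // hVirtMem under hDevice; on success dmaOffset holds the GPU VA offset.
//   status = rmapiMap(pRmApi, hClient, hDevice, hVirtMem, hPhysMem,
//                     0, size, DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_WRITE),
//                     &dmaOffset);
//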

NV_STATUS
rmapiMapWithSecInfo
(
    RM_API            *pRmApi,
    NvHandle           hClient,
    NvHandle           hDevice,
    NvHandle           hMemCtx,
    NvHandle           hMemory,
    NvU64              offset,
    NvU64              length,
    NvU32              flags,
    NvU64             *pDmaOffset,
    API_SECURITY_INFO *pSecInfo
)
{
    NV_STATUS status;
    RM_API_CONTEXT rmApiContext = {0};
    RS_INTER_MAP_PARAMS params;
    RS_INTER_MAP_PRIVATE private;
    RS_LOCK_INFO lockInfo;

    NV_PRINTF(LEVEL_INFO,
              "Nv04Map: client:0x%x device:0x%x context:0x%x memory:0x%x flags:0x%x\n",
              hClient, hDevice, hMemCtx, hMemory, flags);
    NV_PRINTF(LEVEL_INFO,
              "Nv04Map:  offset:0x%llx length:0x%llx dmaOffset:0x%08llx\n",
              offset, length, *pDmaOffset);

    NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Nv04Map 0x%x\n", flags);

    status = rmapiPrologue(pRmApi, &rmApiContext);
    if (status != NV_OK)
        return status;

    portMemSet(&lockInfo, 0, sizeof(lockInfo));
    status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo);
    if (status != NV_OK)
    {
        rmapiEpilogue(pRmApi, &rmApiContext);
        return status;
    }

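    // Scope locking to the target GPU's device group: skip the all-GPU lock
    // and take only the per-group GPU lock for this operation.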
    lockInfo.flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK |
                      RM_LOCK_FLAGS_NO_GPUS_LOCK;

    LOCK_METER_DATA(MAPMEM_DMA, flags, 0, 0);

    portMemSet(&params, 0, sizeof(params));
    params.hClient = hClient;
    params.hMapper = hMemCtx;
    params.hDevice = hDevice;
    params.hMappable = hMemory;
    params.offset = offset;
    params.length = length;
    params.flags = flags;
    params.dmaOffset = *pDmaOffset;
    params.pLockInfo = &lockInfo;
    params.pSecInfo = pSecInfo;

    portMemSet(&private, 0, sizeof(private));
    params.pPrivate = &private;

    // Map DMA memory
    status = serverInterMap(&g_resServ, &params);

    *pDmaOffset = params.dmaOffset;

    rmapiEpilogue(pRmApi, &rmApiContext);

    if (status == NV_OK)
    {
        NV_PRINTF(LEVEL_INFO, "Nv04Map: map complete\n");
        NV_PRINTF(LEVEL_INFO, "Nv04Map:  dmaOffset: 0x%08llx\n", *pDmaOffset);
    }
    else
    {
        NV_PRINTF(LEVEL_ERROR, "Nv04Map: map failed; status: %s (0x%08x)\n",
                  nvstatusToString(status), status);
    }

    return status;
}

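//
// The ...Tls variants below differ only in bracketing the call with a
// THREAD_STATE_NODE, for callers that enter the RM API without thread state
// already initialized on the current thread.
//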
NV_STATUS
rmapiMapWithSecInfoTls
(
    RM_API            *pRmApi,
    NvHandle           hClient,
    NvHandle           hDevice,
    NvHandle           hMemCtx,
    NvHandle           hMemory,
    NvU64              offset,
    NvU64              length,
    NvU32              flags,
    NvU64             *pDmaOffset,
    API_SECURITY_INFO *pSecInfo
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS         status;

    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    status = rmapiMapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, offset,
                                 length, flags, pDmaOffset, pSecInfo);

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);

    return status;
}

NV_STATUS
rmapiUnmap
(
    RM_API   *pRmApi,
    NvHandle  hClient,
    NvHandle  hDevice,
    NvHandle  hMemCtx,
    NvHandle  hMemory,
    NvU32     flags,
    NvU64     dmaOffset
)
{
    if (!pRmApi->bHasDefaultSecInfo)
        return NV_ERR_NOT_SUPPORTED;

    return pRmApi->UnmapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory,
                                    flags, dmaOffset, &pRmApi->defaultSecInfo);
}
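
//
// Usage sketch (hedged; continues the hypothetical rmapiMap() example above).
// Teardown takes the same handles plus the dmaOffset that rmapiMap() returned:
//
//   // flags = 0
//   status = rmapiUnmap(pRmApi, hClient, hDevice, hVirtMem, hPhysMem,
//                       0, dmaOffset);
//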

NV_STATUS
rmapiUnmapWithSecInfo
(
    RM_API            *pRmApi,
    NvHandle           hClient,
    NvHandle           hDevice,
    NvHandle           hMemCtx,
    NvHandle           hMemory,
    NvU32              flags,
    NvU64              dmaOffset,
    API_SECURITY_INFO *pSecInfo
)
{
    NV_STATUS                     status;
    RM_API_CONTEXT                rmApiContext   = {0};
    RS_LOCK_INFO                  lockInfo;

    NV_PRINTF(LEVEL_INFO,
              "Nv04Unmap: client:0x%x device:0x%x context:0x%x memory:0x%x\n",
              hClient, hDevice, hMemCtx, hMemory);
    NV_PRINTF(LEVEL_INFO, "Nv04Unmap:  flags:0x%x dmaOffset:0x%08llx\n",
              flags, dmaOffset);

    status = rmapiPrologue(pRmApi, &rmApiContext);
    if (status != NV_OK)
        return status;

    portMemSet(&lockInfo, 0, sizeof(lockInfo));
    status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo);
    if (status != NV_OK)
    {
        rmapiEpilogue(pRmApi, &rmApiContext);
        return status;
    }
    lockInfo.flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK |
                      RM_LOCK_FLAGS_NO_GPUS_LOCK;

    LOCK_METER_DATA(UNMAPMEM_DMA, flags, 0, 0);

    // Unmap DMA memory
    status = _rmapiRmUnmapMemoryDma(hClient, hDevice, hMemCtx, hMemory, flags,
                                    dmaOffset, &lockInfo, pSecInfo);

    rmapiEpilogue(pRmApi, &rmApiContext);

    if (status == NV_OK)
    {
        NV_PRINTF(LEVEL_INFO, "Nv04Unmap: Unmap complete\n");
    }
    else
    {
        NV_PRINTF(LEVEL_ERROR,
                  "Nv04Unmap: unmap failed; status: %s (0x%08x)\n",
                  nvstatusToString(status), status);
    }

    return status;
}

NV_STATUS
rmapiUnmapWithSecInfoTls
(
    RM_API            *pRmApi,
    NvHandle           hClient,
    NvHandle           hDevice,
    NvHandle           hMemCtx,
    NvHandle           hMemory,
    NvU32              flags,
    NvU64              dmaOffset,
    API_SECURITY_INFO *pSecInfo
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS         status;

    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    status = rmapiUnmapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, flags, dmaOffset, pSecInfo);

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);

    return status;
}

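//
// The two lookup helpers below let resserv decide whether inter-map and
// inter-unmap may run under a read-only lock: if the server advertises
// read-only lock support for the given API, downgrade to LOCK_ACCESS_READ;
// otherwise fall back to LOCK_ACCESS_WRITE.
//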
NV_STATUS
serverInterMapLookupLockFlags
(
    RsServer *pServer,
    RS_LOCK_ENUM lock,
    RS_INTER_MAP_PARAMS *pParams,
    LOCK_ACCESS_TYPE *pAccess
)
{
    NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT);

    *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_MAP))
        ? LOCK_ACCESS_READ
        : LOCK_ACCESS_WRITE;
    return NV_OK;
}

NV_STATUS
serverInterUnmapLookupLockFlags
(
    RsServer *pServer,
    RS_LOCK_ENUM lock,
    RS_INTER_UNMAP_PARAMS *pParams,
    LOCK_ACCESS_TYPE *pAccess
)
{
    NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT);

    *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_UNMAP))
        ? LOCK_ACCESS_READ
        : LOCK_ACCESS_WRITE;
    return NV_OK;
}

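//
// Called when resserv automatically unmaps outstanding inter-mappings (e.g.
// while a resource is being freed, going by the InterAutoUnmap naming):
// mirror the explicit unmap path by skipping the all-GPU lock in favor of
// the per-group GPU lock.
//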
NV_STATUS
serverUpdateLockFlagsForInterAutoUnmap
(
    RsServer *pServer,
    RS_INTER_UNMAP_PARAMS *pParams
)
{
    pParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK |
                                 RM_LOCK_FLAGS_GPU_GROUP_LOCK;

    return NV_OK;
}