/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "core/core.h"
#include "core/locks.h"
#include "core/thread_state.h"
#include "os/os.h"
#include "gpu/mem_mgr/mem_desc.h"
#include "gpu/device/device.h"
#include "gpu/subdevice/generic_engine.h"
#include "gpu/subdevice/subdevice.h"
#include "gpu/mem_mgr/mem_mgr.h"
#include "mem_mgr/fla_mem.h"

#include "class/cl0000.h" // NV01_NULL_OBJECT

#include "resserv/rs_server.h"
#include "resserv/rs_client.h"
#include "resserv/rs_resource.h"

#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
#include "gpu/mem_sys/kern_mem_sys.h"
#include "gpu/bus/kern_bus.h"
#include "gpu/conf_compute/conf_compute.h"

#include "rmapi/rs_utils.h"
#include "rmapi/mapping_list.h"
#include "entry_points.h"

static void RmUnmapBusAperture(OBJGPU *, NvP64, NvU64, NvBool, NvP64);

typedef struct RS_CPU_MAP_PARAMS RmMapParams;
typedef struct RS_CPU_UNMAP_PARAMS RmUnmapParams;
NV_STATUS
rmapiMapGpuCommon
(
    RsResource *pResource,
    CALL_CONTEXT *pCallContext,
    RsCpuMapping *pCpuMapping,
    OBJGPU *pGpu,
    NvU32 regionOffset,
    NvU32 regionSize
)
{
    NV_STATUS rmStatus;
    RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient);
    NvU64 offset;

    // Validate the offset and limit passed in.
    if (pCpuMapping->offset >= regionSize)
        return NV_ERR_INVALID_BASE;
    if (pCpuMapping->length == 0)
        return NV_ERR_INVALID_LIMIT;
    if ((pCpuMapping->offset + pCpuMapping->length > regionSize) ||
        !portSafeAddU64(pCpuMapping->offset, pCpuMapping->length, &offset))
        return NV_ERR_INVALID_LIMIT;

    if (!portSafeAddU64((NvU64)regionOffset, pCpuMapping->offset, &offset))
        return NV_ERR_INVALID_OFFSET;

    // Create a mapping of BAR0
    rmStatus = osMapGPU(pGpu,
                        rmclientGetCachedPrivilege(pClient),
                        offset,
                        pCpuMapping->length,
                        pCpuMapping->pPrivate->protect,
                        &pCpuMapping->pLinearAddress,
                        &pCpuMapping->pPrivate->pPriv);
    return rmStatus;
}
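
//
// Usage sketch (illustrative, not part of this build): a register-backed
// resource's map entry point might forward to rmapiMapGpuCommon roughly as
// below. ExampleRes and the EXAMPLE_REGION_* constants are hypothetical.
//
//     NV_STATUS
//     exampleresMap_IMPL(ExampleRes *pRes, CALL_CONTEXT *pCallContext,
//                        RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping)
//     {
//         // Map a sub-range of BAR0 registers owned by this object.
//         return rmapiMapGpuCommon(staticCast(pRes, RsResource), pCallContext,
//                                  pCpuMapping, GPU_RES_GET_GPU(pRes),
//                                  EXAMPLE_REGION_OFFSET, EXAMPLE_REGION_SIZE);
//     }
//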

NV_STATUS
rmapiGetEffectiveAddrSpace
(
    OBJGPU *pGpu,
    MEMORY_DESCRIPTOR *pMemDesc,
    NvU32 mapFlags,
    NV_ADDRESS_SPACE *pAddrSpace
)
{
    NV_ADDRESS_SPACE addrSpace;
    NvBool bDirectSysMappingAllowed = NV_TRUE;

    KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);

    NV_ASSERT_OK_OR_RETURN(
        kbusIsDirectMappingAllowed_HAL(pGpu, pKernelBus, pMemDesc, mapFlags,
                                      &bDirectSysMappingAllowed));

    //
    // Bug 1482818: Deprecate reflected mappings in production code.
    //  The use of reflected writes, in addition to causing several deadlock
    //  scenarios involving P2P transfers, is disallowed on NVLINK (along with
    //  reflected reads), and should no longer occur.
    //  The PDB property below should be unset once the remaining usages in
    //  MODS have been culled. (Bug 1780557)
    //
    if ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
        !bDirectSysMappingAllowed &&
        (DRF_VAL(OS33, _FLAGS, _MAPPING, mapFlags) != NVOS33_FLAGS_MAPPING_DIRECT) &&
        !kbusIsReflectedMappingAccessAllowed(pKernelBus))
    {
        NV_ASSERT(0);
        return NV_ERR_NOT_SUPPORTED;
    }

    if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1))
    {
        addrSpace = ADDR_FBMEM;
    }
    else if ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
        (bDirectSysMappingAllowed || FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, mapFlags) ||
        (IS_VIRTUAL_WITH_SRIOV(pGpu) && !IS_FMODEL(pGpu) && !IS_RTLSIM(pGpu))))
    {
        addrSpace = ADDR_SYSMEM;
    }
    else if ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ||
             ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && !bDirectSysMappingAllowed))
    {
        addrSpace = ADDR_FBMEM;
    }
    else
    {
        addrSpace = memdescGetAddressSpace(pMemDesc);
    }

    if (pAddrSpace)
        *pAddrSpace = addrSpace;

    return NV_OK;
}
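
//
// Caller sketch (illustrative): memMap_IMPL below consumes this routine to
// pick a mapping path. A minimal caller, assuming pGpu, pMemDesc, and
// mapFlags are already in hand, would be:
//
//     NV_ADDRESS_SPACE space;
//     NV_ASSERT_OK_OR_RETURN(
//         rmapiGetEffectiveAddrSpace(pGpu, pMemDesc, mapFlags, &space));
//     if (space == ADDR_FBMEM)
//         ;   // BAR1 (or coherent-link) mapping path
//     else if (space == ADDR_SYSMEM)
//         ;   // direct kernel/user sysmem mapping path
//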

// Compile-time asserts to check that the caching types match across the SDK
// and nv_memory_types.
ct_assert(NVOS33_FLAGS_CACHING_TYPE_CACHED        == NV_MEMORY_CACHED);
ct_assert(NVOS33_FLAGS_CACHING_TYPE_UNCACHED      == NV_MEMORY_UNCACHED);
ct_assert(NVOS33_FLAGS_CACHING_TYPE_WRITECOMBINED == NV_MEMORY_WRITECOMBINED);
ct_assert(NVOS33_FLAGS_CACHING_TYPE_WRITEBACK     == NV_MEMORY_WRITEBACK);
ct_assert(NVOS33_FLAGS_CACHING_TYPE_DEFAULT       == NV_MEMORY_DEFAULT);
ct_assert(NVOS33_FLAGS_CACHING_TYPE_UNCACHED_WEAK == NV_MEMORY_UNCACHED_WEAK);
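
//
// These identities let RM store an NV_MEMORY_* caching value directly into
// the OS33 caching-type field without a translation table, as memMap_IMPL
// does below. A minimal sketch:
//
//     // cachingType holds an NV_MEMORY_* value (== the OS33 encoding)
//     flags = FLD_SET_DRF_NUM(OS33, _FLAGS, _CACHING_TYPE,
//                             NV_MEMORY_WRITECOMBINED, flags);
//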

//
// Map memory entry points.
//
NV_STATUS
memMap_IMPL
(
    Memory *pMemory,
    CALL_CONTEXT *pCallContext,
    RS_CPU_MAP_PARAMS *pMapParams,
    RsCpuMapping *pCpuMapping
)
{
    OBJGPU *pGpu = NULL;
    KernelBus *pKernelBus = NULL;
    MemoryManager *pMemoryManager = NULL;
    KernelMemorySystem *pKernelMemorySystem = NULL;
    RmClient *pClient;
    RsResourceRef *pContextRef;
    RsResourceRef *pMemoryRef;
    Memory *pMemoryInfo; // TODO: rename this field. pMemoryInfo is the legacy name;
                         // the name should make clear how pMemoryInfo differs from pMemory.
    MEMORY_DESCRIPTOR *pMemDesc;
    NvP64 priv = NvP64_NULL;
    NV_STATUS rmStatus = NV_OK;
    NV_ADDRESS_SPACE effectiveAddrSpace;
    NvBool bBroadcast;
    NvU64 mapLimit;
    NvBool bIsSysmem = NV_FALSE;
    NvBool bSkipSizeCheck = (DRF_VAL(OS33, _FLAGS, _SKIP_SIZE_CHECK, pMapParams->flags) ==
                             NVOS33_FLAGS_SKIP_SIZE_CHECK_ENABLE);

    NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED);

    NV_ASSERT_OR_RETURN(pMapParams->pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT);
    pContextRef = pMapParams->pLockInfo->pContextRef;
    if (pContextRef != NULL)
    {
        NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pContextRef, &bBroadcast, &pGpu));
        gpuSetThreadBcState(pGpu, bBroadcast);

        pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
        pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
        pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
    }

    pClient = serverutilGetClientUnderLock(pMapParams->hClient);
    NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT);
    NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient),
                pMapParams->hMemory, &pMemoryRef));

    pMemoryInfo = dynamicCast(pMemoryRef->pResource, Memory);
    NV_ASSERT_OR_RETURN(pMemoryInfo != NULL, NV_ERR_NOT_SUPPORTED);
    pMemDesc = pMemoryInfo->pMemDesc;

    if ((pMemoryInfo->categoryClassId == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) &&
        !(memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM &&
          RMCFG_FEATURE_PLATFORM_MODS))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    //
    // PROTECTED memory is memory which is hidden from the CPU and used for
    // storing protected content.  The CPU is not allowed to read it, but is
    // allowed to write it in order to initialize memory allocated within the
    // PROTECTED region.
    //
    // Direct CPU access to protected memory is allowed on MODS.
    //
    // The check below is for VPR and should be skipped for Hopper CC.
    if ((pGpu != NULL) && !gpuIsCCFeatureEnabled(pGpu))
    {
        if ((pMemoryInfo->Flags & NVOS32_ALLOC_FLAGS_PROTECTED) &&
            (pMapParams->protect != NV_PROTECT_WRITEABLE) &&
            !RMCFG_FEATURE_PLATFORM_MODS)
        {
            return NV_ERR_NOT_SUPPORTED;
        }
    }

    if ((pGpu != NULL) && gpuIsCCFeatureEnabled(pGpu) &&
        (pMemoryInfo->Flags & NVOS32_ALLOC_FLAGS_PROTECTED))
    {
        ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu);
        //
        // If neither BAR1 nor PCIE as a whole is trusted, fail the mapping
        // for allocations in the CPR region. Mapping should still succeed for
        // allocations in the non-CPR region.
        // Deny BAR1 access to CPU-RM by default, irrespective of prod or
        // devtools mode. Some mappings made by CPU-RM may be allowed to go
        // through in devtools mode.
        // However, allow the mapping to go through on platforms where GSP-DMA
        // is not present, e.g. MODS. The user may have also set a regkey to
        // force BAR accesses.
        //
        if (((pCC != NULL) && !pCC->ccStaticInfo.bIsBar1Trusted &&
            !pCC->ccStaticInfo.bIsPcieTrusted) ||
            (IS_GSP_CLIENT(pGpu) && pMapParams->bKernel && !pKernelBus->bForceBarAccessOnHcc &&
             FLD_TEST_DRF(OS33, _FLAGS, _ALLOW_MAPPING_ON_HCC, _NO, pMapParams->flags)))
        {
            NV_PRINTF(LEVEL_ERROR, "BAR1 mapping to CPR vidmem not supported\n");
            NV_ASSERT(0);
            return NV_ERR_NOT_SUPPORTED;
        }
    }

    if (!pMapParams->bKernel &&
        FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, pMemoryInfo->Attr2) &&
        (pMapParams->protect != NV_PROTECT_READABLE))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    // Validate the offset and limit passed in.
    if (pMapParams->offset >= pMemoryInfo->Length)
    {
        return NV_ERR_INVALID_BASE;
    }
    if (pMapParams->length == 0)
    {
        return NV_ERR_INVALID_LIMIT;
    }

    if (bSkipSizeCheck && (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL))
    {
        return NV_ERR_INSUFFICIENT_PERMISSIONS;
    }

    //
    // See bugs #140807 and #150889: we need to pad memory mappings out past
    // their actual allocation size (to PAGE_SIZE+1) because of a buggy MS
    // function, so skip the allocation-size sanity check so the map operation
    // still succeeds.
    //
    if (!portSafeAddU64(pMapParams->offset, pMapParams->length, &mapLimit) ||
        (!bSkipSizeCheck && (mapLimit > pMemoryInfo->Length)))
    {
        return NV_ERR_INVALID_LIMIT;
    }

    if (pGpu != NULL)
    {
        NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, memdescGetMemDescFromGpu(pMemDesc, pGpu), pMapParams->flags, &effectiveAddrSpace));
    }
    else
    {
        effectiveAddrSpace = ADDR_SYSMEM;
    }

    bIsSysmem = (effectiveAddrSpace == ADDR_SYSMEM) ||
                (effectiveAddrSpace == ADDR_EGM);

    if (dynamicCast(pMemoryInfo, FlaMemory) != NULL)
    {
        NV_PRINTF(LEVEL_WARNING, "CPU mapping to FLA memory not allowed\n");
        return NV_ERR_NOT_SUPPORTED;
    }

    //
    // NVLINK2 ATS: Coherent NVLINK mappings may be returned if the client
    // doesn't specifically request PCI-E and if the surface is pitch.
    //
    if ((pGpu != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) &&
        (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM))
    {
        NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED));
        if ((memdescGetPteKind(pMemDesc) ==
            memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, RM_DEFAULT_PTE_KIND)) && // pitch
            (!memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_ENCRYPTED)))
        {
            if (pMapParams->bKernel)
            {
                if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
                {
                    NvP64 tempCpuPtr = kbusMapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc);
                    if (tempCpuPtr == NULL)
                    {
                        rmStatus = NV_ERR_GENERIC;
                    }
                    else
                    {
                        rmStatus = NV_OK;
                        tempCpuPtr = NvP64_PLUS_OFFSET(tempCpuPtr, pMapParams->offset);
                    }
                    *pMapParams->ppCpuVirtAddr = tempCpuPtr;

                    if (rmStatus != NV_OK)
                        return rmStatus;
                }
                else
                {
                    rmStatus = osMapSystemMemory(pMemDesc,
                                                 pMapParams->offset,
                                                 pMapParams->length,
                                                 pMapParams->bKernel,
                                                 pMapParams->protect,
                                                 pMapParams->ppCpuVirtAddr,
                                                 &priv);
                    if (rmStatus != NV_OK)
                        return rmStatus;
                }
            }
            else
            {
                //
                // Allocating a mapping for a user-mode client.
                // NOTE: This function intentionally leaves priv uninitialized.
                //       It simply copies the busAddress [argument 2] into ppCpuVirtAddr.
                //       During the FD mapping cleanup for bug 1784955, it is expected that
                //       this function will transition to storing the mapping parameters onto
                //       the FD.  Also note: all mapping parameters are ignored (!).
                //
                //   For now, we're going to return the first page of the nvlink aperture
                //   mapping of this allocation.  See nvidia_mmap_helper for establishment
                //   of the direct mapping.
                //
                rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo,
                                              ((NvUPtr)pKernelMemorySystem->coherentCpuFbBase +
                                               (NvUPtr)memdescGetPhysAddr(pMemDesc,
                                                AT_CPU, pMapParams->offset)),
                                              pMapParams->length,
                                              pMapParams->protect,
                                              pMapParams->ppCpuVirtAddr,
                                              &priv,
                                              NV_MEMORY_UNCACHED);
                if (rmStatus != NV_OK)
                    return rmStatus;
            }

            NV_PRINTF(LEVEL_INFO,
                      "NVLINK mapping allocated: AtsBase=0x%llx, _pteArray[0]=0x%llx, mappedCpuAddr=0x%llx, length=%d\n",
                      (NvU64)pKernelMemorySystem->coherentCpuFbBase,
                      (NvU64)((NvUPtr)pMemDesc->_pteArray[0]),
                      (*((NvU64 *)(pMapParams->ppCpuVirtAddr))),
                      (int)pMapParams->length);

            rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping,
                                                    pMapParams->bKernel,
                                                    priv,
                                                    *(pMapParams->ppCpuVirtAddr),
                                                    pMapParams->length,
                                                    -1,
                                                    -1,
                                                    pMapParams->flags);
            pCpuMapping->pPrivate->pGpu = pGpu;

            if (rmStatus != NV_OK)
                return rmStatus;
        }
        else
        {
            //
            // RM should fail gracefully when clients map FB in the coherent link path with a special KIND.
            // There is no GMMU in the coherent link path; only the regular KIND (GMK) is supported, and
            // other special KINDs (encrypted, compressed, etc.) are not.
            //
            NV_PRINTF(LEVEL_ERROR, "Need BAR mapping on coherent link! FAIL!!\n");
            return NV_ERR_NOT_SUPPORTED;
        }
    }
    else if (effectiveAddrSpace == ADDR_FBMEM)
    {
        RmPhysAddr fbAddr = 0;
        NvBool bcState = NV_FALSE;
        NvU64 gpuVirtAddr = 0;
        NvU64 gpuMapLength = 0;

        //
        // MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1 indicates a special mapping type of HW registers,
        // so map it as device memory (uncached).
        //
        NvU32 cachingType = NV_MEMORY_WRITECOMBINED;
        if (pMemDesc != NULL && !memdescHasSubDeviceMemDescs(pMemDesc))
        {
            cachingType = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1) ?
                          NV_MEMORY_UNCACHED : NV_MEMORY_WRITECOMBINED;
        }

        if (!kbusIsBar1PhysicalModeEnabled(pKernelBus))
        {
            //
            // For multi-board, the BC area has a NULL address range, so we
            // have to bring in the master.
            //
            bcState = gpumgrGetBcEnabledStatus(pGpu);
            if (bcState)
            {
                pGpu = gpumgrGetParentGPU(pGpu);
                gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
            }

            //
            // Allocate a GPU virtual address space for the video memory region
            // for those GPUs that support it.
            //

            gpuMapLength = pMapParams->length;

            //
            // If the client asks for a direct mapping, we cannot do much here;
            // just treat the surface as if it were not encrypted. This is
            // currently used purely for testing.
            //
            NV_ASSERT(pGpu->busInfo.gpuPhysFbAddr);

            {
                Device *pDevice = NULL;

                // Below, we only map one GPU's address for CPU access, so we can use UNICAST here
                NvU32 busMapFbFlags = BUS_MAP_FB_FLAGS_MAP_UNICAST;
                if (DRF_VAL(OS33, _FLAGS, _MAPPING, pMapParams->flags) == NVOS33_FLAGS_MAPPING_DIRECT)
                {
                    busMapFbFlags |= BUS_MAP_FB_FLAGS_DISABLE_ENCRYPTION;
                }

                switch (pMapParams->protect)
                {
                    case NV_PROTECT_READABLE:
                        busMapFbFlags |= BUS_MAP_FB_FLAGS_READ_ONLY;
                        break;
                    case NV_PROTECT_WRITEABLE:
                        busMapFbFlags |= BUS_MAP_FB_FLAGS_WRITE_ONLY;
                        break;
                }

                pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu);

                // WAR for bug 3564398: the doorbell needs to be allocated differently on Windows.
                if (RMCFG_FEATURE_PLATFORM_WINDOWS &&
                    memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1))
                {
                    busMapFbFlags |= BUS_MAP_FB_FLAGS_MAP_DOWNWARDS;
                }

                (void) deviceGetByHandle(staticCast(pClient, RsClient),
                                         pMapParams->hDevice, &pDevice);

                rmStatus = kbusMapFbAperture_HAL(pGpu, pKernelBus,
                                                 pMemDesc, pMapParams->offset,
                                                 &gpuVirtAddr, &gpuMapLength,
                                                 busMapFbFlags, pDevice);
            }

            if (rmStatus != NV_OK)
                goto _rmMapMemory_busFail;
        }
        else
        {
            NV_ASSERT_OR_RETURN(memdescGetContiguity(pMemDesc, AT_GPU),
                   NV_ERR_NOT_SUPPORTED);

            fbAddr = gpumgrGetGpuPhysFbAddr(pGpu) + memdescGetPte(pMemDesc, AT_GPU, 0) +
                     memdescGetPteAdjust(pMemDesc) + pMapParams->offset;
        }

        if (pMapParams->bKernel)
        {
            rmStatus = osMapPciMemoryKernel64(pGpu,
                                              (kbusIsBar1PhysicalModeEnabled(pKernelBus) ?
                                              fbAddr : gpumgrGetGpuPhysFbAddr(pGpu) + gpuVirtAddr),
                                              pMapParams->length,
                                              pMapParams->protect,
                                              pMapParams->ppCpuVirtAddr,
                                              cachingType);
        }
        else
        {
            rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo,
                                          (kbusIsBar1PhysicalModeEnabled(pKernelBus) ?
                                          fbAddr : gpumgrGetGpuPhysFbAddr(pGpu) + gpuVirtAddr),
                                          pMapParams->length,
                                          pMapParams->protect,
                                          pMapParams->ppCpuVirtAddr,
                                          &priv,
                                          cachingType);
        }

        //
        // It's possible that NVOS33_FLAGS_MAPPING is set to NVOS33_FLAGS_MAPPING_DIRECT
        // at this point; set it to REFLECTED to indicate that we aren't using
        // a direct mapping.
        //
        pMapParams->flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _REFLECTED, pMapParams->flags);
        pMapParams->flags = FLD_SET_DRF_NUM(OS33, _FLAGS, _CACHING_TYPE, cachingType, pMapParams->flags);

        if (rmStatus != NV_OK)
            goto _rmMapMemory_pciFail;

        rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping,
                                                pMapParams->bKernel,
                                                priv,
                                                *(pMapParams->ppCpuVirtAddr),
                                                pMapParams->length,
                                                kbusIsBar1PhysicalModeEnabled(pKernelBus)
                                                    ? (NvU64)-1
                                                    : gpuVirtAddr,
                                                kbusIsBar1PhysicalModeEnabled(pKernelBus)
                                                    ? (NvU64)-1
                                                    : gpuMapLength,
                                                pMapParams->flags);
        pCpuMapping->pPrivate->pGpu = pGpu;

        if (rmStatus != NV_OK)
        {
            RmUnmapBusAperture(pGpu,
                               *(pMapParams->ppCpuVirtAddr),
                               pMapParams->length,
                               pMapParams->bKernel,
                               priv);
    _rmMapMemory_pciFail:
            if (!kbusIsBar1PhysicalModeEnabled(pKernelBus))
            {
                kbusUnmapFbAperture_HAL(pGpu,
                                        pKernelBus,
                                        pMemDesc,
                                        gpuVirtAddr,
                                        gpuMapLength,
                                        BUS_MAP_FB_FLAGS_MAP_UNICAST);
    _rmMapMemory_busFail:
                gpumgrSetBcEnabledStatus(pGpu, bcState);
            }
        }
    }
    else if (bIsSysmem)
    {
        // A client can specify not to map memory by default when
        // calling into RmAllocMemory. In those cases, we don't have
        // a mapping yet, so go ahead and map it for the client now.
        rmStatus = memdescMap(pMemDesc,
                              pMapParams->offset,
                              pMapParams->length,
                              pMapParams->bKernel,
                              pMapParams->protect,
                              pMapParams->ppCpuVirtAddr,
                              &priv);

        // Associate this mapping with the client
        if (rmStatus == NV_OK && *(pMapParams->ppCpuVirtAddr))
        {
            pMapParams->flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags);
            rmStatus = CliUpdateMemoryMappingInfo(pCpuMapping,
                                                  pMapParams->bKernel,
                                                  *(pMapParams->ppCpuVirtAddr),
                                                  priv,
                                                  pMapParams->length,
                                                  pMapParams->flags);
            pCpuMapping->pPrivate->pGpu = pGpu;
        }
    }
    else if (effectiveAddrSpace == ADDR_VIRTUAL)
    {
        rmStatus = NV_ERR_NOT_SUPPORTED;
    }
    else if (effectiveAddrSpace == ADDR_REGMEM)
    {
        RS_PRIV_LEVEL privLevel;

        privLevel = rmclientGetCachedPrivilege(pClient);
        if (!rmclientIsAdmin(pClient, privLevel) &&
            !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK))
        {
            return NV_ERR_PROTECTION_FAULT;
        }

        if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pMapParams->flags) == NVOS33_FLAGS_MEM_SPACE_USER)
        {
            privLevel = RS_PRIV_LEVEL_USER;
        }

        // Create a mapping of BAR0
        rmStatus = osMapGPU(pGpu,
                            privLevel,
                            pMapParams->offset + pMemDesc->_pteArray[0],
                            pMapParams->length,
                            pMapParams->protect,
                            pMapParams->ppCpuVirtAddr,
                            &priv);
        if (rmStatus != NV_OK)
            return rmStatus;

        // Save off the mapping
        rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping,
                                                pMapParams->bKernel,
                                                priv,
                                                *(pMapParams->ppCpuVirtAddr),
                                                pMapParams->length,
                                                -1, // gpu virtual addr
                                                -1, // gpu map length
                                                pMapParams->flags);
        pCpuMapping->pPrivate->pGpu = pGpu;

        if (rmStatus != NV_OK)
        {
            osUnmapGPU(pGpu->pOsGpuInfo,
                       privLevel,
                       *(pMapParams->ppCpuVirtAddr),
                       pMapParams->length,
                       priv);
            return rmStatus;
        }
    }
    else
    {
        return NV_ERR_INVALID_CLASS;
    }

    if (rmStatus == NV_OK)
    {
        NV_PRINTF(LEVEL_INFO,
                  "%s created. CPU Virtual Address: " NvP64_fmt "\n",
                  FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags) ? "Direct mapping" : "Mapping",
                  *(pMapParams->ppCpuVirtAddr));
    }

    return rmStatus;
}

NV_STATUS
memUnmap_IMPL
(
    Memory *pMemory,
    CALL_CONTEXT *pCallContext,
    RsCpuMapping *pCpuMapping
)
{
    RmClient           *pClient             = dynamicCast(pCallContext->pClient, RmClient);
    OBJGPU             *pGpu                = pCpuMapping->pPrivate->pGpu;
    MEMORY_DESCRIPTOR  *pMemDesc            = pMemory->pMemDesc;

    KernelBus          *pKernelBus          = NULL;
    MemoryManager      *pMemoryManager      = NULL;

    if (pGpu != NULL)
    {
        pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
        pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
    }

    if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, pCpuMapping->flags))
    {
        // Nothing more to do
    }
    else if ((pGpu != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) &&
             (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM))
    {
        NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED));
        NV_ASSERT((memdescGetPteKind(pMemDesc) ==
                   memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, RM_DEFAULT_PTE_KIND)) && // pitch
                  (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED)));

        if (pCpuMapping->pPrivate->bKernel)
        {
            if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
            {
                kbusUnmapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc);
            }
            else
            {
                osUnmapSystemMemory(pMemDesc,
                                    pCpuMapping->pPrivate->bKernel,
                                    pCpuMapping->processId,
                                    pCpuMapping->pLinearAddress,
                                    pCpuMapping->pPrivate->pPriv);
            }
        }

        NV_PRINTF(LEVEL_INFO,
                  "Unmapping from NVLINK handle = 0x%x, addr= 0x%llx\n",
                  RES_GET_HANDLE(pMemory), (NvU64)pCpuMapping->pLinearAddress);

        //
        // No BAR aperture mapping to delete.
        // No kernel mapping to remove.
        // User-space will call munmap to eliminate PTE mappings.
        //
    }
    // System memory case
    else if ((pGpu == NULL) || (((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM)
                                 || (memdescGetAddressSpace(pMemDesc) == ADDR_EGM)
                                ) && FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags)))
    {
        if (FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags))
        {
            memdescUnmap(pMemDesc,
                         pCpuMapping->pPrivate->bKernel,
                         pCpuMapping->processId,
                         pCpuMapping->pLinearAddress,
                         pCpuMapping->pPrivate->pPriv);
        }
    }
    else if ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ||
             ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
              FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _REFLECTED, pCpuMapping->flags)))
    {
        RmUnmapBusAperture(pGpu,
                           pCpuMapping->pLinearAddress,
                           pCpuMapping->length,
                           pCpuMapping->pPrivate->bKernel,
                           pCpuMapping->pPrivate->pPriv);

        if (!kbusIsBar1PhysicalModeEnabled(pKernelBus))
        {
            kbusUnmapFbAperture_HAL(pGpu, pKernelBus,
                                    pMemory->pMemDesc,
                                    pCpuMapping->pPrivate->gpuAddress,
                                    pCpuMapping->pPrivate->gpuMapLength,
                                    BUS_MAP_FB_FLAGS_MAP_UNICAST);
        }
    }
    else if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL)
    {
        NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE);
    }
    else if (memdescGetAddressSpace(pMemDesc) == ADDR_REGMEM)
    {
        osUnmapGPU(pGpu->pOsGpuInfo,
                   rmclientGetCachedPrivilege(pClient),
                   pCpuMapping->pLinearAddress,
                   pCpuMapping->length,
                   pCpuMapping->pPrivate->pPriv);
    }
    return NV_OK;
}

NV_STATUS
rmapiValidateKernelMapping
(
    RS_PRIV_LEVEL privLevel,
    NvU32 flags,
    NvBool *pbKernel
)
{
    NvBool bKernel;
    NV_STATUS status = NV_OK;
    if (privLevel < RS_PRIV_LEVEL_KERNEL)
    {
        // Only kernel clients should be specifying the user mapping flags
        if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_USER)
            status = NV_ERR_INVALID_FLAGS;
        bKernel = NV_FALSE;
    }
    else
    {
        //
        // Kernel clients can only use the persistent flag if they are
        // doing a user mapping.
        //
        bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT);
    }

    // OS descriptor will already be mapped
    if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, flags))
        status = NV_ERR_INVALID_FLAGS;

    if (pbKernel != NULL)
        *pbKernel = bKernel;

    return status;
}
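
//
// Worked example (illustrative) of rmapiValidateKernelMapping outcomes,
// assuming _OS_DESCRIPTOR is not set (that flag always yields
// NV_ERR_INVALID_FLAGS, since an OS descriptor is already mapped):
//
//     privLevel                 _MEM_SPACE flag   status             *pbKernel
//     ------------------------  ----------------  -----------------  ---------
//     < RS_PRIV_LEVEL_KERNEL    _CLIENT           NV_OK              NV_FALSE
//     < RS_PRIV_LEVEL_KERNEL    _USER             ERR_INVALID_FLAGS  NV_FALSE
//     >= RS_PRIV_LEVEL_KERNEL   _CLIENT           NV_OK              NV_TRUE
//     >= RS_PRIV_LEVEL_KERNEL   _USER             NV_OK              NV_FALSE
//
// i.e. only kernel clients may request a user-space mapping via
// NVOS33_FLAGS_MEM_SPACE_USER; for everyone else that flag is rejected.
//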

NV_STATUS
serverMap_Prologue
(
    RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams
)
{
    NV_STATUS           rmStatus;
    RmClient           *pClient;
    RsResourceRef      *pMemoryRef;
    NvHandle            hClient = pMapParams->hClient;
    NvHandle            hParent = hClient;
    NvHandle            hSubDevice = NV01_NULL_OBJECT;
    NvBool              bClientAlloc = (hClient == pMapParams->hDevice);
    NvU32               flags = pMapParams->flags;
    RS_PRIV_LEVEL       privLevel;

    // Persistent sysmem mappings are no longer supported
    if (DRF_VAL(OS33, _FLAGS, _PERSISTENT, flags) == NVOS33_FLAGS_PERSISTENT_ENABLE)
        return NV_ERR_INVALID_FLAGS;

    // Populate Resource Server information
    pClient = serverutilGetClientUnderLock(hClient);
    NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT);

    // Validate hClient
    privLevel = rmclientGetCachedPrivilege(pClient);

    // RS-TODO: Assert if this fails after all objects are converted
    NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient),
                pMapParams->hMemory, &pMemoryRef));

    if (pMemoryRef->pParentRef != NULL)
        hParent = pMemoryRef->pParentRef->hResource;

    // Check if we have a user or kernel RM client
    rmStatus = rmapiValidateKernelMapping(privLevel, flags, &pMapParams->bKernel);
    if (rmStatus != NV_OK)
        return rmStatus;

    //
    // First check to see if it is a standard device or the BC region of
    // an MC adapter.
    //
    pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK;
    if (!bClientAlloc)
    {
        NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT);

        RsResourceRef *pContextRef;
        rmStatus = clientGetResourceRef(staticCast(pClient, RsClient),
                pMapParams->hDevice, &pContextRef);

        if (rmStatus != NV_OK)
            return rmStatus;

        if (pContextRef->internalClassId == classId(Device))
        {
            // Nothing to do: hDevice already names the device.
        }
        else if (pContextRef->internalClassId == classId(Subdevice))
        {
            hSubDevice = pMapParams->hDevice;
            pMapParams->hDevice = pContextRef->pParentRef->hResource;
        }
        else
        {
            return NV_ERR_INVALID_OBJECT_PARENT;
        }

        pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK;
        pMapParams->pLockInfo->pContextRef = pContextRef;
    }
    else
    {
        NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT);
    }

    pMapParams->hContext = (hSubDevice != NV01_NULL_OBJECT)
                      ? hSubDevice
                      : pMapParams->hDevice;

    // Convert from OS33 flags to RM's memory protection flags
    switch (DRF_VAL(OS33, _FLAGS, _ACCESS, flags))
    {
        case NVOS33_FLAGS_ACCESS_READ_WRITE:
            pMapParams->protect = NV_PROTECT_READ_WRITE;
            break;
        case NVOS33_FLAGS_ACCESS_READ_ONLY:
            pMapParams->protect = NV_PROTECT_READABLE;
            break;
        case NVOS33_FLAGS_ACCESS_WRITE_ONLY:
            pMapParams->protect = NV_PROTECT_WRITEABLE;
            break;
        default:
            return NV_ERR_INVALID_FLAGS;
    }

    return NV_OK;
}

NV_STATUS
serverUnmap_Prologue
(
    RsServer *pServer,
    RS_CPU_UNMAP_PARAMS *pUnmapParams
)
{
    OBJGPU *pGpu = NULL;
    NV_STATUS rmStatus;
    RmClient *pClient;
    RsResourceRef *pMemoryRef;
    NvHandle hClient = pUnmapParams->hClient;
    NvHandle hParent = hClient;
    NvHandle hMemory = pUnmapParams->hMemory;
    NvBool bClientAlloc = (pUnmapParams->hDevice == pUnmapParams->hClient);
    NvBool bKernel;
    NvBool bBroadcast;
    NvU32 ProcessId = pUnmapParams->processId;
    RS_PRIV_LEVEL privLevel;
    void *pProcessHandle = NULL;

    // Populate Resource Server information
    pClient = serverutilGetClientUnderLock(hClient);
    NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT);

    // Check if we have a user or kernel RM client
    privLevel = rmclientGetCachedPrivilege(pClient);

    // RS-TODO: Assert if this fails after all objects are converted
    NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient),
                hMemory, &pMemoryRef));

    if (pMemoryRef->pParentRef != NULL)
        hParent = pMemoryRef->pParentRef->hResource;

    //
    // First check to see if it is a standard device or the BC region of
    // an MC adapter.
    //
    pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK;
    if (!bClientAlloc)
    {
        NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT);

        RsResourceRef *pContextRef;
        rmStatus = clientGetResourceRef(staticCast(pClient, RsClient),
                pUnmapParams->hDevice, &pContextRef);

        if (rmStatus != NV_OK)
            return rmStatus;

        if (pContextRef->internalClassId == classId(Subdevice))
        {
            pUnmapParams->hDevice = pContextRef->pParentRef->hResource;
        }
        else if (pContextRef->internalClassId != classId(Device))
        {
            return NV_ERR_INVALID_OBJECT_PARENT;
        }

        pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK;
        pUnmapParams->pLockInfo->pContextRef = pContextRef;
        NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pUnmapParams->pLockInfo->pContextRef, &bBroadcast, &pGpu));
        gpuSetThreadBcState(pGpu, bBroadcast);
    }
    else
    {
        NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT);
    }

    // Decide what sort of mapping it is, user or kernel
    if (privLevel < RS_PRIV_LEVEL_KERNEL)
    {
        bKernel = NV_FALSE;
    }
    else
    {
        bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pUnmapParams->flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT);
    }

    //
    // If it's a user mapping, and we're not currently in the same process that
    // it's mapped into, then attempt to attach to the other process first.
    //
    if (!bKernel && (ProcessId != osGetCurrentProcess()))
    {
        rmStatus = osAttachToProcess(&pProcessHandle, ProcessId);
        if (rmStatus != NV_OK)
        {
            if (pUnmapParams->bTeardown)
                pProcessHandle = NULL;
            else
                return rmStatus;
        }

        pUnmapParams->pProcessHandle = pProcessHandle;
    }

    // Don't do any filtering if this is a tear-down path
    if (pUnmapParams->bTeardown)
    {
        pUnmapParams->fnFilter = NULL;
        return NV_OK;
    }

    pUnmapParams->fnFilter = bKernel
        ? serverutilMappingFilterKernel
        : serverutilMappingFilterCurrentUserProc;

    return NV_OK;
}

void
serverUnmap_Epilogue
(
    RsServer *pServer,
    RS_CPU_UNMAP_PARAMS *pUnmapParams
)
{
    // Do we need to detach?
    if (pUnmapParams->pProcessHandle != NULL)
    {
        osDetachFromProcess(pUnmapParams->pProcessHandle);
        pUnmapParams->pProcessHandle = NULL;
    }
}

static void
RmUnmapBusAperture
(
    OBJGPU *pGpu,
    NvP64   pCpuVirtualAddress,
    NvU64   length,
    NvBool  bKernel,
    NvP64   pPrivateData
)
{
    if (bKernel)
    {
        osUnmapPciMemoryKernel64(pGpu, pCpuVirtualAddress);
    }
    else
    {
        osUnmapPciMemoryUser(pGpu->pOsGpuInfo, pCpuVirtualAddress, length, pPrivateData);
    }
}

NV_STATUS
rmapiMapToCpu
(
    RM_API   *pRmApi,
    NvHandle  hClient,
    NvHandle  hDevice,
    NvHandle  hMemory,
    NvU64     offset,
    NvU64     length,
    void    **ppCpuVirtAddr,
    NvU32     flags
)
{
    NvP64     pCpuVirtAddrNvP64 = NvP64_NULL;
    NV_STATUS status;

    if (!pRmApi->bHasDefaultSecInfo)
        return NV_ERR_NOT_SUPPORTED;

    status = pRmApi->MapToCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, offset, length,
                                         &pCpuVirtAddrNvP64, flags, &pRmApi->defaultSecInfo);

    if (ppCpuVirtAddr)
        *ppCpuVirtAddr = NvP64_VALUE(pCpuVirtAddrNvP64);

    return status;
}
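
//
// Usage sketch (illustrative, assuming valid handles and a kernel-internal
// RM_API instance): map one page of an allocation read/write.
//
//     void *pCpu = NULL;
//     NV_STATUS st = rmapiMapToCpu(pRmApi, hClient, hDevice, hMemory,
//                                  0 /* offset */, 0x1000 /* length */,
//                                  &pCpu,
//                                  DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE));
//     if (st == NV_OK)
//     {
//         // ... access the allocation through pCpu ...
//     }
//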

/**
 * Call into Resource Server to register and execute a CPU mapping operation.
 *
 * Resource Server will:
 *    1. Call back into RM (serverMap_Prologue) to set up mapping parameters, the mapping
 *       context object, and locking requirements
 *    2. Take locks (if required)
 *    3. Allocate and register an RsCpuMapping book-keeping entry on the target object's RsResourceRef
 *    4. Call the target object's mapping virtual function (xxxMap_IMPL, defined in RM)
 *    5. Set up back-references to the mapping context object (if required). This mapping will
 *       automatically be unmapped if either the target object or the mapping context object is freed.
 *    6. Release any locks taken
 */
NV_STATUS
rmapiMapToCpuWithSecInfoV2
(
    RM_API            *pRmApi,
    NvHandle           hClient,
    NvHandle           hDevice,
    NvHandle           hMemory,
    NvU64              offset,
    NvU64              length,
    NvP64             *ppCpuVirtAddr,
    NvU32             *flags,
    API_SECURITY_INFO *pSecInfo
)
{
    NV_STATUS  status;
    RM_API_CONTEXT rmApiContext = {0};
    RmMapParams rmMapParams;
    RS_LOCK_INFO lockInfo;

    NV_PRINTF(LEVEL_INFO,
              "Nv04MapMemory: client:0x%x device:0x%x memory:0x%x\n", hClient,
              hDevice, hMemory);
    NV_PRINTF(LEVEL_INFO,
              "Nv04MapMemory:  offset: %llx length: %llx flags:0x%x\n",
              offset, length, *flags);

    status = rmapiPrologue(pRmApi, &rmApiContext);
    if (status != NV_OK)
        return status;

    NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Nv04MapMemory 0x%x\n", *flags);

    portMemSet(&lockInfo, 0, sizeof(lockInfo));
    status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo);
    if (status != NV_OK)
    {
        rmapiEpilogue(pRmApi, &rmApiContext);
        return status;
    }

    LOCK_METER_DATA(MAPMEM, flags, 0, 0);

    // Clear params for good measure
    portMemSet(&rmMapParams, 0, sizeof(rmMapParams));

    // Load user args
    rmMapParams.hClient = hClient;
    rmMapParams.hDevice = hDevice;
    rmMapParams.hMemory = hMemory;
    rmMapParams.offset = offset;
    rmMapParams.length = length;
    rmMapParams.ppCpuVirtAddr = ppCpuVirtAddr;
    rmMapParams.flags = *flags;
    rmMapParams.pLockInfo = &lockInfo;
    rmMapParams.pSecInfo = pSecInfo;

    status = serverMap(&g_resServ, rmMapParams.hClient, rmMapParams.hMemory, &rmMapParams);

    rmapiEpilogue(pRmApi, &rmApiContext);

    *flags = rmMapParams.flags;

    if (status == NV_OK)
    {
        NV_PRINTF(LEVEL_INFO, "Nv04MapMemory: complete\n");
        NV_PRINTF(LEVEL_INFO,
                  "Nv04MapMemory:  *ppCpuVirtAddr:" NvP64_fmt "\n",
                  *ppCpuVirtAddr);
    }
    else
    {
        NV_PRINTF(LEVEL_WARNING,
                  "Nv04MapMemory: map failed; status: %s (0x%08x)\n",
                  nvstatusToString(status), status);
    }

    return status;
}

NV_STATUS
rmapiMapToCpuWithSecInfo
(
    RM_API            *pRmApi,
    NvHandle           hClient,
    NvHandle           hDevice,
    NvHandle           hMemory,
    NvU64              offset,
    NvU64              length,
    NvP64             *ppCpuVirtAddr,
    NvU32              flags,
    API_SECURITY_INFO *pSecInfo
)
{
    return rmapiMapToCpuWithSecInfoV2(pRmApi, hClient,
        hDevice, hMemory, offset, length, ppCpuVirtAddr,
        &flags, pSecInfo);
}

NV_STATUS
rmapiMapToCpuWithSecInfoTls
(
    RM_API            *pRmApi,
    NvHandle           hClient,
    NvHandle           hDevice,
    NvHandle           hMemory,
    NvU64              offset,
    NvU64              length,
    NvP64             *ppCpuVirtAddr,
    NvU32              flags,
    API_SECURITY_INFO *pSecInfo
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS         status;

    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    status = rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, &flags, pSecInfo);

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);

    return status;
}

NV_STATUS
rmapiMapToCpuWithSecInfoTlsV2
(
    RM_API            *pRmApi,
    NvHandle           hClient,
    NvHandle           hDevice,
    NvHandle           hMemory,
    NvU64              offset,
    NvU64              length,
    NvP64             *ppCpuVirtAddr,
    NvU32             *flags,
    API_SECURITY_INFO *pSecInfo
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS         status;

    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    status = rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, flags, pSecInfo);

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);

    return status;
}

NV_STATUS
rmapiUnmapFromCpu
(
    RM_API   *pRmApi,
    NvHandle  hClient,
    NvHandle  hDevice,
    NvHandle  hMemory,
    void     *pLinearAddress,
    NvU32     flags,
    NvU32     ProcessId
)
{
    if (!pRmApi->bHasDefaultSecInfo)
        return NV_ERR_NOT_SUPPORTED;

    return pRmApi->UnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, NV_PTR_TO_NvP64(pLinearAddress),
                                           flags, ProcessId, &pRmApi->defaultSecInfo);
}
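
//
// Usage sketch (illustrative): tearing down a mapping created with
// rmapiMapToCpu above. ProcessId selects whose address space is unmapped for
// user mappings (see serverUnmap_Prologue); pCpu is the address returned by
// the earlier map call.
//
//     rmapiUnmapFromCpu(pRmApi, hClient, hDevice, hMemory, pCpu,
//                       0 /* flags */, osGetCurrentProcess());
//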

/**
 * Call into Resource Server to execute a CPU unmapping operation.
 *
 * Resource Server will:
 *    1. Call back into RM (serverUnmap_Prologue) to set up unmapping parameters and locking
 *       requirements, and attempt to attach to the mapping's user process (for user mappings only)
 *    2. Take locks (if required)
 *    3. Look up the mapping
 *    4. Call the target object's unmapping virtual function (xxxUnmap_IMPL, defined in RM)
 *    5. Unregister the mapping from its back-references, and free the mapping
 *    6. Call back into RM (serverUnmap_Epilogue) to detach from the mapping's user process (if required)
 *    7. Release any locks taken
 */
NV_STATUS
rmapiUnmapFromCpuWithSecInfo
(
    RM_API            *pRmApi,
    NvHandle           hClient,
    NvHandle           hDevice,
    NvHandle           hMemory,
    NvP64              pLinearAddress,
    NvU32              flags,
    NvU32              ProcessId,
    API_SECURITY_INFO *pSecInfo
)
{
    NV_STATUS status;
    RM_API_CONTEXT rmApiContext = {0};
    RmUnmapParams rmUnmapParams;
    RS_LOCK_INFO lockInfo;

    NV_PRINTF(LEVEL_INFO,
              "Nv04UnmapMemory: client:0x%x device:0x%x memory:0x%x pLinearAddr:" NvP64_fmt " flags:0x%x\n",
              hClient, hDevice, hMemory, pLinearAddress, flags);

    status = rmapiPrologue(pRmApi, &rmApiContext);
    if (status != NV_OK)
        return status;

    portMemSet(&lockInfo, 0, sizeof(lockInfo));
    status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo);
    if (status != NV_OK)
    {
        rmapiEpilogue(pRmApi, &rmApiContext);
        return status;
    }

    LOCK_METER_DATA(UNMAPMEM, flags, 0, 0);

    portMemSet(&rmUnmapParams, 0, sizeof(rmUnmapParams));
    rmUnmapParams.hClient = hClient;
    rmUnmapParams.hDevice = hDevice;
    rmUnmapParams.hMemory = hMemory;
    rmUnmapParams.pLinearAddress = pLinearAddress;
    rmUnmapParams.flags = flags;
    rmUnmapParams.processId = ProcessId;
    rmUnmapParams.pLockInfo = &lockInfo;
    rmUnmapParams.pSecInfo = pSecInfo;

    status = serverUnmap(&g_resServ, hClient, hMemory, &rmUnmapParams);

    rmapiEpilogue(pRmApi, &rmApiContext);

    if (status == NV_OK)
    {
        NV_PRINTF(LEVEL_INFO, "Nv04UnmapMemory: unmap complete\n");
    }
    else
    {
        NV_PRINTF(LEVEL_WARNING,
                  "Nv04UnmapMemory: unmap failed; status: %s (0x%08x)\n",
                  nvstatusToString(status), status);
    }

    return status;
}

NV_STATUS
rmapiUnmapFromCpuWithSecInfoTls
(
    RM_API            *pRmApi,
    NvHandle           hClient,
    NvHandle           hDevice,
    NvHandle           hMemory,
    NvP64              pLinearAddress,
    NvU32              flags,
    NvU32              ProcessId,
    API_SECURITY_INFO *pSecInfo
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS         status;

    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    status = rmapiUnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, pLinearAddress,
                                          flags, ProcessId, pSecInfo);

    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);

    return status;
}

NV_STATUS
serverMapLookupLockFlags
(
    RsServer *pServer,
    RS_LOCK_ENUM lock,
    RS_CPU_MAP_PARAMS *pParams,
    LOCK_ACCESS_TYPE *pAccess
)
{
    NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT);

    *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_MAP))
        ? LOCK_ACCESS_READ
        : LOCK_ACCESS_WRITE;
    return NV_OK;
}

NV_STATUS
serverUnmapLookupLockFlags
(
    RsServer *pServer,
    RS_LOCK_ENUM lock,
    RS_CPU_UNMAP_PARAMS *pParams,
    LOCK_ACCESS_TYPE *pAccess
)
{
    NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT);

    *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_UNMAP))
        ? LOCK_ACCESS_READ
        : LOCK_ACCESS_WRITE;
    return NV_OK;
}

NV_STATUS
refAllocCpuMappingPrivate
(
    RS_CPU_MAP_PARAMS *pMapParams,
    RsCpuMapping *pCpuMapping
)
{
    pCpuMapping->pPrivate = portMemAllocNonPaged(sizeof(RS_CPU_MAPPING_PRIVATE));
    if (pCpuMapping->pPrivate == NULL)
        return NV_ERR_NO_MEMORY;

    pCpuMapping->pPrivate->protect = pMapParams->protect;
    pCpuMapping->pPrivate->bKernel = pMapParams->bKernel;

    return NV_OK;
}

void
refFreeCpuMappingPrivate
(
    RsCpuMapping *pCpuMapping
)
{
    portMemFree(pCpuMapping->pPrivate);
}