1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "core/core.h"
25 #include "core/locks.h"
26 #include "core/thread_state.h"
27 #include "os/os.h"
28 #include "gpu/mem_mgr/mem_desc.h"
29 #include "gpu/device/device.h"
30 #include "gpu/subdevice/generic_engine.h"
31 #include "gpu/subdevice/subdevice.h"
32 #include "gpu/mem_mgr/mem_mgr.h"
33 #include "mem_mgr/fla_mem.h"
34 
35 #include "class/cl0000.h" // NV01_NULL_OBJECT
36 
37 #include "resserv/rs_server.h"
38 #include "resserv/rs_client.h"
39 #include "resserv/rs_resource.h"
40 
41 #include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
42 #include "gpu/mem_sys/kern_mem_sys.h"
43 #include "gpu/bus/kern_bus.h"
44 
45 #include "rmapi/rs_utils.h"
46 #include "rmapi/mapping_list.h"
47 #include "entry_points.h"
48 
#include "gpu/conf_compute/conf_compute.h"

static void RmUnmapBusAperture(OBJGPU *, NvP64, NvU64, NvBool, NvP64);
52 
53 typedef struct RS_CPU_MAP_PARAMS RmMapParams;
54 typedef struct RS_CPU_UNMAP_PARAMS RmUnmapParams;
55 
56 NV_STATUS
57 rmapiMapGpuCommon
58 (
59     RsResource *pResource,
60     CALL_CONTEXT *pCallContext,
61     RsCpuMapping *pCpuMapping,
62     OBJGPU *pGpu,
63     NvU32 regionOffset,
64     NvU32 regionSize
65 )
66 {
67     NV_STATUS rmStatus;
68     RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient);
69     NvU64 offset;
70 
    // Validate the offset and limit passed in.
    if (pCpuMapping->offset >= regionSize)
        return NV_ERR_INVALID_BASE;
    if (pCpuMapping->length == 0)
        return NV_ERR_INVALID_LIMIT;

    // Check for overflow before comparing the mapping's end to the region size.
    if (!portSafeAddU64(pCpuMapping->offset, pCpuMapping->length, &offset) ||
        (offset > regionSize))
        return NV_ERR_INVALID_LIMIT;

    if (!portSafeAddU64((NvU64)regionOffset, pCpuMapping->offset, &offset))
        return NV_ERR_INVALID_OFFSET;
82 
83     // Create a mapping of BAR0
84     rmStatus = osMapGPU(pGpu,
85                         rmclientGetCachedPrivilege(pClient),
86                         offset,
87                         pCpuMapping->length,
88                         pCpuMapping->pPrivate->protect,
89                         &pCpuMapping->pLinearAddress,
90                         &pCpuMapping->pPrivate->pPriv);
91     return rmStatus;
92 }
93 
94 
95 
96 NV_STATUS
97 rmapiGetEffectiveAddrSpace
98 (
99     OBJGPU *pGpu,
100     MEMORY_DESCRIPTOR *pMemDesc,
101     NvU32 mapFlags,
102     NV_ADDRESS_SPACE *pAddrSpace
103 )
104 {
105     NV_ADDRESS_SPACE addrSpace;
106     NvBool bDirectSysMappingAllowed = NV_TRUE;
107 
108     KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
109 
110     NV_ASSERT_OK_OR_RETURN(
111         kbusIsDirectMappingAllowed_HAL(pGpu, pKernelBus, pMemDesc, mapFlags,
112                                       &bDirectSysMappingAllowed));
113 
114     //
115     // Bug 1482818: Deprecate reflected mappings in production code.
116     //  The usage of reflected writes, in addition to causing several deadlock
117     //  scenarios involving P2P transfers, are disallowed on NVLINK (along with
118     //  reflected reads), and should no longer be used.
119     //  The below PDB property should be unset once the remaining usages in MODS
120     //  have been culled. (Bug 1780557)
121     //
122     if ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
123         !bDirectSysMappingAllowed &&
124         (DRF_VAL(OS33, _FLAGS, _MAPPING, mapFlags) != NVOS33_FLAGS_MAPPING_DIRECT) &&
125         !kbusIsReflectedMappingAccessAllowed(pKernelBus))
126     {
127         NV_ASSERT(0);
128         return NV_ERR_NOT_SUPPORTED;
129     }
130 
131     if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1))
132     {
133         addrSpace = ADDR_FBMEM;
134     }
135     else if ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
136         (bDirectSysMappingAllowed || FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, mapFlags) ||
137         (IS_VIRTUAL_WITH_SRIOV(pGpu) && !IS_FMODEL(pGpu) && !IS_RTLSIM(pGpu))))
138     {
139         addrSpace = ADDR_SYSMEM;
140     }
141     else if ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ||
142              ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && !bDirectSysMappingAllowed))
143     {
144         addrSpace = ADDR_FBMEM;
145     }
146     else
147     {
148         addrSpace = memdescGetAddressSpace(pMemDesc);
149     }
150 
151     if (pAddrSpace)
152         *pAddrSpace = addrSpace;
153 
154     return NV_OK;
155 }
156 
// Compile-time asserts to check that the caching types match between the SDK and nv_memory_types.
158 ct_assert(NVOS33_FLAGS_CACHING_TYPE_CACHED        == NV_MEMORY_CACHED);
159 ct_assert(NVOS33_FLAGS_CACHING_TYPE_UNCACHED      == NV_MEMORY_UNCACHED);
160 ct_assert(NVOS33_FLAGS_CACHING_TYPE_WRITECOMBINED == NV_MEMORY_WRITECOMBINED);
161 ct_assert(NVOS33_FLAGS_CACHING_TYPE_WRITEBACK     == NV_MEMORY_WRITEBACK);
162 ct_assert(NVOS33_FLAGS_CACHING_TYPE_DEFAULT       == NV_MEMORY_DEFAULT);
163 ct_assert(NVOS33_FLAGS_CACHING_TYPE_UNCACHED_WEAK == NV_MEMORY_UNCACHED_WEAK);
164 
165 //
166 // Map memory entry points.
167 //
168 NV_STATUS
169 memMap_IMPL
170 (
171     Memory *pMemory,
172     CALL_CONTEXT *pCallContext,
173     RS_CPU_MAP_PARAMS *pMapParams,
174     RsCpuMapping *pCpuMapping
175 )
176 {
177     OBJGPU *pGpu = NULL;
178     KernelBus *pKernelBus = NULL;
179     MemoryManager *pMemoryManager = NULL;
180     KernelMemorySystem *pKernelMemorySystem = NULL;
181     RmClient *pClient;
182     RsResourceRef *pContextRef;
183     RsResourceRef *pMemoryRef;
    Memory *pMemoryInfo; // TODO: rename this field. pMemoryInfo is the legacy name.
                         // The name should make clear how pMemoryInfo differs from pMemory.
186     MEMORY_DESCRIPTOR *pMemDesc;
187     NvP64 priv = NvP64_NULL;
188     NV_STATUS rmStatus = NV_OK;
189     NV_ADDRESS_SPACE effectiveAddrSpace;
190     NvBool bBroadcast;
191     NvU64 mapLimit;
192     NvBool bIsSysmem = NV_FALSE;
193     NvBool bSkipSizeCheck = (DRF_VAL(OS33, _FLAGS, _SKIP_SIZE_CHECK, pMapParams->flags) ==
194                              NVOS33_FLAGS_SKIP_SIZE_CHECK_ENABLE);
195 
196     NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED);
197 
198     NV_ASSERT_OR_RETURN(pMapParams->pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT);
199     pContextRef = pMapParams->pLockInfo->pContextRef;
200     if (pContextRef != NULL)
201     {
202         NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pContextRef, &bBroadcast, &pGpu));
203         gpuSetThreadBcState(pGpu, bBroadcast);
204 
205         pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
206         pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
207         pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
208     }
209 
210     pClient = serverutilGetClientUnderLock(pMapParams->hClient);
211     NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT);
212     NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient),
213                 pMapParams->hMemory, &pMemoryRef));
214 
215     pMemoryInfo = dynamicCast(pMemoryRef->pResource, Memory);
216     NV_ASSERT_OR_RETURN(pMemoryInfo != NULL, NV_ERR_NOT_SUPPORTED);
217     pMemDesc = pMemoryInfo->pMemDesc;
218 
219     if ((pMemoryInfo->categoryClassId == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) &&
220         !(memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM &&
221           RMCFG_FEATURE_PLATFORM_MODS))
222     {
223         return NV_ERR_NOT_SUPPORTED;
224     }
225 
226     //
227     // PROTECTED memory is memory which is hidden from the CPU and used for
228     // storing protected content.  The CPU is not allowed to read it, but is
229     // allowed to write it in order to initialize memory allocated within the
230     // PROTECTED region.
231     //
232     // CPU to directly access protected memory is allowed on MODS
233     //
234     // The check below is for VPR and should be skipped for Hopper CC
235     if ((pGpu != NULL) && !gpuIsCCFeatureEnabled(pGpu))
236     {
        if ((pMemoryInfo->Flags & NVOS32_ALLOC_FLAGS_PROTECTED) &&
            (pMapParams->protect != NV_PROTECT_WRITEABLE) &&
            !RMCFG_FEATURE_PLATFORM_MODS)
240         {
241             return NV_ERR_NOT_SUPPORTED;
242         }
243     }
244 
245     if ((pGpu != NULL) && (pMemoryInfo->Flags & NVOS32_ALLOC_FLAGS_PROTECTED))
246     {
247         ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu);
248         //
249         // If neither BAR1 nor PCIE as a whole is trusted, fail the mapping
250         // for allocations in CPR region. Mapping should still succeed for
251         // allocations in non-CPR region
252         //
253         if ((pCC != NULL) && !pCC->ccStaticInfo.bIsBar1Trusted &&
254             !pCC->ccStaticInfo.bIsPcieTrusted)
255         {
256             NV_ASSERT(0);
257             return NV_ERR_NOT_SUPPORTED;
258         }
259     }
260 
261     if (!pMapParams->bKernel &&
262         FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, pMemoryInfo->Attr2) &&
263         (pMapParams->protect != NV_PROTECT_READABLE))
264     {
265         return NV_ERR_INVALID_ARGUMENT;
266     }
267 
268     // Validate the offset and limit passed in.
269     if (pMapParams->offset >= pMemoryInfo->Length)
270     {
271         return NV_ERR_INVALID_BASE;
272     }
273     if (pMapParams->length == 0)
274     {
275         return NV_ERR_INVALID_LIMIT;
276     }
277 
278     if (bSkipSizeCheck && (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL))
279     {
280         return NV_ERR_INSUFFICIENT_PERMISSIONS;
281     }
282 
283     //
284     // See bug #140807 and #150889 - we need to pad memory mappings to past their
285     // actual allocation size (to PAGE_SIZE+1) because of a buggy ms function so
286     // skip the allocation size sanity check so the map operation still succeeds.
287     //
288     if (!portSafeAddU64(pMapParams->offset, pMapParams->length, &mapLimit) ||
289         (!bSkipSizeCheck && (mapLimit > pMemoryInfo->Length)))
290     {
291         return NV_ERR_INVALID_LIMIT;
292     }
293 
294     if (pGpu != NULL)
295     {
296         NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, memdescGetMemDescFromGpu(pMemDesc, pGpu), pMapParams->flags, &effectiveAddrSpace));
297     }
298     else
299     {
300         effectiveAddrSpace = ADDR_SYSMEM;
301     }
302 
    bIsSysmem = (effectiveAddrSpace == ADDR_SYSMEM) ||
                (effectiveAddrSpace == ADDR_EGM);
305 
306     if (dynamicCast(pMemoryInfo, FlaMemory) != NULL)
307     {
308         NV_PRINTF(LEVEL_WARNING, "CPU mapping to FLA memory not allowed\n");
309         return NV_ERR_NOT_SUPPORTED;
310     }
311 
312     //
313     //  NVLINK2 ATS: Coherent NVLINK mappings may be returned if the client
314     //    doesn't specifically request PCI-E and if the surface is pitch.
315     //
316     if ((pGpu != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) &&
317         (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM))
318     {
319         NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED));
320         if ((memdescGetPteKind(pMemDesc) ==
321             memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, RM_DEFAULT_PTE_KIND)) && // pitch
322             (!memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_ENCRYPTED)))
323         {
324             if (pMapParams->bKernel)
325             {
326                 if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
327                 {
328                     NvP64 tempCpuPtr = kbusMapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc);
                    if (tempCpuPtr == NvP64_NULL)
330                     {
331                         rmStatus = NV_ERR_GENERIC;
332                     }
333                     else
334                     {
335                         rmStatus = NV_OK;
336                         tempCpuPtr = NvP64_PLUS_OFFSET(tempCpuPtr, pMapParams->offset);
337                     }
338                     *pMapParams->ppCpuVirtAddr = tempCpuPtr;
339 
340                     if (rmStatus != NV_OK)
341                         return rmStatus;
342                 }
343                 else
344                 {
345                     rmStatus = osMapSystemMemory(pMemDesc,
346                                                  pMapParams->offset,
347                                                  pMapParams->length,
348                                                  pMapParams->bKernel,
349                                                  pMapParams->protect,
350                                                  pMapParams->ppCpuVirtAddr,
351                                                  &priv);
352                     if (rmStatus != NV_OK)
353                         return rmStatus;
354                 }
355             }
356             else
357             {
358 
359                 //
360                 // Allocating mapping for user mode client
361                 // NOTE: This function intentionally leaves priv uninitialized.
362                 //       It simply copies the busAddress [argument 2] into ppCpuVirtAddr.
363                 //       During the FD mapping cleanup for bug 1784955, it is expected that
364                 //       this function will transition to storing the mapping parameters onto
365                 //       the FD.  Also note: All mapping parameters are ignored (!).
366                 //
367                 //   For now, we're going to return the first page of the nvlink aperture
368                 //   mapping of this allocation.  See nvidia_mmap_helper for establishment
369                 //   of direct mapping.
370                 //
371 
372                 rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo,
373                                               ((NvUPtr)pKernelMemorySystem->coherentCpuFbBase +
374                                                (NvUPtr)memdescGetPhysAddr(pMemDesc,
375                                                 AT_CPU, pMapParams->offset)),
376                                               pMapParams->length,
377                                               pMapParams->protect,
378                                               pMapParams->ppCpuVirtAddr,
379                                               &priv,
380                                               NV_MEMORY_UNCACHED);
381                 if (rmStatus != NV_OK)
382                     return rmStatus;
383             }
384 
385             NV_PRINTF(LEVEL_INFO,
386                       "NVLINK mapping allocated: AtsBase=0x%llx, _pteArray[0]=0x%llx, mappedCpuAddr=0x%llx, length=%d\n",
387                       (NvU64)pKernelMemorySystem->coherentCpuFbBase,
388                       (NvU64)((NvUPtr)pMemDesc->_pteArray[0]),
389                       (*((NvU64 *)(pMapParams->ppCpuVirtAddr))),
390                       (int)pMapParams->length);
391 
392             rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping,
393                                                     pMapParams->bKernel,
394                                                     priv,
395                                                     *(pMapParams->ppCpuVirtAddr),
396                                                     pMapParams->length,
397                                                     -1,
398                                                     -1,
399                                                     pMapParams->flags);
400             pCpuMapping->pPrivate->pGpu = pGpu;
401 
402             if (rmStatus != NV_OK)
403                 return rmStatus;
404 
405         }
406         else
407         {
408             //
409             // RM should fail gracefully when clients map FB in the Coherent link path with special KIND.
410             // There is no GMMU in the Coherent link path, only regular KIND(GMK) is supported and other special
411             // KIND(s) (like encrypted, compressed etc.) are not supported.
412             //
413             NV_PRINTF(LEVEL_ERROR, "Need BAR mapping on coherent link! FAIL!!\n");
414             return NV_ERR_NOT_SUPPORTED;
415         }
416     }
417     else if (effectiveAddrSpace == ADDR_FBMEM)
418     {
419         RmPhysAddr fbAddr = 0;
420         NvBool bcState = NV_FALSE;
421         NvU64 gpuVirtAddr = 0;
422         NvU64 gpuMapLength = 0;
423 
424         //
425         // MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1 indicates a special mapping type of HW registers,
426         // so map it as device memory (uncached).
427         //
428         NvU32 cachingType = NV_MEMORY_WRITECOMBINED;
429         if (pMemDesc != NULL && !memdescHasSubDeviceMemDescs(pMemDesc))
430         {
431             cachingType = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1) ?
432                           NV_MEMORY_UNCACHED : NV_MEMORY_WRITECOMBINED;
433         }
434 
435         if (!kbusIsBar1PhysicalModeEnabled(pKernelBus))
436         {
437             //
438             // For Multi-Board, the BC area has a NULL address range.  So we have
439             // to bring in the master.
440             //
441             bcState = gpumgrGetBcEnabledStatus(pGpu);
442             if (bcState)
443             {
444                 pGpu = gpumgrGetParentGPU(pGpu);
445                 gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
446             }
447 
448             //
449             // Allocate a GPU virtual address space for the video memory region
450             // for those GPUs that support it.
451             //
452 
453             gpuMapLength = pMapParams->length;
454 
455             //
456             // If client ask for Direct mapping , we cannot do much here but just
457             // simulate as it is non encrypted surface.
458             // It is currently totaly for testing purpose.
459             //
460             NV_ASSERT(pGpu->busInfo.gpuPhysFbAddr);
461 
462             {
463                 Device *pDevice = NULL;
464 
465                 // Below, we only map one GPU's address for CPU access, so we can use UNICAST here
466                 NvU32 busMapFbFlags = BUS_MAP_FB_FLAGS_MAP_UNICAST;
                if (DRF_VAL(OS33, _FLAGS, _MAPPING, pMapParams->flags) == NVOS33_FLAGS_MAPPING_DIRECT)
468                 {
469                     busMapFbFlags |= BUS_MAP_FB_FLAGS_DISABLE_ENCRYPTION;
470                 }
471 
472                 switch (pMapParams->protect)
473                 {
474                     case NV_PROTECT_READABLE:
475                         busMapFbFlags |= BUS_MAP_FB_FLAGS_READ_ONLY;
476                         break;
477                     case NV_PROTECT_WRITEABLE:
478                         busMapFbFlags |= BUS_MAP_FB_FLAGS_WRITE_ONLY;
479                         break;
480                 }
481 
482                 pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu);
483 
                // WAR for Bug 3564398: the doorbell must be allocated differently on Windows
485                 if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM &&
486                     memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1))
487                 {
488                     busMapFbFlags |= BUS_MAP_FB_FLAGS_MAP_DOWNWARDS;
489                 }
490 
491                 (void) deviceGetByHandle(staticCast(pClient, RsClient),
492                                          pMapParams->hDevice, &pDevice);
493 
494                 rmStatus = kbusMapFbAperture_HAL(pGpu, pKernelBus,
495                                                  pMemDesc, pMapParams->offset,
496                                                  &gpuVirtAddr, &gpuMapLength,
497                                                  busMapFbFlags, pDevice);
498             }
499 
500             if (rmStatus != NV_OK)
501                 goto _rmMapMemory_busFail;
502         }
503         else
504         {
505             NV_ASSERT_OR_RETURN(memdescGetContiguity(pMemDesc, AT_GPU),
506                    NV_ERR_NOT_SUPPORTED);
507 
508             fbAddr = gpumgrGetGpuPhysFbAddr(pGpu) + memdescGetPte(pMemDesc, AT_GPU, 0) +
509                      memdescGetPteAdjust(pMemDesc) + pMapParams->offset;
510         }
511 
512         if (pMapParams->bKernel)
513         {
            rmStatus = osMapPciMemoryKernel64(pGpu,
                                              (kbusIsBar1PhysicalModeEnabled(pKernelBus) ?
                                               fbAddr : gpumgrGetGpuPhysFbAddr(pGpu) + gpuVirtAddr),
517                                               pMapParams->length,
518                                               pMapParams->protect,
519                                               pMapParams->ppCpuVirtAddr,
520                                               cachingType);
521         }
522         else
523         {
            rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo,
                                          (kbusIsBar1PhysicalModeEnabled(pKernelBus) ?
                                           fbAddr : gpumgrGetGpuPhysFbAddr(pGpu) + gpuVirtAddr),
527                                           pMapParams->length,
528                                           pMapParams->protect,
529                                           pMapParams->ppCpuVirtAddr,
530                                           &priv,
531                                           cachingType);
532         }
533 
534         //
535         // It's possible that NVOS33_FLAGS_MAPPING is set to NVOS33_FLAGS_MAPPING_DIRECT
536         // at this point--set it to REFLECTED to indicate that we aren't using
537         // direct mapping.
538         //
539         pMapParams->flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _REFLECTED, pMapParams->flags);
540         pMapParams->flags = FLD_SET_DRF_NUM(OS33, _FLAGS, _CACHING_TYPE, cachingType, pMapParams->flags);
541 
542         if (rmStatus != NV_OK)
543             goto _rmMapMemory_pciFail;
544 
545         rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping,
546                                                 pMapParams->bKernel,
547                                                 priv,
548                                                 *(pMapParams->ppCpuVirtAddr),
549                                                 pMapParams->length,
550                                                 kbusIsBar1PhysicalModeEnabled(pKernelBus)
551                                                     ? (NvU64)-1
552                                                     : gpuVirtAddr,
553                                                 kbusIsBar1PhysicalModeEnabled(pKernelBus)
554                                                     ? (NvU64)-1
555                                                     : gpuMapLength,
556                                                 pMapParams->flags);
557         pCpuMapping->pPrivate->pGpu = pGpu;
558 
559         if (rmStatus != NV_OK)
560         {
561             RmUnmapBusAperture(pGpu,
562                                *(pMapParams->ppCpuVirtAddr),
563                                pMapParams->length,
564                                pMapParams->bKernel,
565                                priv);
566     _rmMapMemory_pciFail:
567             if (!kbusIsBar1PhysicalModeEnabled(pKernelBus))
568             {
569                 kbusUnmapFbAperture_HAL(pGpu,
570                                         pKernelBus,
571                                         pMemDesc,
572                                         gpuVirtAddr,
573                                         gpuMapLength,
574                                         BUS_MAP_FB_FLAGS_MAP_UNICAST);
575     _rmMapMemory_busFail:
576                 gpumgrSetBcEnabledStatus(pGpu, bcState);
577             }
578         }
579     }
    else if (bIsSysmem)
582     {
583         // A client can specify not to map memory by default when
584         // calling into RmAllocMemory. In those cases, we don't have
585         // a mapping yet, so go ahead and map it for the client now.
586         rmStatus = memdescMap(pMemDesc,
587                               pMapParams->offset,
588                               pMapParams->length,
589                               pMapParams->bKernel,
590                               pMapParams->protect,
591                               pMapParams->ppCpuVirtAddr,
592                               &priv);
593 
594         // Associate this mapping with the client
595         if (rmStatus == NV_OK && *(pMapParams->ppCpuVirtAddr))
596         {
597             pMapParams->flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags);
598             rmStatus = CliUpdateMemoryMappingInfo(pCpuMapping,
599                                                   pMapParams->bKernel,
600                                                   *(pMapParams->ppCpuVirtAddr),
601                                                   priv,
602                                                   pMapParams->length,
603                                                   pMapParams->flags);
604             pCpuMapping->pPrivate->pGpu = pGpu;
605         }
606     }
607     else if (effectiveAddrSpace == ADDR_VIRTUAL)
608     {
609         rmStatus = NV_ERR_NOT_SUPPORTED;
610     }
611     else if (effectiveAddrSpace == ADDR_REGMEM)
612     {
613         RS_PRIV_LEVEL privLevel;
614 
615         privLevel = rmclientGetCachedPrivilege(pClient);
616         if (!rmclientIsAdmin(pClient, privLevel) &&
617             !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK))
618         {
619             return NV_ERR_PROTECTION_FAULT;
620         }
621 
622         if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pMapParams->flags) == NVOS33_FLAGS_MEM_SPACE_USER)
623         {
624             privLevel = RS_PRIV_LEVEL_USER;
625         }
626 
627         // Create a mapping of BAR0
628         rmStatus = osMapGPU(pGpu,
629                             privLevel,
                            pMapParams->offset + pMemDesc->_pteArray[0],
631                             pMapParams->length,
632                             pMapParams->protect,
633                             pMapParams->ppCpuVirtAddr,
634                             &priv);
635         if (rmStatus != NV_OK)
636             return rmStatus;
637 
638         // Save off the mapping
639         rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping,
640                                                 pMapParams->bKernel,
641                                                 priv,
642                                                 *(pMapParams->ppCpuVirtAddr),
643                                                 pMapParams->length,
644                                                 -1, // gpu virtual addr
645                                                 -1, // gpu map length
646                                                 pMapParams->flags);
647         pCpuMapping->pPrivate->pGpu = pGpu;
648 
649         if (rmStatus != NV_OK)
650         {
651             osUnmapGPU(pGpu->pOsGpuInfo,
652                        privLevel,
653                        *(pMapParams->ppCpuVirtAddr),
654                        pMapParams->length,
655                        priv);
656             return rmStatus;
657         }
658     }
659     else
660     {
661         return NV_ERR_INVALID_CLASS;
662     }
663 
664     if (rmStatus == NV_OK)
665     {
666         NV_PRINTF(LEVEL_INFO,
667                   "%s created. CPU Virtual Address: " NvP64_fmt "\n",
668                   FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags) ? "Direct mapping" : "Mapping",
669                   *(pMapParams->ppCpuVirtAddr));
670     }
671 
672     return rmStatus;
673 }
674 
675 NV_STATUS
676 memUnmap_IMPL
677 (
678     Memory *pMemory,
679     CALL_CONTEXT *pCallContext,
680     RsCpuMapping *pCpuMapping
681 )
682 {
683     RmClient           *pClient             = dynamicCast(pCallContext->pClient, RmClient);
684     OBJGPU             *pGpu                = pCpuMapping->pPrivate->pGpu;
685     MEMORY_DESCRIPTOR  *pMemDesc            = pMemory->pMemDesc;
686 
687     KernelBus          *pKernelBus          = NULL;
688     MemoryManager      *pMemoryManager      = NULL;
689 
690     if (pGpu != NULL)
691     {
692         pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
693         pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
694     }
695 
696     if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, pCpuMapping->flags))
697     {
698         // Nothing more to do
699     }
700     else if ((pGpu != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) &&
701              (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM))
702     {
703         NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED));
704         NV_ASSERT((memdescGetPteKind(pMemDesc) ==
705                    memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, RM_DEFAULT_PTE_KIND)) && // pitch
706                   (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED)));
707 
708         if (pCpuMapping->pPrivate->bKernel)
709         {
            if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
            {
                kbusUnmapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc);
            }
715             else
716             {
717                 osUnmapSystemMemory(pMemDesc,
718                                     pCpuMapping->pPrivate->bKernel,
719                                     pCpuMapping->processId,
720                                     pCpuMapping->pLinearAddress,
721                                     pCpuMapping->pPrivate->pPriv);
722             }
723         }
724 
725         NV_PRINTF(LEVEL_INFO,
726                   "Unmapping from NVLINK handle = 0x%x, addr= 0x%llx\n",
727                   RES_GET_HANDLE(pMemory), (NvU64)pCpuMapping->pLinearAddress);
728 
729         //
730         // No BAR aperture mapping to delete.
731         // No kernel mapping to remove
732         // User-space will call munmap to eliminate PTE mappings
733         //
734     }
735     // System Memory case
736     else if ((pGpu == NULL) || ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
737                                  FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags)))
738     {
739         if (FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags))
740         {
741             memdescUnmap(pMemDesc,
742                          pCpuMapping->pPrivate->bKernel,
743                          pCpuMapping->processId,
744                          pCpuMapping->pLinearAddress,
745                          pCpuMapping->pPrivate->pPriv);
746         }
747     }
748     else if ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ||
749              ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
750               FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _REFLECTED, pCpuMapping->flags)))
751     {
752         RmUnmapBusAperture(pGpu,
753                            pCpuMapping->pLinearAddress,
754                            pCpuMapping->length,
755                            pCpuMapping->pPrivate->bKernel,
756                            pCpuMapping->pPrivate->pPriv);
757 
        if (!kbusIsBar1PhysicalModeEnabled(pKernelBus))
        {
            kbusUnmapFbAperture_HAL(pGpu, pKernelBus,
                                    pMemory->pMemDesc,
                                    pCpuMapping->pPrivate->gpuAddress,
                                    pCpuMapping->pPrivate->gpuMapLength,
                                    BUS_MAP_FB_FLAGS_MAP_UNICAST);
        }
768     }
769     else if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL)
770     {
771         NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE);
772     }
773     else if (memdescGetAddressSpace(pMemDesc) == ADDR_REGMEM)
774     {
775         osUnmapGPU(pGpu->pOsGpuInfo,
776                    rmclientGetCachedPrivilege(pClient),
777                    pCpuMapping->pLinearAddress,
778                    pCpuMapping->length,
779                    pCpuMapping->pPrivate->pPriv);
780     }
781     return NV_OK;
782 }
783 
784 NV_STATUS
785 rmapiValidateKernelMapping
786 (
787     RS_PRIV_LEVEL privLevel,
788     NvU32 flags,
789     NvBool *pbKernel
790 )
791 {
792     NvBool bKernel;
793     NV_STATUS status = NV_OK;
794     if (privLevel < RS_PRIV_LEVEL_KERNEL)
795     {
        // Only kernel clients may specify the user mem-space mapping flag.
797         if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_USER)
798             status = NV_ERR_INVALID_FLAGS;
799         bKernel = NV_FALSE;
800     }
801     else
802     {
803         //
804         // Kernel clients can only use the persistent flag if they are
805         // doing a user mapping.
806         //
807         bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT);
808     }
809 
810     // OS descriptor will already be mapped
811     if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, flags))
812         status = NV_ERR_INVALID_FLAGS;
813 
814     if (pbKernel != NULL)
815         *pbKernel = bKernel;
816 
817     return status;
818 }
819 
820 NV_STATUS
821 serverMap_Prologue
822 (
823     RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams
824 )
825 {
826     NV_STATUS           rmStatus;
827     RmClient           *pClient;
828     RsResourceRef      *pMemoryRef;
829     NvHandle            hClient = pMapParams->hClient;
830     NvHandle            hParent = hClient;
831     NvHandle            hSubDevice = NV01_NULL_OBJECT;
832     NvBool              bClientAlloc = (hClient == pMapParams->hDevice);
833     NvU32               flags = pMapParams->flags;
834     RS_PRIV_LEVEL       privLevel;
835 
    // Persistent sysmem mappings are no longer supported
837     if (DRF_VAL(OS33, _FLAGS, _PERSISTENT, flags) == NVOS33_FLAGS_PERSISTENT_ENABLE)
838         return NV_ERR_INVALID_FLAGS;
839 
840     // Populate Resource Server information
841     pClient = serverutilGetClientUnderLock(hClient);
842     NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT);
843 
844     // Validate hClient
845     privLevel = rmclientGetCachedPrivilege(pClient);
846 
847     // RS-TODO: Assert if this fails after all objects are converted
848     NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient),
849                 pMapParams->hMemory, &pMemoryRef));
850 
851     if (pMemoryRef->pParentRef != NULL)
852         hParent = pMemoryRef->pParentRef->hResource;
853 
854     // check if we have a user or kernel RM client
855     rmStatus = rmapiValidateKernelMapping(privLevel, flags, &pMapParams->bKernel);
856     if (rmStatus != NV_OK)
857         return rmStatus;
858 
859     //
860     // First check to see if it is a standard device or the BC region of
861     // a MC adapter.
862     //
863     pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK;
864     if (!bClientAlloc)
865     {
866         NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT);
867 
868         RsResourceRef *pContextRef;
869         rmStatus = clientGetResourceRef(staticCast(pClient, RsClient),
870                 pMapParams->hDevice, &pContextRef);
871 
872         if (rmStatus != NV_OK)
873             return rmStatus;
874 
        if (pContextRef->internalClassId == classId(Subdevice))
        {
            hSubDevice = pMapParams->hDevice;
            pMapParams->hDevice = pContextRef->pParentRef->hResource;
        }
        else if (pContextRef->internalClassId != classId(Device))
        {
            return NV_ERR_INVALID_OBJECT_PARENT;
        }
887 
888         pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK;
889         pMapParams->pLockInfo->pContextRef = pContextRef;
890     }
891     else
892     {
893         NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT);
894     }
895 
896     pMapParams->hContext = (hSubDevice != NV01_NULL_OBJECT)
897                       ? hSubDevice
898                       : pMapParams->hDevice;
899 
900 
901     // convert from OS33 flags to RM's memory protection flags
902     switch (DRF_VAL(OS33, _FLAGS, _ACCESS, flags))
903     {
904         case NVOS33_FLAGS_ACCESS_READ_WRITE:
905             pMapParams->protect = NV_PROTECT_READ_WRITE;
906             break;
907         case NVOS33_FLAGS_ACCESS_READ_ONLY:
908             pMapParams->protect = NV_PROTECT_READABLE;
909             break;
910         case NVOS33_FLAGS_ACCESS_WRITE_ONLY:
911             pMapParams->protect = NV_PROTECT_WRITEABLE;
912             break;
913         default:
914             return NV_ERR_INVALID_FLAGS;
915     }
916 
917     return NV_OK;
918 }
919 
920 NV_STATUS
921 serverUnmap_Prologue
922 (
923     RsServer *pServer,
924     RS_CPU_UNMAP_PARAMS *pUnmapParams
925 )
926 {
927     OBJGPU *pGpu = NULL;
928     NV_STATUS rmStatus;
929     RmClient *pClient;
930     RsResourceRef *pMemoryRef;
931     NvHandle hClient = pUnmapParams->hClient;
932     NvHandle hParent = hClient;
933     NvHandle hMemory = pUnmapParams->hMemory;
934     NvBool bClientAlloc = (pUnmapParams->hDevice == pUnmapParams->hClient);
935     NvBool bKernel;
936     NvBool bBroadcast;
937     NvU32 ProcessId = pUnmapParams->processId;
938     RS_PRIV_LEVEL privLevel;
939     void *pProcessHandle = NULL;
940 
941     // Populate Resource Server information
942     pClient = serverutilGetClientUnderLock(hClient);
943     NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT);
944 
945     // check if we have a user or kernel RM client
946     privLevel = rmclientGetCachedPrivilege(pClient);
947 
948     // RS-TODO: Assert if this fails after all objects are converted
949     NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient),
950                 hMemory, &pMemoryRef));
951 
952     if (pMemoryRef->pParentRef != NULL)
953         hParent = pMemoryRef->pParentRef->hResource;
954 
955     //
956     // First check to see if it is a standard device or the BC region of
957     // a MC adapter.
958     //
959     pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK;
960     if (!bClientAlloc)
961     {
962         NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT);
963 
964         RsResourceRef *pContextRef;
965         rmStatus = clientGetResourceRef(staticCast(pClient, RsClient),
966                 pUnmapParams->hDevice, &pContextRef);
967 
968         if (rmStatus != NV_OK)
969             return rmStatus;
970 
971         if (pContextRef->internalClassId == classId(Subdevice))
972         {
973             pUnmapParams->hDevice = pContextRef->pParentRef->hResource;
974         }
975         else if (pContextRef->internalClassId != classId(Device))
976         {
977             return NV_ERR_INVALID_OBJECT_PARENT;
978         }
979 
980         pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK;
981         pUnmapParams->pLockInfo->pContextRef = pContextRef;
982         NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pUnmapParams->pLockInfo->pContextRef, &bBroadcast, &pGpu));
983         gpuSetThreadBcState(pGpu, bBroadcast);
984     }
985     else
986     {
987         NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT);
988     }
989 
990     // Decide what sort of mapping it is, user or kernel
991     if (privLevel < RS_PRIV_LEVEL_KERNEL)
992     {
993         bKernel = NV_FALSE;
994     }
995     else
996     {
997         bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pUnmapParams->flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT);
998     }
999 
1000     //
1001     // If it's a user mapping, and we're not currently in the same process that
1002     // it's mapped into, then attempt to attach to the other process first.
1003     //
1004     if (!bKernel && (ProcessId != osGetCurrentProcess()))
1005     {
1006         rmStatus = osAttachToProcess(&pProcessHandle, ProcessId);
1007         if (rmStatus != NV_OK)
1008         {
1009             if (pUnmapParams->bTeardown)
1010                 pProcessHandle = NULL;
1011             else
1012                 return rmStatus;
1013         }
1014 
1015         pUnmapParams->pProcessHandle = pProcessHandle;
1016     }
1017 
1018     // Don't do any filtering if this is a tear-down path
1019     if (pUnmapParams->bTeardown)
1020     {
1021         pUnmapParams->fnFilter = NULL;
1022         return NV_OK;
1023     }
1024 
1025 
1026     pUnmapParams->fnFilter = bKernel
1027         ? serverutilMappingFilterKernel
1028         : serverutilMappingFilterCurrentUserProc;
1029 
1030     return NV_OK;
1031 }
1032 
1033 void
1034 serverUnmap_Epilogue
1035 (
1036     RsServer *pServer,
1037     RS_CPU_UNMAP_PARAMS *pUnmapParams
1038 )
1039 {
    // Detach from the mapping's user process if we attached to it in serverUnmap_Prologue.
1041     if (pUnmapParams->pProcessHandle != NULL)
1042     {
1043         osDetachFromProcess(pUnmapParams->pProcessHandle);
1044         pUnmapParams->pProcessHandle = NULL;
1045     }
1046 }
1047 
static void RmUnmapBusAperture
1049 (
1050     OBJGPU *pGpu,
1051     NvP64   pCpuVirtualAddress,
1052     NvU64   length,
1053     NvBool  bKernel,
1054     NvP64   pPrivateData
1055 )
1056 {
1057     if (bKernel)
1058     {
1059         osUnmapPciMemoryKernel64(pGpu, pCpuVirtualAddress);
1060     }
1061     else
1062     {
1063         osUnmapPciMemoryUser(pGpu->pOsGpuInfo, pCpuVirtualAddress, length, pPrivateData);
1064     }
1065 }
1066 
1067 NV_STATUS
1068 rmapiMapToCpu
1069 (
1070     RM_API   *pRmApi,
1071     NvHandle  hClient,
1072     NvHandle  hDevice,
1073     NvHandle  hMemory,
1074     NvU64     offset,
1075     NvU64     length,
1076     void    **ppCpuVirtAddr,
1077     NvU32     flags
1078 )
1079 {
1080     NvP64     pCpuVirtAddrNvP64 = NvP64_NULL;
1081     NV_STATUS status;
1082 
1083     if (!pRmApi->bHasDefaultSecInfo)
1084         return NV_ERR_NOT_SUPPORTED;
1085 
1086     status = pRmApi->MapToCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, offset, length,
1087                                           &pCpuVirtAddrNvP64, flags, &pRmApi->defaultSecInfo);
1088 
1089     if (ppCpuVirtAddr)
1090         *ppCpuVirtAddr = NvP64_VALUE(pCpuVirtAddrNvP64);
1091 
1092     return status;
1093 }
1094 
1095 /**
1096  * Call into Resource Server to register and execute a CPU mapping operation.
1097  *
1098  * Resource Server will:
1099  *    1. Callback into RM (serverMap_Prologue) to set up mapping parameters, mapping context object,
1100  *       and locking requirements
1101  *    2. Take locks (if required)
1102  *    3. Allocate and register a RsCpuMapping book-keeping entry on the target object's RsResourceRef
1103  *    4. Call the target object's mapping virtual function (xxxMap_IMPL, defined in RM)
 *    5. Set up back-references to the mapping context object (if required). This mapping will automatically
 *       be unmapped if either the target object or the mapping context object is freed.
1106  *    6. Release any locks taken
1107  */
1108 NV_STATUS
1109 rmapiMapToCpuWithSecInfoV2
1110 (
1111     RM_API            *pRmApi,
1112     NvHandle           hClient,
1113     NvHandle           hDevice,
1114     NvHandle           hMemory,
1115     NvU64              offset,
1116     NvU64              length,
1117     NvP64             *ppCpuVirtAddr,
1118     NvU32             *flags,
1119     API_SECURITY_INFO *pSecInfo
1120 )
1121 {
1122     NV_STATUS  status;
1123     RM_API_CONTEXT rmApiContext = {0};
1124     RmMapParams rmMapParams;
1125     RS_LOCK_INFO lockInfo;
1126 
1127     NV_PRINTF(LEVEL_INFO,
1128               "Nv04MapMemory: client:0x%x device:0x%x memory:0x%x\n", hClient,
1129               hDevice, hMemory);
1130     NV_PRINTF(LEVEL_INFO,
1131               "Nv04MapMemory:  offset: %llx length: %llx flags:0x%x\n",
1132               offset, length, *flags);
1133 
1134     status = rmapiPrologue(pRmApi, &rmApiContext);
1135     if (status != NV_OK)
1136         return status;
1137 
1138     NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Nv04MapMemory 0x%x\n", *flags);
1139 
1140     portMemSet(&lockInfo, 0, sizeof(lockInfo));
1141     status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo);
1142     if (status != NV_OK)
1143     {
1144         rmapiEpilogue(pRmApi, &rmApiContext);
1145         return status;
1146     }
1147 
1148     LOCK_METER_DATA(MAPMEM, flags, 0, 0);
1149 
1150     // clear params for good measure
1151     portMemSet(&rmMapParams, 0, sizeof (rmMapParams));
1152 
1153     // load user args
1154     rmMapParams.hClient = hClient;
1155     rmMapParams.hDevice = hDevice;
1156     rmMapParams.hMemory = hMemory;
1157     rmMapParams.offset = offset;
1158     rmMapParams.length = length;
1159     rmMapParams.ppCpuVirtAddr = ppCpuVirtAddr;
1160     rmMapParams.flags = *flags;
1161     rmMapParams.pLockInfo = &lockInfo;
1162     rmMapParams.pSecInfo = pSecInfo;
1163 
1164     status = serverMap(&g_resServ, rmMapParams.hClient, rmMapParams.hMemory, &rmMapParams);
1165 
1166     rmapiEpilogue(pRmApi, &rmApiContext);
1167 
1168     *flags = rmMapParams.flags;
1169 
1170     if (status == NV_OK)
1171     {
1172         NV_PRINTF(LEVEL_INFO, "Nv04MapMemory: complete\n");
1173         NV_PRINTF(LEVEL_INFO,
1174                   "Nv04MapMemory:  *ppCpuVirtAddr:" NvP64_fmt "\n",
1175                   *ppCpuVirtAddr);
1176     }
1177     else
1178     {
1179         NV_PRINTF(LEVEL_WARNING,
1180                   "Nv04MapMemory: map failed; status: %s (0x%08x)\n",
1181                   nvstatusToString(status), status);
1182     }
1183 
1184     return status;
1185 }
1186 
1187 NV_STATUS
1188 rmapiMapToCpuWithSecInfo
1189 (
1190     RM_API            *pRmApi,
1191     NvHandle           hClient,
1192     NvHandle           hDevice,
1193     NvHandle           hMemory,
1194     NvU64              offset,
1195     NvU64              length,
1196     NvP64             *ppCpuVirtAddr,
1197     NvU32              flags,
1198     API_SECURITY_INFO *pSecInfo
1199 )
1200 {
1201     return rmapiMapToCpuWithSecInfoV2(pRmApi, hClient,
1202         hDevice, hMemory, offset, length, ppCpuVirtAddr,
1203         &flags, pSecInfo);
1204 }
1205 
1206 NV_STATUS
1207 rmapiMapToCpuWithSecInfoTls
1208 (
1209     RM_API            *pRmApi,
1210     NvHandle           hClient,
1211     NvHandle           hDevice,
1212     NvHandle           hMemory,
1213     NvU64              offset,
1214     NvU64              length,
1215     NvP64             *ppCpuVirtAddr,
1216     NvU32              flags,
1217     API_SECURITY_INFO *pSecInfo
1218 )
1219 {
1220     THREAD_STATE_NODE threadState;
1221     NV_STATUS         status;
1222 
1223     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1224 
1225     status = rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, &flags, pSecInfo);
1226 
1227     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1228 
1229     return status;
1230 }
1231 NV_STATUS
1232 rmapiMapToCpuWithSecInfoTlsV2
1233 (
1234     RM_API            *pRmApi,
1235     NvHandle           hClient,
1236     NvHandle           hDevice,
1237     NvHandle           hMemory,
1238     NvU64              offset,
1239     NvU64              length,
1240     NvP64             *ppCpuVirtAddr,
1241     NvU32             *flags,
1242     API_SECURITY_INFO *pSecInfo
1243 )
1244 {
1245     THREAD_STATE_NODE threadState;
1246     NV_STATUS         status;
1247 
1248     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1249 
1250     status = rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, flags, pSecInfo);
1251 
1252     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1253 
1254     return status;
1255 }
1256 
1257 NV_STATUS
1258 rmapiUnmapFromCpu
1259 (
1260     RM_API   *pRmApi,
1261     NvHandle  hClient,
1262     NvHandle  hDevice,
1263     NvHandle  hMemory,
1264     void     *pLinearAddress,
1265     NvU32     flags,
1266     NvU32     ProcessId
1267 )
1268 {
1269     if (!pRmApi->bHasDefaultSecInfo)
1270         return NV_ERR_NOT_SUPPORTED;
1271 
1272     return pRmApi->UnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, NV_PTR_TO_NvP64(pLinearAddress),
1273                                            flags, ProcessId, &pRmApi->defaultSecInfo);
1274 }
1275 
1276 /**
1277  * Call into Resource Server to execute a CPU unmapping operation.
1278  *
1279  * Resource Server will:
1280  *    1. Callback into RM (serverUnmap_Prologue) to set up unmapping parameters, locking requirements,
1281  *       and attempt to attach to the mapping's user process (for user mappings only)
1282  *    2. Take locks (if required)
1283  *    3. Lookup the mapping
1284  *    4. Call the target object's unmapping virtual function (xxxUnmap_IMPL, defined in RM)
1285  *    5. Unregister the mapping from its back-references, and free the mapping
1286  *    6. Callback into RM (serverUnmap_Epilogue) to detach from the mapping's user process (if required)
1287  *    7. Release any locks taken
1288  */
1289 NV_STATUS
1290 rmapiUnmapFromCpuWithSecInfo
1291 (
1292     RM_API            *pRmApi,
1293     NvHandle           hClient,
1294     NvHandle           hDevice,
1295     NvHandle           hMemory,
1296     NvP64              pLinearAddress,
1297     NvU32              flags,
1298     NvU32              ProcessId,
1299     API_SECURITY_INFO *pSecInfo
1300 )
1301 {
1302     NV_STATUS status;
1303     RM_API_CONTEXT rmApiContext = {0};
1304     RmUnmapParams rmUnmapParams;
1305     RS_LOCK_INFO lockInfo;
1306 
1307     NV_PRINTF(LEVEL_INFO,
1308               "Nv04UnmapMemory: client:0x%x device:0x%x memory:0x%x pLinearAddr:" NvP64_fmt " flags:0x%x\n",
1309               hClient, hDevice, hMemory, pLinearAddress, flags);
1310 
1311     status = rmapiPrologue(pRmApi, &rmApiContext);
1312     if (status != NV_OK)
1313         return status;
1314 
1315     portMemSet(&lockInfo, 0, sizeof(lockInfo));
1316     status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo);
1317     if (status != NV_OK)
1318     {
1319         rmapiEpilogue(pRmApi, &rmApiContext);
        return status;
1321     }
1322 
1323     LOCK_METER_DATA(UNMAPMEM, flags, 0, 0);
1324 
1325     portMemSet(&rmUnmapParams, 0, sizeof (rmUnmapParams));
1326     rmUnmapParams.hClient = hClient;
1327     rmUnmapParams.hDevice = hDevice;
1328     rmUnmapParams.hMemory = hMemory;
1329     rmUnmapParams.pLinearAddress = pLinearAddress;
1330     rmUnmapParams.flags = flags;
1331     rmUnmapParams.processId = ProcessId;
1332     rmUnmapParams.pLockInfo = &lockInfo;
1333     rmUnmapParams.pSecInfo = pSecInfo;
1334 
1335     status = serverUnmap(&g_resServ, hClient, hMemory, &rmUnmapParams);
1336 
1337     rmapiEpilogue(pRmApi, &rmApiContext);
1338 
1339     if (status == NV_OK)
1340     {
1341         NV_PRINTF(LEVEL_INFO, "Nv04UnmapMemory: unmap complete\n");
1342     }
1343     else
1344     {
1345         NV_PRINTF(LEVEL_WARNING,
1346                   "Nv04UnmapMemory: unmap failed; status: %s (0x%08x)\n",
1347                   nvstatusToString(status), status);
1348     }
1349 
1350     return status;
1351 }
1352 
1353 NV_STATUS
1354 rmapiUnmapFromCpuWithSecInfoTls
1355 (
1356     RM_API            *pRmApi,
1357     NvHandle           hClient,
1358     NvHandle           hDevice,
1359     NvHandle           hMemory,
1360     NvP64              pLinearAddress,
1361     NvU32              flags,
1362     NvU32              ProcessId,
1363     API_SECURITY_INFO *pSecInfo
1364 )
1365 {
1366     THREAD_STATE_NODE threadState;
1367     NV_STATUS         status;
1368 
1369     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1370 
1371     status = rmapiUnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, pLinearAddress,
1372                                           flags, ProcessId, pSecInfo);
1373 
1374     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1375 
1376     return status;
1377 }
1378 
1379 NV_STATUS
1380 serverMapLookupLockFlags
1381 (
1382     RsServer *pServer,
1383     RS_LOCK_ENUM lock,
1384     RS_CPU_MAP_PARAMS *pParams,
1385     LOCK_ACCESS_TYPE *pAccess
1386 )
1387 {
1388     NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT);
1389 
1390     *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_MAP))
1391         ? LOCK_ACCESS_READ
1392         : LOCK_ACCESS_WRITE;
1393     return NV_OK;
1394 }
1395 
1396 NV_STATUS
1397 serverUnmapLookupLockFlags
1398 (
1399     RsServer *pServer,
1400     RS_LOCK_ENUM lock,
1401     RS_CPU_UNMAP_PARAMS *pParams,
1402     LOCK_ACCESS_TYPE *pAccess
1403 )
1404 {
1405     NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT);
1406 
1407     *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_UNMAP))
1408         ? LOCK_ACCESS_READ
1409         : LOCK_ACCESS_WRITE;
1410     return NV_OK;
1411 }
1412 
1413 NV_STATUS
1414 refAllocCpuMappingPrivate
1415 (
1416     RS_CPU_MAP_PARAMS *pMapParams,
1417     RsCpuMapping *pCpuMapping
1418 )
1419 {
1420     pCpuMapping->pPrivate = portMemAllocNonPaged(sizeof(RS_CPU_MAPPING_PRIVATE));
1421     if (pCpuMapping->pPrivate == NULL)
1422         return NV_ERR_NO_MEMORY;
1423 
1424     pCpuMapping->pPrivate->protect = pMapParams->protect;
1425     pCpuMapping->pPrivate->bKernel = pMapParams->bKernel;
1426 
1427     return NV_OK;
1428 }
1429 
1430 void
1431 refFreeCpuMappingPrivate
1432 (
1433     RsCpuMapping *pCpuMapping
1434 )
1435 {
1436     portMemFree(pCpuMapping->pPrivate);
1437 }
1438