1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "core/core.h"
25 #include "core/locks.h"
26 #include "core/thread_state.h"
27 #include "os/os.h"
28 #include "gpu/mem_mgr/mem_desc.h"
29 #include "gpu/device/device.h"
30 #include "gpu/subdevice/generic_engine.h"
31 #include "gpu/subdevice/subdevice.h"
32 #include "gpu/mem_mgr/mem_mgr.h"
33 #include "mem_mgr/fla_mem.h"
34 
35 #include "class/cl0000.h" // NV01_NULL_OBJECT
36 
37 #include "resserv/rs_server.h"
38 #include "resserv/rs_client.h"
39 #include "resserv/rs_resource.h"
40 
41 #include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
42 #include "gpu/mem_sys/kern_mem_sys.h"
43 #include "gpu/bus/kern_bus.h"
44 
45 #include "rmapi/rs_utils.h"
46 #include "rmapi/mapping_list.h"
47 #include "entry_points.h"
48 
static void RmUnmapBusAperture(OBJGPU *, NvP64, NvU64, NvBool, NvP64);
50 
51 #include "gpu/conf_compute/conf_compute.h"
52 
53 typedef struct RS_CPU_MAP_PARAMS RmMapParams;
54 typedef struct RS_CPU_UNMAP_PARAMS RmUnmapParams;
55 
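//
// Map a subregion of a GPU resource's register space (BAR0) into the caller's
// address space. Validates the requested offset/length against the region,
// then maps through osMapGPU at the client's cached privilege level.
//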
56 NV_STATUS
57 rmapiMapGpuCommon
58 (
59     RsResource *pResource,
60     CALL_CONTEXT *pCallContext,
61     RsCpuMapping *pCpuMapping,
62     OBJGPU *pGpu,
63     NvU32 regionOffset,
64     NvU32 regionSize
65 )
66 {
67     NV_STATUS rmStatus;
68     RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient);
69     NvU64 offset;
70 
    // Validate the offset and limit passed in.
    if (pCpuMapping->offset >= regionSize)
        return NV_ERR_INVALID_BASE;
    if (pCpuMapping->length == 0)
        return NV_ERR_INVALID_LIMIT;
    if (!portSafeAddU64(pCpuMapping->offset, pCpuMapping->length, &offset) ||
        (offset > regionSize))
        return NV_ERR_INVALID_LIMIT;
79 
80     if (!portSafeAddU64((NvU64)regionOffset, pCpuMapping->offset, &offset))
81         return NV_ERR_INVALID_OFFSET;
82 
83     // Create a mapping of BAR0
84     rmStatus = osMapGPU(pGpu,
85                         rmclientGetCachedPrivilege(pClient),
86                         offset,
87                         pCpuMapping->length,
88                         pCpuMapping->pPrivate->protect,
89                         &pCpuMapping->pLinearAddress,
90                         &pCpuMapping->pPrivate->pPriv);
91     return rmStatus;
92 }
93 
94 
95 
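//
// Determine the effective address space for a CPU mapping of pMemDesc.
// Sysmem is normally mapped directly, but may be reflected through BAR1 when
// direct mapping is disallowed; MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1 forces the
// BAR1 (ADDR_FBMEM) path.
//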
96 NV_STATUS
97 rmapiGetEffectiveAddrSpace
98 (
99     OBJGPU *pGpu,
100     MEMORY_DESCRIPTOR *pMemDesc,
101     NvU32 mapFlags,
102     NV_ADDRESS_SPACE *pAddrSpace
103 )
104 {
105     NV_ADDRESS_SPACE addrSpace;
106     NvBool bDirectSysMappingAllowed = NV_TRUE;
107 
108     KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
109 
110     NV_ASSERT_OK_OR_RETURN(
111         kbusIsDirectMappingAllowed_HAL(pGpu, pKernelBus, pMemDesc, mapFlags,
112                                       &bDirectSysMappingAllowed));
113 
114     //
115     // Bug 1482818: Deprecate reflected mappings in production code.
116     //  The usage of reflected writes, in addition to causing several deadlock
117     //  scenarios involving P2P transfers, are disallowed on NVLINK (along with
118     //  reflected reads), and should no longer be used.
119     //  The below PDB property should be unset once the remaining usages in MODS
120     //  have been culled. (Bug 1780557)
121     //
122     if ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
123         !bDirectSysMappingAllowed &&
124         (DRF_VAL(OS33, _FLAGS, _MAPPING, mapFlags) != NVOS33_FLAGS_MAPPING_DIRECT) &&
125         !kbusIsReflectedMappingAccessAllowed(pKernelBus))
126     {
127         NV_ASSERT(0);
128         return NV_ERR_NOT_SUPPORTED;
129     }
130 
131     if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1))
132     {
133         addrSpace = ADDR_FBMEM;
134     }
135     else if ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
136         (bDirectSysMappingAllowed || FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, mapFlags) ||
137         (IS_VIRTUAL_WITH_SRIOV(pGpu) && !IS_FMODEL(pGpu) && !IS_RTLSIM(pGpu))))
138     {
139         addrSpace = ADDR_SYSMEM;
140     }
141     else if ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ||
142              ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && !bDirectSysMappingAllowed))
143     {
144         addrSpace = ADDR_FBMEM;
145     }
146     else
147     {
148         addrSpace = memdescGetAddressSpace(pMemDesc);
149     }
150 
151     if (pAddrSpace)
152         *pAddrSpace = addrSpace;
153 
154     return NV_OK;
155 }
156 
// Compile-time asserts to check that the caching types match between the SDK
// (NVOS33) flags and the nv_memory_types values.
158 ct_assert(NVOS33_FLAGS_CACHING_TYPE_CACHED        == NV_MEMORY_CACHED);
159 ct_assert(NVOS33_FLAGS_CACHING_TYPE_UNCACHED      == NV_MEMORY_UNCACHED);
160 ct_assert(NVOS33_FLAGS_CACHING_TYPE_WRITECOMBINED == NV_MEMORY_WRITECOMBINED);
161 ct_assert(NVOS33_FLAGS_CACHING_TYPE_WRITEBACK     == NV_MEMORY_WRITEBACK);
162 ct_assert(NVOS33_FLAGS_CACHING_TYPE_DEFAULT       == NV_MEMORY_DEFAULT);
163 ct_assert(NVOS33_FLAGS_CACHING_TYPE_UNCACHED_WEAK == NV_MEMORY_UNCACHED_WEAK);
164 
165 //
166 // Map memory entry points.
167 //
168 NV_STATUS
169 memMap_IMPL
170 (
171     Memory *pMemory,
172     CALL_CONTEXT *pCallContext,
173     RS_CPU_MAP_PARAMS *pMapParams,
174     RsCpuMapping *pCpuMapping
175 )
176 {
177     OBJGPU *pGpu = NULL;
178     KernelBus *pKernelBus = NULL;
179     MemoryManager *pMemoryManager = NULL;
180     KernelMemorySystem *pKernelMemorySystem = NULL;
181     RmClient *pClient;
182     RsResourceRef *pContextRef;
183     RsResourceRef *pMemoryRef;
    Memory *pMemoryInfo; // TODO: rename this field. pMemoryInfo is the legacy name.
                         // The name should make clear how pMemoryInfo differs from pMemory.
186     MEMORY_DESCRIPTOR *pMemDesc;
187     NvP64 priv = NvP64_NULL;
188     NV_STATUS rmStatus = NV_OK;
189     NV_ADDRESS_SPACE effectiveAddrSpace;
190     NvBool bBroadcast;
191     NvU64 mapLimit;
192     NvBool bIsSysmem = NV_FALSE;
193     NvBool bSkipSizeCheck = (DRF_VAL(OS33, _FLAGS, _SKIP_SIZE_CHECK, pMapParams->flags) ==
194                              NVOS33_FLAGS_SKIP_SIZE_CHECK_ENABLE);
195 
196     NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED);
197 
198     NV_ASSERT_OR_RETURN(pMapParams->pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT);
199     pContextRef = pMapParams->pLockInfo->pContextRef;
200     if (pContextRef != NULL)
201     {
202         NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pContextRef, &bBroadcast, &pGpu));
203         gpuSetThreadBcState(pGpu, bBroadcast);
204 
205         pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
206         pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
207         pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
208     }
209 
210     pClient = serverutilGetClientUnderLock(pMapParams->hClient);
211     NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT);
212     NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient),
213                 pMapParams->hMemory, &pMemoryRef));
214 
215     pMemoryInfo = dynamicCast(pMemoryRef->pResource, Memory);
216     NV_ASSERT_OR_RETURN(pMemoryInfo != NULL, NV_ERR_NOT_SUPPORTED);
217     pMemDesc = pMemoryInfo->pMemDesc;
218 
219     if ((pMemoryInfo->categoryClassId == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) &&
220         !(memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM &&
221           RMCFG_FEATURE_PLATFORM_MODS))
222     {
223         return NV_ERR_NOT_SUPPORTED;
224     }
225 
226     //
227     // PROTECTED memory is memory which is hidden from the CPU and used for
228     // storing protected content.  The CPU is not allowed to read it, but is
229     // allowed to write it in order to initialize memory allocated within the
230     // PROTECTED region.
231     //
232     // CPU to directly access protected memory is allowed on MODS
233     //
234     // The check below is for VPR and should be skipped for Hopper CC
235     if ((pGpu != NULL) && !gpuIsCCFeatureEnabled(pGpu))
236     {
237         if ((pMemoryInfo->Flags & NVOS32_ALLOC_FLAGS_PROTECTED) &&
238             (pMapParams->protect != NV_PROTECT_WRITEABLE) &&
            !RMCFG_FEATURE_PLATFORM_MODS)
240         {
241             return NV_ERR_NOT_SUPPORTED;
242         }
243     }
244 
245     if ((pGpu != NULL) && gpuIsCCFeatureEnabled(pGpu) &&
246         (pMemoryInfo->Flags & NVOS32_ALLOC_FLAGS_PROTECTED))
247     {
248         ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu);
249         //
250         // If neither BAR1 nor PCIE as a whole is trusted, fail the mapping
251         // for allocations in CPR region. Mapping should still succeed for
252         // allocations in non-CPR region
253         // Deny BAR1 access to CPU-RM by default irrespective of prod or devtools
254         // mode. Some mappings made by CPU-RM may be allowed to go thorough in
255         // devtools mode.
256         // However, allow the mapping to go through on platforms where GSP-DMA
257         // is not present e.g. MODS. User may have also set a regkey to force
258         // BAR accesses.
259         //
260         if (((pCC != NULL) && !pCC->ccStaticInfo.bIsBar1Trusted &&
261             !pCC->ccStaticInfo.bIsPcieTrusted) ||
262             (IS_GSP_CLIENT(pGpu) && pMapParams->bKernel && !pKernelBus->bForceBarAccessOnHcc &&
263              FLD_TEST_DRF(OS33, _FLAGS, _ALLOW_MAPPING_ON_HCC, _NO, pMapParams->flags)))
264         {
265             NV_PRINTF(LEVEL_ERROR, "BAR1 mapping to CPR vidmem not supported\n");
266             NV_ASSERT(0);
267             return NV_ERR_NOT_SUPPORTED;
268         }
269     }
270 
271     if (!pMapParams->bKernel &&
272         FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, pMemoryInfo->Attr2) &&
273         (pMapParams->protect != NV_PROTECT_READABLE))
274     {
275         return NV_ERR_INVALID_ARGUMENT;
276     }
277 
278     // Validate the offset and limit passed in.
279     if (pMapParams->offset >= pMemoryInfo->Length)
280     {
281         return NV_ERR_INVALID_BASE;
282     }
283     if (pMapParams->length == 0)
284     {
285         return NV_ERR_INVALID_LIMIT;
286     }
287 
288     if (bSkipSizeCheck && (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL))
289     {
290         return NV_ERR_INSUFFICIENT_PERMISSIONS;
291     }
292 
293     //
294     // See bug #140807 and #150889 - we need to pad memory mappings to past their
295     // actual allocation size (to PAGE_SIZE+1) because of a buggy ms function so
296     // skip the allocation size sanity check so the map operation still succeeds.
297     //
298     if (!portSafeAddU64(pMapParams->offset, pMapParams->length, &mapLimit) ||
299         (!bSkipSizeCheck && (mapLimit > pMemoryInfo->Length)))
300     {
301         return NV_ERR_INVALID_LIMIT;
302     }
303 
304     if (pGpu != NULL)
305     {
306         NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, memdescGetMemDescFromGpu(pMemDesc, pGpu), pMapParams->flags, &effectiveAddrSpace));
307     }
308     else
309     {
310         effectiveAddrSpace = ADDR_SYSMEM;
311     }
312 
    bIsSysmem = (effectiveAddrSpace == ADDR_SYSMEM) ||
                (effectiveAddrSpace == ADDR_EGM);
315 
316     if (dynamicCast(pMemoryInfo, FlaMemory) != NULL)
317     {
318         NV_PRINTF(LEVEL_WARNING, "CPU mapping to FLA memory not allowed\n");
319         return NV_ERR_NOT_SUPPORTED;
320     }
321 
322     //
323     //  NVLINK2 ATS: Coherent NVLINK mappings may be returned if the client
324     //    doesn't specifically request PCI-E and if the surface is pitch.
325     //
326     if ((pGpu != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) &&
327         (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM))
328     {
329         NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED));
330         if ((memdescGetPteKind(pMemDesc) ==
331             memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, RM_DEFAULT_PTE_KIND)) && // pitch
332             (!memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_ENCRYPTED)))
333         {
334             if (pMapParams->bKernel)
335             {
336                 if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
337                 {
338                     NvP64 tempCpuPtr = kbusMapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc);
339                     if (tempCpuPtr == NULL)
340                     {
341                         rmStatus = NV_ERR_GENERIC;
342                     }
343                     else
344                     {
345                         rmStatus = NV_OK;
346                         tempCpuPtr = NvP64_PLUS_OFFSET(tempCpuPtr, pMapParams->offset);
347                     }
348                     *pMapParams->ppCpuVirtAddr = tempCpuPtr;
349 
350                     if (rmStatus != NV_OK)
351                         return rmStatus;
352                 }
353                 else
354                 {
355                     rmStatus = osMapSystemMemory(pMemDesc,
356                                                  pMapParams->offset,
357                                                  pMapParams->length,
358                                                  pMapParams->bKernel,
359                                                  pMapParams->protect,
360                                                  pMapParams->ppCpuVirtAddr,
361                                                  &priv);
362                     if (rmStatus != NV_OK)
363                         return rmStatus;
364                 }
365             }
366             else
367             {
368 
369                 //
370                 // Allocating mapping for user mode client
371                 // NOTE: This function intentionally leaves priv uninitialized.
372                 //       It simply copies the busAddress [argument 2] into ppCpuVirtAddr.
373                 //       During the FD mapping cleanup for bug 1784955, it is expected that
374                 //       this function will transition to storing the mapping parameters onto
375                 //       the FD.  Also note: All mapping parameters are ignored (!).
376                 //
377                 //   For now, we're going to return the first page of the nvlink aperture
378                 //   mapping of this allocation.  See nvidia_mmap_helper for establishment
379                 //   of direct mapping.
380                 //
381 
382                 rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo,
383                                               ((NvUPtr)pKernelMemorySystem->coherentCpuFbBase +
384                                                (NvUPtr)memdescGetPhysAddr(pMemDesc,
385                                                 AT_CPU, pMapParams->offset)),
386                                               pMapParams->length,
387                                               pMapParams->protect,
388                                               pMapParams->ppCpuVirtAddr,
389                                               &priv,
390                                               NV_MEMORY_UNCACHED);
391                 if (rmStatus != NV_OK)
392                     return rmStatus;
393             }
394 
395             NV_PRINTF(LEVEL_INFO,
396                       "NVLINK mapping allocated: AtsBase=0x%llx, _pteArray[0]=0x%llx, mappedCpuAddr=0x%llx, length=%d\n",
397                       (NvU64)pKernelMemorySystem->coherentCpuFbBase,
398                       (NvU64)((NvUPtr)pMemDesc->_pteArray[0]),
399                       (*((NvU64 *)(pMapParams->ppCpuVirtAddr))),
400                       (int)pMapParams->length);
401 
402             rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping,
403                                                     pMapParams->bKernel,
404                                                     priv,
405                                                     *(pMapParams->ppCpuVirtAddr),
406                                                     pMapParams->length,
407                                                     -1,
408                                                     -1,
409                                                     pMapParams->flags);
410             pCpuMapping->pPrivate->pGpu = pGpu;
411 
412             if (rmStatus != NV_OK)
413                 return rmStatus;
414 
415         }
416         else
417         {
418             //
419             // RM should fail gracefully when clients map FB in the Coherent link path with special KIND.
420             // There is no GMMU in the Coherent link path, only regular KIND(GMK) is supported and other special
421             // KIND(s) (like encrypted, compressed etc.) are not supported.
422             //
423             NV_PRINTF(LEVEL_ERROR, "Need BAR mapping on coherent link! FAIL!!\n");
424             return NV_ERR_NOT_SUPPORTED;
425         }
426     }
427     else if (effectiveAddrSpace == ADDR_FBMEM)
428     {
429         RmPhysAddr fbAddr = 0;
430         NvBool bcState = NV_FALSE;
431         NvU64 gpuVirtAddr = 0;
432         NvU64 gpuMapLength = 0;
433 
434         //
435         // MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1 indicates a special mapping type of HW registers,
436         // so map it as device memory (uncached).
437         //
438         NvU32 cachingType = NV_MEMORY_WRITECOMBINED;
439         if (pMemDesc != NULL && !memdescHasSubDeviceMemDescs(pMemDesc))
440         {
441             cachingType = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1) ?
442                           NV_MEMORY_UNCACHED : NV_MEMORY_WRITECOMBINED;
443         }
444 
445         if (!kbusIsBar1PhysicalModeEnabled(pKernelBus))
446         {
447             //
448             // For Multi-Board, the BC area has a NULL address range.  So we have
449             // to bring in the master.
450             //
451             bcState = gpumgrGetBcEnabledStatus(pGpu);
452             if (bcState)
453             {
454                 pGpu = gpumgrGetParentGPU(pGpu);
455                 gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
456             }
457 
458             //
459             // Allocate a GPU virtual address space for the video memory region
460             // for those GPUs that support it.
461             //
462 
463             gpuMapLength = pMapParams->length;
464 
465             //
466             // If client ask for Direct mapping , we cannot do much here but just
467             // simulate as it is non encrypted surface.
468             // It is currently totaly for testing purpose.
469             //
470             NV_ASSERT(pGpu->busInfo.gpuPhysFbAddr);
471 
472             {
473                 Device *pDevice = NULL;
474 
475                 // Below, we only map one GPU's address for CPU access, so we can use UNICAST here
476                 NvU32 busMapFbFlags = BUS_MAP_FB_FLAGS_MAP_UNICAST;
                if (DRF_VAL(OS33, _FLAGS, _MAPPING, pMapParams->flags) == NVOS33_FLAGS_MAPPING_DIRECT)
478                 {
479                     busMapFbFlags |= BUS_MAP_FB_FLAGS_DISABLE_ENCRYPTION;
480                 }
481 
482                 switch (pMapParams->protect)
483                 {
484                     case NV_PROTECT_READABLE:
485                         busMapFbFlags |= BUS_MAP_FB_FLAGS_READ_ONLY;
486                         break;
487                     case NV_PROTECT_WRITEABLE:
488                         busMapFbFlags |= BUS_MAP_FB_FLAGS_WRITE_ONLY;
489                         break;
490                 }
491 
492                 pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu);
493 
494                 // WAR for Bug 3564398, need to allocate doorbell for windows differently
495                 if (RMCFG_FEATURE_PLATFORM_WINDOWS &&
496                     memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1))
497                 {
498                     busMapFbFlags |= BUS_MAP_FB_FLAGS_MAP_DOWNWARDS;
499                 }
500 
501                 (void) deviceGetByHandle(staticCast(pClient, RsClient),
502                                          pMapParams->hDevice, &pDevice);
503 
504                 rmStatus = kbusMapFbAperture_HAL(pGpu, pKernelBus,
505                                                  pMemDesc, pMapParams->offset,
506                                                  &gpuVirtAddr, &gpuMapLength,
507                                                  busMapFbFlags, pDevice);
508             }
509 
510             if (rmStatus != NV_OK)
511                 goto _rmMapMemory_busFail;
512         }
513         else
514         {
515             NV_ASSERT_OR_RETURN(memdescGetContiguity(pMemDesc, AT_GPU),
516                    NV_ERR_NOT_SUPPORTED);
517 
518             fbAddr = gpumgrGetGpuPhysFbAddr(pGpu) + memdescGetPte(pMemDesc, AT_GPU, 0) +
519                      memdescGetPteAdjust(pMemDesc) + pMapParams->offset;
520         }
521 
522         if (pMapParams->bKernel)
523         {
524             rmStatus = osMapPciMemoryKernel64(pGpu,
525                                               (kbusIsBar1PhysicalModeEnabled(pKernelBus)?
526                                               fbAddr: gpumgrGetGpuPhysFbAddr(pGpu) + gpuVirtAddr),
527                                               pMapParams->length,
528                                               pMapParams->protect,
529                                               pMapParams->ppCpuVirtAddr,
530                                               cachingType);
531         }
532         else
533         {
534             rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo,
535                                           (kbusIsBar1PhysicalModeEnabled(pKernelBus)?
536                                           fbAddr: gpumgrGetGpuPhysFbAddr(pGpu) + gpuVirtAddr),
537                                           pMapParams->length,
538                                           pMapParams->protect,
539                                           pMapParams->ppCpuVirtAddr,
540                                           &priv,
541                                           cachingType);
542         }
543 
544         //
545         // It's possible that NVOS33_FLAGS_MAPPING is set to NVOS33_FLAGS_MAPPING_DIRECT
546         // at this point--set it to REFLECTED to indicate that we aren't using
547         // direct mapping.
548         //
549         pMapParams->flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _REFLECTED, pMapParams->flags);
550         pMapParams->flags = FLD_SET_DRF_NUM(OS33, _FLAGS, _CACHING_TYPE, cachingType, pMapParams->flags);
551 
552         if (rmStatus != NV_OK)
553             goto _rmMapMemory_pciFail;
554 
555         rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping,
556                                                 pMapParams->bKernel,
557                                                 priv,
558                                                 *(pMapParams->ppCpuVirtAddr),
559                                                 pMapParams->length,
560                                                 kbusIsBar1PhysicalModeEnabled(pKernelBus)
561                                                     ? (NvU64)-1
562                                                     : gpuVirtAddr,
563                                                 kbusIsBar1PhysicalModeEnabled(pKernelBus)
564                                                     ? (NvU64)-1
565                                                     : gpuMapLength,
566                                                 pMapParams->flags);
567         pCpuMapping->pPrivate->pGpu = pGpu;
568 
569         if (rmStatus != NV_OK)
570         {
571             RmUnmapBusAperture(pGpu,
572                                *(pMapParams->ppCpuVirtAddr),
573                                pMapParams->length,
574                                pMapParams->bKernel,
575                                priv);
576     _rmMapMemory_pciFail:
577             if (!kbusIsBar1PhysicalModeEnabled(pKernelBus))
578             {
579                 kbusUnmapFbAperture_HAL(pGpu,
580                                         pKernelBus,
581                                         pMemDesc,
582                                         gpuVirtAddr,
583                                         gpuMapLength,
584                                         BUS_MAP_FB_FLAGS_MAP_UNICAST);
585     _rmMapMemory_busFail:
586                 gpumgrSetBcEnabledStatus(pGpu, bcState);
587             }
588         }
589     }
    else if (bIsSysmem)
592     {
593         // A client can specify not to map memory by default when
594         // calling into RmAllocMemory. In those cases, we don't have
595         // a mapping yet, so go ahead and map it for the client now.
596         rmStatus = memdescMap(pMemDesc,
597                               pMapParams->offset,
598                               pMapParams->length,
599                               pMapParams->bKernel,
600                               pMapParams->protect,
601                               pMapParams->ppCpuVirtAddr,
602                               &priv);
603 
604         // Associate this mapping with the client
605         if (rmStatus == NV_OK && *(pMapParams->ppCpuVirtAddr))
606         {
607             pMapParams->flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags);
608             rmStatus = CliUpdateMemoryMappingInfo(pCpuMapping,
609                                                   pMapParams->bKernel,
610                                                   *(pMapParams->ppCpuVirtAddr),
611                                                   priv,
612                                                   pMapParams->length,
613                                                   pMapParams->flags);
614             pCpuMapping->pPrivate->pGpu = pGpu;
615         }
616     }
617     else if (effectiveAddrSpace == ADDR_VIRTUAL)
618     {
619         rmStatus = NV_ERR_NOT_SUPPORTED;
620     }
621     else if (effectiveAddrSpace == ADDR_REGMEM)
622     {
623         RS_PRIV_LEVEL privLevel;
624 
625         privLevel = rmclientGetCachedPrivilege(pClient);
626         if (!rmclientIsAdmin(pClient, privLevel) &&
627             !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK))
628         {
629             return NV_ERR_PROTECTION_FAULT;
630         }
631 
632         if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pMapParams->flags) == NVOS33_FLAGS_MEM_SPACE_USER)
633         {
634             privLevel = RS_PRIV_LEVEL_USER;
635         }
636 
637         // Create a mapping of BAR0
638         rmStatus = osMapGPU(pGpu,
639                             privLevel,
                            pMapParams->offset + pMemDesc->_pteArray[0],
641                             pMapParams->length,
642                             pMapParams->protect,
643                             pMapParams->ppCpuVirtAddr,
644                             &priv);
645         if (rmStatus != NV_OK)
646             return rmStatus;
647 
648         // Save off the mapping
649         rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping,
650                                                 pMapParams->bKernel,
651                                                 priv,
652                                                 *(pMapParams->ppCpuVirtAddr),
653                                                 pMapParams->length,
654                                                 -1, // gpu virtual addr
655                                                 -1, // gpu map length
656                                                 pMapParams->flags);
657         pCpuMapping->pPrivate->pGpu = pGpu;
658 
659         if (rmStatus != NV_OK)
660         {
661             osUnmapGPU(pGpu->pOsGpuInfo,
662                        privLevel,
663                        *(pMapParams->ppCpuVirtAddr),
664                        pMapParams->length,
665                        priv);
666             return rmStatus;
667         }
668     }
669     else
670     {
671         return NV_ERR_INVALID_CLASS;
672     }
673 
674     if (rmStatus == NV_OK)
675     {
676         NV_PRINTF(LEVEL_INFO,
677                   "%s created. CPU Virtual Address: " NvP64_fmt "\n",
678                   FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags) ? "Direct mapping" : "Mapping",
679                   *(pMapParams->ppCpuVirtAddr));
680     }
681 
682     return rmStatus;
683 }
684 
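//
// Unmap memory entry point. Tears down a CPU mapping created by memMap_IMPL,
// dispatching on how the mapping was established: coherent NVLINK, direct
// sysmem, reflected BAR1, or register (BAR0) mapping.
//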
685 NV_STATUS
686 memUnmap_IMPL
687 (
688     Memory *pMemory,
689     CALL_CONTEXT *pCallContext,
690     RsCpuMapping *pCpuMapping
691 )
692 {
693     RmClient           *pClient             = dynamicCast(pCallContext->pClient, RmClient);
694     OBJGPU             *pGpu                = pCpuMapping->pPrivate->pGpu;
695     MEMORY_DESCRIPTOR  *pMemDesc            = pMemory->pMemDesc;
696 
697     KernelBus          *pKernelBus          = NULL;
698     MemoryManager      *pMemoryManager      = NULL;
699 
700     if (pGpu != NULL)
701     {
702         pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
703         pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
704     }
705 
706     if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, pCpuMapping->flags))
707     {
708         // Nothing more to do
709     }
710     else if ((pGpu != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) &&
711              (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM))
712     {
713         NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED));
714         NV_ASSERT((memdescGetPteKind(pMemDesc) ==
715                    memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, RM_DEFAULT_PTE_KIND)) && // pitch
716                   (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED)));
717 
718         if (pCpuMapping->pPrivate->bKernel)
719         {
            if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
            {
                kbusUnmapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc);
            }
725             else
726             {
727                 osUnmapSystemMemory(pMemDesc,
728                                     pCpuMapping->pPrivate->bKernel,
729                                     pCpuMapping->processId,
730                                     pCpuMapping->pLinearAddress,
731                                     pCpuMapping->pPrivate->pPriv);
732             }
733         }
734 
735         NV_PRINTF(LEVEL_INFO,
736                   "Unmapping from NVLINK handle = 0x%x, addr= 0x%llx\n",
737                   RES_GET_HANDLE(pMemory), (NvU64)pCpuMapping->pLinearAddress);
738 
739         //
740         // No BAR aperture mapping to delete.
741         // No kernel mapping to remove
742         // User-space will call munmap to eliminate PTE mappings
743         //
744     }
745     // System Memory case
746     else if ((pGpu == NULL) || ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
747                                  FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags)))
748     {
749         if (FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags))
750         {
751             memdescUnmap(pMemDesc,
752                          pCpuMapping->pPrivate->bKernel,
753                          pCpuMapping->processId,
754                          pCpuMapping->pLinearAddress,
755                          pCpuMapping->pPrivate->pPriv);
756         }
757     }
758     else if ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ||
759              ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) &&
760               FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _REFLECTED, pCpuMapping->flags)))
761     {
762         RmUnmapBusAperture(pGpu,
763                            pCpuMapping->pLinearAddress,
764                            pCpuMapping->length,
765                            pCpuMapping->pPrivate->bKernel,
766                            pCpuMapping->pPrivate->pPriv);
767 
        if (!kbusIsBar1PhysicalModeEnabled(pKernelBus))
        {
            kbusUnmapFbAperture_HAL(pGpu, pKernelBus,
                                    pMemory->pMemDesc,
                                    pCpuMapping->pPrivate->gpuAddress,
                                    pCpuMapping->pPrivate->gpuMapLength,
                                    BUS_MAP_FB_FLAGS_MAP_UNICAST);
        }
778     }
779     else if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL)
780     {
781         NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE);
782     }
783     else if (memdescGetAddressSpace(pMemDesc) == ADDR_REGMEM)
784     {
785         osUnmapGPU(pGpu->pOsGpuInfo,
786                    rmclientGetCachedPrivilege(pClient),
787                    pCpuMapping->pLinearAddress,
788                    pCpuMapping->length,
789                    pCpuMapping->pPrivate->pPriv);
790     }
791     return NV_OK;
792 }
793 
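//
// Decide whether the requested mapping is a user or kernel mapping from the
// client's privilege level and the OS33 _MEM_SPACE flag, rejecting flag
// combinations the privilege level does not permit.
//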
794 NV_STATUS
795 rmapiValidateKernelMapping
796 (
797     RS_PRIV_LEVEL privLevel,
798     NvU32 flags,
799     NvBool *pbKernel
800 )
801 {
802     NvBool bKernel;
803     NV_STATUS status = NV_OK;
804     if (privLevel < RS_PRIV_LEVEL_KERNEL)
805     {
806         // only kernel clients should be specifying the user mapping flags
807         if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_USER)
808             status = NV_ERR_INVALID_FLAGS;
809         bKernel = NV_FALSE;
810     }
811     else
812     {
813         //
814         // Kernel clients can only use the persistent flag if they are
815         // doing a user mapping.
816         //
817         bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT);
818     }
819 
820     // OS descriptor will already be mapped
821     if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, flags))
822         status = NV_ERR_INVALID_FLAGS;
823 
824     if (pbKernel != NULL)
825         *pbKernel = bKernel;
826 
827     return status;
828 }
829 
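//
// Resource Server prologue for CPU mappings: validates the client and flags,
// resolves the device/subdevice mapping context, sets lock requirements, and
// converts OS33 access flags into RM protection flags.
//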
830 NV_STATUS
831 serverMap_Prologue
832 (
833     RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams
834 )
835 {
836     NV_STATUS           rmStatus;
837     RmClient           *pClient;
838     RsResourceRef      *pMemoryRef;
839     NvHandle            hClient = pMapParams->hClient;
840     NvHandle            hParent = hClient;
841     NvHandle            hSubDevice = NV01_NULL_OBJECT;
842     NvBool              bClientAlloc = (hClient == pMapParams->hDevice);
843     NvU32               flags = pMapParams->flags;
844     RS_PRIV_LEVEL       privLevel;
845 
    // Persistent sysmem mappings are no longer supported
847     if (DRF_VAL(OS33, _FLAGS, _PERSISTENT, flags) == NVOS33_FLAGS_PERSISTENT_ENABLE)
848         return NV_ERR_INVALID_FLAGS;
849 
850     // Populate Resource Server information
851     pClient = serverutilGetClientUnderLock(hClient);
852     NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT);
853 
854     // Validate hClient
855     privLevel = rmclientGetCachedPrivilege(pClient);
856 
857     // RS-TODO: Assert if this fails after all objects are converted
858     NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient),
859                 pMapParams->hMemory, &pMemoryRef));
860 
861     if (pMemoryRef->pParentRef != NULL)
862         hParent = pMemoryRef->pParentRef->hResource;
863 
864     // check if we have a user or kernel RM client
865     rmStatus = rmapiValidateKernelMapping(privLevel, flags, &pMapParams->bKernel);
866     if (rmStatus != NV_OK)
867         return rmStatus;
868 
869     //
870     // First check to see if it is a standard device or the BC region of
871     // a MC adapter.
872     //
873     pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK;
874     if (!bClientAlloc)
875     {
876         NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT);
877 
878         RsResourceRef *pContextRef;
879         rmStatus = clientGetResourceRef(staticCast(pClient, RsClient),
880                 pMapParams->hDevice, &pContextRef);
881 
882         if (rmStatus != NV_OK)
883             return rmStatus;
884 
        if (pContextRef->internalClassId == classId(Subdevice))
        {
            hSubDevice = pMapParams->hDevice;
            pMapParams->hDevice = pContextRef->pParentRef->hResource;
        }
        else if (pContextRef->internalClassId != classId(Device))
        {
            return NV_ERR_INVALID_OBJECT_PARENT;
        }
897 
898         pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK;
899         pMapParams->pLockInfo->pContextRef = pContextRef;
900     }
901     else
902     {
903         NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT);
904     }
905 
906     pMapParams->hContext = (hSubDevice != NV01_NULL_OBJECT)
907                       ? hSubDevice
908                       : pMapParams->hDevice;
909 
910 
911     // convert from OS33 flags to RM's memory protection flags
912     switch (DRF_VAL(OS33, _FLAGS, _ACCESS, flags))
913     {
914         case NVOS33_FLAGS_ACCESS_READ_WRITE:
915             pMapParams->protect = NV_PROTECT_READ_WRITE;
916             break;
917         case NVOS33_FLAGS_ACCESS_READ_ONLY:
918             pMapParams->protect = NV_PROTECT_READABLE;
919             break;
920         case NVOS33_FLAGS_ACCESS_WRITE_ONLY:
921             pMapParams->protect = NV_PROTECT_WRITEABLE;
922             break;
923         default:
924             return NV_ERR_INVALID_FLAGS;
925     }
926 
927     return NV_OK;
928 }
929 
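//
// Resource Server prologue for CPU unmappings: resolves the mapping context
// and lock requirements, decides whether the mapping is user or kernel, and
// attaches to the owning user process when unmapping from another process.
//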
930 NV_STATUS
931 serverUnmap_Prologue
932 (
933     RsServer *pServer,
934     RS_CPU_UNMAP_PARAMS *pUnmapParams
935 )
936 {
937     OBJGPU *pGpu = NULL;
938     NV_STATUS rmStatus;
939     RmClient *pClient;
940     RsResourceRef *pMemoryRef;
941     NvHandle hClient = pUnmapParams->hClient;
942     NvHandle hParent = hClient;
943     NvHandle hMemory = pUnmapParams->hMemory;
944     NvBool bClientAlloc = (pUnmapParams->hDevice == pUnmapParams->hClient);
945     NvBool bKernel;
946     NvBool bBroadcast;
947     NvU32 ProcessId = pUnmapParams->processId;
948     RS_PRIV_LEVEL privLevel;
949     void *pProcessHandle = NULL;
950 
951     // Populate Resource Server information
952     pClient = serverutilGetClientUnderLock(hClient);
953     NV_ASSERT_OR_ELSE(pClient != NULL, return NV_ERR_INVALID_CLIENT);
954 
955     // check if we have a user or kernel RM client
956     privLevel = rmclientGetCachedPrivilege(pClient);
957 
958     // RS-TODO: Assert if this fails after all objects are converted
959     NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(staticCast(pClient, RsClient),
960                 hMemory, &pMemoryRef));
961 
962     if (pMemoryRef->pParentRef != NULL)
963         hParent = pMemoryRef->pParentRef->hResource;
964 
965     //
966     // First check to see if it is a standard device or the BC region of
967     // a MC adapter.
968     //
969     pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK;
970     if (!bClientAlloc)
971     {
972         NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT);
973 
974         RsResourceRef *pContextRef;
975         rmStatus = clientGetResourceRef(staticCast(pClient, RsClient),
976                 pUnmapParams->hDevice, &pContextRef);
977 
978         if (rmStatus != NV_OK)
979             return rmStatus;
980 
981         if (pContextRef->internalClassId == classId(Subdevice))
982         {
983             pUnmapParams->hDevice = pContextRef->pParentRef->hResource;
984         }
985         else if (pContextRef->internalClassId != classId(Device))
986         {
987             return NV_ERR_INVALID_OBJECT_PARENT;
988         }
989 
990         pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK;
991         pUnmapParams->pLockInfo->pContextRef = pContextRef;
992         NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pUnmapParams->pLockInfo->pContextRef, &bBroadcast, &pGpu));
993         gpuSetThreadBcState(pGpu, bBroadcast);
994     }
995     else
996     {
997         NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT);
998     }
999 
1000     // Decide what sort of mapping it is, user or kernel
1001     if (privLevel < RS_PRIV_LEVEL_KERNEL)
1002     {
1003         bKernel = NV_FALSE;
1004     }
1005     else
1006     {
1007         bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pUnmapParams->flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT);
1008     }
1009 
1010     //
1011     // If it's a user mapping, and we're not currently in the same process that
1012     // it's mapped into, then attempt to attach to the other process first.
1013     //
1014     if (!bKernel && (ProcessId != osGetCurrentProcess()))
1015     {
1016         rmStatus = osAttachToProcess(&pProcessHandle, ProcessId);
1017         if (rmStatus != NV_OK)
1018         {
1019             if (pUnmapParams->bTeardown)
1020                 pProcessHandle = NULL;
1021             else
1022                 return rmStatus;
1023         }
1024 
1025         pUnmapParams->pProcessHandle = pProcessHandle;
1026     }
1027 
1028     // Don't do any filtering if this is a tear-down path
1029     if (pUnmapParams->bTeardown)
1030     {
1031         pUnmapParams->fnFilter = NULL;
1032         return NV_OK;
1033     }
1034 
1035 
1036     pUnmapParams->fnFilter = bKernel
1037         ? serverutilMappingFilterKernel
1038         : serverutilMappingFilterCurrentUserProc;
1039 
1040     return NV_OK;
1041 }
1042 
1043 void
1044 serverUnmap_Epilogue
1045 (
1046     RsServer *pServer,
1047     RS_CPU_UNMAP_PARAMS *pUnmapParams
1048 )
1049 {
1050     // do we need to detach?
1051     if (pUnmapParams->pProcessHandle != NULL)
1052     {
1053         osDetachFromProcess(pUnmapParams->pProcessHandle);
1054         pUnmapParams->pProcessHandle = NULL;
1055     }
1056 }
1057 
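//
// Helper to undo an osMapPciMemory{Kernel64,User} mapping of a bus aperture,
// choosing the kernel or user unmap path based on how the mapping was made.
//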
static void
RmUnmapBusAperture
1059 (
1060     OBJGPU *pGpu,
1061     NvP64   pCpuVirtualAddress,
1062     NvU64   length,
1063     NvBool  bKernel,
1064     NvP64   pPrivateData
1065 )
1066 {
1067     if (bKernel)
1068     {
1069         osUnmapPciMemoryKernel64(pGpu, pCpuVirtualAddress);
1070     }
1071     else
1072     {
1073         osUnmapPciMemoryUser(pGpu->pOsGpuInfo, pCpuVirtualAddress, length, pPrivateData);
1074     }
1075 }
1076 
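//
// Convenience wrapper that maps memory using the RM_API's default security
// info. A minimal usage sketch follows; the client/device/memory handles and
// the size are hypothetical, and the access flag shown is illustrative:
//
//     void     *pCpuAddr = NULL;
//     NV_STATUS status   = rmapiMapToCpu(pRmApi, hClient, hDevice, hMemory,
//                                        0 /* offset */, size, &pCpuAddr,
//                                        DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE));
//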
1077 NV_STATUS
1078 rmapiMapToCpu
1079 (
1080     RM_API   *pRmApi,
1081     NvHandle  hClient,
1082     NvHandle  hDevice,
1083     NvHandle  hMemory,
1084     NvU64     offset,
1085     NvU64     length,
1086     void    **ppCpuVirtAddr,
1087     NvU32     flags
1088 )
1089 {
1090     NvP64     pCpuVirtAddrNvP64 = NvP64_NULL;
1091     NV_STATUS status;
1092 
1093     if (!pRmApi->bHasDefaultSecInfo)
1094         return NV_ERR_NOT_SUPPORTED;
1095 
1096     status = pRmApi->MapToCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, offset, length,
1097                                           &pCpuVirtAddrNvP64, flags, &pRmApi->defaultSecInfo);
1098 
1099     if (ppCpuVirtAddr)
1100         *ppCpuVirtAddr = NvP64_VALUE(pCpuVirtAddrNvP64);
1101 
1102     return status;
1103 }
1104 
1105 /**
1106  * Call into Resource Server to register and execute a CPU mapping operation.
1107  *
1108  * Resource Server will:
1109  *    1. Callback into RM (serverMap_Prologue) to set up mapping parameters, mapping context object,
1110  *       and locking requirements
1111  *    2. Take locks (if required)
1112  *    3. Allocate and register a RsCpuMapping book-keeping entry on the target object's RsResourceRef
1113  *    4. Call the target object's mapping virtual function (xxxMap_IMPL, defined in RM)
 *    5. Set up back-references to the mapping context object (if required). This mapping will automatically
 *       be unmapped if either the target object or the mapping context object is freed.
1116  *    6. Release any locks taken
1117  */
1118 NV_STATUS
1119 rmapiMapToCpuWithSecInfoV2
1120 (
1121     RM_API            *pRmApi,
1122     NvHandle           hClient,
1123     NvHandle           hDevice,
1124     NvHandle           hMemory,
1125     NvU64              offset,
1126     NvU64              length,
1127     NvP64             *ppCpuVirtAddr,
1128     NvU32             *flags,
1129     API_SECURITY_INFO *pSecInfo
1130 )
1131 {
1132     NV_STATUS  status;
1133     RM_API_CONTEXT rmApiContext = {0};
1134     RmMapParams rmMapParams;
1135     RS_LOCK_INFO lockInfo;
1136 
1137     NV_PRINTF(LEVEL_INFO,
1138               "Nv04MapMemory: client:0x%x device:0x%x memory:0x%x\n", hClient,
1139               hDevice, hMemory);
1140     NV_PRINTF(LEVEL_INFO,
1141               "Nv04MapMemory:  offset: %llx length: %llx flags:0x%x\n",
1142               offset, length, *flags);
1143 
1144     status = rmapiPrologue(pRmApi, &rmApiContext);
1145     if (status != NV_OK)
1146         return status;
1147 
1148     NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Nv04MapMemory 0x%x\n", *flags);
1149 
1150     portMemSet(&lockInfo, 0, sizeof(lockInfo));
1151     status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo);
1152     if (status != NV_OK)
1153     {
1154         rmapiEpilogue(pRmApi, &rmApiContext);
1155         return status;
1156     }
1157 
1158     LOCK_METER_DATA(MAPMEM, flags, 0, 0);
1159 
1160     // clear params for good measure
1161     portMemSet(&rmMapParams, 0, sizeof (rmMapParams));
1162 
1163     // load user args
1164     rmMapParams.hClient = hClient;
1165     rmMapParams.hDevice = hDevice;
1166     rmMapParams.hMemory = hMemory;
1167     rmMapParams.offset = offset;
1168     rmMapParams.length = length;
1169     rmMapParams.ppCpuVirtAddr = ppCpuVirtAddr;
1170     rmMapParams.flags = *flags;
1171     rmMapParams.pLockInfo = &lockInfo;
1172     rmMapParams.pSecInfo = pSecInfo;
1173 
1174     status = serverMap(&g_resServ, rmMapParams.hClient, rmMapParams.hMemory, &rmMapParams);
1175 
1176     rmapiEpilogue(pRmApi, &rmApiContext);
1177 
1178     *flags = rmMapParams.flags;
1179 
1180     if (status == NV_OK)
1181     {
1182         NV_PRINTF(LEVEL_INFO, "Nv04MapMemory: complete\n");
1183         NV_PRINTF(LEVEL_INFO,
1184                   "Nv04MapMemory:  *ppCpuVirtAddr:" NvP64_fmt "\n",
1185                   *ppCpuVirtAddr);
1186     }
1187     else
1188     {
1189         NV_PRINTF(LEVEL_WARNING,
1190                   "Nv04MapMemory: map failed; status: %s (0x%08x)\n",
1191                   nvstatusToString(status), status);
1192     }
1193 
1194     return status;
1195 }
1196 
1197 NV_STATUS
1198 rmapiMapToCpuWithSecInfo
1199 (
1200     RM_API            *pRmApi,
1201     NvHandle           hClient,
1202     NvHandle           hDevice,
1203     NvHandle           hMemory,
1204     NvU64              offset,
1205     NvU64              length,
1206     NvP64             *ppCpuVirtAddr,
1207     NvU32              flags,
1208     API_SECURITY_INFO *pSecInfo
1209 )
1210 {
1211     return rmapiMapToCpuWithSecInfoV2(pRmApi, hClient,
1212         hDevice, hMemory, offset, length, ppCpuVirtAddr,
1213         &flags, pSecInfo);
1214 }
1215 
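//
// TLS variants: identical to the entry points above, but bracket the call
// with a THREAD_STATE_NODE so RM thread state is initialized for the calling
// thread.
//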
1216 NV_STATUS
1217 rmapiMapToCpuWithSecInfoTls
1218 (
1219     RM_API            *pRmApi,
1220     NvHandle           hClient,
1221     NvHandle           hDevice,
1222     NvHandle           hMemory,
1223     NvU64              offset,
1224     NvU64              length,
1225     NvP64             *ppCpuVirtAddr,
1226     NvU32              flags,
1227     API_SECURITY_INFO *pSecInfo
1228 )
1229 {
1230     THREAD_STATE_NODE threadState;
1231     NV_STATUS         status;
1232 
1233     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1234 
1235     status = rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, &flags, pSecInfo);
1236 
1237     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1238 
1239     return status;
1240 }

NV_STATUS
1242 rmapiMapToCpuWithSecInfoTlsV2
1243 (
1244     RM_API            *pRmApi,
1245     NvHandle           hClient,
1246     NvHandle           hDevice,
1247     NvHandle           hMemory,
1248     NvU64              offset,
1249     NvU64              length,
1250     NvP64             *ppCpuVirtAddr,
1251     NvU32             *flags,
1252     API_SECURITY_INFO *pSecInfo
1253 )
1254 {
1255     THREAD_STATE_NODE threadState;
1256     NV_STATUS         status;
1257 
1258     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1259 
1260     status = rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, flags, pSecInfo);
1261 
1262     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1263 
1264     return status;
1265 }
1266 
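//
// Convenience wrapper that unmaps memory using the RM_API's default security
// info. Usage sketch (hypothetical handles; pCpuAddr is the CPU pointer
// returned by the matching map call):
//
//     rmapiUnmapFromCpu(pRmApi, hClient, hDevice, hMemory, pCpuAddr,
//                       0 /* flags */, osGetCurrentProcess());
//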
1267 NV_STATUS
1268 rmapiUnmapFromCpu
1269 (
1270     RM_API   *pRmApi,
1271     NvHandle  hClient,
1272     NvHandle  hDevice,
1273     NvHandle  hMemory,
1274     void     *pLinearAddress,
1275     NvU32     flags,
1276     NvU32     ProcessId
1277 )
1278 {
1279     if (!pRmApi->bHasDefaultSecInfo)
1280         return NV_ERR_NOT_SUPPORTED;
1281 
1282     return pRmApi->UnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, NV_PTR_TO_NvP64(pLinearAddress),
1283                                            flags, ProcessId, &pRmApi->defaultSecInfo);
1284 }
1285 
1286 /**
1287  * Call into Resource Server to execute a CPU unmapping operation.
1288  *
1289  * Resource Server will:
1290  *    1. Callback into RM (serverUnmap_Prologue) to set up unmapping parameters, locking requirements,
1291  *       and attempt to attach to the mapping's user process (for user mappings only)
1292  *    2. Take locks (if required)
1293  *    3. Lookup the mapping
1294  *    4. Call the target object's unmapping virtual function (xxxUnmap_IMPL, defined in RM)
1295  *    5. Unregister the mapping from its back-references, and free the mapping
1296  *    6. Callback into RM (serverUnmap_Epilogue) to detach from the mapping's user process (if required)
1297  *    7. Release any locks taken
1298  */
1299 NV_STATUS
1300 rmapiUnmapFromCpuWithSecInfo
1301 (
1302     RM_API            *pRmApi,
1303     NvHandle           hClient,
1304     NvHandle           hDevice,
1305     NvHandle           hMemory,
1306     NvP64              pLinearAddress,
1307     NvU32              flags,
1308     NvU32              ProcessId,
1309     API_SECURITY_INFO *pSecInfo
1310 )
1311 {
1312     NV_STATUS status;
1313     RM_API_CONTEXT rmApiContext = {0};
1314     RmUnmapParams rmUnmapParams;
1315     RS_LOCK_INFO lockInfo;
1316 
1317     NV_PRINTF(LEVEL_INFO,
1318               "Nv04UnmapMemory: client:0x%x device:0x%x memory:0x%x pLinearAddr:" NvP64_fmt " flags:0x%x\n",
1319               hClient, hDevice, hMemory, pLinearAddress, flags);
1320 
1321     status = rmapiPrologue(pRmApi, &rmApiContext);
1322     if (status != NV_OK)
1323         return status;
1324 
1325     portMemSet(&lockInfo, 0, sizeof(lockInfo));
1326     status = rmapiInitLockInfo(pRmApi, hClient, NV01_NULL_OBJECT, &lockInfo);
    if (status != NV_OK)
    {
        rmapiEpilogue(pRmApi, &rmApiContext);
        return status;
    }
1332 
1333     LOCK_METER_DATA(UNMAPMEM, flags, 0, 0);
1334 
1335     portMemSet(&rmUnmapParams, 0, sizeof (rmUnmapParams));
1336     rmUnmapParams.hClient = hClient;
1337     rmUnmapParams.hDevice = hDevice;
1338     rmUnmapParams.hMemory = hMemory;
1339     rmUnmapParams.pLinearAddress = pLinearAddress;
1340     rmUnmapParams.flags = flags;
1341     rmUnmapParams.processId = ProcessId;
1342     rmUnmapParams.pLockInfo = &lockInfo;
1343     rmUnmapParams.pSecInfo = pSecInfo;
1344 
1345     status = serverUnmap(&g_resServ, hClient, hMemory, &rmUnmapParams);
1346 
1347     rmapiEpilogue(pRmApi, &rmApiContext);
1348 
1349     if (status == NV_OK)
1350     {
1351         NV_PRINTF(LEVEL_INFO, "Nv04UnmapMemory: unmap complete\n");
1352     }
1353     else
1354     {
1355         NV_PRINTF(LEVEL_WARNING,
1356                   "Nv04UnmapMemory: unmap failed; status: %s (0x%08x)\n",
1357                   nvstatusToString(status), status);
1358     }
1359 
1360     return status;
1361 }
1362 
1363 NV_STATUS
1364 rmapiUnmapFromCpuWithSecInfoTls
1365 (
1366     RM_API            *pRmApi,
1367     NvHandle           hClient,
1368     NvHandle           hDevice,
1369     NvHandle           hMemory,
1370     NvP64              pLinearAddress,
1371     NvU32              flags,
1372     NvU32              ProcessId,
1373     API_SECURITY_INFO *pSecInfo
1374 )
1375 {
1376     THREAD_STATE_NODE threadState;
1377     NV_STATUS         status;
1378 
1379     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
1380 
1381     status = rmapiUnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, pLinearAddress,
1382                                           flags, ProcessId, pSecInfo);
1383 
1384     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
1385 
1386     return status;
1387 }
1388 
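//
// Resource Server callbacks that select the lock access type (read vs. write)
// for CPU map/unmap operations, based on whether read-only locking is enabled
// for the API.
//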
1389 NV_STATUS
1390 serverMapLookupLockFlags
1391 (
1392     RsServer *pServer,
1393     RS_LOCK_ENUM lock,
1394     RS_CPU_MAP_PARAMS *pParams,
1395     LOCK_ACCESS_TYPE *pAccess
1396 )
1397 {
1398     NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT);
1399 
1400     *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_MAP))
1401         ? LOCK_ACCESS_READ
1402         : LOCK_ACCESS_WRITE;
1403     return NV_OK;
1404 }
1405 
1406 NV_STATUS
1407 serverUnmapLookupLockFlags
1408 (
1409     RsServer *pServer,
1410     RS_LOCK_ENUM lock,
1411     RS_CPU_UNMAP_PARAMS *pParams,
1412     LOCK_ACCESS_TYPE *pAccess
1413 )
1414 {
1415     NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT);
1416 
1417     *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_UNMAP))
1418         ? LOCK_ACCESS_READ
1419         : LOCK_ACCESS_WRITE;
1420     return NV_OK;
1421 }
1422 
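//
// Allocate/free the RM-private portion of an RsCpuMapping, which carries the
// mapping's protection flags, kernel/user flag, and OS private data.
//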
1423 NV_STATUS
1424 refAllocCpuMappingPrivate
1425 (
1426     RS_CPU_MAP_PARAMS *pMapParams,
1427     RsCpuMapping *pCpuMapping
1428 )
1429 {
1430     pCpuMapping->pPrivate = portMemAllocNonPaged(sizeof(RS_CPU_MAPPING_PRIVATE));
1431     if (pCpuMapping->pPrivate == NULL)
1432         return NV_ERR_NO_MEMORY;
1433 
1434     pCpuMapping->pPrivate->protect = pMapParams->protect;
1435     pCpuMapping->pPrivate->bKernel = pMapParams->bKernel;
1436 
1437     return NV_OK;
1438 }
1439 
1440 void
1441 refFreeCpuMappingPrivate
1442 (
1443     RsCpuMapping *pCpuMapping
1444 )
1445 {
1446     portMemFree(pCpuMapping->pPrivate);
1447 }
1448