1 /*
2 * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 * SPDX-License-Identifier: MIT
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "mem_mgr/egm_mem.h"
25
26 #include "gpu/mem_mgr/mem_mgr.h"
27 #include "gpu/mem_mgr/mem_utils.h"
28 #include "os/os.h"
29 #include "deprecated/rmapi_deprecated.h"
30 #include "vgpu/rpc.h"
31
32 #include "class/cl0042.h" // NV_MEMORY_EXTENDED_USER
33
/*!
 * @brief Construct an ExtendedGpuMemory (EGM) allocation.
 *
 * Allocates an EGM-backed memory descriptor for a client allocation request,
 * applies CPU/GPU cacheability attributes derived from the NVOS32 attr fields,
 * finishes common Memory construction, and (on vGPU guests) RPCs the
 * allocation to the host.  Only supported on self-hosted platforms.
 *
 * @param[in] pExtendedGpuMemory  EGM memory object being constructed
 * @param[in] pCallContext        RS call context (client, resource ref, secInfo)
 * @param[in] pParams             Allocation params (NV_MEMORY_ALLOCATION_PARAMS)
 *
 * @return NV_OK on success; NV_ERR_NOT_SUPPORTED on non-self-hosted GPUs;
 *         otherwise the first failing sub-operation's status.
 */
NV_STATUS
egmmemConstruct_IMPL
(
    ExtendedGpuMemory            *pExtendedGpuMemory,
    CALL_CONTEXT                 *pCallContext,
    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
)
{
    Memory                      *pMemory            = staticCast(pExtendedGpuMemory, Memory);
    OBJGPU                      *pGpu               = pMemory->pGpu;
    MemoryManager               *pMemoryManager     = GPU_GET_MEMORY_MANAGER(pGpu);
    NvHandle                     hClient            = pCallContext->pClient->hClient;
    NvHandle                     hParent            = pCallContext->pResourceRef->pParentRef->hResource;
    NV_MEMORY_ALLOCATION_PARAMS *pAllocData         = pParams->pAllocParams;
    MEMORY_ALLOCATION_REQUEST    allocRequest       = {0};
    MEMORY_ALLOCATION_REQUEST   *pAllocRequest      = &allocRequest;
    RsResourceRef               *pResourceRef       = pCallContext->pResourceRef;
    NV_STATUS                    rmStatus           = NV_OK;
    FB_ALLOC_INFO               *pFbAllocInfo       = NULL;
    FB_ALLOC_PAGE_FORMAT        *pFbAllocPageFormat = NULL;
    RM_ATTR_PAGE_SIZE            pageSizeAttr;
    MEMORY_DESCRIPTOR           *pMemDesc;
    HWRESOURCE_INFO              hwResource;
    NvU64                        sizeOut;
    NvU64                        offsetOut;
    NvU32                        flags;
    NvU32                        gpuCacheAttrib;
    NvU32                        Cache;

    // Copy-construction has already been done by the base Memory class
    if (RS_IS_COPY_CTOR(pParams))
        return NV_OK;

    // Reject requests whose attr/attr2 fields are inconsistent with EGM.
    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, egmmemValidateParams(pGpu, hClient, pAllocData));

    //
    // For non self-hosted case, we mimic vidmem as EGM and is only used for
    // ARCH/MODS verification
    //
    if (!gpuIsSelfHosted(pGpu))
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    stdmemDumpInputAllocParams(pAllocData, pCallContext);

    // NOTE(review): printed at LEVEL_ERROR although it is informational —
    // presumably deliberate for visibility; confirm before changing.
    NV_PRINTF(LEVEL_ERROR, "EGM Allocation requested\n");

    // Populate the allocation request that memmgr/egmmem resource helpers
    // consume; hwResource receives any HW resource attributes they fill in.
    pAllocRequest->classNum = NV_MEMORY_EXTENDED_USER;
    pAllocRequest->pUserParams = pAllocData;
    pAllocRequest->hMemory = pResourceRef->hResource;
    pAllocRequest->hClient = hClient;
    pAllocRequest->hParent = hParent;
    pAllocRequest->pGpu = pGpu;
    pAllocRequest->internalflags = NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC;
    pAllocRequest->pHwResource = &hwResource;

    // Unsure if we need to keep separate copies, but keeping old behavior for now.
    sizeOut = pAllocData->size;
    offsetOut = pAllocData->offset;

    // Allocate and initialize FB_ALLOC_INFO
    pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO));
    NV_ASSERT_TRUE_OR_GOTO(rmStatus, pFbAllocInfo != NULL, NV_ERR_NO_MEMORY, free_params_and_return);

    pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT));
    NV_ASSERT_TRUE_OR_GOTO(rmStatus, pFbAllocPageFormat != NULL, NV_ERR_NO_MEMORY, free_params_and_return);

    portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO));
    portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT));
    pFbAllocInfo->pageFormat = pFbAllocPageFormat;

    memUtilsInitFBAllocInfo(pAllocRequest->pUserParams, pFbAllocInfo,
                            pAllocRequest->hClient, pAllocRequest->hParent);

    NV_ASSERT_OK_OR_GOTO(rmStatus,
                         memmgrAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo),
                         free_params_and_return);

    // Creates pAllocRequest->pMemDesc backed by ADDR_EGM.
    NV_ASSERT_OK_OR_GOTO(rmStatus,
                         egmmemAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo),
                         free_params_and_return);

    NV_ASSERT(pAllocRequest->pMemDesc);
    pMemDesc = pAllocRequest->pMemDesc;

    // Report the actual placement/size back to the caller via the alloc params.
    offsetOut = memdescGetPhysAddr(pMemDesc, AT_GPU, 0);
    sizeOut = pMemDesc->Size;
    pAllocData->limit = sizeOut - 1;

    // GPU cacheability defaults to uncached for EGM when the client left it
    // at _DEFAULT.
    if (FLD_TEST_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT, pAllocData->attr2))
    {
        pAllocData->attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO,
                                        pAllocData->attr2);
    }

    if (FLD_TEST_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _YES, pAllocData->attr2))
    {
        gpuCacheAttrib = NV_MEMORY_CACHED;
    }
    else
    {
        gpuCacheAttrib = NV_MEMORY_UNCACHED;
    }

    // Map the NVOS32 coherency attribute onto the CPU cache attribute used by
    // the memdesc (WRITE_THROUGH/PROTECT/BACK all collapse to CACHED).
    if (FLD_TEST_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, pAllocData->attr))
        Cache = NV_MEMORY_UNCACHED;
    else if (FLD_TEST_DRF(OS32, _ATTR, _COHERENCY, _CACHED, pAllocData->attr))
        Cache = NV_MEMORY_CACHED;
    else if (FLD_TEST_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE, pAllocData->attr))
        Cache = NV_MEMORY_WRITECOMBINED;
    else if (FLD_TEST_DRF(OS32, _ATTR, _COHERENCY, _WRITE_THROUGH, pAllocData->attr))
        Cache = NV_MEMORY_CACHED;
    else if (FLD_TEST_DRF(OS32, _ATTR, _COHERENCY, _WRITE_PROTECT, pAllocData->attr))
        Cache = NV_MEMORY_CACHED;
    else if (FLD_TEST_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, pAllocData->attr))
        Cache = NV_MEMORY_CACHED;
    else
        Cache = 0;

    // The OS32 coherency field values must equal the OS02 ones so the raw
    // value can be copied into the OS02 flags below without translation.
    ct_assert(NVOS32_ATTR_COHERENCY_UNCACHED      == NVOS02_FLAGS_COHERENCY_UNCACHED);
    ct_assert(NVOS32_ATTR_COHERENCY_CACHED        == NVOS02_FLAGS_COHERENCY_CACHED);
    ct_assert(NVOS32_ATTR_COHERENCY_WRITE_COMBINE == NVOS02_FLAGS_COHERENCY_WRITE_COMBINE);
    ct_assert(NVOS32_ATTR_COHERENCY_WRITE_THROUGH == NVOS02_FLAGS_COHERENCY_WRITE_THROUGH);
    ct_assert(NVOS32_ATTR_COHERENCY_WRITE_PROTECT == NVOS02_FLAGS_COHERENCY_WRITE_PROTECT);
    ct_assert(NVOS32_ATTR_COHERENCY_WRITE_BACK    == NVOS02_FLAGS_COHERENCY_WRITE_BACK);

    flags = DRF_DEF(OS02, _FLAGS, _LOCATION, _PCI) |
            DRF_DEF(OS02, _FLAGS, _MAPPING, _NO_MAP) |
            DRF_NUM(OS02, _FLAGS, _COHERENCY, DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr));

    NV_ASSERT(memdescGetAddressSpace(pMemDesc) == ADDR_EGM);
    memdescSetCpuCacheAttrib(pMemDesc, Cache);

    // User-level callers must not get a kernel-mode descriptor.
    if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL)
        memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE);

    memdescSetFlag(pMemDesc, MEMDESC_FLAGS_SYSMEM_OWNED_BY_CLIENT, NV_TRUE);

    memdescSetGpuCacheAttrib(pMemDesc, gpuCacheAttrib);


    pageSizeAttr = dmaNvos32ToPageSizeAttr(pAllocData->attr, pAllocData->attr2);
    NV_ASSERT_OK_OR_GOTO(rmStatus, memmgrSetMemDescPageSize_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pMemDesc,
                                                                AT_GPU, pageSizeAttr),
                         mem_construct_failed);

    // Back the descriptor with actual pages.
    memdescTagAlloc(rmStatus, NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_45,
                    pMemDesc);
    NV_ASSERT_OK_OR_GOTO(rmStatus, rmStatus, mem_construct_failed);

    NV_ASSERT_OK_OR_GOTO(rmStatus, memConstructCommon(pMemory,
                                                      pAllocRequest->classNum,
                                                      flags, pMemDesc, 0,
                                                      NULL, pAllocData->attr,
                                                      pAllocData->attr2, 0, 0,
                                                      pAllocData->tag, &hwResource),
                         mem_construct_failed);
    if (IS_VIRTUAL(pGpu))
    {
        NvU32 os02Flags;
        NvU32 os32Flags = pAllocData->flags;

        //
        // Calculate os02flags as VGPU plugin allocates sysmem with legacy
        // RmAllocMemory API
        //
        NV_ASSERT_OK_OR_GOTO(rmStatus, RmDeprecatedConvertOs32ToOs02Flags(pAllocData->attr,
                                                                          pAllocData->attr2,
                                                                          os32Flags,
                                                                          &os02Flags),
                             convert_flags_failed);

        //
        // vGPU:
        //
        // Since vGPU does all real hardware management in the
        // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true),
        // do an RPC to the host to do the hardware update.
        //
        NV_RM_RPC_ALLOC_MEMORY(pGpu,
                               hClient,
                               hParent,
                               pAllocRequest->hMemory,
                               pAllocRequest->classNum,
                               os02Flags,
                               pMemDesc,
                               rmStatus);
        // NOTE(review): rmStatus from the RPC is returned to the caller but
        // does not trigger the cleanup labels below — confirm intentional.
        pMemory->bRpcAlloc = NV_TRUE;
    }

    pAllocData->size = sizeOut;
    pAllocData->offset = offsetOut;

    stdmemDumpOutputAllocParams(pAllocData);

    // Success path: only the temporary FB_ALLOC_INFO scaffolding is freed.
    goto free_params_and_return;

convert_flags_failed:
    // memConstructCommon succeeded but the vGPU flag conversion failed:
    // undo the common construction before tearing down the memdesc.
    memDestructCommon(pMemory);

mem_construct_failed:
    // Falls through: free the backing pages, then the descriptor itself.
    memdescFree(pMemDesc);
    memdescDestroy(pMemDesc);

free_params_and_return:
    // portMemFree(NULL) is safe, so both early failures and success land here.
    portMemFree(pFbAllocPageFormat);
    portMemFree(pFbAllocInfo);

    return rmStatus;
}
245
246 NV_STATUS
egmmemValidateParams(OBJGPU * pGpu,NvHandle hClient,NV_MEMORY_ALLOCATION_PARAMS * pAllocData)247 egmmemValidateParams
248 (
249 OBJGPU *pGpu,
250 NvHandle hClient,
251 NV_MEMORY_ALLOCATION_PARAMS *pAllocData
252 )
253 {
254 MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
255
256 NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, stdmemValidateParams(pGpu, hClient, pAllocData));
257
258 NV_CHECK_OR_RETURN(LEVEL_ERROR,
259 FLD_TEST_DRF(OS32, _ATTR2, _USE_EGM, _TRUE, pAllocData->attr2),
260 NV_ERR_INVALID_ARGUMENT);
261
262 // Make sure EGM memory is not requested if local EGM is not supported
263 if (!memmgrIsLocalEgmEnabled(pMemoryManager))
264 {
265 NV_PRINTF(LEVEL_ERROR,
266 "Allocation requested from EGM when local EGM is not supported\n");
267 return NV_ERR_INVALID_ARGUMENT;
268 }
269
270 // For Self-Hosted platforms which support EGM, EGM pool is the same as sysmem pool
271 // Make sure _USE_EGM attribute is set only for sysmem allocations for SHH
272 if (gpuIsSelfHosted(pGpu))
273 {
274 if (!FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _PCI, pAllocData->attr))
275 {
276 NV_PRINTF(LEVEL_ERROR,
277 "NVOS32_ATTR2_USE_EGM can be set to true only when NVOS32_ATTR_LOCATION_PCI is set for SHH\n");
278 return NV_ERR_INVALID_ARGUMENT;
279 }
280 }
281 else
282 {
283 // Make sure _USE_EGM attribute is set only for NVOS32_ATTR_LOCATION_VIDMEM for non-SHH platforms
284 if (!FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, pAllocData->attr))
285 {
286 NV_PRINTF(LEVEL_ERROR,
287 "NVOS32_ATTR2_USE_EGM can be set to true only when NVOS32_ATTR_LOCATION_VIDMEM is set\n");
288 return NV_ERR_INVALID_ARGUMENT;
289 }
290 }
291
292 return NV_OK;
293 }
294
295 NV_STATUS
egmmemAllocResources(OBJGPU * pGpu,MemoryManager * pMemoryManager,MEMORY_ALLOCATION_REQUEST * pAllocRequest,FB_ALLOC_INFO * pFbAllocInfo)296 egmmemAllocResources
297 (
298 OBJGPU *pGpu,
299 MemoryManager *pMemoryManager,
300 MEMORY_ALLOCATION_REQUEST *pAllocRequest,
301 FB_ALLOC_INFO *pFbAllocInfo
302 )
303 {
304 NV_STATUS status = NV_OK;
305 NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams;
306 MEMORY_DESCRIPTOR *pMemDesc = NULL;
307 NvBool bAllocedMemDesc = NV_FALSE;
308 NvBool bContig = FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY,
309 _CONTIGUOUS, pVidHeapAlloc->attr);
310
311 //
312 // Check for virtual-only parameters used on physical allocs.
313 //
314 if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_VIRTUAL_ONLY)
315 {
316 NV_PRINTF(LEVEL_ERROR,
317 "Virtual-only flag used with physical allocation\n");
318 status = NV_ERR_INVALID_ARGUMENT;
319 goto failed;
320 }
321 if (FLD_TEST_DRF(OS32, _ATTR2, _32BIT_POINTER, _ENABLE, pVidHeapAlloc->attr2))
322 {
323 NV_PRINTF(LEVEL_ERROR,
324 "Virtual-only 32-bit pointer attr used with physical allocation\n");
325 status = NV_ERR_INVALID_ARGUMENT;
326 goto failed;
327 }
328 if (pVidHeapAlloc->hVASpace != 0)
329 {
330 NV_PRINTF(LEVEL_ERROR,
331 "VA space handle used with physical allocation\n");
332 status = NV_ERR_INVALID_ARGUMENT;
333 goto failed;
334 }
335
336 NV_ASSERT(!(pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_WPR1) && !(pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_WPR2));
337
338 if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE)
339 {
340 NV_PRINTF(LEVEL_ERROR,
341 "Expected fixed address allocation\n");
342 status = NV_ERR_INVALID_ARGUMENT;
343 goto failed;
344 }
345
346 NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, memUtilsAllocMemDesc(pGpu, pAllocRequest, pFbAllocInfo, &pMemDesc, NULL,
347 ADDR_EGM, bContig, &bAllocedMemDesc), failed);
348
349 // get possibly updated surface attributes
350 pVidHeapAlloc->attr = pFbAllocInfo->retAttr;
351 pVidHeapAlloc->attr2 = pFbAllocInfo->retAttr2;
352
353 pVidHeapAlloc->offset = pFbAllocInfo->offset;
354
355 if (pAllocRequest->pHwResource != NULL)
356 {
357 pAllocRequest->pHwResource->attr = pFbAllocInfo->retAttr;
358 pAllocRequest->pHwResource->attr2 = pFbAllocInfo->retAttr2;
359 pAllocRequest->pHwResource->hwResId = pFbAllocInfo->hwResId;
360 pAllocRequest->pHwResource->comprCovg = pFbAllocInfo->comprCovg;
361 pAllocRequest->pHwResource->ctagOffset = pFbAllocInfo->ctagOffset;
362 pAllocRequest->pHwResource->hwResId = pFbAllocInfo->hwResId;
363 }
364
365 return NV_OK;
366
367 failed:
368 memmgrFreeHwResources(pGpu, pMemoryManager, pFbAllocInfo);
369
370 if (bAllocedMemDesc)
371 {
372 memdescDestroy(pAllocRequest->pMemDesc);
373 pAllocRequest->pMemDesc = NULL;
374 }
375
376 return status;
377 }
378