/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#define NVOC_KERNEL_GRAPHICS_H_PRIVATE_ACCESS_ALLOWED

#include "kernel/gpu/gr/kernel_graphics.h"
#include "kernel/gpu/mem_mgr/mem_mgr.h"

#include "ctrl/ctrl0080/ctrl0080fifo.h"

/*!
 * @brief Allocate common local/global buffers that are required by the graphics context
 *
 * @param[in] pGpu
 * @param[in] pKernelGraphics
 * @param[in] gfid                   host or guest GFID
 * @param[in] pKernelGraphicsContext context pointer - if valid, allocate the
 *                                   per-context local buffers; otherwise
 *                                   allocate the global buffers for @p gfid
 *
 * @return NV_OK on success, or an NV_STATUS error code on failure
 */
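
/*
 * Example (hypothetical call site; in practice this entry point is reached
 * through NVOC HAL dispatch rather than called directly):
 *
 *     // Allocate the global (non-per-context) buffers for the PF:
 *     NV_ASSERT_OK_OR_RETURN(
 *         kgraphicsAllocGrGlobalCtxBuffers_HAL(pGpu, pKernelGraphics,
 *                                              GPU_GFID_PF, NULL));
 */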
NV_STATUS
kgraphicsAllocGrGlobalCtxBuffers_TU102
(
    OBJGPU *pGpu,
    KernelGraphics *pKernelGraphics,
    NvU32 gfid,
    KernelGraphicsContext *pKernelGraphicsContext
)
{
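    // Forward declaration: the pre-Turing (GP100) implementation is invoked at
    // the end of this function to allocate the remaining global buffers.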
    extern NV_STATUS kgraphicsAllocGrGlobalCtxBuffers_GP100(OBJGPU *pGpu, KernelGraphics *pKernelGraphics, NvU32 gfid, KernelGraphicsContext *pKernelGraphicsContext);
    MEMORY_DESCRIPTOR           **ppMemDesc;
    GR_GLOBALCTX_BUFFERS         *pCtxBuffers;
    GR_BUFFER_ATTR               *pCtxAttr;
    NvU64                         allocFlags = MEMDESC_FLAGS_NONE;
    NvBool                        bIsFbBroken = NV_FALSE;
    NvU32                         rtvcbBufferSize;
    NvU32                         rtvcbBufferAlign;
    NV_STATUS                     status;
    const KGRAPHICS_STATIC_INFO  *pKernelGraphicsStaticInfo;
    CTX_BUF_POOL_INFO            *pCtxBufPool = NULL;

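    // This function operates on a single GPU, so broadcast must be disabled.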
    NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);

    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) ||
       (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM)))
    {
        bIsFbBroken = NV_TRUE;
    }
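
    // bIsFbBroken is passed (inverted) as the contiguity argument to
    // memdescCreate() below: physically contiguous allocations are only
    // requested when FB is usable.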

    // Set up the Circular Buffer DB
    allocFlags = MEMDESC_FLAGS_LOST_ON_SUSPEND;

    if (kgraphicsShouldSetContextBuffersGPUPrivileged(pGpu, pKernelGraphics))
    {
        allocFlags |= MEMDESC_FLAGS_GPU_PRIVILEGED;
    }

    if (pKernelGraphicsContext != NULL)
    {
        KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast;
        NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
            kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast));

        pCtxBuffers = &pKernelGraphicsContextUnicast->localCtxBuffer;
        pCtxAttr = pKernelGraphics->globalCtxBuffersInfo.localCtxAttr;

        //
        // If the local buffers are already allocated, return early; this
        // function may be called multiple times per channel.
        //
        if (pCtxBuffers->bAllocated)
            return NV_OK;

        // Allocate local buffers in VPR memory when requested (not wanted for global buffers)
        if (pKernelGraphicsContextUnicast->bVprChannel)
            allocFlags |= MEMDESC_ALLOC_FLAGS_PROTECTED;

        // If allocated per channel, ensure allocations go into the suballocator, if available
        allocFlags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE;
    }
    else
    {
        pCtxBuffers = &pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid];
        pCtxAttr = pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr;
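        // Global buffers are sub-allocated from this GR engine's global
        // context buffer pool when one is available.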
        NV_ASSERT_OK_OR_RETURN(ctxBufPoolGetGlobalPool(pGpu, CTX_BUF_ID_GR_GLOBAL,
            RM_ENGINE_TYPE_GR(pKernelGraphics->instance), &pCtxBufPool));
    }

    // Don't use context buffer pool for VF allocations managed by host RM.
    if (ctxBufPoolIsSupported(pGpu) && (pCtxBufPool != NULL))
    {
        allocFlags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL;
    }

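    // VF allocations use the VF-specific buffer attributes and are tagged as
    // owned by the current (guest) device.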
    if (IS_GFID_VF(gfid))
    {
        pCtxAttr = pKernelGraphics->globalCtxBuffersInfo.vfGlobalCtxAttr;
        allocFlags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE;
    }

    pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics);
    NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE);
    NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE);

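    // Size and alignment of the global RTV circular buffer, as reported by
    // physical RM through the static info.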
    rtvcbBufferSize =
        pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL].size;
    rtvcbBufferAlign =
        pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL].alignment;

    if (rtvcbBufferSize > 0)
    {
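        // Describe the RTV circular buffer; the backing memory itself is
        // allocated below via memdescTagAllocList().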
        ppMemDesc = &pCtxBuffers->memDesc[GR_GLOBALCTX_BUFFER_RTV_CB];
        NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
            memdescCreate(ppMemDesc, pGpu,
                          rtvcbBufferSize,
                          rtvcbBufferAlign,
                          !bIsFbBroken,
                          ADDR_UNKNOWN,
                          pCtxAttr[GR_GLOBALCTX_BUFFER_RTV_CB].cpuAttr,
                          allocFlags));

        memdescSetGpuCacheAttrib(*ppMemDesc, NV_MEMORY_CACHED);
        if ((allocFlags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL) != 0)
        {
            MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);

            memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, *ppMemDesc, AT_GPU, RM_ATTR_PAGE_SIZE_4KB);
            NV_ASSERT_OK_OR_RETURN(memdescSetCtxBufPool(*ppMemDesc, pCtxBufPool));
        }
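        // Tag the allocation for accounting and allocate the backing memory
        // from the buffer's alloc list; the result lands in status.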
        memdescTagAllocList(status, NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_113,
                    (*ppMemDesc), pCtxAttr[GR_GLOBALCTX_BUFFER_RTV_CB].pAllocList);
        NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, status);
    }
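    // Delegate allocation of the remaining global context buffers to the
    // pre-Turing implementation.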
    status = kgraphicsAllocGrGlobalCtxBuffers_GP100(pGpu, pKernelGraphics, gfid, pKernelGraphicsContext);

    return status;
}