1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /******************************************************************************
25 *
26 *   Description:
27 *       This file contains functions managing DispChannel and its derived classes.
28 *
29 ******************************************************************************/
30 
31 #define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS     0
32 
33 #include "resserv/resserv.h"
34 #include "core/locks.h"
35 #include "rmapi/rs_utils.h"
36 
37 #include "gpu/device/device.h"
38 #include "gpu/gpu_resource.h"
39 #include "gpu/disp/disp_channel.h"
40 #include "gpu/disp/disp_objs.h"
41 #include "gpu/disp/kern_disp.h"
42 #include "gpu/disp/inst_mem/disp_inst_mem.h"
43 #include "gpu/mem_mgr/context_dma.h"
44 #include "gpu/gpu.h"
45 #include "gpu_mgr/gpu_mgr.h"
46 #include "platform/sli/sli.h"
47 #include "vgpu/rpc.h"
48 
49 static void
dispchnParseAllocParams(DispChannel * pDispChannel,void * pAllocParams,NvU32 * pChannelInstance,NvHandle * pHObjectBuffer,NvU32 * pInitialGetPutOffset,NvBool * pAllowGrabWithinSameClient,NvBool * pConnectPbAtGrab)50 dispchnParseAllocParams
51 (
52     DispChannel *pDispChannel,
53     void        *pAllocParams,
54     NvU32       *pChannelInstance,
55     NvHandle    *pHObjectBuffer,
56     NvU32       *pInitialGetPutOffset,
57     NvBool      *pAllowGrabWithinSameClient,
58     NvBool      *pConnectPbAtGrab
59 )
60 {
61     NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = NULL;
62     NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = NULL;
63 
64     *pAllowGrabWithinSameClient = NV_FALSE;
65     *pConnectPbAtGrab = NV_FALSE;
66 
67     if (pDispChannel->bIsDma)
68     {
69         pDmaChannelAllocParams = pAllocParams;
70         *pChannelInstance      = pDmaChannelAllocParams->channelInstance;
71         *pHObjectBuffer        = pDmaChannelAllocParams->hObjectBuffer;
72         *pInitialGetPutOffset  = pDmaChannelAllocParams->offset;
73 
74         if (FLD_TEST_DRF(50VAIO_CHANNELDMA_ALLOCATION, _FLAGS,
75                          _CONNECT_PB_AT_GRAB, _YES,
76                          pDmaChannelAllocParams->flags))
77         {
78             *pConnectPbAtGrab = NV_TRUE;
79         }
80 
81         if (pDmaChannelAllocParams->hObjectNotify != 0)
82         {
83             NV_PRINTF(LEVEL_WARNING, "Error notifier parameter is not used in Display channel allocation.\n");
84         }
85     }
86     else
87     {
88         pPioChannelAllocParams = pAllocParams;
89         *pChannelInstance      = pPioChannelAllocParams->channelInstance;
90         *pHObjectBuffer        = 0; // No one should look at this. So, 0 should be fine.
91         *pInitialGetPutOffset  = 0; // No one should look at this. So, 0 should be fine.
92 
93         if (pPioChannelAllocParams->hObjectNotify != 0)
94         {
95             NV_PRINTF(LEVEL_WARNING, "Error notifier parameter is not used in Display channel allocation.\n");
96         }
97     }
98 }
99 
100 NV_STATUS
dispchnConstruct_IMPL(DispChannel * pDispChannel,CALL_CONTEXT * pCallContext,RS_RES_ALLOC_PARAMS_INTERNAL * pParams,NvU32 isDma)101 dispchnConstruct_IMPL
102 (
103     DispChannel                  *pDispChannel,
104     CALL_CONTEXT                 *pCallContext,
105     RS_RES_ALLOC_PARAMS_INTERNAL *pParams,
106     NvU32                         isDma
107 )
108 {
109     OBJGPU         *pGpu = GPU_RES_GET_GPU(pDispChannel);
110     KernelDisplay  *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
111     NV_STATUS       rmStatus = NV_OK;
112     NvU32           channelInstance;
113     NvHandle        hObjectBuffer;
114     NvBool          bIsDma = !!isDma;
115     NvU32           initialGetPutOffset;
116     NvBool          allowGrabWithinSameClient;
117     NvBool          connectPbAtGrab;
118     DISPCHNCLASS    internalDispChnClass;
119     void           *pAllocParams = pParams->pAllocParams;
120     RsResourceRef  *pParentRef = RES_GET_REF(pDispChannel)->pParentRef;
121     DispObject     *pDispObject = dynamicCast(pParentRef->pResource, DispObject);
122     ContextDma     *pBufferContextDma = NULL;
123     NvU32           hClass = RES_GET_EXT_CLASS_ID(pDispChannel);
124 
125     NV_ASSERT_OR_RETURN(pDispObject, NV_ERR_INVALID_OBJECT_HANDLE);
126 
127     if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT)
128     {
129         NV_PRINTF(LEVEL_ERROR,
130                   "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n",
131                   pParams->externalClassId);
132 
133         //
134         // GPUSWSEC-1560 introduced a central object privilege check in RS. Please mark derived external classes
135         // of DispChannel privileged in their RS_ENTRY. Since DispChannel doesn't have an external class of its own
136         // and is used as a base class, leaving this check inline to catch future derivations.
137         //
138         osAssertFailed();
139 
140         return NV_ERR_INSUFFICIENT_PERMISSIONS;
141     }
142 
143     //
144     // Make sure this channel class is supported on this chip.
145     // Need to have the check below since, the switch in RmAlloc
146     // doesn't tell if the current chip supports the class
147     //
148     if (!gpuIsClassSupported(pGpu, RES_GET_EXT_CLASS_ID(pDispChannel)))
149     {
150         NV_PRINTF(LEVEL_ERROR, "Unsupported class in\n");
151         return NV_ERR_INVALID_CLASS;
152     }
153 
154     // Move params into RM's address space
155     pDispChannel->pDispObject = pDispObject;
156     pDispChannel->bIsDma = bIsDma;
157     dispchnParseAllocParams(pDispChannel, pAllocParams,
158                             &channelInstance,
159                             &hObjectBuffer,
160                             &initialGetPutOffset,
161                             &allowGrabWithinSameClient,
162                             &connectPbAtGrab);
163 
164     rmStatus = kdispGetIntChnClsForHwCls(pKernelDisplay,
165                                          RES_GET_EXT_CLASS_ID(pDispChannel),
166                                         &internalDispChnClass);
167     if (rmStatus != NV_OK)
168         return rmStatus;
169 
170     if (internalDispChnClass == dispChnClass_Any)
171     {
172         //
173         // Any channel is kernel only channel, Physical RM doesn't need ANY channel information.
174         // return from here as ANY channel is constructed.
175         //
176         pDispChannel->DispClass        = internalDispChnClass;
177         pDispChannel->InstanceNumber   = channelInstance;
178         return NV_OK;
179     }
180 
181     API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE);
182     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY);
183     {
184         rmStatus = kdispSetPushBufferParamsToPhysical_HAL(pGpu,
185                                             pKernelDisplay,
186                                             pDispChannel,
187                                             hObjectBuffer,
188                                             pBufferContextDma,
189                                             hClass,
190                                             channelInstance,
191                                             internalDispChnClass);
192         if (rmStatus != NV_OK)
193            return rmStatus;
194     }
195     SLI_LOOP_END
196 
197     // Acquire the underlying HW resources
198     rmStatus = kdispAcquireDispChannelHw_HAL(pKernelDisplay,
199                                              pDispChannel,
200                                              channelInstance,
201                                              hObjectBuffer,
202                                              initialGetPutOffset,
203                                              allowGrabWithinSameClient,
204                                              connectPbAtGrab);
205     if (rmStatus != NV_OK)
206     {
207         NV_PRINTF(LEVEL_ERROR,
208                   "disp channel[0x%x] alloc failed. Return status = 0x%x\n",
209                   channelInstance, rmStatus);
210 
211         return rmStatus;
212     }
213 
214     // Channel allocation is successful, initialize new channel's data structures
215     pDispChannel->DispClass        = internalDispChnClass;
216     pDispChannel->InstanceNumber   = channelInstance;
217     dispchnSetRegBaseOffsetAndSize(pDispChannel, pGpu);
218 
219     // Map memory for parent GPU
220     rmStatus = kdispMapDispChannel_HAL(pKernelDisplay, pDispChannel);
221 
222     // setup to return pControl to client
223     if (pDispChannel->bIsDma)
224     {
225         NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = pAllocParams;
226         pDmaChannelAllocParams->pControl = pDispChannel->pControl;
227     }
228     else
229     {
230         NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = pAllocParams;
231         pPioChannelAllocParams->pControl = pDispChannel->pControl;
232     }
233 
234     return rmStatus;
235 }
236 
237 //
238 // Performs grab operation for a channel.
239 //
240 // Pre-Volta Linux swapgroups is the only remaining use of channel grabbing.
241 // Bug 2869820 is tracking the transition of swapgroups from requiring this
242 // RM feature.
243 //
244 NV_STATUS
dispchnGrabChannel_IMPL(DispChannel * pDispChannel,NvHandle hClient,NvHandle hParent,NvHandle hChannel,NvU32 hClass,void * pAllocParams)245 dispchnGrabChannel_IMPL
246 (
247     DispChannel *pDispChannel,
248     NvHandle     hClient,
249     NvHandle     hParent,
250     NvHandle     hChannel,
251     NvU32        hClass,
252     void        *pAllocParams
253 )
254 {
255     NV_STATUS          rmStatus = NV_OK;
256     OBJGPU            *pGpu = GPU_RES_GET_GPU(pDispChannel);
257     KernelDisplay     *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
258     NvU32              channelInstance;
259     NvHandle           hObjectBuffer;
260     NvU32              initialGetPutOffset;
261     NvBool             allowGrabWithinSameClient;
262     NvBool             connectPbAtGrab;
263     ContextDma        *pBufferContextDma = NULL;
264     DISPCHNCLASS       internalDispChnClass;
265 
266     if (RES_GET_PARENT_HANDLE(pDispChannel) != hParent)
267     {
268         NV_PRINTF(LEVEL_ERROR,
269                   "disp channel grab failed because of bad display parent 0x%x\n",
270                   hParent);
271         DBG_BREAKPOINT();
272         return NV_ERR_INVALID_OBJECT_PARENT;
273     }
274 
275     // Move params into RM's address space
276     dispchnParseAllocParams(pDispChannel, pAllocParams,
277                             &channelInstance,
278                             &hObjectBuffer,
279                             &initialGetPutOffset,
280                             &allowGrabWithinSameClient,
281                             &connectPbAtGrab);
282 
283     //
284     // The handle already exists in our DB.
285     // The supplied params must be same as what we already have with us
286     //
287     if (RES_GET_EXT_CLASS_ID(pDispChannel) != hClass                ||
288         pDispChannel->InstanceNumber       != channelInstance)
289     {
290         NV_PRINTF(LEVEL_ERROR,
291                   "Information supplied for handle 0x%x doesn't match that in RM's client DB\n",
292                   hChannel);
293         return NV_ERR_INVALID_OBJECT_HANDLE;
294     }
295 
296     rmStatus = kdispGetIntChnClsForHwCls(pKernelDisplay,
297                                      hClass,
298                                     &internalDispChnClass);
299 
300     API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE);
301     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY);
302     {
303       rmStatus = kdispSetPushBufferParamsToPhysical_HAL(pGpu,
304                                            pKernelDisplay,
305                                            pDispChannel,
306                                            hObjectBuffer,
307                                            pBufferContextDma,
308                                            hClass,
309                                            channelInstance,
310                                            internalDispChnClass);
311       if (rmStatus != NV_OK)
312           return rmStatus;
313     }
314     SLI_LOOP_END
315 
316     // Acquire the underlying HW resources
317     rmStatus = kdispAcquireDispChannelHw_HAL(pKernelDisplay,
318                                              pDispChannel,
319                                              channelInstance,
320                                              hObjectBuffer,
321                                              initialGetPutOffset,
322                                              allowGrabWithinSameClient,
323                                              connectPbAtGrab);
324 
325     // setup to return pControl to client
326     if (pDispChannel->bIsDma)
327     {
328         NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = pAllocParams;
329         pDmaChannelAllocParams->pControl = pDispChannel->pControl;
330     }
331     else
332     {
333         NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = pAllocParams;
334         pPioChannelAllocParams->pControl = pDispChannel->pControl;
335     }
336 
337     return rmStatus;
338 }
339 
340 NV_STATUS
dispchnGetRegBaseOffsetAndSize_IMPL(DispChannel * pDispChannel,OBJGPU * pGpu,NvU32 * pOffset,NvU32 * pSize)341 dispchnGetRegBaseOffsetAndSize_IMPL
342 (
343     DispChannel *pDispChannel,
344     OBJGPU *pGpu,
345     NvU32 *pOffset,
346     NvU32 *pSize
347 )
348 {
349     if (pOffset)
350         *pOffset = pDispChannel->ControlOffset;
351 
352     if (pSize)
353         *pSize = pDispChannel->ControlLength;
354 
355     return NV_OK;
356 }
357 
358 void
dispchnSetRegBaseOffsetAndSize_IMPL(DispChannel * pDispChannel,OBJGPU * pGpu)359 dispchnSetRegBaseOffsetAndSize_IMPL
360 (
361     DispChannel *pDispChannel,
362     OBJGPU      *pGpu
363 )
364 {
365     KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
366 
367     (void)kdispGetDisplayChannelUserBaseAndSize_HAL(pGpu, pKernelDisplay,
368                                                     pDispChannel->DispClass,
369                                                     pDispChannel->InstanceNumber,
370                                                    &pDispChannel->ControlOffset,
371                                                    &pDispChannel->ControlLength);
372 
373     // Tegra offsets needs to be subtracted with -0x610000.
374     pDispChannel->ControlOffset += kdispGetBaseOffset_HAL(pGpu, pKernelDisplay);
375 }
376 
377 /*!
378  * @brief Maps channel user area for parent GPU.
379  */
380 NV_STATUS
kdispMapDispChannel_IMPL(KernelDisplay * pKernelDisplay,DispChannel * pDispChannel)381 kdispMapDispChannel_IMPL
382 (
383     KernelDisplay *pKernelDisplay,
384     DispChannel   *pDispChannel
385 )
386 {
387     NV_STATUS       rmStatus;
388     OBJGPU         *pGpu        = GPU_RES_GET_GPU(pDispChannel);
389     RsClient       *pClient     = RES_GET_CLIENT(pDispChannel);
390     RmClient       *pRmClient   = dynamicCast(pClient, RmClient);
391     RS_PRIV_LEVEL   privLevel   = rmclientGetCachedPrivilege(pRmClient);
392     RM_API         *pRmApi      = GPU_GET_PHYSICAL_RMAPI(pGpu);
393 
394     //
395     // Only need the map for the parent GPU since we require the client to
396     // use RmMapMemory for subdevice channel mapping.
397     //
398     rmStatus = osMapGPU(pGpu, privLevel,
399                         pDispChannel->ControlOffset,
400                         pDispChannel->ControlLength,
401                         NV_PROTECT_READ_WRITE,
402                        &pDispChannel->pControl,
403                        &pDispChannel->pPriv);
404     if (rmStatus != NV_OK)
405     {
406         NV_PRINTF(LEVEL_ERROR,
407                   "disp channel[0x%x] mapping failed. Return status = 0x%x\n",
408                   pDispChannel->InstanceNumber, rmStatus);
409 
410         (void) pRmApi->Free(pRmApi,
411                             RES_GET_CLIENT_HANDLE(pDispChannel),
412                             RES_GET_HANDLE(pDispChannel));
413 
414         return rmStatus;
415     }
416 
417     return NV_OK;
418 }
419 
420 /*!
421  * @brief Unbinds Context DMAs and unmaps channel user area for the given channel.
422  */
kdispUnbindUnmapDispChannel_IMPL(KernelDisplay * pKernelDisplay,DispChannel * pDispChannel)423 void kdispUnbindUnmapDispChannel_IMPL
424 (
425     KernelDisplay *pKernelDisplay,
426     DispChannel *pDispChannel
427 )
428 {
429     OBJGPU             *pGpu        = GPU_RES_GET_GPU(pDispChannel);
430     RsClient           *pClient     = RES_GET_CLIENT(pDispChannel);
431     RmClient           *pRmClient   = dynamicCast(pClient, RmClient);
432     RS_PRIV_LEVEL       privLevel   = rmclientGetCachedPrivilege(pRmClient);
433 
434     // Unbind all ContextDmas from this channel
435     dispchnUnbindAllCtx(pGpu, pDispChannel);
436 
437     // Unmap the channel
438     osUnmapGPU(pGpu->pOsGpuInfo, privLevel, pDispChannel->pControl,
439                pDispChannel->ControlLength, pDispChannel->pPriv);
440 }
441 
/*!
 * @brief Destructor for DispChannel: tears down satellite channels (CORE
 *        channel WAR), unbinds context DMAs, unmaps the user area and
 *        releases the HW channel.
 */
void
dispchnDestruct_IMPL
(
    DispChannel *pDispChannel
)
{
    NV_STATUS           rmStatus  = NV_OK;
    OBJGPU             *pGpu      = GPU_RES_GET_GPU(pDispChannel);
    KernelDisplay      *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
    RM_API             *pRmApi    = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);

    LOCK_METER_DATA(FREE_CHANNEL_DISP, pDispChannel->DispClass, 0, 0);

    //
    // Before freeing the CORE channel, make sure all satellite channels are
    // torn down. This is currently necessary on UNIX to deal with cases
    // where X (i.e. the userspace display driver) terminates before other
    // RM clients with satellite channel allocations, e.g. OpenGL clients with
    // BASE channel allocations.
    //
    if ((pDispChannel->DispClass == dispChnClass_Core) &&
        pKernelDisplay->bWarPurgeSatellitesOnCoreFree)
    {
        RmClient     **ppClient;
        RmClient      *pClient;
        RsClient      *pRsClient;
        RS_ITERATOR    it;
        Device        *pDevice;
        OBJGPU        *pTmpGpu;
        DispChannel   *pTmpDispChannel;

        NV_ASSERT(gpuIsGpuFullPower(pGpu));

        // Walk every RM client and free its non-CORE channels on this GPU.
        for (ppClient = serverutilGetFirstClientUnderLock();
             ppClient;
             ppClient = serverutilGetNextClientUnderLock(ppClient))
        {
            pClient = *ppClient;
            pRsClient = staticCast(pClient, RsClient);

            it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE);

            while (clientRefIterNext(it.pClient, &it))
            {
                RS_ITERATOR    dispIt;
                RsResourceRef *pResourceRef;
                DispObject    *pDispObject;

                pDevice = dynamicCast(it.pResourceRef->pResource, Device);

                // Only purge channels belonging to the GPU being torn down.
                pTmpGpu = GPU_RES_GET_GPU(pDevice);
                if (pTmpGpu != pGpu)
                    continue;

                // Skip devices with no DispObject (no display allocations).
                rmStatus = dispobjGetByDevice(pRsClient, pDevice, &pDispObject);
                if (rmStatus != NV_OK)
                    continue;

                pResourceRef = RES_GET_REF(pDispObject);

                dispIt = clientRefIter(pRsClient, pResourceRef, classId(DispChannel), RS_ITERATE_CHILDREN, NV_FALSE);

                while (clientRefIterNext(dispIt.pClient, &dispIt))
                {
                    pTmpDispChannel = dynamicCast(dispIt.pResourceRef->pResource, DispChannel);

                    // CORE channels are not satellites; leave them alone.
                    if (pTmpDispChannel->DispClass != dispChnClass_Core)
                    {
                        rmStatus = pRmApi->Free(pRmApi,
                                                RES_GET_CLIENT_HANDLE(pTmpDispChannel),
                                                RES_GET_HANDLE(pTmpDispChannel));

                        if (rmStatus == NV_OK)
                        {
                            // Client's resource map has been modified, re-snap iterators
                            it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE);
                            dispIt = clientRefIter(pRsClient, it.pResourceRef, classId(DispChannel), RS_ITERATE_DESCENDANTS, NV_FALSE);
                        }
                        else
                        {
                            NV_PRINTF(LEVEL_ERROR,
                                      "Failed to free satellite DispChannel 0x%x!\n",
                                      RES_GET_HANDLE(pTmpDispChannel));
                        }
                    }
                }
            }
        }
    }

    //
    // Unbind all context dmas bound to this channel, unmap the channel and
    // finally release HW resources.
    //
    kdispUnbindUnmapDispChannel_HAL(pKernelDisplay, pDispChannel);
    rmStatus = kdispReleaseDispChannelHw_HAL(pKernelDisplay, pDispChannel);

    if (rmStatus != NV_OK)
    {
        // Try to avoid returning error codes on free under new resource server design
        NV_ASSERT(0);
    }
}
545 
/*!
 * @brief PIO display channel constructor.
 *
 * All real construction work happens in the shared base-class constructor
 * (dispchnConstruct_IMPL); nothing PIO-specific is required here.
 */
NV_STATUS
dispchnpioConstruct_IMPL
(
    DispChannelPio               *pDispChannelPio,
    CALL_CONTEXT                 *pCallContext,
    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
)
{
    return NV_OK;
}
556 
/*!
 * @brief DMA display channel constructor.
 *
 * All real construction work happens in the shared base-class constructor
 * (dispchnConstruct_IMPL); nothing DMA-specific is required here.
 */
NV_STATUS
dispchndmaConstruct_IMPL
(
    DispChannelDma               *pDispChannelDma,
    CALL_CONTEXT                 *pCallContext,
    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
)
{
    return NV_OK;
}
567 
568 NV_STATUS
dispchnGetByHandle_IMPL(RsClient * pClient,NvHandle hDisplayChannel,DispChannel ** ppDispChannel)569 dispchnGetByHandle_IMPL
570 (
571     RsClient     *pClient,
572     NvHandle      hDisplayChannel,
573     DispChannel **ppDispChannel
574 )
575 {
576     RsResourceRef  *pResourceRef;
577     NV_STATUS       status;
578 
579     *ppDispChannel = NULL;
580 
581     status = clientGetResourceRef(pClient, hDisplayChannel, &pResourceRef);
582     if (status != NV_OK)
583         return status;
584 
585     *ppDispChannel = dynamicCast(pResourceRef->pResource, DispChannel);
586 
587     return (*ppDispChannel) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE;
588 }
589 
590 //
591 // Bind the DMA context to a display channel
592 //
//
// Bind the DMA context to a display channel
//
/*!
 * @brief Bind a ContextDma to the display channel named by hChannel.
 *
 * @param[in] pGpu         GPU (broadcast parent in SLI)
 * @param[in] pContextDma  Context DMA to bind
 * @param[in] hChannel     Display channel handle within pContextDma's client
 *
 * @return NV_OK on success; lookup/validation/bind error otherwise.
 */
NV_STATUS
dispchnBindCtx_IMPL
(
    OBJGPU       *pGpu,
    ContextDma   *pContextDma,
    NvHandle     hChannel
)
{
    RsClient     *pClient = RES_GET_CLIENT(pContextDma);
    DispChannel  *pDispChannel = NULL;
    NV_STATUS rmStatus = NV_OK;
    KernelDisplay *pKernelDisplay;
    DisplayInstanceMemory *pInstMem;

    // Look-up channel
    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
        dispchnGetByHandle(pClient, hChannel, &pDispChannel));

    // Ensure ContextDma and DisplayChannel are on the same device
    NV_CHECK_OR_RETURN(LEVEL_ERROR, pContextDma->pDevice == GPU_RES_GET_DEVICE(pDispChannel),
                       NV_ERR_INVALID_DEVICE);

    //
    // Enforce alignment requirements
    // ISO  ctx dmas need to be a multiple of 256B and 256B aligned
    // NISO ctx dmas need to be a multiple of 4K   and 4K   aligned
    // We can only ensure common minimum -- 4K alignment and 4K size
    // Limit alignment is handled by rounding up in lower-level code.
    // This will be in hw in future.
    //
    if (pContextDma->pMemDesc->PteAdjust != 0)
    {
        NV_PRINTF(LEVEL_ERROR,
                  "ISO ctx dmas must be 4K aligned. PteAdjust = 0x%x\n",
                  pContextDma->pMemDesc->PteAdjust);
        return NV_ERR_INVALID_OFFSET;
    }

    // Bind on every subdevice; abort (restoring broadcast state) on failure.
    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
    pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);

    rmStatus = instmemBindContextDma(pGpu, pInstMem, pContextDma, pDispChannel);
    if (rmStatus != NV_OK)
    {
        SLI_LOOP_RETURN(rmStatus);
    }

    SLI_LOOP_END

    return NV_OK;
}
645 
/*!
 * @brief Unbind a ContextDma from the display channel named by hChannel.
 *
 * @param[in] pGpu         GPU (broadcast parent in SLI)
 * @param[in] pContextDma  Context DMA to unbind
 * @param[in] hChannel     Display channel handle within pContextDma's client
 *
 * @return NV_OK if the binding was found on at least one subdevice;
 *         NV_ERR_INVALID_STATE if no subdevice had it bound.
 */
NV_STATUS
dispchnUnbindCtx_IMPL
(
    OBJGPU      *pGpu,
    ContextDma  *pContextDma,
    NvHandle     hChannel
)
{
    RsClient     *pClient = RES_GET_CLIENT(pContextDma);
    DispChannel  *pDispChannel = NULL;
    NV_STATUS  rmStatus = NV_OK;
    KernelDisplay *pKernelDisplay;
    DisplayInstanceMemory *pInstMem;
    NvBool bFound = NV_FALSE;

    // Look-up channel given by client
    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
        dispchnGetByHandle(pClient, hChannel, &pDispChannel));

    // Ensure ContextDma and DisplayChannel are on the same device
    NV_CHECK_OR_RETURN(LEVEL_ERROR, pContextDma->pDevice == GPU_RES_GET_DEVICE(pDispChannel),
                       NV_ERR_INVALID_DEVICE);

    // Try every subdevice; success on any one of them counts as "found".
    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
    pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);

    rmStatus = instmemUnbindContextDma(pGpu, pInstMem, pContextDma, pDispChannel);
    if (rmStatus == NV_OK)
    {
        bFound = NV_TRUE;
    }

    SLI_LOOP_END

    return bFound ? NV_OK : NV_ERR_INVALID_STATE;
}
683 
684 /*!
685  * @brief Unbind all ContextDmas from the given channel
686  */
687 void
dispchnUnbindAllCtx_IMPL(OBJGPU * pGpu,DispChannel * pDispChannel)688 dispchnUnbindAllCtx_IMPL
689 (
690     OBJGPU      *pGpu,
691     DispChannel *pDispChannel
692 )
693 {
694     KernelDisplay *pKernelDisplay;
695     DisplayInstanceMemory *pInstMem;
696 
697     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
698     pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
699     pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
700 
701     instmemUnbindDispChannelContextDmas(pGpu, pInstMem, pDispChannel);
702 
703     SLI_LOOP_END
704 }
705 
706 /*!
707  * @brief Unbind ContextDma from all display channels
708  */
709 void
dispchnUnbindCtxFromAllChannels_IMPL(OBJGPU * pGpu,ContextDma * pContextDma)710 dispchnUnbindCtxFromAllChannels_IMPL
711 (
712     OBJGPU      *pGpu,
713     ContextDma  *pContextDma
714 )
715 {
716     KernelDisplay *pKernelDisplay;
717     DisplayInstanceMemory *pInstMem;
718 
719     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
720     pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
721     pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
722 
723     instmemUnbindContextDmaFromAllChannels(pGpu, pInstMem, pContextDma);
724 
725     SLI_LOOP_END
726 }
727 
728 NV_STATUS
kdispSetPushBufferParamsToPhysical_IMPL(OBJGPU * pGpu,KernelDisplay * pKernelDisplay,DispChannel * pDispChannel,NvHandle hObjectBuffer,ContextDma * pBufferContextDma,NvU32 hClass,NvU32 channelInstance,DISPCHNCLASS internalDispChnClass)729 kdispSetPushBufferParamsToPhysical_IMPL
730 (
731     OBJGPU          *pGpu,
732     KernelDisplay   *pKernelDisplay,
733     DispChannel     *pDispChannel,
734     NvHandle         hObjectBuffer,
735     ContextDma      *pBufferContextDma,
736     NvU32            hClass,
737     NvU32            channelInstance,
738     DISPCHNCLASS    internalDispChnClass
739 )
740 {
741     RsClient       *pClient  = RES_GET_CLIENT(pDispChannel);
742     RM_API         *pRmApi   = GPU_GET_PHYSICAL_RMAPI(pGpu);
743     NV_STATUS       rmStatus = NV_OK;
744     NvU32           dispChannelNum;
745     NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS  pushBufferParams = {0};
746 
747     rmStatus = kdispGetChannelNum_HAL(pKernelDisplay, internalDispChnClass, channelInstance, &dispChannelNum);
748     if (rmStatus != NV_OK)
749     {
750         return rmStatus;
751     }
752 
753     pushBufferParams.hclass = hClass;
754     pushBufferParams.channelInstance = channelInstance;
755 
756     if (pDispChannel->bIsDma)
757     {
758         rmStatus = ctxdmaGetByHandle(pClient, hObjectBuffer, &pBufferContextDma);
759         if (rmStatus != NV_OK)
760         {
761             NV_PRINTF(LEVEL_ERROR,
762                       "disp channel[0x%x] didn't have valid ctxdma 0x%x\n",
763                       channelInstance, hObjectBuffer);
764             return rmStatus;
765         }
766 
767         pushBufferParams.limit = pBufferContextDma->Limit;
768         pushBufferParams.addressSpace = memdescGetAddressSpace(pBufferContextDma->pMemDesc);
769         if ((pushBufferParams.addressSpace != ADDR_SYSMEM) && (pushBufferParams.addressSpace != ADDR_FBMEM))
770         {
771             DBG_BREAKPOINT();
772             return NV_ERR_GENERIC;
773         }
774         // Generate PUSHBUFFER_ADDR. Shift the addr to get the size in 4KB
775         pushBufferParams.physicalAddr = memdescGetPhysAddr(memdescGetMemDescFromGpu(pBufferContextDma->pMemDesc, pGpu), AT_GPU, 0);
776         pushBufferParams.cacheSnoop= pBufferContextDma->CacheSnoop;
777         pushBufferParams.pbTargetAperture = kdispGetPBTargetAperture_HAL(pGpu,
778                                                                          pKernelDisplay,
779                                                                          pushBufferParams.addressSpace,
780                                                                          pushBufferParams.cacheSnoop);
781         pushBufferParams.valid = NV_TRUE;
782     }
783     else
784     {
785         pushBufferParams.valid = NV_FALSE;
786     }
787 
788     pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
789                             NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
790                             &pushBufferParams, sizeof(pushBufferParams));
791 
792     return NV_OK;
793 }
794