1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /******************************************************************************
25 *
26 *       Kernel Display Module
27 *       This file contains functions managing display on CPU RM
28 *
29 ******************************************************************************/
30 
31 #define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS     0
32 
33 #include "resserv/resserv.h"
34 #include "rmapi/rmapi.h"
35 #include "rmapi/rs_utils.h"
36 #include "os/os.h"
37 
38 #include "gpu/gpu.h"
39 #include "gpu/device/device.h"
40 #include "gpu/disp/kern_disp.h"
41 #include "gpu/disp/inst_mem/disp_inst_mem.h"
42 #include "gpu/disp/head/kernel_head.h"
43 #include "gpu/disp/disp_objs.h"
44 #include "gpu_mgr/gpu_mgr.h"
45 #include "objtmr.h"
46 #include "core/locks.h"
47 #include "ctrl/ctrl402c.h"
48 #include "platform/acpi_common.h"
49 #include "nvrm_registry.h"
50 
51 #include "kernel/gpu/intr/engine_idx.h"
52 
53 #include "ctrl/ctrl2080.h"
54 
55 #include "class/cl5070.h"
56 #include "class/cl917a.h"
57 #include "class/cl917b.h"
58 #include "class/cl917e.h"
59 #include "class/cl927c.h"
60 #include "class/cl947d.h"
61 #include "class/cl957d.h"
62 #include "class/cl977d.h"
63 #include "class/cl987d.h"
64 #include "class/clc37a.h"
65 #include "class/clc37b.h"
66 #include "class/clc37d.h"
67 #include "class/clc37e.h"
68 #include "class/clc57a.h"
69 #include "class/clc57b.h"
70 #include "class/clc57d.h"
71 #include "class/clc57e.h"
72 #include "class/clc67a.h"
73 #include "class/clc67b.h"
74 #include "class/clc67d.h"
75 #include "class/clc67e.h"
76 #include "class/clc77f.h" //NVC77F_ANY_CHANNEL_DMA
77 
78 #include "class/clc77d.h"
79 
80 #include "gpu/disp/rg_line_callback/rg_line_callback.h"
81 
82 #include "rmapi/rmapi_utils.h"
83 #include "class/cl0073.h"
84 
85 NV_STATUS
86 kdispConstructEngine_IMPL(OBJGPU        *pGpu,
87                           KernelDisplay *pKernelDisplay,
88                           ENGDESCRIPTOR  engDesc)
89 {
90     NV_STATUS status;
91 
92     //
93     // NOTE: DO NOT call IpVersion _HAL functions in ConstructEngine.
94     // IP version based _HAL functions can only be used starting StatePreInit.
95     // Long-term: RM offload initialization will be moved earlier so KernelDisplay
96     // has the ability to use IP version HAL functions even in construct phase.
97     //
98 
99     //
100     // Sanity check: the only time KERNEL_DISPLAY module should be enabled
101     // while DISP is disabled is on KERNEL_ONLY build.
102     //
103     NV_ASSERT(IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu) || RMCFG_MODULE_DISP);
104 
105     //
106     // We also need to check if we are in certain configurations which can't
107     // even attempt a control call to DISP.
108     //
109     if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IS_MISSING))
110         return NV_ERR_NOT_SUPPORTED;
111 
112     // Create children
113     pKernelDisplay->pInst = NULL;
114     status = kdispConstructInstMem_HAL(pKernelDisplay);
115     if (status != NV_OK)
116     {
117         return status;
118     }
119 
120     status = kdispConstructKhead(pKernelDisplay);
121 
122     // We defer checking whether DISP has been disabled some other way until
123     // StateInit, when we can do a control call.
124 
125     return status;
126 }
127 
/*! Destructor for KernelDisplay: tears down the child objects created by
 *  kdispConstructEngine (instance memory and the per-head KernelHead array). */
void
kdispDestruct_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    // Destroy children
    kdispDestructInstMem_HAL(pKernelDisplay);
    kdispDestructKhead(pKernelDisplay);
}
138 
139 /*! Constructor for DisplayInstanceMemory */
140 NV_STATUS
141 kdispConstructInstMem_IMPL
142 (
143     KernelDisplay *pKernelDisplay
144 )
145 {
146     NV_STATUS status;
147     DisplayInstanceMemory *pInst;
148 
149     status = objCreate(&pInst, pKernelDisplay, DisplayInstanceMemory);
150     if (status != NV_OK)
151     {
152         return status;
153     }
154 
155     pKernelDisplay->pInst = pInst;
156     return NV_OK;
157 }
158 
/*! Destructor for DisplayInstanceMemory: deletes the child object and clears
 *  the cached pointer so later teardown paths see a NULL pInst. */
void
kdispDestructInstMem_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    objDelete(pKernelDisplay->pInst);
    pKernelDisplay->pInst = NULL;
}
169 
170 /*! Constructor for Kernel head */
171 NV_STATUS
172 kdispConstructKhead_IMPL
173 (
174     KernelDisplay *pKernelDisplay
175 )
176 {
177     NV_STATUS   status;
178     KernelHead *pKernelHead;
179     NvU8        headIdx;
180 
181     for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
182     {
183         status = objCreate(&pKernelHead, pKernelDisplay, KernelHead);
184         if (status != NV_OK)
185         {
186             return status;
187         }
188 
189         pKernelDisplay->pKernelHead[headIdx] = pKernelHead;
190         pKernelDisplay->pKernelHead[headIdx]->PublicId = headIdx;
191     }
192     return NV_OK;
193 }
194 
195 /*! Destructor for Kernel head */
196 void
197 kdispDestructKhead_IMPL
198 (
199     KernelDisplay *pKernelDisplay
200 )
201 {
202     NvU8      headIdx;
203 
204     for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
205     {
206         objDelete(pKernelDisplay->pKernelHead[headIdx]);
207         pKernelDisplay->pKernelHead[headIdx] = NULL;
208     }
209 }
210 
211 NV_STATUS
212 kdispAllocateCommonHandle_IMPL
213 (
214     OBJGPU *pGpu,
215     KernelDisplay *pKernelDisplay
216 )
217 {
218     NV_STATUS rmStatus;
219     NvHandle  hClient;
220     NvHandle  hDevice;
221     NvHandle  hSubdevice;
222     NvHandle  hSubscription = NV01_NULL_OBJECT;
223     RM_API   *pRmApi        = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
224 
225     rmStatus = rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, &hClient,
226                                                     &hDevice, &hSubdevice);
227     NV_ASSERT_OR_RETURN(rmStatus == NV_OK, NV_FALSE);
228 
229     rmStatus = pRmApi->AllocWithSecInfo(pRmApi, hClient, hDevice, &hSubscription,
230                                         NV04_DISPLAY_COMMON, NULL, 0, RMAPI_ALLOC_FLAGS_NONE,
231                                         NULL, &pRmApi->defaultSecInfo);
232     NV_ASSERT_OR_RETURN(rmStatus == NV_OK, NV_FALSE);
233 
234     pKernelDisplay->hInternalClient = hClient;
235     pKernelDisplay->hInternalDevice = hDevice;
236     pKernelDisplay->hInternalSubdevice = hSubdevice;
237     pKernelDisplay->hDispCommonHandle = hSubscription;
238 
239     return NV_OK;
240 }
241 
242 void
243 kdispDestroyCommonHandle_IMPL
244 (
245     KernelDisplay *pKernelDisplay
246 )
247 {
248     NV_STATUS rmStatus;
249     RM_API   *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
250 
251     rmStatus = pRmApi->FreeWithSecInfo(pRmApi, pKernelDisplay->hInternalClient,
252                                         pKernelDisplay->hDispCommonHandle,
253                                         RMAPI_ALLOC_FLAGS_NONE, &pRmApi->defaultSecInfo);
254     NV_ASSERT(rmStatus == NV_OK);
255 
256     rmapiutilFreeClientAndDeviceHandles(pRmApi, &pKernelDisplay->hInternalClient,
257                                         &pKernelDisplay->hInternalDevice,
258                                         &pKernelDisplay->hInternalSubdevice);
259 
260     pKernelDisplay->hInternalClient = 0;
261     pKernelDisplay->hInternalDevice = 0;
262     pKernelDisplay->hInternalSubdevice = 0;
263     pKernelDisplay->hDispCommonHandle = 0;
264 }
265 
266 NV_STATUS
267 kdispStatePreInitLocked_IMPL(OBJGPU        *pGpu,
268                              KernelDisplay *pKernelDisplay)
269 {
270     NV_STATUS status;
271     RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
272     NvU32   hClient = pGpu->hInternalClient;
273     NvU32   hSubdevice = pGpu->hInternalSubdevice;
274     NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS ctrlParams;
275 
276     if (!gpuFuseSupportsDisplay_HAL(pGpu))
277        return NV_ERR_NOT_SUPPORTED;
278 
279     status = pRmApi->Control(pRmApi, hClient, hSubdevice,
280                              NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION,
281                              &ctrlParams, sizeof(ctrlParams));
282     if (status != NV_OK)
283     {
284         NV_PRINTF(LEVEL_WARNING,
285                   "Failed to read display IP version (FUSE disabled), status=0x%x\n",
286                   status);
287         return status;
288     }
289 
290     // NOTE: KernelDisplay IpVersion _HAL functions can only be called after this point.
291     status = gpuInitDispIpHal(pGpu, ctrlParams.ipVersion);
292 
293     kdispInitRegistryOverrides_HAL(pGpu, pKernelDisplay);
294 
295     kdispAllocateCommonHandle(pGpu, pKernelDisplay);
296 
297     return status;
298 }
299 
300 NV_STATUS
301 kdispInitBrightcStateLoad_IMPL(OBJGPU *pGpu,
302                                KernelDisplay *pKernelDisplay)
303 {
304     NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *pBrightcInfo = NULL;
305     NvU32 status = NV_ERR_NOT_SUPPORTED;
306     RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
307 
308     pBrightcInfo = portMemAllocNonPaged(sizeof(NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS));
309     if (pBrightcInfo == NULL)
310     {
311         NV_PRINTF(LEVEL_ERROR, "Could not allocate memory for pBrightcInfo\n");
312         return NV_ERR_NO_MEMORY;
313     }
314     portMemSet(pBrightcInfo, 0, sizeof(*pBrightcInfo));
315 
316     pBrightcInfo->status = status;
317     if ((pKernelDisplay != NULL) && (pKernelDisplay->pStaticInfo->internalDispActiveMask != 0))
318     {
319         // Fill in the Backlight Method Data.
320         pBrightcInfo->backLightDataSize = sizeof(pBrightcInfo->backLightData);
321         status = osCallACPI_DSM(pGpu, ACPI_DSM_FUNCTION_CURRENT, NV_ACPI_GENERIC_FUNC_GETBACKLIGHT,
322                                 (NvU32 *)(pBrightcInfo->backLightData),
323                                 &pBrightcInfo->backLightDataSize);
324         pBrightcInfo->status = status;
325     }
326 
327     status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
328                     NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD,
329                     pBrightcInfo, sizeof(*pBrightcInfo));
330 
331     portMemFree(pBrightcInfo);
332 
333     return status;
334 }
335 
336 NV_STATUS
337 kdispSetupAcpiEdid_IMPL
338 (
339     OBJGPU        *pGpu,
340     KernelDisplay *pKernelDisplay
341 )
342 {
343     NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS *pEdidParams = NULL;
344     RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
345     NvU32 status = NV_ERR_GENERIC;
346     NvU32 index;
347 
348     pEdidParams = portMemAllocNonPaged(sizeof(NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS));
349     if (pEdidParams == NULL)
350     {
351         NV_PRINTF(LEVEL_ERROR, "Could not allocate memory for pEdidParams\n");
352         return NV_ERR_NO_MEMORY;
353     }
354     portMemSet(pEdidParams, 0, sizeof(*pEdidParams));
355 
356     pEdidParams->tableLen = pGpu->acpiMethodData.dodMethodData.acpiIdListLen / sizeof(NvU32);
357 
358     for (index = 0; index < pEdidParams->tableLen; index++)
359     {
360         pEdidParams->edidTable[index].bufferSize = MAX_EDID_SIZE_FROM_SBIOS;
361         status = osCallACPI_DDC(pGpu, pGpu->acpiMethodData.dodMethodData.acpiIdList[index],
362                                     pEdidParams->edidTable[index].edidBuffer,
363                                     &pEdidParams->edidTable[index].bufferSize, NV_TRUE);
364         pEdidParams->edidTable[index].acpiId = pGpu->acpiMethodData.dodMethodData.acpiIdList[index];
365         pEdidParams->edidTable[index].status = status;
366     }
367 
368     status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
369                     NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA,
370                     pEdidParams, sizeof(*pEdidParams));
371 
372     portMemFree(pEdidParams);
373 
374     return status;
375 }
376 
377 void
378 kdispInitRegistryOverrides_IMPL(OBJGPU        *pGpu,
379                                 KernelDisplay *pKernelDisplay)
380 {
381     NvU32 data32 = 0;
382 
383     if (pKernelDisplay == NULL)
384     {
385         return;
386     }
387 
388     if (NV_OK == osReadRegistryDword(pGpu, NV_REG_STR_RM_BUG_2089053_WAR, &data32))
389     {
390         if (data32 == NV_REG_STR_RM_BUG_2089053_WAR_DISABLE)
391         {
392             pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANK_ALWAYS, NV_FALSE);
393             pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANKS_ONLY_ON_HMD_ACTIVE, NV_FALSE);
394         }
395         else if (data32 == NV_REG_STR_RM_BUG_2089053_WAR_ENABLE_ALWAYS)
396         {
397             pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANK_ALWAYS, NV_TRUE);
398             pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANKS_ONLY_ON_HMD_ACTIVE, NV_FALSE);
399         }
400         else if (data32 == NV_REG_STR_RM_BUG_2089053_WAR_ENABLE_ON_HMD_ACTIVE_ONLY)
401         {
402             pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANKS_ONLY_ON_HMD_ACTIVE, NV_TRUE);
403             pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANK_ALWAYS, NV_FALSE);
404         }
405     }
406 }
407 
/*!
 * StateInit for KernelDisplay: fetches static display info from physical RM,
 * runs BRIGHTC state load and ACPI EDID setup, initializes display instance
 * memory, probes external I2C daughterboards, and (optionally) imports Tegra
 * IMP data.
 *
 * @param[in] pGpu            OBJGPU pointer
 * @param[in] pKernelDisplay  KernelDisplay pointer
 *
 * @return NV_OK on success, or the first failing status.
 */
NV_STATUS
kdispStateInitLocked_IMPL(OBJGPU        *pGpu,
                          KernelDisplay *pKernelDisplay)
{
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
    NV_STATUS status = NV_OK;
    KernelDisplayStaticInfo *pStaticInfo;

    pStaticInfo = portMemAllocNonPaged(sizeof(KernelDisplayStaticInfo));
    if (pStaticInfo == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "Could not allocate KernelDisplayStaticInfo");
        status = NV_ERR_NO_MEMORY;
        goto exit;
    }
    portMemSet(pStaticInfo, 0, sizeof(*pStaticInfo));

    // Populate the static info from physical RM; failure frees the buffer at exit.
    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
        pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
                        NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
                        pStaticInfo, sizeof(*pStaticInfo)),
        exit);

    //
    // Ownership transfer: pKernelDisplay->pStaticInfo now owns the buffer.
    // The local is NULLed so the portMemFree at 'exit' becomes a no-op.
    //
    pKernelDisplay->pStaticInfo = pStaticInfo;
    pKernelDisplay->numHeads = pStaticInfo->numHeads;
    pStaticInfo = NULL;

    // Initiate Brightc module state load
    status = kdispInitBrightcStateLoad_HAL(pGpu, pKernelDisplay);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "rmapi control call for brightc state load failed\n");
        goto exit;
    }

    // Set up ACPI DDC data in Physical RM for future usage
    status = kdispSetupAcpiEdid_HAL(pGpu, pKernelDisplay);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_WARNING, "rmapi control call for acpi child device init failed\n");
        goto exit;
    }

    // pInst is only present when display instance memory was constructed.
    if (pKernelDisplay->pInst != NULL)
    {
        NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
                instmemStateInitLocked(pGpu, pKernelDisplay->pInst), exit);
    }

    // Initialize any external daughterboards that
    // might be out there.

    // Sentinel: NV402C_CTRL_NUM_I2C_PORTS means "no valid port".
    pGpu->i2cPortForExtdev = NV402C_CTRL_NUM_I2C_PORTS;

    if (pKernelDisplay->pStaticInfo->i2cPort == NV402C_CTRL_NUM_I2C_PORTS)
    {
        NV_PRINTF(LEVEL_INFO, "Error in getting valid I2Cport for Extdevice or extdevice doesn't exist\n");
    }
    else
    {
        pGpu->i2cPortForExtdev = pKernelDisplay->pStaticInfo->i2cPort;

        // Best-effort: extdev construction failure is not fatal to StateInit.
        if (NV_OK != gpuExtdevConstruct_HAL(pGpu))
        {
            NV_PRINTF(LEVEL_INFO, "gpuExtdevConstruct() failed or not supported\n");
        }
    }

    if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IMP_ENABLE))
    {
        // NOTE: Fills IMP parameters and populate those to disp object in Tegra
        NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
                kdispImportImpData_HAL(pKernelDisplay), exit);
    }

exit:
    // Frees the buffer only on early-failure paths (pStaticInfo is NULL on success).
    portMemFree(pStaticInfo);

    return status;
}
488 
489 void
490 kdispStateDestroy_IMPL(OBJGPU *pGpu,
491                        KernelDisplay *pKernelDisplay)
492 {
493     if (pKernelDisplay->pInst != NULL)
494     {
495         instmemStateDestroy(pGpu, pKernelDisplay->pInst);
496     }
497 
498     portMemFree((void*) pKernelDisplay->pStaticInfo);
499     pKernelDisplay->pStaticInfo = NULL;
500 
501     kdispDestroyCommonHandle(pKernelDisplay);
502 }
503 
504 NV_STATUS
505 kdispStateLoad_IMPL
506 (
507     OBJGPU        *pGpu,
508     KernelDisplay *pKernelDisplay,
509     NvU32         flags
510 )
511 {
512     NV_STATUS status = NV_OK;
513 
514     if (pKernelDisplay->pInst != NULL)
515         status = instmemStateLoad(pGpu, pKernelDisplay->pInst, flags);
516 
517     return status;
518 }
519 
520 NV_STATUS
521 kdispStateUnload_IMPL
522 (
523     OBJGPU        *pGpu,
524     KernelDisplay *pKernelDisplay,
525     NvU32         flags
526 )
527 {
528     NV_STATUS status = NV_OK;
529 
530     if (pKernelDisplay->pInst != NULL)
531         status = instmemStateUnload(pGpu, pKernelDisplay->pInst, flags);
532 
533     return status;
534 }
535 
/*! Get and Populate IMP init data for Tegra.
 *
 *  Queries the SoC IMP import data and forwards it to physical RM via the
 *  SET_IMP_INIT_INFO internal control. On Tegra FPGA, IMP is force-disabled
 *  and NV_OK is returned without any control call.
 *
 *  @param[in] pKernelDisplay  KernelDisplay pointer
 *
 *  @return NV_OK, or an asserted failure status from the OS query/control call.
 */
NV_STATUS
kdispImportImpData_IMPL(KernelDisplay *pKernelDisplay)
{
    OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay);
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
    NvU32   hClient = pGpu->hInternalClient;
    NvU32   hSubdevice = pGpu->hInternalSubdevice;
    // NOTE(review): params is not zero-initialized; assumes tegraImpImportData
    // is the only input field physical RM reads — TODO confirm.
    NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS params;
    NvU32   simulationMode;

    //
    // FPGA has different latency characteristics, and the current code latency
    // models that IMP uses for silicon will not work for FPGA, so keep IMP
    // disabled by default on Tegra FPGA.
    //
    simulationMode = osGetSimulationMode();
    if (simulationMode == NV_SIM_MODE_TEGRA_FPGA)
    {
        pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_IMP_ENABLE, NV_FALSE);
        return NV_OK;
    }

    NV_ASSERT_OK_OR_RETURN(osTegraSocGetImpImportData(&params.tegraImpImportData));

    NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, hClient, hSubdevice,
                           NV2080_CTRL_CMD_INTERNAL_DISPLAY_SET_IMP_INIT_INFO,
                           &params, sizeof(params)));

    return NV_OK;
}
567 
/*! Get internal enum equivalent of the HW class number.
 *
 *  Maps a display HW channel class (e.g. NVC37E_WINDOW_CHANNEL_DMA) to the
 *  internal DISPCHNCLASS enum used by channel-management code.
 *
 *  @param[in]  pKernelDisplay  KernelDisplay pointer (unused in the mapping)
 *  @param[in]  hwClass         HW class number from the allocation request
 *  @param[out] pDispChnClass   Receives the internal channel class
 *
 *  @return NV_OK on a known class, NV_ERR_INVALID_ARGUMENT for NULL output
 *          pointer or unknown class, NV_ERR_INVALID_CHANNEL for the ANY
 *          channel outside kernel RM.
 */
NV_STATUS
kdispGetIntChnClsForHwCls_IMPL
(
    KernelDisplay *pKernelDisplay,
    NvU32          hwClass,
    DISPCHNCLASS  *pDispChnClass
)
{
    // sanity check
    if (pDispChnClass == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    switch (hwClass)
    {
        // Cursor channels (PIO)
        case NV917A_CURSOR_CHANNEL_PIO:
        case NVC37A_CURSOR_IMM_CHANNEL_PIO:
        case NVC57A_CURSOR_IMM_CHANNEL_PIO:
        case NVC67A_CURSOR_IMM_CHANNEL_PIO:
            *pDispChnClass = dispChnClass_Curs;
            break;

        // Overlay immediate channel (PIO)
        case NV917B_OVERLAY_IMM_CHANNEL_PIO:
            *pDispChnClass = dispChnClass_Ovim;
            break;

        // Base channel (DMA)
        case NV927C_BASE_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Base;
            break;

        // Core channels (DMA) across display generations
        case NV947D_CORE_CHANNEL_DMA:
        case NV957D_CORE_CHANNEL_DMA:
        case NV977D_CORE_CHANNEL_DMA:
        case NV987D_CORE_CHANNEL_DMA:
        case NVC37D_CORE_CHANNEL_DMA:
        case NVC57D_CORE_CHANNEL_DMA:
        case NVC67D_CORE_CHANNEL_DMA:
        case NVC77D_CORE_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Core;
            break;

        // Overlay channel (DMA)
        case NV917E_OVERLAY_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Ovly;
            break;

        // Window immediate channels (DMA)
        case NVC37B_WINDOW_IMM_CHANNEL_DMA:
        case NVC57B_WINDOW_IMM_CHANNEL_DMA:
        case NVC67B_WINDOW_IMM_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Winim;
            break;

        // Window channels (DMA)
        case NVC37E_WINDOW_CHANNEL_DMA:
        case NVC57E_WINDOW_CHANNEL_DMA:
        case NVC67E_WINDOW_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Win;
            break;

        case NVC77F_ANY_CHANNEL_DMA:
            // Assert incase of physical RM, Any channel is kernel only channel.
            NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_INVALID_CHANNEL);
            *pDispChnClass = dispChnClass_Any;
            break;

        default:
            NV_PRINTF(LEVEL_ERROR, "Unknown channel class %x\n", hwClass);
            return NV_ERR_INVALID_ARGUMENT;
    }

    return NV_OK;
}
638 
/*!
 * Deliver a display event to every client notifier listening on this GPU.
 *
 * Iterates all shared NotifShare objects, filters to DisplayApi notifiers on
 * the matching GPU, optionally fills the bound notifier memory, and pings the
 * OS event list for the given notify index. Single-shot actions are reset to
 * disabled after delivery.
 *
 * @param[in] pGpu              OBJGPU pointer
 * @param[in] pKernelDisplay    KernelDisplay pointer (unused in the loop body)
 * @param[in] notifyIndex       Event/notifier index being raised
 * @param[in] pNotifyParams     Payload forwarded to osEventNotification
 * @param[in] notifyParamsSize  Size of the payload in bytes
 * @param[in] info32            32-bit info written to notifier memory
 * @param[in] info16            16-bit info written to notifier memory
 */
void
kdispNotifyEvent_IMPL
(
    OBJGPU        *pGpu,
    KernelDisplay *pKernelDisplay,
    NvU32          notifyIndex,
    void          *pNotifyParams,
    NvU32          notifyParamsSize,
    NvV32          info32,
    NvV16          info16
)
{
    PEVENTNOTIFICATION pEventNotifications;
    NvU32             *pNotifyActions;
    NvU32              disableCmd, singleCmd;
    NvU32              subDeviceInst;
    RS_SHARE_ITERATOR  it = serverutilShareIter(classId(NotifShare));

    // search notifiers with events hooked up for this gpu
    while (serverutilShareIterNext(&it))
    {
        RsShared   *pShared = it.pShared;
        DisplayApi *pDisplayApi;
        INotifier  *pNotifier;
        Device     *pDevice;
        NotifShare *pNotifierShare = dynamicCast(pShared, NotifShare);

        if ((pNotifierShare == NULL) || (pNotifierShare->pNotifier == NULL))
            continue;

        pNotifier = pNotifierShare->pNotifier;
        pDisplayApi = dynamicCast(pNotifier, DisplayApi);

        // Only notify matching GPUs
        if (pDisplayApi == NULL)
            continue;

        // The DisplayApi's parent resource is the Device; use it to find the GPU.
        pDevice = dynamicCast(RES_GET_REF(pDisplayApi)->pParentRef->pResource, Device);

        if (GPU_RES_GET_GPU(pDevice) != pGpu)
            continue;

        // Set broadcast/unicast threading state to match this resource.
        gpuSetThreadBcState(GPU_RES_GET_GPU(pDevice), pDisplayApi->bBcResource);

        disableCmd = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
        singleCmd = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;

        // get notify actions list
        subDeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
        pNotifyActions = pDisplayApi->pNotifyActions[subDeviceInst];
        if (pNotifyActions == NULL)
        {
            continue;
        }

        // get event list
        pEventNotifications = inotifyGetNotificationList(pNotifier);
        if (pEventNotifications == NULL)
        {
            continue;
        }

        // skip if client not "listening" to events of this type
        if (pNotifyActions[notifyIndex] == disableCmd)
        {
            continue;
        }

        // Write completion status into client-bound notifier memory, if any.
        if (pDisplayApi->hNotifierMemory != NV01_NULL_OBJECT &&
            pDisplayApi->pNotifierMemory != NULL)
        {
            notifyFillNotifierMemory(pGpu, pDisplayApi->pNotifierMemory, info32, info16,
                                     NV5070_NOTIFICATION_STATUS_DONE_SUCCESS, notifyIndex);
        }

        // ping events bound to subdevice associated with pGpu
        osEventNotification(pGpu, pEventNotifications,
                            (notifyIndex | OS_EVENT_NOTIFICATION_INDEX_MATCH_SUBDEV),
                            pNotifyParams, notifyParamsSize);

        // reset if single shot notify action
        if (pNotifyActions[notifyIndex] == singleCmd)
        {
            pNotifyActions[notifyIndex] = disableCmd;
        }
    }
}
726 
/*! Enable/disable the workaround that purges satellite channels when the
 *  core channel is freed.
 *
 *  @param[in] pKernelDisplay  KernelDisplay pointer
 *  @param[in] value           NV_TRUE to enable the WAR, NV_FALSE to disable
 */
void
kdispSetWarPurgeSatellitesOnCoreFree_IMPL
(
    KernelDisplay *pKernelDisplay,
    NvBool value
)
{
    pKernelDisplay->bWarPurgeSatellitesOnCoreFree = value;
}
736 
737 NV_STATUS
738 kdispRegisterRgLineCallback_IMPL
739 (
740     KernelDisplay *pKernelDisplay,
741     RgLineCallback *pRgLineCallback,
742     NvU32 head,
743     NvU32 rgIntrLine,
744     NvBool bEnable
745 )
746 {
747     NV_ASSERT_OR_RETURN(head < OBJ_MAX_HEADS, NV_ERR_INVALID_ARGUMENT);
748     NV_ASSERT_OR_RETURN(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD, NV_ERR_INVALID_ARGUMENT);
749 
750     RgLineCallback **slot = &pKernelDisplay->rgLineCallbackPerHead[head][rgIntrLine];
751 
752     if (bEnable && *slot == NULL)
753     {
754         *slot = pRgLineCallback;
755     }
756     else if (!bEnable && *slot == pRgLineCallback)
757     {
758         *slot = NULL;
759     }
760     else
761     {
762         //
763         // OBJDISP is the authority for *allocating* these "slots";
764         // KernelDisplay trusts it as an allocator.
765         // If we try to register a callback in an existing slot, or free an
766         // empty slot, it means OBJDISP has created conflicting allocations or
767         // has allowed a double-free. (Or RgLineCallback has provided invalid
768         // parameters.)
769         //
770         NV_ASSERT_FAILED("Invalid KernelDisplay state for RgLineCallback");
771         return NV_ERR_INVALID_STATE;
772     }
773 
774     return NV_OK;
775 }
776 
777 void
778 kdispInvokeRgLineCallback_KERNEL
779 (
780     KernelDisplay *pKernelDisplay,
781     NvU32 head,
782     NvU32 rgIntrLine,
783     NvBool bIsIrqlIsr
784 )
785 {
786     NV_ASSERT_OR_RETURN_VOID(head < OBJ_MAX_HEADS);
787     NV_ASSERT_OR_RETURN_VOID(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD);
788 
789     RgLineCallback *pCallbackObject = pKernelDisplay->rgLineCallbackPerHead[head][rgIntrLine];
790 
791     if (pCallbackObject != NULL)
792     {
793         rglcbInvoke(pCallbackObject, bIsIrqlIsr);
794     }
795     else if (IS_GSP_CLIENT(ENG_GET_GPU(pKernelDisplay)))
796     {
797         //
798         // For offloaded RM case, getting a callback invocation without a registered callback could
799         // happen during or after deregistration: there might already have been an event in the
800         // queue by the time we asked physical RM to deconfigure the interrupt.
801         //
802         // Because this could lead to an A-B-A situation where a new callback is registered to the
803         // same slot and invoked in place of the old callback, we must assert against this case.
804         // To avoid this, RgLineCallback must drain the client RM event queue after deconfiguring
805         // the interrupt and before calling kdispRegisterRgLineCallback to deregister the callback.
806         //
807         NV_ASSERT_FAILED("got RgLineCallback invocation for null callback");
808     }
809     else
810     {
811         //
812         // For the monolithic RM case, getting a callback invocation without a registered callback
813         // could happen during registration: after configuring hardware for the interrupt, but
814         // before registering the callback with KernelDisplay, the interrupt could be handled.
815         //
816         // This is not a bug in and of itself as it is harmless and expected. On the other hand we
817         // would not expect to see this warning in the log more than a few times per registration,
818         // e.g. if it were printed for every single interrupt, as the callback ought to be fully
819         // registered before excessively many interrupts are handled.
820         //
821         NV_PRINTF(LEVEL_WARNING, "got RgLineCallback invocation for null callback\n");
822     }
823 }
824 
// Compile-time switch: set to 1 to record per-ISR timestamps for vblank
// servicing (used by kdispServiceVblank_KERNEL below). Off by default.
#define HOTPLUG_PROFILE 0

#if HOTPLUG_PROFILE

    #define ISR_TSTAMP_SIZE 18000 /* 5 minutes (5*60Hz*60)*/

    // Circular-buffer cursor; starts at the last slot so the first ISR wraps to 0.
    NvU32 timeStampIndexISR = ISR_TSTAMP_SIZE-1;

    // Absolute start time of each ISR, and delta from the previous ISR.
    tmr_tstamp_u timeStampStartISR[ISR_TSTAMP_SIZE];
    tmr_tstamp_u timeStampDeltaISR[ISR_TSTAMP_SIZE];

#endif
837 
838 void
839 kdispServiceVblank_KERNEL
840 (
841     OBJGPU            *pGpu,
842     KernelDisplay     *pKernelDisplay,
843     NvU32              headmask,
844     NvU32              state,
845     THREAD_STATE_NODE *pThreadState
846 )
847 {
848     NvU32      pending, check_pending, pending_checked;
849     NvU32      Head;
850     NvU32      maskNonEmptyQueues[OBJ_MAX_HEADS];  // array of masks of VBLANK_STATE_PROCESS_XXX_LATENCY bits, indicating which queues are non-empty
851     NvU32      unionNonEmptyQueues = 0;            // mask of VBLANK_STATE_PROCESS_XXX_LATENCY bits, union of queue states of all heads w/ pending vblank ints
852     NvU32      Count = 0;
853     NvU32      i, skippedcallbacks;
854     NvU32      maskCallbacksStillPending = 0;
855     KernelHead    *pKernelHead = NULL;
856 
857 #if HOTPLUG_PROFILE
858     OBJTMR    *pTmr;
859     pTmr = GPU_GET_TIMER(pGpu);
860     if (++timeStampIndexISR >= ISR_TSTAMP_SIZE)
861         timeStampIndexISR = 0;
862 
863     tmrGetCurrentTime(pTmr, &timeStampStartISR[timeStampIndexISR].time32.hi, &timeStampStartISR[timeStampIndexISR].time32.lo);
864 
865     // For the ISR we want to know how much time since the last ISR.
866     if (timeStampIndexISR)
867     {
868         NvU64 temp64;
869 
870         temp64 = timeStampStartISR[timeStampIndexISR].time64;
871         temp64 -= timeStampStartISR[timeStampIndexISR-1].time64;
872 
873         timeStampDeltaISR[timeStampIndexISR].time64 = temp64;
874     }
875 #endif
876 
877 
878     // If the caller failed to spec which queue, figure they wanted all of them
879     if (!(state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) )
880     {
881         state |= VBLANK_STATE_PROCESS_ALL_CALLBACKS;
882     }
883 
884     // If the headmask is 0, we should process all heads
885     if (headmask == 0)
886     {
887         headmask = 0xFFFFFFFF;
888     }
889 
890     //
891     // If we are being asked to process the callbacks now, regardless of the true irqspending,
892     // we force the pending mask to the head mask passed in.
893     //
894     if (state & VBLANK_STATE_PROCESS_IMMEDIATE)
895     {
896         pending = headmask;
897     }
898     else
899     {
900         // We're here because at least one of the PCRTC bits MAY be pending.
901         pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
902     }
903 
904     //  No sense in doing anything if there is nothing pending.
905     if (pending == 0)
906     {
907         return;
908     }
909 
910     //
911     // We want to check for pending service now and then we check again each
// time through the loop. Keep these separate.
913     //
914     check_pending = pending;
915 
916     // We have not checked anything yet
917     pending_checked = 0;
918 
919     // Start with head 0
920     Head = 0;
921 
922     //
923     // We keep scanning all supported heads, and if we have something pending,
924     // check the associated queues
925     //
926     while(pending_checked != pending)
927     {
928         pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
929 
930         // Move on if this crtc's interrupt isn't pending...
931         if ( (headmask & check_pending & ~pending_checked) & NVBIT(Head))
932         {
933             // Track that we have now checked this head
934             pending_checked |= NVBIT(Head);
935 
936             // If our queues are empty, we can bail early
937             maskNonEmptyQueues[Head]  = kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, state, NULL);
938             unionNonEmptyQueues      |= maskNonEmptyQueues[Head];
939 
940             // This function will check to see if there are callback states in which the
941             // caller has skipped execution.
942             skippedcallbacks = ((state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) ^ VBLANK_STATE_PROCESS_ALL_CALLBACKS);
943             skippedcallbacks |= (state & (VBLANK_STATE_PROCESS_CALLED_FROM_ISR | VBLANK_STATE_PROCESS_IMMEDIATE));
944 
945             // now lets see if there's callbacks pending on the skipped callbacks
946             maskCallbacksStillPending |= NVBIT(Head) * !!kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, skippedcallbacks, NULL);
947         }
948 
949         // Don't check for new interrupts if we are in immediate mode
950         if (!(state & VBLANK_STATE_PROCESS_IMMEDIATE) )
951         {
952             pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
953         }
954 
955         // if there was a change in the pending state, we should recheck everything
956         if (check_pending != pending)
957         {
958             // We need to recheck heads that were not pending before
959             check_pending = pending;
960             Head = 0;
961         }
962         else
963         {
964             // Nothing changed, so move on to the next head
965             Head++;
966         }
967 
        // Make sure we don't waste time on heads that don't exist
969         if (Head >= OBJ_MAX_HEADS)
970         {
971             break;
972         }
973     }
974 
975     if (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR)
976     {
977         // store off which heads have pending vblank interrupts, for comparison at the next DPC time.
978         pKernelDisplay->isrVblankHeads = pending;
979 
980     }
981 
982     // increment the per-head vblank total counter, for any head with a pending vblank intr
983     for (Head=0; Head < OBJ_MAX_HEADS; Head++)
984     {
985         // Move on if this crtc's interrupt isn't pending...
986         if ((pending & NVBIT(Head)) == 0)
987         {
988             continue;
989         }
990 
991         pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
992         //
993         // increment vblank counters, as appropriate.
994         //
995 
996         // Track the fact that we passed through here. This keeps the RC manager happy.
997         Count = kheadGetVblankTotalCounter_HAL(pKernelHead) + 1;
998         kheadSetVblankTotalCounter_HAL(pKernelHead, Count);
999 
1000         //
1001         // Update the vblank counter if we are single chip or multichip master.
1002         // We now have two queues, so we need to have two vblank counters.
1003         //
1004 
1005         // did they ask for processing of low-latency work?
1006         if (state & VBLANK_STATE_PROCESS_LOW_LATENCY /* & maskNonEmptyQueues[Head]*/)
1007         {
1008             //
1009             // don't let the DPC thread increment the low-latency counter.
1010             // otherwise, the counter will frequently increment at double the
1011             // expected rate, breaking things like swapInterval.
1012             //
1013             // XXX actually, there is one case where it would be OK for the DPC
1014             // thread to increment this counter:  if the DPC thread could ascertain
1015             // that 'pending & NVBIT(Head)' represented a new interrupt event, and
1016             // not simply the one that the ISR left uncleared in PCRTC_INTR_0, for
1017             // the purpose of causing this DPC thread to get queued.
1018             // Not sure how to do that.
1019             //
1020             if ( !(state & VBLANK_STATE_PROCESS_CALLED_FROM_DPC) || (pending & NVBIT(Head) & ~pKernelDisplay->isrVblankHeads) )
1021             {
1022                 // either we were called from the ISR, or vblank is asserted in DPC when it wasn't in the ISR
1023 
1024                 // low latency queue requested, and this isn't a DPC thread.
1025                 Count = kheadGetVblankLowLatencyCounter_HAL(pKernelHead) + 1;
1026                 kheadSetVblankLowLatencyCounter_HAL(pKernelHead, Count);
1027            }
1028         }
1029 
1030         // did they ask for processing of normal-latency work?
1031         if (state & VBLANK_STATE_PROCESS_NORMAL_LATENCY /* & maskNonEmptyQueues[Head]*/)
1032         {
1033             // processing of the normal latency queue requested
1034             Count = kheadGetVblankNormLatencyCounter_HAL(pKernelHead) + 1;
1035             kheadSetVblankNormLatencyCounter_HAL(pKernelHead, Count);
1036         }
1037     }
1038 
1039     //
1040     // If we have nothing to process (no work to do in queue),
1041     // we can bail early. We got here for some reason, so make
1042     // sure we clear the interrupts.
1043     //
1044 
1045     if (!unionNonEmptyQueues)
1046     {
1047         // all queues (belonging to heads with pending vblank ints) are empty.
1048         if (IS_GSP_CLIENT(pGpu))
1049         {
1050             kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState);
1051         }
1052         return;
1053     }
1054 
1055     //
1056     // Although we have separate handlers for each head, attempt to process all
1057     // interrupting heads now. What about DPCs schedule already?
1058     //
1059     for (Head = 0; Head < OBJ_MAX_HEADS; Head++)
1060     {
1061         pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
1062         // Move on if this crtc's interrupt isn't pending...
1063         if ((pending & NVBIT(Head)) == 0)
1064         {
1065             continue;
1066         }
1067 
1068         // Process the callback list for this Head...
1069         kheadProcessVblankCallbacks(pGpu, pKernelHead, state);
1070     }
1071 
1072     //
1073     // if there are still callbacks pending, and we are in an ISR,
1074     // then don't clear PCRTC_INTR; XXXar why would we *ever* want
1075     // to clear PCRTC_INTR if there are still things pending?
1076     //
1077     if ( (maskCallbacksStillPending) &&
1078          (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR) )
1079     {
1080         //
1081         // there are still callbacks pending; don't clear
1082         // PCRTC_INTR, yet. The expectation is that the OS layer
1083         // will see that interrupts are still pending and queue a
1084         // DPC/BottomHalf/whatever to service the rest of the
1085         // vblank callback queues
1086         //
1087         for(i=0; i< OBJ_MAX_HEADS; i++)
1088         {
1089             pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
1090             if (IS_GSP_CLIENT(pGpu))
1091             {
1092                 kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState);
1093             }
1094         }
1095     }
1096     else
1097     {
1098         // reset the VBlank intrs we've handled, and don't reset the vblank intrs we haven't.
1099         for(i=0; i< OBJ_MAX_HEADS; i++)
1100         {
1101             pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
1102             if (pending & NVBIT(i) & ~maskCallbacksStillPending)
1103             {
1104                 kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState);
1105             }
1106         }
1107     }
1108 
1109     return;
1110 }
1111 
1112 NvU32 kdispReadPendingVblank_IMPL(OBJGPU *pGpu, KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *pThreadState)
1113 {
1114     KernelHead *pKernelHead;
1115     NvU32       headIdx, pending = 0;
1116 
1117     for (headIdx = 0; headIdx < kdispGetNumHeads(pKernelDisplay); headIdx++)
1118     {
1119         pKernelHead = KDISP_GET_HEAD(pKernelDisplay, headIdx);
1120 
1121         if (kheadReadPendingVblank_HAL(pGpu, pKernelHead, NULL, pThreadState))
1122         {
1123             pending |= NVBIT(headIdx);
1124         }
1125     }
1126 
1127     return pending;
1128 }
1129 
1130 /**
1131  * @brief Provides an opportunity to register some IntrService during intrStateInit.
1132  */
1133 void
1134 kdispRegisterIntrService_IMPL
1135 (
1136     OBJGPU *pGpu,
1137     KernelDisplay *pKernelDisplay,
1138     IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX]
1139 )
1140 {
1141     NvU32 engineIdx = MC_ENGINE_IDX_DISP;
1142     NV_ASSERT(pRecords[engineIdx].pInterruptService == NULL);
1143     pRecords[engineIdx].pInterruptService = staticCast(pKernelDisplay, IntrService);
1144 }
1145 
1146 /*!
1147  * @brief Route modeset start/end notification to kernel RM
1148  *
1149  * Physical RM is expected to send a "start" notification at the beginning of
1150  * every display modeset (supervisor interrupt sequence), and an "end"
1151  * notification at the end.  However, if physical RM detects back-to-back
1152  * modesets, the intervening "end" notification MAY be skipped; in this case,
1153  * the "start" notification for the next modeset serves as the "end notification
1154  * for the previous modeset.
1155  *
1156  * Kernel RM will use the notification to update the BW allocation for display.
1157  * The ICC call that is required to update the BW allocation cannot be made
1158  * from physical RM.
1159  *
1160  * @param[in] pKernelDisplay                KernelDisplay pointer
1161  * @param[in] bModesetStart                 NV_TRUE -> start of modeset;
1162  *                                          NV_FALSE -> end of modeset
1163  * @param[in] minRequiredIsoBandwidthKBPS   Min ISO BW required by IMP (KB/sec)
1164  * @param[in] minRequiredFloorBandwidthKBPS Min dramclk freq * pipe width (KB/sec)
1165  */
1166 void
1167 kdispInvokeDisplayModesetCallback_KERNEL
1168 (
1169     KernelDisplay *pKernelDisplay,
1170     NvBool bModesetStart,
1171     NvU32 minRequiredIsoBandwidthKBPS,
1172     NvU32 minRequiredFloorBandwidthKBPS
1173 )
1174 {
1175     NV_STATUS   status;
1176 
1177     NV_PRINTF(LEVEL_INFO,
1178               "Kernel RM received \"%s of modeset\" notification "
1179               "(minRequiredIsoBandwidthKBPS = %u, minRequiredFloorBandwidthKBPS = %u)\n",
1180               bModesetStart ? "start" : "end",
1181               minRequiredIsoBandwidthKBPS,
1182               minRequiredFloorBandwidthKBPS);
1183 
1184     OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay);
1185     status =
1186         kdispArbAndAllocDisplayBandwidth_HAL(pGpu,
1187                                              pKernelDisplay,
1188                                              DISPLAY_ICC_BW_CLIENT_RM,
1189                                              minRequiredIsoBandwidthKBPS,
1190                                              minRequiredFloorBandwidthKBPS);
1191     //
1192     // The modeset cannot be aborted, so, if there is an error, no recovery
1193     // is possible.
1194     //
1195     NV_ASSERT_OK(status);
1196 }
1197