/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************************************************************
*
*       Kernel Display Module
*       This file contains functions for managing display on CPU RM
*
******************************************************************************/

#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS     0

#include "resserv/resserv.h"
#include "rmapi/rmapi.h"
#include "rmapi/rs_utils.h"
#include "os/os.h"

#include "gpu/gpu.h"
#include "gpu/device/device.h"
#include "gpu/disp/kern_disp.h"
#include "gpu/disp/inst_mem/disp_inst_mem.h"
#include "gpu/disp/head/kernel_head.h"
#include "gpu/disp/disp_objs.h"
#include "gpu_mgr/gpu_mgr.h"
#include "objtmr.h"
#include "core/locks.h"
#include "ctrl/ctrl402c.h"
#include "platform/acpi_common.h"
#include "nvrm_registry.h"

#include "kernel/gpu/intr/engine_idx.h"

#include "gpu/external_device/external_device.h"

#include "ctrl/ctrl2080.h"

#include "class/cl5070.h"
#include "class/cl917a.h"
#include "class/cl917b.h"
#include "class/cl917e.h"
#include "class/cl927c.h"
#include "class/cl947d.h"
#include "class/cl957d.h"
#include "class/cl977d.h"
#include "class/cl987d.h"
#include "class/clc37a.h"
#include "class/clc37b.h"
#include "class/clc37d.h"
#include "class/clc37e.h"
#include "class/clc57a.h"
#include "class/clc57b.h"
#include "class/clc57d.h"
#include "class/clc57e.h"
#include "class/clc67a.h"
#include "class/clc67b.h"
#include "class/clc67d.h"
#include "class/clc67e.h"
#include "class/clc77f.h" // NVC77F_ANY_CHANNEL_DMA

#include "class/clc77d.h"

#include "gpu/disp/rg_line_callback/rg_line_callback.h"

#include "rmapi/rmapi_utils.h"
#include "class/cl0073.h"

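/*!
 * @brief Constructor for the KernelDisplay engine.
 *
 * Creates the display instance memory and kernel head child objects. IP
 * version HAL functions are not yet available at this point; see the note
 * in the body.
 */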
NV_STATUS
kdispConstructEngine_IMPL(OBJGPU        *pGpu,
                          KernelDisplay *pKernelDisplay,
                          ENGDESCRIPTOR  engDesc)
{
    NV_STATUS status;

    //
    // NOTE: DO NOT call IpVersion _HAL functions in ConstructEngine.
    // IP-version-based _HAL functions can only be used from StatePreInit onward.
    // Long-term: RM offload initialization will be moved earlier so that
    // KernelDisplay can use IP version HAL functions even in the construct phase.
    //

    //
    // Sanity check: the only time the KERNEL_DISPLAY module should be enabled
    // while DISP is disabled is on a KERNEL_ONLY build.
    //
    NV_ASSERT(IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu) || RMCFG_MODULE_DISP);

    //
    // We also need to check if we are in certain configurations which can't
    // even attempt a control call to DISP.
    //
    if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IS_MISSING))
        return NV_ERR_NOT_SUPPORTED;

    // Create children
    pKernelDisplay->pInst = NULL;
    status = kdispConstructInstMem_HAL(pKernelDisplay);
    if (status != NV_OK)
    {
        return status;
    }

    status = kdispConstructKhead(pKernelDisplay);

    // We defer checking whether DISP has been disabled some other way until
    // StateInit, when we can do a control call.

    return status;
}

void
kdispDestruct_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    // Destroy children
    kdispDestructInstMem_HAL(pKernelDisplay);
    kdispDestructKhead(pKernelDisplay);
}

/*! Constructor for DisplayInstanceMemory */
NV_STATUS
kdispConstructInstMem_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    NV_STATUS status;
    DisplayInstanceMemory *pInst;

    status = objCreate(&pInst, pKernelDisplay, DisplayInstanceMemory);
    if (status != NV_OK)
    {
        return status;
    }

    pKernelDisplay->pInst = pInst;
    return NV_OK;
}

/*! Destructor for DisplayInstanceMemory */
void
kdispDestructInstMem_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    objDelete(pKernelDisplay->pInst);
    pKernelDisplay->pInst = NULL;
}

/*! Constructor for Kernel head */
NV_STATUS
kdispConstructKhead_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    NV_STATUS   status;
    KernelHead *pKernelHead;
    NvU8        headIdx;

    for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
    {
        status = objCreate(&pKernelHead, pKernelDisplay, KernelHead);
        if (status != NV_OK)
        {
            return status;
        }

        pKernelDisplay->pKernelHead[headIdx] = pKernelHead;
        pKernelDisplay->pKernelHead[headIdx]->PublicId = headIdx;
    }
    return NV_OK;
}

/*! Destructor for Kernel head */
void
kdispDestructKhead_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    NvU8      headIdx;

    for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
    {
        objDelete(pKernelDisplay->pKernelHead[headIdx]);
        pKernelDisplay->pKernelHead[headIdx] = NULL;
    }
}

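/*!
 * @brief Allocate the internal display-common handles.
 *
 * Creates an internal client/device/subdevice and allocates an
 * NV04_DISPLAY_COMMON object against them, for RM-internal display control
 * calls.
 */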
NV_STATUS
kdispAllocateCommonHandle_IMPL
(
    OBJGPU *pGpu,
    KernelDisplay *pKernelDisplay
)
{
    NV_STATUS rmStatus;
    NvHandle  hClient;
    NvHandle  hDevice;
    NvHandle  hSubdevice;
    NvHandle  hSubscription = NV01_NULL_OBJECT;
    RM_API   *pRmApi        = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);

    rmStatus = rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, &hClient,
                                                    &hDevice, &hSubdevice);
    NV_ASSERT_OR_RETURN(rmStatus == NV_OK, rmStatus);

    rmStatus = pRmApi->AllocWithSecInfo(pRmApi, hClient, hDevice, &hSubscription,
                                        NV04_DISPLAY_COMMON, NULL, 0, RMAPI_ALLOC_FLAGS_NONE,
                                        NULL, &pRmApi->defaultSecInfo);
    NV_ASSERT_OR_RETURN(rmStatus == NV_OK, rmStatus);

    pKernelDisplay->hInternalClient = hClient;
    pKernelDisplay->hInternalDevice = hDevice;
    pKernelDisplay->hInternalSubdevice = hSubdevice;
    pKernelDisplay->hDispCommonHandle = hSubscription;

    return NV_OK;
}

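/*! Free the internal NV04_DISPLAY_COMMON object and its client/device/subdevice handles */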
void
kdispDestroyCommonHandle_IMPL
(
    KernelDisplay *pKernelDisplay
)
{
    NV_STATUS rmStatus;
    RM_API   *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);

    rmStatus = pRmApi->FreeWithSecInfo(pRmApi, pKernelDisplay->hInternalClient,
                                        pKernelDisplay->hDispCommonHandle,
                                        RMAPI_ALLOC_FLAGS_NONE, &pRmApi->defaultSecInfo);
    NV_ASSERT(rmStatus == NV_OK);

    rmapiutilFreeClientAndDeviceHandles(pRmApi, &pKernelDisplay->hInternalClient,
                                        &pKernelDisplay->hInternalDevice,
                                        &pKernelDisplay->hInternalSubdevice);

    pKernelDisplay->hInternalClient = 0;
    pKernelDisplay->hInternalDevice = 0;
    pKernelDisplay->hInternalSubdevice = 0;
    pKernelDisplay->hDispCommonHandle = 0;
}

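/*!
 * @brief StatePreInit for KernelDisplay.
 *
 * Reads the display IP version from physical RM, initializes the disp IP HAL,
 * applies registry overrides, and allocates the internal display-common
 * handles.
 */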
NV_STATUS
kdispStatePreInitLocked_IMPL(OBJGPU        *pGpu,
                             KernelDisplay *pKernelDisplay)
{
    NV_STATUS status;
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
    NvU32   hClient = pGpu->hInternalClient;
    NvU32   hSubdevice = pGpu->hInternalSubdevice;
    NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS ctrlParams;

    if (!gpuFuseSupportsDisplay_HAL(pGpu))
        return NV_ERR_NOT_SUPPORTED;

    status = pRmApi->Control(pRmApi, hClient, hSubdevice,
                             NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION,
                             &ctrlParams, sizeof(ctrlParams));
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_WARNING,
                  "Failed to read display IP version (FUSE disabled), status=0x%x\n",
                  status);
        return status;
    }

    // NOTE: KernelDisplay IpVersion _HAL functions can only be called after this point.
    status = gpuInitDispIpHal(pGpu, ctrlParams.ipVersion);

    kdispInitRegistryOverrides_HAL(pGpu, pKernelDisplay);

    kdispAllocateCommonHandle(pGpu, pKernelDisplay);

    return status;
}

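/*!
 * @brief Gather the backlight method data via ACPI _DSM and forward it to
 *        physical RM as part of BRIGHTC state load.
 */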
NV_STATUS
kdispInitBrightcStateLoad_IMPL(OBJGPU *pGpu,
                               KernelDisplay *pKernelDisplay)
{
    NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *pBrightcInfo = NULL;
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);

    pBrightcInfo = portMemAllocNonPaged(sizeof(NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS));
    if (pBrightcInfo == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "Could not allocate memory for pBrightcInfo\n");
        return NV_ERR_NO_MEMORY;
    }
    portMemSet(pBrightcInfo, 0, sizeof(*pBrightcInfo));

    pBrightcInfo->status = status;
    if ((pKernelDisplay != NULL) && (pKernelDisplay->pStaticInfo->internalDispActiveMask != 0))
    {
        // Fill in the Backlight Method Data.
        pBrightcInfo->backLightDataSize = sizeof(pBrightcInfo->backLightData);
        status = osCallACPI_DSM(pGpu, ACPI_DSM_FUNCTION_CURRENT, NV_ACPI_GENERIC_FUNC_GETBACKLIGHT,
                                (NvU32 *)(pBrightcInfo->backLightData),
                                &pBrightcInfo->backLightDataSize);
        pBrightcInfo->status = status;
    }

    status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
                    NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD,
                    pBrightcInfo, sizeof(*pBrightcInfo));

    portMemFree(pBrightcInfo);

    return status;
}

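/*!
 * @brief Read the EDID for each display in the ACPI DOD list via ACPI DDC
 *        methods and push the resulting table to physical RM.
 */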
NV_STATUS
kdispSetupAcpiEdid_IMPL
(
    OBJGPU        *pGpu,
    KernelDisplay *pKernelDisplay
)
{
    NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS *pEdidParams = NULL;
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
    NV_STATUS status = NV_ERR_GENERIC;
    NvU32 index;

    pEdidParams = portMemAllocNonPaged(sizeof(NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA_PARAMS));
    if (pEdidParams == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "Could not allocate memory for pEdidParams\n");
        return NV_ERR_NO_MEMORY;
    }
    portMemSet(pEdidParams, 0, sizeof(*pEdidParams));

    pEdidParams->tableLen = pGpu->acpiMethodData.dodMethodData.acpiIdListLen / sizeof(NvU32);

    for (index = 0; index < pEdidParams->tableLen; index++)
    {
        pEdidParams->edidTable[index].bufferSize = MAX_EDID_SIZE_FROM_SBIOS;
        status = osCallACPI_DDC(pGpu, pGpu->acpiMethodData.dodMethodData.acpiIdList[index],
                                    pEdidParams->edidTable[index].edidBuffer,
                                    &pEdidParams->edidTable[index].bufferSize, NV_TRUE);
        pEdidParams->edidTable[index].acpiId = pGpu->acpiMethodData.dodMethodData.acpiIdList[index];
        pEdidParams->edidTable[index].status = status;
    }

    status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
                    NV2080_CTRL_CMD_INTERNAL_SET_STATIC_EDID_DATA,
                    pEdidParams, sizeof(*pEdidParams));

    portMemFree(pEdidParams);

    return status;
}

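/*!
 * @brief Apply KernelDisplay registry overrides (currently the bug 2089053
 *        aggressive-vblank serialization WAR).
 */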
void
kdispInitRegistryOverrides_IMPL(OBJGPU        *pGpu,
                                KernelDisplay *pKernelDisplay)
{
    NvU32 data32 = 0;

    if (pKernelDisplay == NULL)
    {
        return;
    }

    if (NV_OK == osReadRegistryDword(pGpu, NV_REG_STR_RM_BUG_2089053_WAR, &data32))
    {
        if (data32 == NV_REG_STR_RM_BUG_2089053_WAR_DISABLE)
        {
            pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANK_ALWAYS, NV_FALSE);
            pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANKS_ONLY_ON_HMD_ACTIVE, NV_FALSE);
        }
        else if (data32 == NV_REG_STR_RM_BUG_2089053_WAR_ENABLE_ALWAYS)
        {
            pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANK_ALWAYS, NV_TRUE);
            pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANKS_ONLY_ON_HMD_ACTIVE, NV_FALSE);
        }
        else if (data32 == NV_REG_STR_RM_BUG_2089053_WAR_ENABLE_ON_HMD_ACTIVE_ONLY)
        {
            pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANKS_ONLY_ON_HMD_ACTIVE, NV_TRUE);
            pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_BUG_2089053_SERIALIZE_AGGRESSIVE_VBLANK_ALWAYS, NV_FALSE);
        }
    }
}

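/*!
 * @brief StateInit for KernelDisplay.
 *
 * Fetches the display static info from physical RM, runs BRIGHTC state load,
 * sets up ACPI EDID data, initializes display instance memory, constructs
 * external devices, and imports IMP init data when IMP is enabled.
 */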
NV_STATUS
kdispStateInitLocked_IMPL(OBJGPU        *pGpu,
                          KernelDisplay *pKernelDisplay)
{
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
    NV_STATUS status = NV_OK;
    KernelDisplayStaticInfo *pStaticInfo;

    pStaticInfo = portMemAllocNonPaged(sizeof(KernelDisplayStaticInfo));
    if (pStaticInfo == NULL)
    {
        NV_PRINTF(LEVEL_ERROR, "Could not allocate KernelDisplayStaticInfo\n");
        status = NV_ERR_NO_MEMORY;
        goto exit;
    }
    portMemSet(pStaticInfo, 0, sizeof(*pStaticInfo));

    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
        pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
                        NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
                        pStaticInfo, sizeof(*pStaticInfo)),
        exit);

    pKernelDisplay->pStaticInfo = pStaticInfo;
    pKernelDisplay->numHeads = pStaticInfo->numHeads;
    pStaticInfo = NULL;

    // Initiate Brightc module state load
    status = kdispInitBrightcStateLoad_HAL(pGpu, pKernelDisplay);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "rmapi control call for brightc state load failed\n");
        goto exit;
    }

    // Set up ACPI DDC data in Physical RM for future usage
    status = kdispSetupAcpiEdid_HAL(pGpu, pKernelDisplay);
    if (status != NV_OK)
    {
        NV_PRINTF(LEVEL_WARNING, "rmapi control call for acpi child device init failed\n");
        goto exit;
    }

    if (pKernelDisplay->pInst != NULL)
    {
        NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
                instmemStateInitLocked(pGpu, pKernelDisplay->pInst), exit);
    }

    // Initialize any external daughterboards that might be present.

    pGpu->i2cPortForExtdev = NV402C_CTRL_NUM_I2C_PORTS;

    if (pKernelDisplay->pStaticInfo->i2cPort == NV402C_CTRL_NUM_I2C_PORTS)
    {
        NV_PRINTF(LEVEL_INFO, "Could not get a valid I2C port for the external device, or no external device is present\n");
    }
    else
    {
        pGpu->i2cPortForExtdev = pKernelDisplay->pStaticInfo->i2cPort;

        if (NV_OK != gpuExtdevConstruct_HAL(pGpu))
        {
            NV_PRINTF(LEVEL_INFO, "gpuExtdevConstruct() failed or not supported\n");
        }
    }

    if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IMP_ENABLE))
    {
        // NOTE: Fills in the IMP parameters and populates them in the disp object on Tegra
        NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
                kdispImportImpData_HAL(pKernelDisplay), exit);
    }

exit:
    portMemFree(pStaticInfo);

    return status;
}

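/*!
 * @brief State destroy for KernelDisplay.
 *
 * Tears down external devices and display instance memory state, frees the
 * static info, and destroys the internal display-common handles.
 */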
void
kdispStateDestroy_IMPL(OBJGPU *pGpu,
                       KernelDisplay *pKernelDisplay)
{
    extdevDestroy(pGpu);

    if (pKernelDisplay->pInst != NULL)
    {
        instmemStateDestroy(pGpu, pKernelDisplay->pInst);
    }

    portMemFree((void*) pKernelDisplay->pStaticInfo);
    pKernelDisplay->pStaticInfo = NULL;

    kdispDestroyCommonHandle(pKernelDisplay);
}

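/*! State load for KernelDisplay: load display instance memory state, if present */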
NV_STATUS
kdispStateLoad_IMPL
(
    OBJGPU        *pGpu,
    KernelDisplay *pKernelDisplay,
    NvU32         flags
)
{
    NV_STATUS status = NV_OK;

    if (pKernelDisplay->pInst != NULL)
        status = instmemStateLoad(pGpu, pKernelDisplay->pInst, flags);

    return status;
}

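/*! State unload for KernelDisplay: unload display instance memory state, if present */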
NV_STATUS
kdispStateUnload_IMPL
(
    OBJGPU        *pGpu,
    KernelDisplay *pKernelDisplay,
    NvU32         flags
)
{
    NV_STATUS status = NV_OK;

    if (pKernelDisplay->pInst != NULL)
        status = instmemStateUnload(pGpu, pKernelDisplay->pInst, flags);

    return status;
}

/*! Get and populate IMP init data for Tegra */
NV_STATUS
kdispImportImpData_IMPL(KernelDisplay *pKernelDisplay)
{
    OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay);
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
    NvU32   hClient = pGpu->hInternalClient;
    NvU32   hSubdevice = pGpu->hInternalSubdevice;
    NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS params;
    NvU32   simulationMode;

    //
    // FPGA has different latency characteristics, and the latency models that
    // the current IMP code uses for silicon will not work for FPGA, so keep
    // IMP disabled by default on Tegra FPGA.
    //
    simulationMode = osGetSimulationMode();
    if (simulationMode == NV_SIM_MODE_TEGRA_FPGA)
    {
        pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_IMP_ENABLE, NV_FALSE);
        return NV_OK;
    }

    NV_ASSERT_OK_OR_RETURN(osTegraSocGetImpImportData(&params.tegraImpImportData));

    NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, hClient, hSubdevice,
                           NV2080_CTRL_CMD_INTERNAL_DISPLAY_SET_IMP_INIT_INFO,
                           &params, sizeof(params)));

    return NV_OK;
}

/*! Get internal enum equivalent of the HW class number */
NV_STATUS
kdispGetIntChnClsForHwCls_IMPL
(
    KernelDisplay *pKernelDisplay,
    NvU32          hwClass,
    DISPCHNCLASS  *pDispChnClass
)
{
    // sanity check
    if (pDispChnClass == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    switch (hwClass)
    {
        case NV917A_CURSOR_CHANNEL_PIO:
        case NVC37A_CURSOR_IMM_CHANNEL_PIO:
        case NVC57A_CURSOR_IMM_CHANNEL_PIO:
        case NVC67A_CURSOR_IMM_CHANNEL_PIO:
            *pDispChnClass = dispChnClass_Curs;
            break;

        case NV917B_OVERLAY_IMM_CHANNEL_PIO:
            *pDispChnClass = dispChnClass_Ovim;
            break;

        case NV927C_BASE_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Base;
            break;

        case NV947D_CORE_CHANNEL_DMA:
        case NV957D_CORE_CHANNEL_DMA:
        case NV977D_CORE_CHANNEL_DMA:
        case NV987D_CORE_CHANNEL_DMA:
        case NVC37D_CORE_CHANNEL_DMA:
        case NVC57D_CORE_CHANNEL_DMA:
        case NVC67D_CORE_CHANNEL_DMA:
        case NVC77D_CORE_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Core;
            break;

        case NV917E_OVERLAY_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Ovly;
            break;

        case NVC37B_WINDOW_IMM_CHANNEL_DMA:
        case NVC57B_WINDOW_IMM_CHANNEL_DMA:
        case NVC67B_WINDOW_IMM_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Winim;
            break;

        case NVC37E_WINDOW_CHANNEL_DMA:
        case NVC57E_WINDOW_CHANNEL_DMA:
        case NVC67E_WINDOW_CHANNEL_DMA:
            *pDispChnClass = dispChnClass_Win;
            break;

        case NVC77F_ANY_CHANNEL_DMA:
            // The ANY channel is a kernel-only channel; assert if this is physical RM.
            NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_INVALID_CHANNEL);
            *pDispChnClass = dispChnClass_Any;
            break;

        default:
            NV_PRINTF(LEVEL_ERROR, "Unknown channel class 0x%x\n", hwClass);
            return NV_ERR_INVALID_ARGUMENT;
    }

    return NV_OK;
}

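/*!
 * @brief Notify DisplayApi objects on this GPU of a display event.
 *
 * Walks all notifier shares, fills notifier memory where registered, pings
 * events bound to the matching subdevice, and honors each client's
 * notification action (disabled or single-shot).
 */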
void
kdispNotifyEvent_IMPL
(
    OBJGPU        *pGpu,
    KernelDisplay *pKernelDisplay,
    NvU32          notifyIndex,
    void          *pNotifyParams,
    NvU32          notifyParamsSize,
    NvV32          info32,
    NvV16          info16
)
{
    PEVENTNOTIFICATION pEventNotifications;
    NvU32             *pNotifyActions;
    NvU32              disableCmd, singleCmd;
    NvU32              subDeviceInst;
    RS_SHARE_ITERATOR  it = serverutilShareIter(classId(NotifShare));

    // search notifiers with events hooked up for this GPU
    while (serverutilShareIterNext(&it))
    {
        RsShared   *pShared = it.pShared;
        DisplayApi *pDisplayApi;
        INotifier  *pNotifier;
        Device     *pDevice;
        NotifShare *pNotifierShare = dynamicCast(pShared, NotifShare);

        if ((pNotifierShare == NULL) || (pNotifierShare->pNotifier == NULL))
            continue;

        pNotifier = pNotifierShare->pNotifier;
        pDisplayApi = dynamicCast(pNotifier, DisplayApi);

        // Only consider notifiers owned by DisplayApi objects
        if (pDisplayApi == NULL)
            continue;

        pDevice = dynamicCast(RES_GET_REF(pDisplayApi)->pParentRef->pResource, Device);

        // Only notify matching GPUs
        if (GPU_RES_GET_GPU(pDevice) != pGpu)
            continue;

        gpuSetThreadBcState(GPU_RES_GET_GPU(pDevice), pDisplayApi->bBcResource);

        disableCmd = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
        singleCmd = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE;

        // get notify actions list
        subDeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
        pNotifyActions = pDisplayApi->pNotifyActions[subDeviceInst];
        if (pNotifyActions == NULL)
        {
            continue;
        }

        // get event list
        pEventNotifications = inotifyGetNotificationList(pNotifier);
        if (pEventNotifications == NULL)
        {
            continue;
        }

        // skip if client not "listening" to events of this type
        if (pNotifyActions[notifyIndex] == disableCmd)
        {
            continue;
        }

        if (pDisplayApi->hNotifierMemory != NV01_NULL_OBJECT &&
            pDisplayApi->pNotifierMemory != NULL)
        {
            notifyFillNotifierMemory(pGpu, pDisplayApi->pNotifierMemory, info32, info16,
                                     NV5070_NOTIFICATION_STATUS_DONE_SUCCESS, notifyIndex);
        }

        // ping events bound to subdevice associated with pGpu
        osEventNotification(pGpu, pEventNotifications,
                            (notifyIndex | OS_EVENT_NOTIFICATION_INDEX_MATCH_SUBDEV),
                            pNotifyParams, notifyParamsSize);

        // reset if single shot notify action
        if (pNotifyActions[notifyIndex] == singleCmd)
        {
            pNotifyActions[notifyIndex] = disableCmd;
        }
    }
}

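/*! Set the WAR flag that purges satellite channels when the core channel is freed */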
void
kdispSetWarPurgeSatellitesOnCoreFree_IMPL
(
    KernelDisplay *pKernelDisplay,
    NvBool value
)
{
    pKernelDisplay->bWarPurgeSatellitesOnCoreFree = value;
}

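/*!
 * @brief Register or deregister an RG line callback for a head and RG
 *        interrupt line.
 *
 * A slot may only be filled when it is empty, and may only be cleared by the
 * callback that currently occupies it; any other transition is an error.
 */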
NV_STATUS
kdispRegisterRgLineCallback_IMPL
(
    KernelDisplay *pKernelDisplay,
    RgLineCallback *pRgLineCallback,
    NvU32 head,
    NvU32 rgIntrLine,
    NvBool bEnable
)
{
    NV_ASSERT_OR_RETURN(head < OBJ_MAX_HEADS, NV_ERR_INVALID_ARGUMENT);
    NV_ASSERT_OR_RETURN(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD, NV_ERR_INVALID_ARGUMENT);

    RgLineCallback **slot = &pKernelDisplay->rgLineCallbackPerHead[head][rgIntrLine];

    if (bEnable && *slot == NULL)
    {
        *slot = pRgLineCallback;
    }
    else if (!bEnable && *slot == pRgLineCallback)
    {
        *slot = NULL;
    }
    else
    {
        //
        // OBJDISP is the authority for *allocating* these "slots";
        // KernelDisplay trusts it as an allocator.
        // If we try to register a callback in an existing slot, or free an
        // empty slot, it means OBJDISP has created conflicting allocations or
        // has allowed a double-free. (Or RgLineCallback has provided invalid
        // parameters.)
        //
        NV_ASSERT_FAILED("Invalid KernelDisplay state for RgLineCallback");
        return NV_ERR_INVALID_STATE;
    }

    return NV_OK;
}

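/*! Invoke the RG line callback registered for the given head and RG interrupt line, if any */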
void
kdispInvokeRgLineCallback_KERNEL
(
    KernelDisplay *pKernelDisplay,
    NvU32 head,
    NvU32 rgIntrLine,
    NvBool bIsIrqlIsr
)
{
    NV_ASSERT_OR_RETURN_VOID(head < OBJ_MAX_HEADS);
    NV_ASSERT_OR_RETURN_VOID(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD);

    RgLineCallback *pCallbackObject = pKernelDisplay->rgLineCallbackPerHead[head][rgIntrLine];

    if (pCallbackObject != NULL)
    {
        rglcbInvoke(pCallbackObject, bIsIrqlIsr);
    }
    else if (IS_GSP_CLIENT(ENG_GET_GPU(pKernelDisplay)))
    {
        //
        // For the offloaded RM case, getting a callback invocation without a registered callback
        // could happen during or after deregistration: there might already have been an event in
        // the queue by the time we asked physical RM to deconfigure the interrupt.
        //
        // Because this could lead to an A-B-A situation where a new callback is registered to the
        // same slot and invoked in place of the old callback, we must assert against this case.
        // To avoid this, RgLineCallback must drain the client RM event queue after deconfiguring
        // the interrupt and before calling kdispRegisterRgLineCallback to deregister the callback.
        //
        NV_ASSERT_FAILED("got RgLineCallback invocation for null callback");
    }
    else
    {
        //
        // For the monolithic RM case, getting a callback invocation without a registered callback
        // could happen during registration: after configuring hardware for the interrupt, but
        // before registering the callback with KernelDisplay, the interrupt could be handled.
        //
        // This is not a bug in and of itself as it is harmless and expected. On the other hand we
        // would not expect to see this warning in the log more than a few times per registration,
        // e.g. if it were printed for every single interrupt, as the callback ought to be fully
        // registered before excessively many interrupts are handled.
        //
        NV_PRINTF(LEVEL_WARNING, "got RgLineCallback invocation for null callback\n");
    }
}

#define HOTPLUG_PROFILE 0

#if HOTPLUG_PROFILE

    #define ISR_TSTAMP_SIZE 18000 /* 5 minutes (5*60Hz*60) */

    NvU32 timeStampIndexISR = ISR_TSTAMP_SIZE-1;

    tmr_tstamp_u timeStampStartISR[ISR_TSTAMP_SIZE];
    tmr_tstamp_u timeStampDeltaISR[ISR_TSTAMP_SIZE];

#endif

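/*!
 * @brief Service pending vblank interrupts.
 *
 * Determines which heads have a vblank pending (or assumes the given headmask
 * in immediate mode), increments the per-head vblank counters, processes the
 * queued vblank callbacks for the requested latency classes, and resets the
 * interrupts that have been fully handled.
 *
 * @param[in] pGpu           OBJGPU pointer
 * @param[in] pKernelDisplay KernelDisplay pointer
 * @param[in] headmask       Mask of heads to service; 0 means all heads
 * @param[in] state          VBLANK_STATE_PROCESS_* flags
 * @param[in] pThreadState   Thread state of the caller
 */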
void
kdispServiceVblank_KERNEL
(
    OBJGPU            *pGpu,
    KernelDisplay     *pKernelDisplay,
    NvU32              headmask,
    NvU32              state,
    THREAD_STATE_NODE *pThreadState
)
{
    NvU32      pending, check_pending, pending_checked;
    NvU32      Head;
    NvU32      maskNonEmptyQueues[OBJ_MAX_HEADS];  // array of masks of VBLANK_STATE_PROCESS_XXX_LATENCY bits, indicating which queues are non-empty
    NvU32      unionNonEmptyQueues = 0;            // mask of VBLANK_STATE_PROCESS_XXX_LATENCY bits, union of queue states of all heads w/ pending vblank ints
    NvU32      Count = 0;
    NvU32      i, skippedcallbacks;
    NvU32      maskCallbacksStillPending = 0;
    KernelHead    *pKernelHead = NULL;

#if HOTPLUG_PROFILE
    OBJTMR    *pTmr;
    pTmr = GPU_GET_TIMER(pGpu);
    if (++timeStampIndexISR >= ISR_TSTAMP_SIZE)
        timeStampIndexISR = 0;

    tmrGetCurrentTime(pTmr, &timeStampStartISR[timeStampIndexISR].time32.hi, &timeStampStartISR[timeStampIndexISR].time32.lo);

    // For the ISR we want to know how much time has passed since the last ISR.
    if (timeStampIndexISR)
    {
        NvU64 temp64;

        temp64 = timeStampStartISR[timeStampIndexISR].time64;
        temp64 -= timeStampStartISR[timeStampIndexISR-1].time64;

        timeStampDeltaISR[timeStampIndexISR].time64 = temp64;
    }
#endif

    // If the caller failed to specify which queues, assume they wanted all of them
    if (!(state & VBLANK_STATE_PROCESS_ALL_CALLBACKS))
    {
        state |= VBLANK_STATE_PROCESS_ALL_CALLBACKS;
    }

    // If the headmask is 0, we should process all heads
    if (headmask == 0)
    {
        headmask = 0xFFFFFFFF;
    }

    //
    // If we are being asked to process the callbacks now, regardless of the true irqspending,
    // we force the pending mask to the head mask passed in.
    //
    if (state & VBLANK_STATE_PROCESS_IMMEDIATE)
    {
        pending = headmask;
    }
    else
    {
        // We're here because at least one of the PCRTC bits MAY be pending.
        pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
    }

    // No sense in doing anything if there is nothing pending.
    if (pending == 0)
    {
        return;
    }

    //
    // We want to check for pending service now and then we check again each
    // time through the loop. Keep these separate.
    //
    check_pending = pending;

    // We have not checked anything yet
    pending_checked = 0;

    // Start with head 0
    Head = 0;

    //
    // We keep scanning all supported heads, and if we have something pending,
    // check the associated queues
    //
    while (pending_checked != pending)
    {
        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);

        // Only process this head if its interrupt is pending and not yet checked
        if ((headmask & check_pending & ~pending_checked) & NVBIT(Head))
        {
            // Track that we have now checked this head
            pending_checked |= NVBIT(Head);

            // If our queues are empty, we can bail early
            maskNonEmptyQueues[Head]  = kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, state, NULL);
            unionNonEmptyQueues      |= maskNonEmptyQueues[Head];

            // Determine which callback states the caller asked us to skip.
            skippedcallbacks = ((state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) ^ VBLANK_STATE_PROCESS_ALL_CALLBACKS);
            skippedcallbacks |= (state & (VBLANK_STATE_PROCESS_CALLED_FROM_ISR | VBLANK_STATE_PROCESS_IMMEDIATE));

            // Now let's see if there are callbacks pending in the skipped states
            maskCallbacksStillPending |= NVBIT(Head) * !!kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, skippedcallbacks, NULL);
        }

        // Don't check for new interrupts if we are in immediate mode
        if (!(state & VBLANK_STATE_PROCESS_IMMEDIATE))
        {
            pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
        }

        // if there was a change in the pending state, we should recheck everything
        if (check_pending != pending)
        {
            // We need to recheck heads that were not pending before
            check_pending = pending;
            Head = 0;
        }
        else
        {
            // Nothing changed, so move on to the next head
            Head++;
        }

        // Make sure we don't waste time on heads that don't exist
        if (Head >= OBJ_MAX_HEADS)
        {
            break;
        }
    }

    if (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR)
    {
        // Store which heads have pending vblank interrupts, for comparison at the next DPC time.
        pKernelDisplay->isrVblankHeads = pending;
    }

    // increment the per-head vblank total counter, for any head with a pending vblank intr
    for (Head = 0; Head < OBJ_MAX_HEADS; Head++)
    {
        // Move on if this crtc's interrupt isn't pending...
        if ((pending & NVBIT(Head)) == 0)
        {
            continue;
        }

        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);

        // Increment vblank counters, as appropriate.

        // Track the fact that we passed through here. This keeps the RC manager happy.
        Count = kheadGetVblankTotalCounter_HAL(pKernelHead) + 1;
        kheadSetVblankTotalCounter_HAL(pKernelHead, Count);

        //
        // Update the vblank counter if we are single chip or multichip master.
        // We now have two queues, so we need to have two vblank counters.
        //

        // did they ask for processing of low-latency work?
        if (state & VBLANK_STATE_PROCESS_LOW_LATENCY /* & maskNonEmptyQueues[Head]*/)
        {
            //
            // don't let the DPC thread increment the low-latency counter.
            // otherwise, the counter will frequently increment at double the
            // expected rate, breaking things like swapInterval.
            //
            // XXX actually, there is one case where it would be OK for the DPC
            // thread to increment this counter:  if the DPC thread could ascertain
            // that 'pending & NVBIT(Head)' represented a new interrupt event, and
            // not simply the one that the ISR left uncleared in PCRTC_INTR_0, for
            // the purpose of causing this DPC thread to get queued.
            // Not sure how to do that.
            //
            if (!(state & VBLANK_STATE_PROCESS_CALLED_FROM_DPC) || (pending & NVBIT(Head) & ~pKernelDisplay->isrVblankHeads))
            {
                // either we were called from the ISR, or vblank is asserted in DPC when it wasn't in the ISR

                // low latency queue requested, and this isn't a DPC thread.
                Count = kheadGetVblankLowLatencyCounter_HAL(pKernelHead) + 1;
                kheadSetVblankLowLatencyCounter_HAL(pKernelHead, Count);
            }
        }

        // did they ask for processing of normal-latency work?
        if (state & VBLANK_STATE_PROCESS_NORMAL_LATENCY /* & maskNonEmptyQueues[Head]*/)
        {
            // processing of the normal latency queue requested
            Count = kheadGetVblankNormLatencyCounter_HAL(pKernelHead) + 1;
            kheadSetVblankNormLatencyCounter_HAL(pKernelHead, Count);
        }
    }

    //
    // If we have nothing to process (no work to do in queue),
    // we can bail early. We got here for some reason, so make
    // sure we clear the interrupts.
    //
    if (!unionNonEmptyQueues)
    {
        // all queues (belonging to heads with pending vblank ints) are empty.
        if (IS_GSP_CLIENT(pGpu))
        {
            kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState);
        }
        return;
    }

    //
    // Although we have separate handlers for each head, attempt to process all
    // interrupting heads now. What about DPCs already scheduled?
    //
    for (Head = 0; Head < OBJ_MAX_HEADS; Head++)
    {
        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
        // Move on if this crtc's interrupt isn't pending...
        if ((pending & NVBIT(Head)) == 0)
        {
            continue;
        }

        // Process the callback list for this Head...
        kheadProcessVblankCallbacks(pGpu, pKernelHead, state);
    }

    //
    // If there are still callbacks pending, and we are in an ISR,
    // then don't clear PCRTC_INTR; XXXar why would we *ever* want
    // to clear PCRTC_INTR if there are still things pending?
    //
    if ((maskCallbacksStillPending) &&
        (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR))
    {
        //
        // There are still callbacks pending; don't clear
        // PCRTC_INTR yet. The expectation is that the OS layer
        // will see that interrupts are still pending and queue a
        // DPC/BottomHalf/whatever to service the rest of the
        // vblank callback queues
        //
        for (i = 0; i < OBJ_MAX_HEADS; i++)
        {
            pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
            if (IS_GSP_CLIENT(pGpu))
            {
                kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState);
            }
        }
    }
    else
    {
        // Reset the vblank intrs we've handled, and don't reset the vblank intrs we haven't.
        for (i = 0; i < OBJ_MAX_HEADS; i++)
        {
            pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
            if (pending & NVBIT(i) & ~maskCallbacksStillPending)
            {
                kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState);
            }
        }
    }

    return;
}

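/*! Return a mask of heads that have a vblank interrupt pending */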
NvU32
kdispReadPendingVblank_IMPL
(
    OBJGPU            *pGpu,
    KernelDisplay     *pKernelDisplay,
    THREAD_STATE_NODE *pThreadState
)
{
    KernelHead *pKernelHead;
    NvU32       headIdx, pending = 0;

    for (headIdx = 0; headIdx < kdispGetNumHeads(pKernelDisplay); headIdx++)
    {
        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, headIdx);

        if (kheadReadPendingVblank_HAL(pGpu, pKernelHead, NULL, pThreadState))
        {
            pending |= NVBIT(headIdx);
        }
    }

    return pending;
}

/*!
 * @brief Provides an opportunity to register some IntrService during intrStateInit.
 */
void
kdispRegisterIntrService_IMPL
(
    OBJGPU *pGpu,
    KernelDisplay *pKernelDisplay,
    IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX]
)
{
    NvU32 engineIdx = MC_ENGINE_IDX_DISP;
    NV_ASSERT(pRecords[engineIdx].pInterruptService == NULL);
    pRecords[engineIdx].pInterruptService = staticCast(pKernelDisplay, IntrService);
}

/*!
 * @brief Route modeset start/end notification to kernel RM
 *
 * Physical RM is expected to send a "start" notification at the beginning of
 * every display modeset (supervisor interrupt sequence), and an "end"
 * notification at the end.  However, if physical RM detects back-to-back
 * modesets, the intervening "end" notification MAY be skipped; in this case,
 * the "start" notification for the next modeset serves as the "end"
 * notification for the previous modeset.
 *
 * Kernel RM will use the notification to update the BW allocation for display.
 * The ICC call that is required to update the BW allocation cannot be made
 * from physical RM.
 *
 * @param[in] pKernelDisplay                KernelDisplay pointer
 * @param[in] bModesetStart                 NV_TRUE -> start of modeset;
 *                                          NV_FALSE -> end of modeset
 * @param[in] minRequiredIsoBandwidthKBPS   Min ISO BW required by IMP (KB/sec)
 * @param[in] minRequiredFloorBandwidthKBPS Min dramclk freq * pipe width (KB/sec)
 */
void
kdispInvokeDisplayModesetCallback_KERNEL
(
    KernelDisplay *pKernelDisplay,
    NvBool bModesetStart,
    NvU32 minRequiredIsoBandwidthKBPS,
    NvU32 minRequiredFloorBandwidthKBPS
)
{
    NV_STATUS   status;

    NV_PRINTF(LEVEL_INFO,
              "Kernel RM received \"%s of modeset\" notification "
              "(minRequiredIsoBandwidthKBPS = %u, minRequiredFloorBandwidthKBPS = %u)\n",
              bModesetStart ? "start" : "end",
              minRequiredIsoBandwidthKBPS,
              minRequiredFloorBandwidthKBPS);

    OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay);
    status =
        kdispArbAndAllocDisplayBandwidth_HAL(pGpu,
                                             pKernelDisplay,
                                             DISPLAY_ICC_BW_CLIENT_RM,
                                             minRequiredIsoBandwidthKBPS,
                                             minRequiredFloorBandwidthKBPS);
    //
    // The modeset cannot be aborted, so, if there is an error, no recovery
    // is possible.
    //
    NV_ASSERT_OK(status);
}