1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 
25 
26 #include "dp/nvdp-connector.h"
27 #include "dp/nvdp-timer.h"
28 #include "dp/nvdp-device.h"
29 #include "nvkms-rm.h"
30 #include "nvkms-rmapi.h"
31 #include "g_nvkms-evo-states.h"
32 #include "nvkms-event.h"
33 #include "nvkms-dpy.h"
34 #include "nvkms-types.h"
35 #include "nvkms-evo.h"
36 #include "nvkms-dma.h"
37 #include "nvkms-utils.h"
38 #include "nvkms-private.h"
39 #include "nvkms-modeset.h"
40 #include "nvkms-surface.h"
41 #include "nvkms-vrr.h"
42 
43 #include "nvkms-push.h"
44 #include "nvkms-difr.h"
45 
46 #include "class/cl0002.h" /* NV01_CONTEXT_DMA */
47 #include "class/cl0005.h" /* NV01_EVENT */
48 
49 #include <class/cl0070.h> // NV01_MEMORY_VIRTUAL
50 #include <class/cl0073.h> /* NV04_DISPLAY_COMMON */
51 #include <class/cl003e.h> /* NV01_MEMORY_SYSTEM */
52 #include <class/cl0076.h> /* NV01_MEMORY_FRAMEBUFFER_CONSOLE */
53 #include <class/cl0080.h> /* NV01_DEVICE_0 */
54 #include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
55 #include <class/cl2080.h> /* NV20_SUBDEVICE_0 */
56 
57 #include "class/clc37b.h" /* NVC37B_WINDOW_IMM_CHANNEL_DMA */
58 #include "class/clc37e.h" /* NVC37E_WINDOW_CHANNEL_DMA */
59 #include "class/clc57b.h" /* NVC57B_WINDOW_IMM_CHANNEL_DMA */
60 #include "class/clc57e.h" /* NVC57E_WINDOW_CHANNEL_DMA */
61 #include "class/clc67b.h" /* NVC67B_WINDOW_IMM_CHANNEL_DMA */
62 #include "class/clc67e.h" /* NVC67E_WINDOW_CHANNEL_DMA */
63 
64 #include "class/cl917b.h" /* NV917B_OVERLAY_IMM_CHANNEL_PIO */
65 
66 #include "class/cl927c.h" /* NV927C_BASE_CHANNEL_DMA */
67 
68 #include "class/cl917e.h" /* NV917E_OVERLAY_CHANNEL_DMA */
69 
70 #include <ctrl/ctrl0000/ctrl0000gpu.h> /* NV0000_CTRL_GPU_* */
71 #include <ctrl/ctrl0002.h> /* NV0002_CTRL_CMD_BIND_CONTEXTDMA */
72 #include <ctrl/ctrl0073/ctrl0073dfp.h> /* NV0073_CTRL_CMD_DFP_GET_INFO */
73 #include <ctrl/ctrl0073/ctrl0073dp.h> /* NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID */
74 #include <ctrl/ctrl0073/ctrl0073specific.h> /* NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO */
75 #include <ctrl/ctrl0073/ctrl0073system.h> /* NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED */
76 #include <ctrl/ctrl0080/ctrl0080gpu.h> /* NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER */
77 #include <ctrl/ctrl0080/ctrl0080gr.h> /* NV0080_CTRL_CMD_GR_GET_CAPS_V2 */
78 #include <ctrl/ctrl0080/ctrl0080unix.h> /* NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH */
79 #include <ctrl/ctrl2080/ctrl2080bios.h> /* NV2080_CTRL_CMD_BIOS_GET_NBSI */
80 #include <ctrl/ctrl2080/ctrl2080bus.h> /* NV2080_CTRL_CMD_BUS_GET_INFO */
81 #include <ctrl/ctrl2080/ctrl2080event.h> /* NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION */
82 #include <ctrl/ctrl2080/ctrl2080tmr.h> /* NV2080_CTRL_CMD_TIMER_GET_TIME */
83 #include <ctrl/ctrl2080/ctrl2080unix.h> /* NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT */
84 #include <ctrl/ctrl5070/ctrl5070chnc.h> /* NV5070_CTRL_CMD_SET_RMFREE_FLAGS */
85 #include <ctrl/ctrl5070/ctrl5070or.h> /* NV5070_CTRL_CMD_SET_DAC_PWR */
86 
87 #include "nvos.h"
88 
89 #include "displayport/dpcd.h"
90 
91 #define NVKMS_SYNCPT_ID_INVALID     (0xFFFFFFFF)
92 
93 static NvU32 GetLegacyConnectorType(NVDispEvoPtr pDispEvo, NVDpyId dpyId);
94 
95 static void RmFreeEvoChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel);
96 
97 static NvBool EngineListCheckOneSubdevice(const NVEvoSubDeviceRec *pSubDevice,
98                                           NvU32 engineType)
99 {
100     const NvU32 *engines = pSubDevice->supportedEngines;
101     int i;
102 
103     for (i = 0; i < pSubDevice->numEngines; i++) {
104         if (engines[i] == engineType) {
105             return TRUE;
106         }
107     }
108 
109     return FALSE;
110 }
111 
112 static NvBool EngineListCheck(const NVDevEvoRec *pDevEvo, NvU32 engineType)
113 {
114     int sd;
115 
116     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
117         if (!EngineListCheckOneSubdevice(pDevEvo->pSubDevices[sd],
118                                          engineType)) {
119             return FALSE;
120         }
121     }
122 
123     return TRUE;
124 }
125 
126 static NvBool QueryGpuCapabilities(NVDevEvoPtr pDevEvo)
127 {
128     NvBool ctxDmaCoherentAllowedDev = FALSE;
129     NvBool ctxDmaNonCoherentAllowedDev = FALSE;
130     NvU32 ret, sd;
131 
132     NV0000_CTRL_GPU_GET_ID_INFO_PARAMS idInfoParams = { 0 };
133 
134     pDevEvo->isHeadSurfaceSupported = FALSE;
135 
136     if (EngineListCheck(pDevEvo, NV2080_ENGINE_TYPE_GRAPHICS)) {
137         NV0080_CTRL_GR_GET_CAPS_V2_PARAMS grCaps = { 0 };
138 
139         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
140                              pDevEvo->deviceHandle,
141                              NV0080_CTRL_CMD_GR_GET_CAPS_V2,
142                              &grCaps,
143                              sizeof(grCaps));
144 
145         if (ret != NVOS_STATUS_SUCCESS) {
146             return FALSE;
147         }
148 
149         /* Assume headSurface is supported if there is a graphics engine
150          * and headSurface support is included in the NVKMS build.
151          */
152         pDevEvo->isHeadSurfaceSupported = NVKMS_INCLUDE_HEADSURFACE;
153     }
154 
155     /* ctxDma{,Non}CoherentAllowed */
156 
157     /* simulationType */
158 
159     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
160 
161         NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS simParams = { 0 };
162 
163         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
164                              pDevEvo->pSubDevices[sd]->handle,
165                              NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO,
166                              &simParams,
167                              sizeof(simParams));
168 
169         if (ret != NVOS_STATUS_SUCCESS) {
170             simParams.type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE;
171         }
172         if (sd == 0) {
173             pDevEvo->simulationType = simParams.type;
174         }
175         nvAssert(pDevEvo->simulationType == simParams.type);
176     }
177 
178     /* mobile */
179 
180     idInfoParams.gpuId = pDevEvo->pSubDevices[0]->gpuId;
181 
182     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
183                          nvEvoGlobal.clientHandle,
184                          NV0000_CTRL_CMD_GPU_GET_ID_INFO,
185                          &idInfoParams, sizeof(idInfoParams));
186 
187     if (ret != NVOS_STATUS_SUCCESS) {
188         pDevEvo->mobile = FALSE;
189         pDevEvo->isSOCDisplay = FALSE;
190     } else {
191         pDevEvo->mobile =
192             FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _MOBILE, _TRUE,
193                          idInfoParams.gpuFlags);
194 
195         pDevEvo->isSOCDisplay =
196             FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE,
197                          idInfoParams.gpuFlags);
198     }
199 
200     /* TODO: This cap bit should be queried from RM */
201     pDevEvo->requiresAllAllocationsInSysmem = pDevEvo->isSOCDisplay;
202 
203     /* ctxDma{,Non}CoherentAllowed */
204 
205     if (!pDevEvo->isSOCDisplay) {
206         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
207             NV2080_CTRL_BUS_GET_INFO_PARAMS busParams = { 0 };
208             struct {
209                 NV2080_CTRL_BUS_INFO coherentFlags;
210                 NV2080_CTRL_BUS_INFO nonCoherentFlags;
211             } busInfoList = { { 0 } };
212 
213             NvBool ctxDmaCoherentAllowed;
214             NvBool ctxDmaNonCoherentAllowed;
215 
216             busInfoList.coherentFlags.index =
217                 NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS;
218             busInfoList.nonCoherentFlags.index =
219                 NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS;
220 
221             busParams.busInfoListSize =
222                 sizeof(busInfoList) / sizeof(busInfoList.coherentFlags);
223             busParams.busInfoList = NV_PTR_TO_NvP64(&busInfoList);
224 
225             ret = nvRmApiControl(nvEvoGlobal.clientHandle,
226                                  pDevEvo->pSubDevices[sd]->handle,
227                                  NV2080_CTRL_CMD_BUS_GET_INFO,
228                                  &busParams, sizeof(busParams));
229 
230             if (ret != NVOS_STATUS_SUCCESS) {
231                 return FALSE;
232             }
233 
234             ctxDmaCoherentAllowed =
235                 FLD_TEST_DRF(2080_CTRL_BUS_INFO, _COHERENT_DMA_FLAGS,
236                              _CTXDMA, _TRUE, busInfoList.coherentFlags.data);
237 
238             ctxDmaNonCoherentAllowed =
239                 FLD_TEST_DRF(2080_CTRL_BUS_INFO, _NONCOHERENT_DMA_FLAGS,
240                              _CTXDMA, _TRUE, busInfoList.nonCoherentFlags.data);
241 
242             if (sd == 0) {
243                 ctxDmaCoherentAllowedDev = ctxDmaCoherentAllowed;
244                 ctxDmaNonCoherentAllowedDev = ctxDmaNonCoherentAllowed;
245             } else {
246                 ctxDmaCoherentAllowedDev =
247                     ctxDmaCoherentAllowedDev && ctxDmaCoherentAllowed;
248                 ctxDmaNonCoherentAllowedDev =
249                     ctxDmaNonCoherentAllowedDev && ctxDmaNonCoherentAllowed;
250             }
251         }
252         nvAssert(ctxDmaCoherentAllowedDev || ctxDmaNonCoherentAllowedDev);
253 
254         if (ctxDmaCoherentAllowedDev) {
255             pDevEvo->isoIOCoherencyModes.coherent = TRUE;
256             pDevEvo->nisoIOCoherencyModes.coherent = TRUE;
257         }
258 
259         if (ctxDmaNonCoherentAllowedDev) {
260             pDevEvo->isoIOCoherencyModes.noncoherent = TRUE;
261             pDevEvo->nisoIOCoherencyModes.noncoherent = TRUE;
262         }
263     } else {
264         /*
265          * On SOC display, NISO requests are IO-coherent and ISO
266          * requests are non-coherent.
267          */
268         pDevEvo->isoIOCoherencyModes.noncoherent = TRUE;
269         pDevEvo->nisoIOCoherencyModes.coherent = TRUE;
270     }
271 
272     pDevEvo->supportsSyncpts =
273         FALSE;
274 
275     return TRUE;
276 }
277 
278 
279 static void FreeDisplay(NVDispEvoPtr pDispEvo)
280 {
281     if (pDispEvo == NULL) {
282         return;
283     }
284 
285     nvAssert(pDispEvo->vrrSetTimeoutEventUsageCount == 0);
286     nvAssert(pDispEvo->vrrSetTimeoutEventHandle == 0);
287 
288 #if defined(DEBUG)
289     for (NvU32 apiHead = 0;
290          apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) {
291         nvAssert(pDispEvo->pSwapGroup[apiHead] == NULL);
292     }
293 #endif
294 
295     nvAssert(nvListIsEmpty(&pDispEvo->dpyList));
296 
297     nvkms_free_ref_ptr(pDispEvo->ref_ptr);
298 
299     nvInvalidateTopologiesEvo();
300     nvFree(pDispEvo);
301 }
302 
303 
304 static inline NVDispEvoPtr AllocDisplay(NVDevEvoPtr pDevEvo)
305 {
306     NVDispEvoPtr pDispEvo = nvCalloc(1, sizeof(NVDispEvoRec));
307 
308     if (pDispEvo == NULL) {
309         goto fail;
310     }
311 
312     pDispEvo->pDevEvo = pDevEvo;
313 
314     nvListInit(&pDispEvo->dpyList);
315     nvListInit(&pDispEvo->connectorList);
316 
317     pDispEvo->framelock.server = nvInvalidDpyId();
318     pDispEvo->framelock.clients = nvEmptyDpyIdList();
319     pDispEvo->framelock.currentServerHead = NV_INVALID_HEAD;
320 
321     pDispEvo->ref_ptr = nvkms_alloc_ref_ptr(pDispEvo);
322     if (!pDispEvo->ref_ptr) {
323         goto fail;
324     }
325 
326     return pDispEvo;
327 
328 fail:
329     FreeDisplay(pDispEvo);
330 
331     return NULL;
332 }
333 
334 
335 static void FreeDisplays(NVDevEvoPtr pDevEvo)
336 {
337     unsigned int sd;
338 
339     for (sd = 0; sd < pDevEvo->nDispEvo; sd++) {
340         FreeDisplay(pDevEvo->pDispEvo[sd]);
341         pDevEvo->pDispEvo[sd] = NULL;
342     }
343     pDevEvo->nDispEvo = 0;
344 }
345 
346 
347 /*!
348  * Allocate the NVDispRecs for the given pDev.
349  *
350  * \param[in,out]  pDev  The device for which to allocate Displays.
351  */
352 static NvBool AllocDisplays(NVDevEvoPtr pDevEvo)
353 {
354     unsigned int sd;
355 
356     nvAssert(pDevEvo->nDispEvo == 0);
357 
358     pDevEvo->nDispEvo = pDevEvo->numSubDevices;
359 
360     for (sd = 0; sd < pDevEvo->nDispEvo; sd++) {
361         NVDispEvoPtr pDispEvo = AllocDisplay(pDevEvo);
362 
363         if (pDispEvo == NULL) {
364             goto fail;
365         }
366 
367         pDevEvo->pDispEvo[sd] = pDispEvo;
368 
369         pDispEvo->displayOwner = sd;
370 
371         pDispEvo->gpuLogIndex = pDevEvo->pSubDevices[sd]->gpuLogIndex;
372     }
373 
374     return TRUE;
375 
376 fail:
377     FreeDisplays(pDevEvo);
378     return FALSE;
379 }
380 
381 /*
382  * Get the (id) list of all supported display devices for this pDisp.
383  */
384 static NvBool ProbeValidDisplays(NVDispEvoPtr pDispEvo)
385 {
386     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
387     NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS getSupportedParams = { 0 };
388     NvU32 ret;
389 
390     pDispEvo->connectorIds = nvEmptyDpyIdList();
391     pDispEvo->displayPortMSTIds = nvEmptyDpyIdList();
392     pDispEvo->dynamicDpyIds = nvEmptyDpyIdList();
393     pDispEvo->validDisplays = nvEmptyDpyIdList();
394 
395     getSupportedParams.subDeviceInstance = pDispEvo->displayOwner;
396 
397     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
398                          pDevEvo->displayCommonHandle,
399                          NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED,
400                          &getSupportedParams, sizeof(getSupportedParams));
401 
402     if (ret != NVOS_STATUS_SUCCESS) {
403         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
404                     "Failed to get supported display device(s)");
405     } else {
406         NVDpyIdList dpyIdList;
407         NVDpyId dpyId;
408 
409         // Grab only the static ids from the list.  Dynamic ids are
410         // used to communicate with devices that are connected to
411         // a connector that has a static id.
412         dpyIdList = nvNvU32ToDpyIdList(getSupportedParams.displayMask);
413 
414         FOR_ALL_DPY_IDS(dpyId, dpyIdList) {
415             NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS getOrInfoParams = { 0 };
416             getOrInfoParams.subDeviceInstance = pDispEvo->displayOwner;
417             getOrInfoParams.displayId = nvDpyIdToNvU32(dpyId);
418 
419             ret = nvRmApiControl(nvEvoGlobal.clientHandle,
420                                  pDevEvo->displayCommonHandle,
421                                  NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
422                                  &getOrInfoParams,
423                                  sizeof(getOrInfoParams));
424             if (ret != NVOS_STATUS_SUCCESS) {
425                 nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
426                             "Failed to get supported display device(s)");
427             } else {
428                 if (!getOrInfoParams.bIsDispDynamic) {
429                     pDispEvo->connectorIds =
430                         nvAddDpyIdToDpyIdList(dpyId, pDispEvo->connectorIds);
431                 }
432             }
433         }
434     }
435 
436     pDispEvo->validDisplays = pDispEvo->connectorIds;
437 
438     return TRUE;
439 }
440 
441 /*!
442  * Return TRUE if every pDispEvo on this pDevEvo has an empty validDisplays.
443  */
444 static NvBool NoValidDisplays(NVDevEvoPtr pDevEvo)
445 {
446     NVDispEvoPtr pDispEvo;
447     unsigned int sd;
448 
449     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
450         if (!nvDpyIdListIsEmpty(pDispEvo->validDisplays)) {
451             return FALSE;
452         }
453     }
454 
455     return TRUE;
456 }
457 
458 
459 /*
460  * Find the NvKmsConnectorSignalFormat for the pConnectorEvo.
461  */
462 static NvKmsConnectorSignalFormat
463 GetSignalFormat(const NVConnectorEvoRec *pConnectorEvo)
464 {
465     // SignalFormat represents a weird combination of our OR type and protocol.
466     switch (pConnectorEvo->or.type) {
467     case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC:
468         switch (pConnectorEvo->or.protocol) {
469         default:
470             nvAssert(!"Unexpected OR protocol for DAC");
471             // fall through
472         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT:
473             return NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA;
474         }
475 
476     case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
477         switch (pConnectorEvo->or.protocol) {
478         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM:
479             return NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS;
480 
481         default:
482             nvAssert(!"Unexpected OR protocol for SOR");
483             // fall through
484         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
485         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
486         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
487             return NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS;
488 
489         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
490         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
491             return NVKMS_CONNECTOR_SIGNAL_FORMAT_DP;
492         }
493 
494     case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR:
495         switch (pConnectorEvo->or.protocol) {
496         default:
497             nvAssert(!"Unexpected OR protocol for PIOR");
498             // fall through
499         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC:
500             return NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS;
501         }
502 
503     case NV0073_CTRL_SPECIFIC_OR_TYPE_DSI:
504         switch (pConnectorEvo->or.protocol) {
505         default:
506             nvAssert(!"Unexpected OR protocol for DSI");
507             // fall through
508         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI:
509             return NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI;
510         }
511 
512     default:
513         nvAssert(!"Unexpected OR type");
514         return NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN;
515     }
516 
517     return NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN;
518 }
519 
520 
521 static NvU32 GetDfpInfo(const NVConnectorEvoRec *pConnectorEvo)
522 {
523     NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
524     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
525 
526     NV0073_CTRL_DFP_GET_INFO_PARAMS params = { 0 };
527     NvU32 ret;
528 
529     if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
530         return 0x0;
531     }
532 
533     params.subDeviceInstance = pDispEvo->displayOwner;
534     params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
535 
536     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
537                          pDevEvo->displayCommonHandle,
538                          NV0073_CTRL_CMD_DFP_GET_INFO,
539                          &params,
540                          sizeof(params));
541 
542     if (ret != NVOS_STATUS_SUCCESS) {
543         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, "Failed to query DFP info");
544         return 0x0;
545     }
546 
547     return params.flags;
548 }
549 
550 typedef struct _AllocConnectorDispDataRec {
551     NvU32 dfpIndex;
552     NvU32 crtIndex;
553     NvU32 typeIndices[NVKMS_CONNECTOR_TYPE_MAX + 1];
554 } AllocConnectorDispDataRec;
555 
556 /*!
557  * Query and setup information for a connector.
558  */
559 static NvBool AllocConnector(
560     NVDispEvoPtr pDispEvo,
561     NVDpyId dpyId,
562     AllocConnectorDispDataRec *pAllocConnectorDispData)
563 {
564     NVConnectorEvoPtr pConnectorEvo = NULL;
565     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
566     NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS params = { 0 };
567     NvU32 ret;
568     NvBool isDP;
569 
570     pConnectorEvo = nvCalloc(1, sizeof(*pConnectorEvo));
571 
572     if (pConnectorEvo == NULL) {
573         return FALSE;
574     }
575 
576     pConnectorEvo->pDispEvo = pDispEvo;
577     pConnectorEvo->displayId = dpyId;
578     pConnectorEvo->type = NVKMS_CONNECTOR_TYPE_UNKNOWN;
579     pConnectorEvo->physicalIndex = NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION;
580     pConnectorEvo->physicalLocation = NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION;
581     /* Query the output resource configuration */
582     nvRmGetConnectorORInfo(pConnectorEvo, FALSE);
583 
584     isDP =
585         (pConnectorEvo->or.type ==
586          NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
587         (pConnectorEvo->or.protocol ==
588          NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A ||
589          pConnectorEvo->or.protocol ==
590          NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B);
591 
592     /* Determine the connector type. */
593 
594     params.subDeviceInstance = pDispEvo->displayOwner;
595     params.displayId = nvDpyIdToNvU32(dpyId);
596 
597     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
598                          pDevEvo->displayCommonHandle,
599                          NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA,
600                          &params,
601                          sizeof(params));
602 
603     if (ret != NVOS_STATUS_SUCCESS) {
604         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
605                      "Failed to determine connector type for connector "
606                      NV_DPY_ID_PRINT_FORMAT, nvDpyIdToPrintFormat(dpyId));
607         goto fail;
608     } else {
609 
610         static const struct {
611             NvU32 type0073;
612             NvKmsConnectorType typeNvKms;
613         } connectorTypeTable[] = {
614             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_EXT,
615               NVKMS_CONNECTOR_TYPE_DP },
616             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C,
617               NVKMS_CONNECTOR_TYPE_USBC },
618             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_INT,
619               NVKMS_CONNECTOR_TYPE_DP },
620             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_MINI_EXT,
621               NVKMS_CONNECTOR_TYPE_DP },
622             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_1,
623               NVKMS_CONNECTOR_TYPE_DP },
624             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_2,
625               NVKMS_CONNECTOR_TYPE_DP },
626             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VGA_15_PIN,
627               NVKMS_CONNECTOR_TYPE_VGA },
628             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_SVIDEO,
629               NVKMS_CONNECTOR_TYPE_DVI_I },
630             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_COMPOSITE,
631               NVKMS_CONNECTOR_TYPE_DVI_I },
632             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I,
633               NVKMS_CONNECTOR_TYPE_DVI_I },
634             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_D,
635               NVKMS_CONNECTOR_TYPE_DVI_D },
636             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_ADC,
637               NVKMS_CONNECTOR_TYPE_ADC },
638             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_1,
639               NVKMS_CONNECTOR_TYPE_DVI_I },
640             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_2,
641               NVKMS_CONNECTOR_TYPE_DVI_I },
642             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_SPWG,
643               NVKMS_CONNECTOR_TYPE_LVDS },
644             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_OEM,
645               NVKMS_CONNECTOR_TYPE_LVDS },
646             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_A,
647               NVKMS_CONNECTOR_TYPE_HDMI },
648             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_C_MINI,
649               NVKMS_CONNECTOR_TYPE_HDMI },
650             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VIRTUAL_WFD,
651               NVKMS_CONNECTOR_TYPE_UNKNOWN },
652             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DSI,
653               NVKMS_CONNECTOR_TYPE_DSI },
654             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_SERIALIZER,
655               NVKMS_CONNECTOR_TYPE_DP_SERIALIZER },
656             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_STEREO_3PIN_DIN,
657               NVKMS_CONNECTOR_TYPE_UNKNOWN },
658             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_UNKNOWN,
659               NVKMS_CONNECTOR_TYPE_UNKNOWN },
660         };
661 
662         int i, j;
663 
664         for (i = 0; i < params.count; i++) {
665             for (j = 0; j < ARRAY_LEN(connectorTypeTable); j++) {
666                 if (connectorTypeTable[j].type0073 == params.data[i].type) {
667                     if (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_UNKNOWN) {
668                         pConnectorEvo->type = connectorTypeTable[j].typeNvKms;
669                     } else {
670                         /*
671                          * The only cases where we should see
672                          * params.count > 1 (and thus attempt to
673                          * assign pConnectorEvo->type multiple times)
674                          * should be where all the
675                          * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_*
676                          * values map to the same NvKmsConnectorType;
677                          */
678                         nvAssert(pConnectorEvo->type ==
679                                  connectorTypeTable[j].typeNvKms);
680                     }
681                     break;
682                 }
683             }
684             if (j == ARRAY_LEN(connectorTypeTable)) {
685                 nvAssert(!"Unhandled connector type!");
686             }
687 
688             if (i == 0) {
689                 pConnectorEvo->physicalIndex = params.data[i].index;
690                 pConnectorEvo->physicalLocation = params.data[i].location;
691             } else {
692                 nvAssert(pConnectorEvo->physicalIndex == params.data[i].index);
693                 nvAssert(pConnectorEvo->physicalLocation ==
694                          params.data[i].location);
695             }
696         }
697 
698         pConnectorEvo->ddcPartnerDpyIdsList = nvNvU32ToDpyIdList(params.DDCPartners);
699     }
700 
701     /* If the connector type is unknown, ignore this connector. */
702     if (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_UNKNOWN) {
703         nvFree(pConnectorEvo);
704         return TRUE;
705     }
706 
707     /*
708      * Ignore connectors that use DP protocol, but don't have a
709      * DP-compatible type.
710      */
711     if (isDP &&
712         ((pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_DP) &&
713          !nvConnectorIsDPSerializer(pConnectorEvo) &&
714          (pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_USBC))) {
715         nvFree(pConnectorEvo);
716         return TRUE;
717     }
718 
719     /*
720      * Bind connector to the DP lib if DP capable. Serializer
721      * connector is not managed by DP lib.
722      */
723     if (isDP &&
724         !nvConnectorIsDPSerializer(pConnectorEvo)) {
725         pConnectorEvo->pDpLibConnector = nvDPCreateConnector(pConnectorEvo);
726         if (!pConnectorEvo->pDpLibConnector) {
727             nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
728                          "Failed to initialize DisplayPort support for "
729                          NV_DPY_ID_PRINT_FORMAT, nvDpyIdToPrintFormat(dpyId));
730             goto fail;
731         }
732     }
733 
734     pConnectorEvo->signalFormat = GetSignalFormat(pConnectorEvo);
735 
736     pConnectorEvo->dfpInfo = GetDfpInfo(pConnectorEvo);
737 
738     /* Assign connector indices. */
739 
740     pConnectorEvo->legacyType =
741         GetLegacyConnectorType(pDispEvo, pConnectorEvo->displayId);
742 
743     switch (pConnectorEvo->legacyType) {
744         case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT:
745             pConnectorEvo->legacyTypeIndex =
746                 pAllocConnectorDispData->crtIndex++;
747             break;
748         case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP:
749             pConnectorEvo->legacyTypeIndex =
750                 pAllocConnectorDispData->dfpIndex++;
751             break;
752         default:
753             nvAssert(!"Unknown connector type");
754             break;
755     }
756 
757     nvAssert(pConnectorEvo->type <
758              ARRAY_LEN(pAllocConnectorDispData->typeIndices));
759     pConnectorEvo->typeIndex =
760         pAllocConnectorDispData->typeIndices[pConnectorEvo->type]++;
761 
762     nvListAppend(&pConnectorEvo->connectorListEntry, &pDispEvo->connectorList);
763 
764     nvkms_snprintf(pConnectorEvo->name, sizeof(pConnectorEvo->name), "%s-%u",
765                    NvKmsConnectorTypeString(pConnectorEvo->type),
766                    pConnectorEvo->typeIndex);
767 
768     return TRUE;
769 
770 fail:
771     nvFree(pConnectorEvo);
772     return FALSE;
773 }
774 
775 
776 static void FreeConnectors(NVDispEvoPtr pDispEvo)
777 {
778     NVConnectorEvoPtr pConnectorEvo, pConnectorEvoNext;
779 
780     nvListForEachEntry_safe(pConnectorEvo, pConnectorEvoNext,
781                             &pDispEvo->connectorList, connectorListEntry) {
782         // Unbind DP lib from the connector
783         nvDPDestroyConnector(pConnectorEvo->pDpLibConnector);
784         pConnectorEvo->pDpLibConnector = NULL;
785         nvListDel(&pConnectorEvo->connectorListEntry);
786         nvFree(pConnectorEvo);
787     }
788 }
789 
790 
791 /*!
792  * Allocate and initialize the connector structs for the given pDisp.
793  *
794  * NOTE: Each Display ID in pDispEvo->connectorIds (aka the
795  * NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED mask) is a possible display
796  * connection to the GPU which is static after boot.
797  */
798 static NvBool AllocConnectors(NVDispEvoPtr pDispEvo)
799 {
800     NVDpyId dpyId;
801     NVConnectorEvoPtr pConnectorEvo;
802     AllocConnectorDispDataRec allocConnectorDispData = { };
803 
804     nvAssert(nvListIsEmpty(&pDispEvo->connectorList));
805 
806     if (nvDpyIdListIsEmpty(pDispEvo->connectorIds)) {
807         /* Allow boards with no connectors */
808         return TRUE;
809     }
810 
811     /* Allocate the connectors */
812     FOR_ALL_DPY_IDS(dpyId, pDispEvo->connectorIds) {
813         if (!AllocConnector(pDispEvo, dpyId, &allocConnectorDispData)) {
814             goto fail;
815         }
816     }
817 
818     /*
819      * Reassign pDispEvo->connectorIds, to exclude any connectors ignored above:
820      * AllocConnector() may return TRUE but not actually create a pConnectorEvo
821      * for some connectors reported by resman.
822      */
823     pDispEvo->connectorIds = nvEmptyDpyIdList();
824     FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
825         pDispEvo->connectorIds =
826             nvAddDpyIdToDpyIdList(pConnectorEvo->displayId,
827                                   pDispEvo->connectorIds);
828     }
829 
830     pDispEvo->validDisplays = pDispEvo->connectorIds;
831 
832     return TRUE;
833 
834  fail:
835     FreeConnectors(pDispEvo);
836     return FALSE;
837 }
838 
839 static NvBool IsFlexibleWindowMapping(NvU32 windowHeadMask)
840 {
841     return (windowHeadMask ==
842             NV0073_CTRL_SPECIFIC_FLEXIBLE_HEAD_WINDOW_ASSIGNMENT);
843 }
844 
845 /*!
846  * Query the number of heads and save the result in pDevEvo->numHeads.
847  * Get window head assignment and save it in pDevEvo->headForWindow[win].
848  *
849  * Query the number of heads on each pDisp of the pDev and limit to
850  * the minimum across all pDisps. Query the headMask on each pDisp and
851  * take the intersection across pDisps. Query the window-head assignment
852  * and if it is fully flexible, assign WINDOWs (2N) and (2N + 1) to HEAD N.
853  * Otherwise, use the queried assignment.
854  *
855  * Limit the number of heads to the number of bits in the headMask. Ignore
856  * the heads which don't have any windows assigned to them and heads which
857  * create holes in the headMask. If a head which has assigned windows gets
858  * pruned out, assign NV_INVALID_HEAD to those windows.
859  *
860  * \param[in,out] pDev   This is the device pointer; the pDisps within
861  *                       it are used to query per-GPU information.
862  *                       The result is written to pDevEvo->numHeads.
863  *
864  * \return               Return TRUE if numHeads are correctly queried and
865  *                       window-head assignment is done.
866  *                       Return FALSE if numHeads or window-head assignment
867  *                       could not be queried.
868  */
869 static NvBool ProbeHeadCountAndWindowAssignment(NVDevEvoPtr pDevEvo)
870 {
871     NvU32 numHeads = 0, headMask = 0;
872     NvU32 headsWithWindowsMask = 0;
873     int sd, head, numBits;
874     NVDispEvoPtr pDispEvo;
875     NvBool first = TRUE;
876     NvBool isFlexibleWindowMapping = NV_TRUE;
877     NvU32 win;
878     NvU32 ret;
879 
880     pDevEvo->numHeads = 0;
881 
882     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
883 
884         NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS numHeadsParams = { 0 };
885         NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS headMaskParams = { 0 };
886         NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS winHeadAssignParams = { };
887 
888         numHeadsParams.subDeviceInstance = sd;
889         numHeadsParams.flags = 0;
890 
891         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
892                              pDevEvo->displayCommonHandle,
893                              NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS,
894                              &numHeadsParams, sizeof(numHeadsParams));
895 
896         if (ret != NVOS_STATUS_SUCCESS) {
897             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
898                         "Failed to get the number of heads");
899             return FALSE;
900         }
901 
902         if (numHeadsParams.numHeads == 0) {
903             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "No heads found on board!");
904             return FALSE;
905         }
906 
907         if (numHeads == 0) {
908             numHeads = numHeadsParams.numHeads;
909         } else {
910             if (numHeads != numHeadsParams.numHeads) {
911                 NvU32 minNumHeads =
912                     NV_MIN(numHeads, numHeadsParams.numHeads);
913                 nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
914                             "Unexpected numbers of heads "
915                             "(%d, %d); clamping to %d",
916                             numHeads, numHeadsParams.numHeads, minNumHeads);
917                 numHeads = minNumHeads;
918             }
919         }
920 
921         headMaskParams.subDeviceInstance = sd;
922 
923         ret = nvRmApiControl(
924                 nvEvoGlobal.clientHandle,
925                 pDevEvo->displayCommonHandle,
926                 NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK,
927                 &headMaskParams, sizeof(headMaskParams));
928 
929         if (ret != NVOS_STATUS_SUCCESS) {
930             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
931                         "Failed to get head configuration");
932             return FALSE;
933         }
934 
935         if (headMask == 0) {
936             headMask = headMaskParams.headMask;
937         } else {
938             if (headMask != headMaskParams.headMask) {
939                 NvU32 intersectedHeadMask =
940                     headMask & headMaskParams.headMask;
941                 nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
942                             "Unexpected head configurations "
943                             "(0x%02x, 0x%02x); limiting to 0x%02x",
944                             headMask, headMaskParams.headMask,
945                             intersectedHeadMask);
946                 headMask = intersectedHeadMask;
947             }
948         }
949 
950         winHeadAssignParams.subDeviceInstance = sd;
951         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
952                              pDevEvo->displayCommonHandle,
953                              NV0073_CTRL_CMD_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT,
954                              &winHeadAssignParams, sizeof(winHeadAssignParams));
955 
956         if (ret == NVOS_STATUS_SUCCESS) {
957             for (win = 0; win < NVKMS_MAX_WINDOWS_PER_DISP; win++) {
958                 NvU32 windowHeadMask = winHeadAssignParams.windowHeadMask[win];
959 
960                 if ((win == 0) && first) {
961                     isFlexibleWindowMapping = IsFlexibleWindowMapping(windowHeadMask);
962                 } else if (isFlexibleWindowMapping) {
963                     /*
964                      * Currently, if one window is completely flexible, then all are.
965                      * In case of fully flexible window mapping, if windowHeadMask is
966                      * zero for a window, then that window is not present in HW.
967                      */
968                     nvAssert(!windowHeadMask || (isFlexibleWindowMapping ==
969                              IsFlexibleWindowMapping(windowHeadMask)));
970                 }
971 
972                 /*
973                  * For custom window mapping, if windowHeadMask is 0, then head
974                  * is not assigned to this window. For flexible window mapping,
975                  * if windowHeadMask is 0, then the window is not present in HW.
976                  */
977                 if (windowHeadMask == 0) {
978                     continue;
979                 }
980 
981                 if (isFlexibleWindowMapping) {
982                     /*
983                      * TODO: For now assign WINDOWs (2N) and (2N + 1) to HEAD N when
984                      * completely flexible window assignment is specified by window
985                      * head assignment mask.
986                      */
987                     head = win >> 1;
988                     windowHeadMask = NVBIT_TYPE(head, NvU8);
989                     nvAssert(head < numHeads);
990                 } else {
991                     // We don't support same window assigned to multiple heads.
992                     nvAssert(ONEBITSET(windowHeadMask));
993 
994                     head = BIT_IDX_32(windowHeadMask);
995                 }
996 
997                 if (first) {
998                     pDevEvo->headForWindow[win] = head;
999                     headsWithWindowsMask |= windowHeadMask;
1000                 } else {
1001                     nvAssert(pDevEvo->headForWindow[win] == head);
1002                 }
1003             }
1004         } else if (ret != NVOS_STATUS_ERROR_NOT_SUPPORTED) {
1005             nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
1006                              "Failed to get window-head assignment");
1007             return FALSE;
1008         } else {
1009             // Pre-Volta, we don't need to populate pDevEvo->headForWindow[] and
1010             // each HW head has a window assigned.
1011             headsWithWindowsMask = headMask;
1012         }
1013 
1014         if (first) {
1015             first = FALSE;
1016         }
1017     }
1018 
1019     /* Check whether heads which have windows assigned are actually present in HW */
1020     nvAssert(!(~headMask & headsWithWindowsMask));
1021 
1022     /* Intersect heads present in HW with heads which have windows assigned */
1023     headMask &= headsWithWindowsMask;
1024 
1025     /* clamp numHeads to the number of bits in headMask */
1026 
1027     numBits = nvPopCount32(headMask);
1028 
1029     /* for now, we only support headMask when it is tightly packed at 0 */
1030 
1031     for (head = 0; head < numBits; head++) {
1032         if ((headMask & (1 << head)) == 0) {
1033             NvU32 modifiedHeadMask = (1 << head) - 1;
1034 
1035             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1036                         "The head configuration (0x%02x) "
1037                         "is unexpected; limiting to 0x%02x", headMask,
1038                    modifiedHeadMask);
1039 
1040             headMask = modifiedHeadMask;
1041             numBits = head;
1042             break;
1043         }
1044     }
1045 
1046     /* headMask should never increase numHeads */
1047 
1048     if (numBits > numHeads) {
1049         nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1050                     "The head configuration (0x%02x) "
1051                     "is inconsistent with the number of heads (%d)",
1052                     headMask, numHeads);
1053     } else if (numBits < numHeads) {
1054         nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1055                     "Correcting number of heads for "
1056                     "current head configuration (0x%02x)", headMask);
1057         numHeads = numBits;
1058     }
1059 
1060     pDevEvo->numHeads = numHeads;
1061 
1062     /*
1063      * If a head which has assigned windows gets pruned out, assign
1064      * NV_INVALID_HEAD to those windows.
1065      */
1066     for (win = 0; win < NVKMS_MAX_WINDOWS_PER_DISP; win++) {
1067         if ((pDevEvo->headForWindow[win] == NV_INVALID_HEAD) ||
1068             (pDevEvo->headForWindow[win] < pDevEvo->numHeads)) {
1069             continue;
1070         }
1071         pDevEvo->headForWindow[win] = NV_INVALID_HEAD;
1072     }
1073 
1074     return TRUE;
1075 }
1076 
1077 /*!
1078  * Set a pConnectorEvo's software state based on the boot head assignment.
1079  */
1080 static void MarkConnectorBootHeadActive(NVDispEvoPtr pDispEvo, NvU32 head)
1081 {
1082     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1083     NVDpyId displayId, rootPortId;
1084     NVConnectorEvoPtr pConnectorEvo;
1085     NVDispHeadStateEvoPtr pHeadState;
1086     NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 };
1087     NvU32 ret;
1088 
1089     // Use the first displayId in the boot display list.
1090     //
1091     // TODO: What should we do if more than one dpy ID is listed for a boot
1092     // display?
1093     nvAssert(nvCountDpyIdsInDpyIdList(pDispEvo->vbiosDpyConfig[head]) == 1);
1094     displayId = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(),
1095                                               pDispEvo->vbiosDpyConfig[head]);
1096 
1097     // The displayId reported by RM could be a dynamic one.  Find the root port
1098     // for this ID.
1099     params.subDeviceInstance = pDispEvo->displayOwner;
1100     params.displayId = nvDpyIdToNvU32(displayId);
1101 
1102     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1103                          pDevEvo->displayCommonHandle,
1104                          NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
1105                          &params, sizeof(params));
1106     if (ret != NVOS_STATUS_SUCCESS) {
1107         return;
1108     }
1109 
1110     if (params.bIsDispDynamic) {
1111         rootPortId = nvNvU32ToDpyId(params.rootPortId);
1112     } else {
1113         rootPortId = displayId;
1114     }
1115 
1116     pConnectorEvo = nvGetConnectorFromDisp(pDispEvo, rootPortId);
1117     if (!pConnectorEvo) {
1118         return;
1119     }
1120 
1121     if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
1122             NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
1123 
1124         nvAssert(params.index != NV_INVALID_OR);
1125         if (params.index == NV_INVALID_OR) {
1126             // If RM reported that a head is driving this dpyId, then there
1127             // should be an SOR assigned.  However, due to a bug in the way
1128             // PDB_PROP_GPU_DISABLE_VGA_CONSOLE_RESTORATION_ON_RESUME is
1129             // handled, RM can report an "active" head with no SOR assigned on
1130             // certain specific GPUs.  If that happens, just treat the head as
1131             // disabled.  See bug 1692425.
1132             pDispEvo->vbiosDpyConfig[head] = nvEmptyDpyIdList();
1133             return;
1134         } else {
1135             // Track the SOR assignment for this connector.  See the comment in
1136             // nvRmGetConnectorORInfo() for why this is deferred until now.
1137             nvAssert(pConnectorEvo->or.primary == NV_INVALID_OR);
1138             pConnectorEvo->or.primary = params.index;
1139         }
1140     }
1141     nvAssert(pConnectorEvo->or.primary == params.index);
1142 
1143     pHeadState = &pDispEvo->headState[head];
1144 
1145     nvAssert(!nvHeadIsActive(pDispEvo, head));
1146 
1147     pHeadState->pConnectorEvo = pConnectorEvo;
1148     pHeadState->activeRmId = nvDpyIdToNvU32(displayId);
1149 
1150     // Track the assigned head.
1151     pConnectorEvo->or.ownerHeadMask[params.index] |= NVBIT(head);
1152 
1153     nvEvoStateStartNoLock(&pDispEvo->pDevEvo->gpus[pDispEvo->displayOwner]);
1154 }
1155 
1156 /*!
1157  * Query the vbios assignment of heads to display devices, and cache
1158  * in pDispEvo->vbiosDpyConfig for later use by nvDPResume().
1159  *
1160  * \param[in,out] pDisp  This is the GPU display pointer; the result is
1161  *                       written to pDispEvo->vbiosDpyConfig
1162  */
1163 static void GetVbiosHeadAssignmentOneDisp(NVDispEvoPtr pDispEvo)
1164 {
1165     unsigned int head;
1166     NvU32 ret = NVOS_STATUS_ERROR_GENERIC;
1167     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1168 
1169     nvkms_memset(&pDispEvo->vbiosDpyConfig, 0,
1170                  sizeof(pDispEvo->vbiosDpyConfig));
1171 
1172     /* if there is no display, there is no origDpyConfig */
1173 
1174     nvAssert(pDevEvo->displayCommonHandle != 0);
1175 
1176     /*
1177      * get the vbios assignment of heads within the GPU, so that
1178      * later when we do head assignment, we can try to preserve the
1179      * existing assignment; see bug 208072
1180      */
1181 
1182     for (head = 0; head < pDevEvo->numHeads; head++) {
1183         NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS activeDpysParams = { 0 };
1184 
1185         activeDpysParams.subDeviceInstance = pDispEvo->displayOwner;
1186         activeDpysParams.head = head;
1187         /*
1188          * We want to check for active displays set by any low-level software
1189          * such as VBIOS, not just those set by an RM client
1190          */
1191         activeDpysParams.flags =
1192             DRF_DEF(0073, _CTRL_SYSTEM_GET_ACTIVE_FLAGS, _CLIENT, _DISABLE);
1193 
1194         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1195                              pDevEvo->displayCommonHandle,
1196                              NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE,
1197                              &activeDpysParams, sizeof(activeDpysParams));
1198 
1199         if (ret == NVOS_STATUS_SUCCESS) {
1200             // XXX TODO: If this is a dynamic display ID, it's not necessarily
1201             // correlated with the NVDpyId we'll assign to a dynamic pDpyEvo
1202             // later.  We should instead store this as an NvU32 and assign it as
1203             // the activeRmId for a dynamic pDpyEvo that DPLib reports as being
1204             // driven by the firmware group.  See bug 1656584.
1205             pDispEvo->vbiosDpyConfig[head] =
1206                 nvNvU32ToDpyIdList(activeDpysParams.displayId);
1207             if (activeDpysParams.displayId != 0) {
1208                 MarkConnectorBootHeadActive(pDispEvo, head);
1209             }
1210         }
1211 
1212         nvAssert(ret == NVOS_STATUS_SUCCESS);
1213     }
1214 }
1215 
1216 static void GetVbiosHeadAssignment(NVDevEvoPtr pDevEvo)
1217 {
1218     NVDispEvoPtr pDispEvo;
1219     NvU32 dispIndex;
1220 
1221     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1222         GetVbiosHeadAssignmentOneDisp(pDispEvo);
1223     }
1224 }
1225 
1226 /*!
1227  * Query the boot display device(s).
1228  */
1229 static void ProbeBootDisplays(NVDispEvoPtr pDispEvo)
1230 {
1231     NvU32 ret;
1232     NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS bootParams = { 0 };
1233     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1234 
1235     pDispEvo->bootDisplays = nvEmptyDpyIdList();
1236 
1237     bootParams.subDeviceInstance = pDispEvo->displayOwner;
1238 
1239     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1240                          pDevEvo->displayCommonHandle,
1241                          NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS,
1242                          &bootParams, sizeof(bootParams));
1243 
1244     if (ret == NVOS_STATUS_SUCCESS) {
1245         pDispEvo->bootDisplays =
1246             nvNvU32ToDpyIdList(bootParams.bootDisplayMask);
1247     }
1248 }
1249 
1250 /*!
1251  * Query the 0073 display common object capabilities.
1252  */
1253 static NvBool ProbeDisplayCommonCaps(NVDevEvoPtr pDevEvo)
1254 {
1255     NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS capsParams = { };
1256     NvU32 ret;
1257 
1258     ct_assert(sizeof(pDevEvo->commonCapsBits) == sizeof(capsParams.capsTbl));
1259     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1260                          pDevEvo->displayCommonHandle,
1261                          NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2,
1262                          &capsParams, sizeof(capsParams));
1263     if (ret != NVOS_STATUS_SUCCESS) {
1264         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1265                     "Failed to determine display common capabilities");
1266         return FALSE;
1267     }
1268     nvkms_memcpy(pDevEvo->commonCapsBits, capsParams.capsTbl,
1269                  sizeof(pDevEvo->commonCapsBits));
1270 
1271     return TRUE;
1272 }
1273 
1274 static NvBool ReadDPCDReg(NVConnectorEvoPtr pConnectorEvo,
1275                           NvU32 dpcdAddr,
1276                           NvU8 *dpcdData)
1277 {
1278     NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params = { };
1279     NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;
1280 
1281     params.subDeviceInstance = pConnectorEvo->pDispEvo->displayOwner;
1282     params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
1283 
1284     params.cmd = DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX);
1285     params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _READ);
1286 
1287     params.addr = dpcdAddr;
1288 
1289     /* Requested size is 0-based */
1290     params.size = 0;
1291 
1292     if (nvRmApiControl(nvEvoGlobal.clientHandle,
1293                        pDevEvo->displayCommonHandle,
1294                        NV0073_CTRL_CMD_DP_AUXCH_CTRL,
1295                        &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
1296         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1297                     "AUX read failed for DPCD addr 0x%x",
1298                     dpcdAddr);
1299         return FALSE;
1300     }
1301 
1302     if (params.size != 1U) {
1303         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1304                     "AUX read returned 0 bytes for DPCD addr 0x%x",
1305                     dpcdAddr);
1306         return FALSE;
1307     }
1308 
1309     *dpcdData = params.data[0];
1310 
1311     return TRUE;
1312 }
1313 
1314 NvBool nvWriteDPCDReg(NVConnectorEvoPtr pConnectorEvo,
1315                       NvU32 dpcdAddr,
1316                       NvU8 dpcdData)
1317 {
1318     NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params = { };
1319     NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;
1320 
1321     params.subDeviceInstance = pConnectorEvo->pDispEvo->displayOwner;
1322     params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
1323 
1324     params.cmd = DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX);
1325     params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE);
1326 
1327     params.addr = dpcdAddr;
1328     params.data[0] = dpcdData;
1329 
1330     /* Requested size is 0-based */
1331     params.size = 0;
1332 
1333     if (nvRmApiControl(nvEvoGlobal.clientHandle,
1334                        pDevEvo->displayCommonHandle,
1335                        NV0073_CTRL_CMD_DP_AUXCH_CTRL,
1336                        &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
1337         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1338                     "AUX write failed for DPCD addr 0x%x",
1339                     dpcdAddr);
1340         return FALSE;
1341     }
1342 
1343     if (params.size != 1U) {
1344         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1345                     "Wrote 0 bytes for DPCD addr 0x%x",
1346                     dpcdAddr);
1347         return FALSE;
1348     }
1349 
1350     return TRUE;
1351 }
1352 
1353 static NvBool ReadDPSerializerCaps(NVConnectorEvoPtr pConnectorEvo)
1354 {
1355     NVDpyIdList oneDpyIdList =
1356         nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId);
1357     NVDpyIdList connectedList;
1358     NvU8 dpcdData = 0;
1359 
1360     /*
1361      * This call will not only confirm that the DP serializer is connected, but
1362      * will also power on the corresponding DPAUX pads if the serializer is
1363      * detected via NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE. The DPAUX pads
1364      * need to be enabled for the DPCD reads below.
1365      */
1366     connectedList = nvRmGetConnectedDpys(pConnectorEvo->pDispEvo, oneDpyIdList);
1367     if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedList)) {
1368         nvEvoLogDev(pConnectorEvo->pDispEvo->pDevEvo, EVO_LOG_ERROR,
1369                     "Serializer connector %s is not currently connected!",
1370                     pConnectorEvo->name);
1371         return FALSE;
1372     }
1373 
1374     if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LINK_BANDWIDTH, &dpcdData)) {
1375         return FALSE;
1376     }
1377     pConnectorEvo->dpSerializerCaps.maxLinkBW =
1378         DRF_VAL(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, dpcdData);
1379 
1380     if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LANE_COUNT, &dpcdData)) {
1381         return FALSE;
1382     }
1383     pConnectorEvo->dpSerializerCaps.maxLaneCount =
1384         DRF_VAL(_DPCD, _MAX_LANE_COUNT, _LANE, dpcdData);
1385 
1386     if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MSTM, &dpcdData)) {
1387         return FALSE;
1388     }
1389     pConnectorEvo->dpSerializerCaps.supportsMST =
1390         FLD_TEST_DRF(_DPCD, _MSTM, _CAP, _YES, dpcdData);
1391 
1392     return TRUE;
1393 }
1394 
1395 static NvBool AllocDPSerializerDpys(NVConnectorEvoPtr pConnectorEvo)
1396 {
1397     NvBool supportsMST;
1398     NvU32 numHeads;
1399     NvU32 i;
1400 
1401     if (!nvConnectorIsDPSerializer(pConnectorEvo)) {
1402         return TRUE;
1403     }
1404 
1405     if (!ReadDPSerializerCaps(pConnectorEvo)) {
1406         return FALSE;
1407     }
1408 
1409     supportsMST = pConnectorEvo->dpSerializerCaps.supportsMST;
1410     numHeads = pConnectorEvo->pDispEvo->pDevEvo->numHeads;
1411     for (i = 0; i < numHeads && supportsMST; i++) {
1412         NVDpyEvoPtr pDpyEvo = NULL;
1413         NvBool dynamicDpyCreated = FALSE;
1414         char address[5] = { };
1415 
1416         nvkms_snprintf(address, sizeof(address), "0.%d", i + 1);
1417         pDpyEvo = nvGetDPMSTDpyEvo(pConnectorEvo, address,
1418                                    &dynamicDpyCreated);
1419         if ((pDpyEvo == NULL) || !dynamicDpyCreated) {
1420             return FALSE;
1421         }
1422 
1423         pDpyEvo->dp.serializerStreamIndex = i;
1424     }
1425 
1426     return TRUE;
1427 }
1428 
1429 /*!
1430  *
1431  */
1432 static NvBool AllocDpys(NVDispEvoPtr pDispEvo)
1433 {
1434     NVConnectorEvoPtr pConnectorEvo;
1435 
1436     // At this point, there should be no DisplayPort multistream devices.
1437     nvAssert(nvDpyIdListsAreEqual(pDispEvo->validDisplays,
1438                                   pDispEvo->connectorIds));
1439     nvAssert(nvDpyIdListIsEmpty(pDispEvo->displayPortMSTIds));
1440     nvAssert(nvDpyIdListIsEmpty(pDispEvo->dynamicDpyIds));
1441 
1442     FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
1443         NVDpyEvoPtr pDpyEvo;
1444 
1445         pDpyEvo = nvAllocDpyEvo(pDispEvo, pConnectorEvo,
1446                                 pConnectorEvo->displayId, NULL);
1447 
1448         if (pDpyEvo == NULL) {
1449             nvAssert(!"Failed to allocate pDpy");
1450             return FALSE;
1451         }
1452 
1453         if (!AllocDPSerializerDpys(pConnectorEvo)) {
1454             nvAssert(!"Failed to allocate non DPLib managed dpys");
1455             return FALSE;
1456         }
1457     }
1458 
1459     return TRUE;
1460 }
1461 
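/*!
 * Free every pDpyEvo on this pDispEvo.
 */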
1462 static void FreeDpys(NVDispEvoPtr pDispEvo)
1463 {
1464     NVDpyEvoPtr pDpyEvo, pDpyEvoTmp;
1465 
1466     nvListForEachEntry_safe(pDpyEvo, pDpyEvoTmp,
1467                             &pDispEvo->dpyList, dpyListEntry) {
1468         nvFreeDpyEvo(pDispEvo, pDpyEvo);
1469     }
1470 }
1471 
1472 
1473 /*!
1474  * Receive hotplug notification from resman.
1475  *
1476  * This function is registered as the kernel callback function from
1477  * resman when an NV2080_NOTIFIERS_HOTPLUG event is generated.
1478  *
1479  * However, this function is called with resman's context (alternate
1480  * stack, resman locks held, etc).  Schedule deferred work, so that we
1481  * can process the hotplug event without resman's encumbrances.
1482  */
1483 static void ReceiveHotplugEvent(void *arg, void *pEventDataVoid, NvU32 hEvent,
1484                                 NvU32 Data, NV_STATUS Status)
1485 {
1486     (void) nvkms_alloc_timer_with_ref_ptr(
1487         nvHandleHotplugEventDeferredWork, /* callback */
1488         arg, /* argument (this is a ref_ptr to a pDispEvo) */
1489         0,   /* dataU32 */
1490         0);
1491 }
1492 
1493 static void ReceiveDPIRQEvent(void *arg, void *pEventDataVoid, NvU32 hEvent,
1494                               NvU32 Data, NV_STATUS Status)
1495 {
1496     // XXX The displayId of the connector that generated the event should be
1497     // available here somewhere.  We should figure out how to find that and
1498     // plumb it through to nvHandleDPIRQEventDeferredWork.
1499     (void) nvkms_alloc_timer_with_ref_ptr(
1500         nvHandleDPIRQEventDeferredWork, /* callback */
1501         arg, /* argument (this is a ref_ptr to a pDispEvo) */
1502         0,   /* dataU32 */
1503         0);
1504 }
1505 
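/*!
 * Register a kernel callback with resman.
 *
 * Allocate an NV01_EVENT_KERNEL_CALLBACK_EX object on 'parentHandle' so that
 * resman invokes 'func' (with 'ref_ptr' as its argument) whenever 'event' is
 * signaled.
 *
 * \return  TRUE if the event object was allocated successfully.
 */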
1506 NvBool nvRmRegisterCallback(const NVDevEvoRec *pDevEvo,
1507                             NVOS10_EVENT_KERNEL_CALLBACK_EX *cb,
1508                             struct nvkms_ref_ptr *ref_ptr,
1509                             NvU32 parentHandle,
1510                             NvU32 eventHandle,
1511                             Callback5ArgVoidReturn func,
1512                             NvU32 event)
1513 {
1514     NV0005_ALLOC_PARAMETERS allocEventParams = { 0 };
1515 
1516     cb->func = func;
1517     cb->arg = ref_ptr;
1518 
1519     allocEventParams.hParentClient = nvEvoGlobal.clientHandle;
1520     allocEventParams.hClass        = NV01_EVENT_KERNEL_CALLBACK_EX;
1521     allocEventParams.notifyIndex   = event;
1522     allocEventParams.data          = NV_PTR_TO_NvP64(cb);
1523 
1524     return nvRmApiAlloc(nvEvoGlobal.clientHandle,
1525                         parentHandle,
1526                         eventHandle,
1527                         NV01_EVENT_KERNEL_CALLBACK_EX,
1528                         &allocEventParams)
1529         == NVOS_STATUS_SUCCESS;
1530 }
1531 
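/*!
 * Wrapper around nvRmRegisterCallback() that registers a per-pDispEvo
 * callback on the display owner subdevice, passing the pDispEvo's ref_ptr
 * as the callback argument.
 */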
1532 static NvBool RegisterDispCallback(NVOS10_EVENT_KERNEL_CALLBACK_EX *cb,
1533                                    NVDispEvoPtr pDispEvo,
1534                                    NvU32 handle,
1535                                    Callback5ArgVoidReturn func,
1536                                    NvU32 event)
1537 {
1538     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1539     NvU32 subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle;
1540 
1541     return nvRmRegisterCallback(pDevEvo, cb, pDispEvo->ref_ptr, subDevice,
1542                                 handle, func, event);
1543 }
1544 
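/*
 * Handle a DIFR prefetch request from resman.  As with the hotplug and DP
 * IRQ events above, the resman callback only schedules deferred work;
 * DifrPrefetchEventDeferredWork() then prefetches display surfaces (using
 * the L2 cache size from the notification) and reports the status back via
 * nvDIFRSendPrefetchResponse().
 */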
1545 static void
1546 DifrPrefetchEventDeferredWork(void *dataPtr, NvU32 dataU32)
1547 {
1548     NVDevEvoPtr pDevEvo = dataPtr;
1549     size_t l2CacheSize = (size_t)dataU32;
1550     NvU32 status;
1551 
1552     nvAssert(pDevEvo->pDifrState);
1553 
1554     status = nvDIFRPrefetchSurfaces(pDevEvo->pDifrState, l2CacheSize);
1555     nvDIFRSendPrefetchResponse(pDevEvo->pDifrState, status);
1556 }
1557 
1558 static void DifrPrefetchEvent(void *arg, void *pEventDataVoid,
1559                               NvU32 hEvent, NvU32 Data, NV_STATUS Status)
1560 {
1561     Nv2080LpwrDifrPrefetchNotification *notif =
1562         (Nv2080LpwrDifrPrefetchNotification *)pEventDataVoid;
1563 
1564     (void)nvkms_alloc_timer_with_ref_ptr(
1565         DifrPrefetchEventDeferredWork, /* callback */
1566         arg, /* argument (this is a ref_ptr to a pDevEvo) */
1567         notif->l2CacheSize, /* dataU32 */
1568         0);  /* timeout: schedule the work immediately */
1569 }
1570 
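/*!
 * Allocate and probe the display state for this device: the
 * NV04_DISPLAY_COMMON object, the per-disp connectors and dpys, and the RM
 * event callbacks for hotplug and DisplayPort IRQ notifications.
 *
 * On failure, any partially allocated state is torn down via
 * nvRmDestroyDisplays().
 */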
1571 enum NvKmsAllocDeviceStatus nvRmAllocDisplays(NVDevEvoPtr pDevEvo)
1572 {
1573     NVDispEvoPtr pDispEvo;
1574     unsigned int sd;
1575     enum NvKmsAllocDeviceStatus status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
1576     NvU32 totalDispNumSubDevices = 0;
1577 
1578     pDevEvo->sli.bridge.present = FALSE;
1579 
1580     if (!QueryGpuCapabilities(pDevEvo)) {
1581         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1582                     "Failed to query GPU capabilities");
1583         goto fail;
1584     }
1585 
1586     if (pDevEvo->supportsSyncpts) {
1587         pDevEvo->preSyncptTable =
1588             nvCalloc(1, sizeof(NVEvoSyncpt) * NV_SYNCPT_GLOBAL_TABLE_LENGTH);
1589         if (pDevEvo->preSyncptTable == NULL) {
1590             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1591                     "Failed to allocate memory for pre-syncpt table");
1592             goto fail;
1593         }
1594     }
1595 
1596     if (!AllocDisplays(pDevEvo)) {
1597         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate displays");
1598         goto fail;
1599     }
1600 
1601     /* allocate the display common object for this device */
1602 
1603     if (nvRmEvoClassListCheck(pDevEvo, NV04_DISPLAY_COMMON)) {
1604 
1605         pDevEvo->displayCommonHandle =
1606             nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
1607 
1608         if (nvRmApiAlloc(nvEvoGlobal.clientHandle,
1609                          pDevEvo->deviceHandle,
1610                          pDevEvo->displayCommonHandle,
1611                          NV04_DISPLAY_COMMON, NULL)
1612                 != NVOS_STATUS_SUCCESS) {
1613             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1614                         "Failed to initialize the display "
1615                         "subsystem for the NVIDIA graphics device!");
1616             goto fail;
1617 
1618         }
1619     } else {
1620         /*
1621          * Not supporting NV04_DISPLAY_COMMON is expected in some
1622          * configurations: e.g., GF117 (an Optimus-only or "coproc" GPU),
1623          * emulation netlists.  Fail with "no hardware".
1624          */
1625         status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1626         goto fail;
1627     }
1628 
1629     if (!ProbeDisplayCommonCaps(pDevEvo)) {
1630         status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1631         goto fail;
1632     }
1633 
1634     if (!ProbeHeadCountAndWindowAssignment(pDevEvo)) {
1635         status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1636         goto fail;
1637     }
1638 
1639     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1640         if (!ProbeValidDisplays(pDispEvo)) {
1641             status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1642             goto fail;
1643         }
1644 
1645         /* Keep track of connectors per pDisp and bind to DP lib if capable */
1646         if (!AllocConnectors(pDispEvo)) {
1647             status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1648             goto fail;
1649         }
1650     }
1651 
1652     /*
1653      * If there are no valid display devices, fail with "no hardware".
1654      */
1655     if (NoValidDisplays(pDevEvo)) {
1656         status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1657         goto fail;
1658     }
1659 
1660     /*
     * The total number of subdevices across all disps should equal the
     * device's numSubDevices.
1663      */
1664     totalDispNumSubDevices = 0;
1665     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1666         totalDispNumSubDevices++;
1667     }
1668 
1669     if (totalDispNumSubDevices != pDevEvo->numSubDevices) {
1670         nvAssert(!"Number of disps' subdevices does not match device's");
1671     }
1672 
1673     /*
1674      * Allocate an NV event for each pDispEvo on the corresponding
1675      * subDevice, tied to the pDevEvo's OS event.
1676      */
1677     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1678         NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { };
1679         NvU32 subDevice, ret;
1680 
1681         subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle;
1682 
1683         pDispEvo->hotplugEventHandle =
1684             nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
1685 
1686         if (!RegisterDispCallback(&pDispEvo->rmHotplugCallback, pDispEvo,
1687                                   pDispEvo->hotplugEventHandle,
1688                                   ReceiveHotplugEvent,
1689                                   NV2080_NOTIFIERS_HOTPLUG)) {
1690             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1691                         "Failed to register display hotplug event");
1692         }
1693 
1694         // Enable hotplug notifications from this subdevice.
1695         setEventParams.event = NV2080_NOTIFIERS_HOTPLUG;
1696         setEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1697         if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1698                                   subDevice,
1699                                   NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
1700                                   &setEventParams,
1701                                   sizeof(setEventParams)))
1702                 != NVOS_STATUS_SUCCESS) {
1703             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1704                         "Failed to register display hotplug "
1705                         "handler: 0x%x\n", ret);
1706         }
1707     }
1708 
1709     // Allocate a handler for the DisplayPort "IRQ" event, which is signaled
1710     // when there's a short interruption in the hotplug detect line.
1711     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1712         NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { };
1713         NvU32 subDevice, ret;
1714 
1715         subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle;
1716 
1717         pDispEvo->DPIRQEventHandle =
1718             nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
1719 
1720         if (!RegisterDispCallback(&pDispEvo->rmDPIRQCallback, pDispEvo,
1721                                   pDispEvo->DPIRQEventHandle, ReceiveDPIRQEvent,
1722                                   NV2080_NOTIFIERS_DP_IRQ)) {
1723             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1724                         "Failed to register DisplayPort interrupt event");
1725         }
1726 
1727         // Enable DP IRQ notifications from this subdevice.
1728         setEventParams.event = NV2080_NOTIFIERS_DP_IRQ;
1729         setEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1730         if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1731                                   subDevice,
1732                                   NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
1733                                   &setEventParams,
1734                                   sizeof(setEventParams)))
1735                 != NVOS_STATUS_SUCCESS) {
1736             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1737                         "Failed to register DisplayPort interrupt "
1738                         "handler: 0x%x\n", ret);
1739         }
1740     }
1741 
1742     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1743         ProbeBootDisplays(pDispEvo);
1744 
1745         if (!AllocDpys(pDispEvo)) {
1746             goto fail;
1747         }
1748 
1749     }
1750 
1751     nvAllocVrrEvo(pDevEvo);
1752 
1753     return NVKMS_ALLOC_DEVICE_STATUS_SUCCESS;
1754 
1755 fail:
1756     nvRmDestroyDisplays(pDevEvo);
1757     return status;
1758 }
1759 
1760 
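/*!
 * Tear down everything allocated by nvRmAllocDisplays(): drain the RM's
 * DisplayPort AUX channel log, free the hotplug and DP IRQ event objects,
 * free the dpys, connectors, and disps, and release the display common
 * object.
 */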
1761 void nvRmDestroyDisplays(NVDevEvoPtr pDevEvo)
1762 {
1763     NvU32 ret;
1764     NVDispEvoPtr pDispEvo;
1765     int dispIndex;
1766     NvS64 tmp;
1767 
1768     nvFreeVrrEvo(pDevEvo);
1769 
1770     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1771 
1772         // Before freeing anything, dump anything left in the RM's DisplayPort
1773         // AUX channel log.
1774         if (pDispEvo->dpAuxLoggingEnabled) {
1775             do {
1776                 ret = nvRmQueryDpAuxLog(pDispEvo, &tmp);
1777             } while (ret && tmp);
1778         }
1779 
1780         // Free the DisplayPort IRQ event.
1781         if (pDispEvo->DPIRQEventHandle != 0) {
1782             nvRmApiFree(nvEvoGlobal.clientHandle,
1783                         nvEvoGlobal.clientHandle,
1784                         pDispEvo->DPIRQEventHandle);
1785             nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
1786                                pDispEvo->DPIRQEventHandle);
1787             pDispEvo->DPIRQEventHandle = 0;
1788         }
1789 
1790         // Free the hotplug event.
1791         /*
1792          * XXX I wish I could cancel anything scheduled by
1793          * ReceiveHotplugEvent() and ReceiveDPIRQEvent() for this pDispEvo...
1794          */
1795         if (pDispEvo->hotplugEventHandle != 0) {
1796             nvRmApiFree(nvEvoGlobal.clientHandle,
1797                         nvEvoGlobal.clientHandle,
1798                         pDispEvo->hotplugEventHandle);
1799             nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
1800                                pDispEvo->hotplugEventHandle);
1801             pDispEvo->hotplugEventHandle = 0;
1802         }
1803     }
1804 
1805     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1806         FreeDpys(pDispEvo);
1807         FreeConnectors(pDispEvo);
1808     }
1809 
1810     FreeDisplays(pDevEvo);
1811 
1812     nvFree(pDevEvo->preSyncptTable);
1813     pDevEvo->preSyncptTable = NULL;
1814 
1815     if (pDevEvo->displayCommonHandle != 0) {
1816         ret = nvRmApiFree(nvEvoGlobal.clientHandle,
1817                           pDevEvo->deviceHandle,
1818                           pDevEvo->displayCommonHandle);
1819         if (ret != NVOS_STATUS_SUCCESS) {
1820             nvAssert(!"Free(displayCommonHandle) failed");
1821         }
1822         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
1823                            pDevEvo->displayCommonHandle);
1824         pDevEvo->displayCommonHandle = 0;
1825     }
1826 }
1827 
1828 
1829 /*!
 * Allocate a display ID that we use to talk to RM about the dpy(s) on a
 * head.
 *
 * \param[in]  pDispEvo   The display system on which to allocate the ID.
 * \param[in]  dpyList    The list of dpys.
1835  *
1836  * \return  The display ID, or 0 on failure.
1837  */
1838 NvU32 nvRmAllocDisplayId(const NVDispEvoRec *pDispEvo, const NVDpyIdList dpyList)
1839 {
1840     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1841     NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS params = { 0 };
1842     const NVDpyEvoRec *pDpyEvo;
1843     const NVConnectorEvoRec *pConnectorEvo = NULL;
1844     NvBool isDPMST = NV_FALSE;
1845     NvU32 ret;
1846 
1847     FOR_ALL_EVO_DPYS(pDpyEvo, dpyList, pDispEvo) {
1848         if (pConnectorEvo == NULL) {
            /* First dpy in the list: record its pConnectorEvo and isDPMST */
1850             pConnectorEvo = pDpyEvo->pConnectorEvo;
1851             isDPMST = nvDpyEvoIsDPMST(pDpyEvo);
1852         }
1853 
1854         if (pConnectorEvo != pDpyEvo->pConnectorEvo ||
1855             isDPMST != nvDpyEvoIsDPMST(pDpyEvo)) {
1856             return 0;
1857         }
1858     }
1859 
1860     nvAssert(nvConnectorUsesDPLib(pConnectorEvo) || !isDPMST);
1861 
1862     if (!isDPMST) {
        /* For non-MST dpy(s), simply return the connector's static display ID */
1864         return nvDpyIdToNvU32(pConnectorEvo->displayId);
1865     }
1866 
1867     params.subDeviceInstance = pDispEvo->displayOwner;
1868     params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
1869 
1870     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1871                          pDevEvo->displayCommonHandle,
1872                          NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
1873                          &params, sizeof(params));
1874 
1875     if (ret == NVOS_STATUS_SUCCESS) {
1876         return params.displayIdAssigned;
1877     } else {
1878         nvEvoLogDisp(pDispEvo, EVO_LOG_WARN,
1879                      "Failed to allocate display resource.");
1880     }
1881 
1882     return 0;
1883 }
1884 
1885 
1886 /*!
1887  * Send DISPLAY_CHANGE to resman.
1888  *
1889  * This should be called before and after each mode change, with the display
1890  * mask describing the NEW display configuration.
1891  */
1892 void nvRmBeginEndModeset(NVDispEvoPtr pDispEvo,
1893                          enum NvKmsBeginEndModeset beginOrEnd,
1894                          NvU32 mask)
1895 {
1896     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1897     NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS bracketParams = { };
1898     NvU32 ret;
1899 
1900     bracketParams.subDeviceInstance = pDispEvo->displayOwner;
1901     bracketParams.newDevices = mask;
1902     bracketParams.properties = 0; /* this is currently unused */
1903     switch (beginOrEnd) {
1904         case BEGIN_MODESET:
1905             bracketParams.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START;
1906             break;
1907         case END_MODESET:
1908             bracketParams.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END;
1909             break;
1910     }
1911 
1912     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1913                          pDevEvo->displayCommonHandle,
1914                          NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE,
1915                          &bracketParams,
1916                          sizeof(bracketParams));
1917     if (ret != NVOS_STATUS_SUCCESS) {
1918         nvAssert(!"Failed NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE");
1919     }
1920 }
1921 
1922 
1923 /*!
1924  * Free a RM display ID, if it was allocated dynamically.
1925  *
1926  * This function frees a display ID if it was allocated by
1927  * nvRmAllocDisplayId.  If the display ID is static, this function does
1928  * nothing.
1929  *
1930  * From ctrl0073dp.h: You must not call this function while either the ARM
1931  * or ASSEMBLY state cache refers to this display-id.  The head must not be
1932  * attached.
1933  *
 * \param[in]  pDispEvo   The display system on which to free the ID.
1935  * \param[in]  displayId  The display ID to free.
1936  */
1937 void nvRmFreeDisplayId(const NVDispEvoRec *pDispEvo, NvU32 displayId)
1938 {
1939     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1940     NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS params = { 0 };
1941     NVDpyId dpyId = nvNvU32ToDpyId(displayId);
1942     NvU32 ret;
1943 
    /* Do nothing if the display ID is a connector's static display ID. */
1945     if (nvDpyIdIsInDpyIdList(dpyId, pDispEvo->connectorIds)) {
1946         return;
1947     }
1948 
1949     params.subDeviceInstance = pDispEvo->displayOwner;
1950     params.displayId = displayId;
1951 
1952     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1953                          pDevEvo->displayCommonHandle,
1954                          NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID,
1955                          &params, sizeof(params));
1956 
1957     if (ret != NVOS_STATUS_SUCCESS) {
1958         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
1959                      "Failed to relinquish display resource.");
1960     }
1961 }
1962 
1963 
1964 /*!
1965  * Query Resman for the (broad) display device type.
1966  */
1967 static NvU32 GetLegacyConnectorType(NVDispEvoPtr pDispEvo, NVDpyId dpyId)
1968 {
1969     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1970     NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS params = { 0 };
1971     NvU32 ret;
1972 
1973     params.subDeviceInstance = pDispEvo->displayOwner;
1974     params.displayId = nvDpyIdToNvU32(dpyId);
1975 
1976     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1977                          pDevEvo->displayCommonHandle,
1978                          NV0073_CTRL_CMD_SPECIFIC_GET_TYPE,
1979                          &params, sizeof(params));
1980 
1981     if (ret != NVOS_STATUS_SUCCESS) {
1982         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
1983                      "Failure getting specific display device type.");
1984         return NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_UNKNOWN;
1985     }
1986 
1987     nvAssert((params.displayType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) ||
1988              (params.displayType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP));
1989 
1990     return params.displayType;
1991 }
1992 
1993 
1994 /*!
1995  * Query RM for the current OR properties of the given connector.
1996  *
1997  * If 'assertOnly' is TRUE, this function will only assert that the OR
1998  * configuration has not changed.
1999  */
2000 void nvRmGetConnectorORInfo(NVConnectorEvoPtr pConnectorEvo, NvBool assertOnly)
2001 {
2002     NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
2003     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2004     NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 };
2005     NvU32 ret;
2006 
2007     params.subDeviceInstance = pDispEvo->displayOwner;
2008     params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
2009 
2010     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
2011                          pDevEvo->displayCommonHandle,
2012                          NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
2013                          &params,
2014                          sizeof(params));
2015     if (ret != NVOS_STATUS_SUCCESS) {
2016         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
2017                      "Failed to determine output resource properties.");
2018 
2019         if (assertOnly) {
2020             return;
2021         }
2022         pConnectorEvo->or.type = NV0073_CTRL_SPECIFIC_OR_TYPE_DAC;
2023         pConnectorEvo->or.primary = NV_INVALID_OR;
2024         pConnectorEvo->or.secondaryMask = 0;
2025         pConnectorEvo->or.protocol =
2026             NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT;
2027         pConnectorEvo->or.ditherType = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF;
2028         pConnectorEvo->or.ditherAlgo =
2029             NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN;
2030         pConnectorEvo->or.location = NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP;
2031 
2032         return;
2033     }
2034 
2035     if (!assertOnly) {
2036         pConnectorEvo->or.type = params.type;
2037         if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
2038                 NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED) &&
2039             params.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
2040             // For the SOR crossbar, RM may report that multiple displayIDs own
2041             // the same SOR.  For example, it may report SOR 2 for both the
2042             // DisplayPort and TMDS halves of a physical connector even though
2043             // they have separate displayIds.
2044             //
2045             // All we really need to know is which SOR is assigned to the boot
2046             // display, so we defer the query to MarkConnectorBootHeadActive().
2047             pConnectorEvo->or.secondaryMask = 0x0;
2048             pConnectorEvo->or.primary = NV_INVALID_OR;
2049         } else {
2050             pConnectorEvo->or.secondaryMask = 0x0;
2051             pConnectorEvo->or.primary = params.index;
2052         }
2053         pConnectorEvo->or.protocol = params.protocol;
2054         pConnectorEvo->or.ditherType = params.ditherType;
2055         pConnectorEvo->or.ditherAlgo = params.ditherAlgo;
2056         pConnectorEvo->or.location = params.location;
2057     } else {
2058         nvAssert(pConnectorEvo->or.type == params.type);
2059         nvAssert(pConnectorEvo->or.primary == params.index);
2060         nvAssert(pConnectorEvo->or.protocol == params.protocol);
2061         nvAssert(pConnectorEvo->or.ditherType == params.ditherType);
2062         nvAssert(pConnectorEvo->or.ditherAlgo == params.ditherAlgo);
2063         nvAssert(pConnectorEvo->or.location == params.location);
2064     }
2065 }
2066 
2067 /*!
2068  * Query connector state, and retry if necessary.
2069  */
2070 NVDpyIdList nvRmGetConnectedDpys(const NVDispEvoRec *pDispEvo,
2071                                  NVDpyIdList dpyIdList)
2072 {
2073     NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS params = { 0 };
2074     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2075     NvU32 ret;
2076 
2077     params.subDeviceInstance = pDispEvo->displayOwner;
2078     params.displayMask = nvDpyIdListToNvU32(dpyIdList);
2079     params.flags =
2080         (DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_METHOD,_DEFAULT) |
2081          DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_DDC,_DEFAULT) |
2082          DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_LOAD,_DEFAULT));
2083 
2084     do {
2085         params.retryTimeMs = 0;
2086         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
2087                              pDevEvo->displayCommonHandle,
2088                              NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE,
2089                              &params,
2090                              sizeof(params));
2091 
2092         if (ret == NVOS_STATUS_ERROR_NOT_READY &&
2093             params.retryTimeMs == 0) {
2094             // Work around bug 970351: RM returns a zero retry time on platforms
2095             // where the display driver is in user space.  Use a conservative
2096             // default.  This code can be removed once this call is fixed in RM.
2097             params.retryTimeMs = 20;
2098         }
2099 
2100         if (params.retryTimeMs > 0) {
2101             nvkms_usleep(params.retryTimeMs * 1000);
2102         } else {
2103             nvkms_yield();
2104         }
    } while (params.retryTimeMs > 0);
2106 
2107     if (ret == NVOS_STATUS_SUCCESS) {
2108         return nvNvU32ToDpyIdList(params.displayMask);
2109     } else {
2110         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
2111                      "Failed detecting connected display devices");
2112         return nvEmptyDpyIdList();
2113     }
2114 }
2115 
2116 /*!
2117  * Notify the DP library that we are ready to proceed after a suspend/boot, and
2118  * that it should initialize and start handling events.
2119  */
2120 NvBool nvRmResumeDP(NVDevEvoPtr pDevEvo)
2121 {
2122     NVDispEvoPtr pDispEvo;
2123     int i;
2124 
2125     FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
2126         NVConnectorEvoPtr pConnectorEvo;
2127         NVDpyIdList connectedIdsList =
2128             nvRmGetConnectedDpys(pDispEvo, pDispEvo->connectorIds);
2129 
2130         FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
2131             NvBool plugged =
2132                 nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedIdsList);
2133 
2134             if (!pConnectorEvo->pDpLibConnector) {
2135                 continue;
2136             }
2137 
2138             if (!nvDPResume(pConnectorEvo->pDpLibConnector, plugged)) {
2139                 goto failed;
2140             }
2141         }
2142     }
2143 
2144     return TRUE;
2145 
2146 failed:
2147     nvRmPauseDP(pDevEvo);
2148     return FALSE;
2149 }
2150 
2151 
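/*!
 * Counterpart to nvRmResumeDP(): pause DP library event handling on every
 * connector managed by the DP library.
 */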
2152 void nvRmPauseDP(NVDevEvoPtr pDevEvo)
2153 {
2154     NVDispEvoPtr pDispEvo;
2155     int i;
2156 
2157     FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
2158         NVConnectorEvoPtr pConnectorEvo;
2159 
2160         FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
2161             if (nvConnectorUsesDPLib(pConnectorEvo)) {
2162                 nvDPPause(pConnectorEvo->pDpLibConnector);
2163             }
2164         }
2165     }
2166 }
2167 
2168 
2169 /*!
 * This function is called whenever the DPMS level changes.  On a CRT, the
 * DPMS level is set by enabling or disabling the hsync and vsync signals:
2173  *
2174  * Hsync  Vsync  Mode
2175  * =====  =====  ====
2176  * 1      1      Normal (on).
2177  * 0      1      Standby -- RGB guns off, power supply on, tube filaments
2178  *               energized, (screen saver mode).
2179  * 1      0      Suspend -- RGB guns off, power supply off, tube filaments
2180  *               energized.
2181  * 0      0      Power off -- small auxiliary circuit stays on to monitor the
2182  *               hsync/vsync signals to know when to wake up.
2183  */
2184 NvBool nvRmSetDpmsEvo(NVDpyEvoPtr pDpyEvo, NvS64 value)
2185 {
2186     NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
2187     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2188     NvU32 ret;
2189 
2190     if (nvDpyUsesDPLib(pDpyEvo)) {
2191         nvDPDeviceSetPowerState(pDpyEvo,
2192                                 (value == NV_KMS_DPY_ATTRIBUTE_DPMS_ON));
2193         return TRUE;
2194     } else if (pDpyEvo->pConnectorEvo->legacyType !=
2195                NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) {
2196         NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS powerParams = { 0 };
2197 
2198         powerParams.subDeviceInstance = pDispEvo->displayOwner;
2199         powerParams.displayId = nvDpyEvoGetConnectorId(pDpyEvo);
2200 
2201         powerParams.powerState = (value == NV_KMS_DPY_ATTRIBUTE_DPMS_ON) ?
2202             NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_ON :
2203             NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_OFF;
2204 
2205         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
2206                              pDevEvo->displayCommonHandle,
2207                              NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER,
2208                              &powerParams,
2209                              sizeof(powerParams));
2210 
2211         return (ret == NVOS_STATUS_SUCCESS);
2212     } else {
2213         NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
2214         NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS powerParams = { { 0 }, 0 };
2215 
2216         powerParams.base.subdeviceIndex = pDispEvo->displayOwner;
2217         if (pConnectorEvo->or.primary == NV_INVALID_OR) {
2218             nvAssert(pConnectorEvo->or.primary != NV_INVALID_OR);
2219             return FALSE;
2220         }
2221         powerParams.orNumber = pConnectorEvo->or.primary;
2222 
2223         switch (value) {
2224         case NV_KMS_DPY_ATTRIBUTE_DPMS_ON:
2225             powerParams.normalHSync =
2226                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _ENABLE);
2227             powerParams.normalVSync =
2228                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _ENABLE);
2229             break;
2230         case NV_KMS_DPY_ATTRIBUTE_DPMS_STANDBY:
2231             powerParams.normalHSync =
2232                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _LO);
2233             powerParams.normalVSync =
2234                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _ENABLE);
2235             break;
2236         case NV_KMS_DPY_ATTRIBUTE_DPMS_SUSPEND:
2237             powerParams.normalHSync =
2238                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _ENABLE);
2239             powerParams.normalVSync =
2240                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _LO);
2241             break;
2242         case NV_KMS_DPY_ATTRIBUTE_DPMS_OFF:
2243             powerParams.normalHSync =
2244                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _LO);
2245             powerParams.normalVSync =
2246                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _LO);
2247             break;
2248         default:
2249             return FALSE;
2250         }
2251         // XXX These could probably be disabled too, in the DPMS_OFF case.
2252         powerParams.normalData =
2253             DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_DATA, _ENABLE);
2254         powerParams.normalPower =
2255             DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_PWR, _ON);
2256 
2257         powerParams.flags =
2258             DRF_DEF(5070, _CTRL_CMD_SET_DAC_PWR_FLAGS, _SPECIFIED_NORMAL, _YES);
2259 
2260         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
2261                              pDevEvo->displayHandle,
2262                              NV5070_CTRL_CMD_SET_DAC_PWR,
2263                              &powerParams,
2264                              sizeof(powerParams));
2265 
2266         return (ret == NVOS_STATUS_SUCCESS);
2267     }
2268 }
2269 
2270 
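/*!
 * Allocate and map system memory for display use, honoring the device's
 * ISO/NISO I/O coherency modes: prefer a noncoherent allocation (USWC memory
 * with a snoop-disabled ctxdma) when supported, otherwise fall back to a
 * coherent allocation (write-back memory with a snoop-enabled ctxdma).
 *
 * On success, the CPU mapping is returned through 'ppBase' and any required
 * ctxdma flags are OR'ed into '*ctxDmaFlags' (if non-NULL).
 */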
2271 NvBool nvRmAllocSysmem(NVDevEvoPtr pDevEvo, NvU32 memoryHandle,
2272                        NvU32 *ctxDmaFlags, void **ppBase, NvU64 size,
2273                        NvKmsMemoryIsoType isoType)
2274 {
2275     NvU32 ret;
2276     NvBool bufferAllocated = FALSE;
2277     NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
2278     const NvKmsDispIOCoherencyModes *pIOCoherencyModes;
2279 
2280     memAllocParams.owner = NVKMS_RM_HEAP_ID;
2281 
2282     memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO);
2283 
2284     memAllocParams.size = size;
2285 
2286     if (isoType == NVKMS_MEMORY_NISO) {
2287         memAllocParams.attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _YES);
2288 
2289         pIOCoherencyModes = &pDevEvo->nisoIOCoherencyModes;
2290     } else {
2291         pIOCoherencyModes = &pDevEvo->isoIOCoherencyModes;
2292     }
2293 
2294     memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) |
2295                           DRF_DEF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS) |
2296                           DRF_DEF(OS32, _ATTR, _FORMAT, _PITCH);
2297 
2298     if (pIOCoherencyModes->noncoherent) {
2299         // Model (3)
2300         // - allocate USWC system memory
2301         // - allocate ctx dma with NVOS03_FLAGS_CACHE_SNOOP_DISABLE
2302         // - to sync CPU and GPU, flush CPU WC buffer
2303 
2304         memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE,
2305                                           memAllocParams.attr);
2306 
2307         ret = nvRmApiAlloc(
2308                   nvEvoGlobal.clientHandle,
2309                   pDevEvo->deviceHandle,
2310                   memoryHandle,
2311                   NV01_MEMORY_SYSTEM,
2312                   &memAllocParams);
2313 
2314         if (ret == NVOS_STATUS_SUCCESS) {
2315             bufferAllocated = TRUE;
2316             if (ctxDmaFlags) {
2317                 *ctxDmaFlags |= DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _DISABLE);
2318             }
2319         } else {
2320             bufferAllocated = FALSE;
2321         }
2322 
2323     }
2324 
2325     if (!bufferAllocated && pIOCoherencyModes->coherent) {
2326         // Model (2b): Similar to existing PCI model
2327         // - allocate cached (or USWC) system memory
2328         // - allocate ctx DMA with NVOS03_FLAGS_CACHE_SNOOP_ENABLE
2329         // ...
2330 
2331         memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK,
2332                                           memAllocParams.attr);
2333 
2334         ret = nvRmApiAlloc(
2335                   nvEvoGlobal.clientHandle,
2336                   pDevEvo->deviceHandle,
2337                   memoryHandle,
2338                   NV01_MEMORY_SYSTEM,
2339                   &memAllocParams);
2340 
2341         if (ret == NVOS_STATUS_SUCCESS) {
2342             bufferAllocated = TRUE;
2343             if (ctxDmaFlags) {
2344                 *ctxDmaFlags |= DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _ENABLE);
2345             }
2346         } else {
2347             bufferAllocated = FALSE;
2348         }
2349     }
2350 
2351     if (bufferAllocated) {
2352         ret = nvRmApiMapMemory(
2353                   nvEvoGlobal.clientHandle,
2354                   pDevEvo->deviceHandle,
2355                   memoryHandle,
2356                   0, /* offset */
2357                   size,
2358                   ppBase,
2359                   0 /* flags */);
2360 
2361         if (ret != NVOS_STATUS_SUCCESS) {
2362             nvRmApiFree(nvEvoGlobal.clientHandle,
2363                         pDevEvo->deviceHandle,
2364                         memoryHandle);
2365 
2366             bufferAllocated = FALSE;
2367         }
2368     }
2369 
2370     return bufferAllocated;
2371 }
2372 
2373 
2374 /*****************************************************************************/
2375 /* Alloc memory and a context dma, following the rules dictated by the
2376    DMA coherence flags. */
2377 /*****************************************************************************/
2378 
2379 NvBool nvRmAllocEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma,
2380                        NvU64 limit, NvU32 ctxDmaFlags, NvU32 subDeviceMask)
2381 {
2382     NV_CONTEXT_DMA_ALLOCATION_PARAMS ctxdmaParams = { };
2383     NvBool bufferAllocated = FALSE;
2384     NvU32  memoryHandle = 0;
2385     void  *pBase = NULL;
2386 
2387     NvBool needBar1Mapping = FALSE;
2388 
2389     NvU32 ctxDmaHandle = 0;
2390     NvU32 localCtxDmaFlags = ctxDmaFlags |
2391         DRF_DEF(OS03, _FLAGS, _ACCESS, _READ_WRITE) |
2392         DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE);
2393 
2394     NvU32  ret;
2395 
2396     nvkms_memset(pDma, 0, sizeof(*pDma));
2397 
2398     memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
2399 
2400     /*
2401      * On certain GPUs (GF100, GF104) there exists a hardware bug that forces
2402      * us to put display NISO surfaces (pushbuffer, semaphores, notifiers
2403      * accessed by EVO) in vidmem instead of sysmem.  See bug 632241 for
2404      * details.
2405      */
2406     if (NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits,
2407             NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY)) {
2408         NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
2409 
2410         memAllocParams.owner = NVKMS_RM_HEAP_ID;
2411         memAllocParams.type = NVOS32_TYPE_DMA;
2412         memAllocParams.size = limit + 1;
2413         memAllocParams.attr = DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) |
2414                               DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM);
2415 
2416         ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
2417                            pDevEvo->deviceHandle,
2418                            memoryHandle,
2419                            NV01_MEMORY_LOCAL_USER,
2420                            &memAllocParams);
2421 
2422         if (ret != NVOS_STATUS_SUCCESS) {
2423             /* We can't fall back to any of the sysmem options below, due to
2424              * the nature of the HW bug forcing us to use vidmem. */
2425             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2426                         "Unable to allocate video memory for display");
2427             return FALSE;
2428         }
2429 
2430         limit = memAllocParams.size - 1;
2431 
2432         /* We'll access these surfaces through IFB */
2433         pBase = NULL;
2434 
2435         bufferAllocated = TRUE;
2436         needBar1Mapping = TRUE;
2437     }
2438 
2439     if (!bufferAllocated) {
2440         /*
2441          * Setting NVKMS_MEMORY_NISO since nvRmAllocEvoDma() is currently only
2442          * called to allocate pushbuffer and notifier memory.
2443          */
2444         bufferAllocated = nvRmAllocSysmem(pDevEvo, memoryHandle,
2445                                           &localCtxDmaFlags, &pBase, limit + 1,
2446                                           NVKMS_MEMORY_NISO);
2447     }
2448 
2449     if (!bufferAllocated) {
2450         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);
2451 
2452         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unable to allocate DMA memory");
2453 
2454         return FALSE;
2455     }
2456 
2457     ctxDmaHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
2458 
2459     // Create a ctxdma for this allocation.
2460     ctxdmaParams.hMemory = memoryHandle;
2461     ctxdmaParams.flags = localCtxDmaFlags;
2462     ctxdmaParams.offset = 0;
2463     ctxdmaParams.limit = limit;
2464 
2465     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
2466                        pDevEvo->deviceHandle,
2467                        ctxDmaHandle,
2468                        NV01_CONTEXT_DMA,
2469                        &ctxdmaParams);
2470 
2471     if (ret != NVOS_STATUS_SUCCESS) {
2472         if (pBase != NULL) {
2473             nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
2474                                pDevEvo->deviceHandle,
2475                                memoryHandle,
2476                                pBase,
2477                                0);
2478         }
2479         nvRmApiFree(nvEvoGlobal.clientHandle,
2480                     pDevEvo->deviceHandle, memoryHandle);
2481         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);
2482 
2483         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, ctxDmaHandle);
2484 
2485         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate a DMA context");
2486 
2487         return FALSE;
2488     }
2489 
2490     pDma->memoryHandle = memoryHandle;
2491 
2492     pDma->ctxHandle = ctxDmaHandle;
2493 
2494     pDma->limit = limit;
2495 
2496     if (needBar1Mapping) {
2497         NvBool result;
2498 
2499         result = nvRmEvoMapVideoMemory(pDevEvo, memoryHandle, limit + 1,
2500                                        pDma->subDeviceAddress, subDeviceMask);
2501 
2502         if (!result) {
2503             nvRmFreeEvoDma(pDevEvo, pDma);
2504             return FALSE;
2505         }
2506     } else {
2507         int sd;
2508 
2509         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2510             if (((1 << sd) & subDeviceMask) == 0) {
2511                 continue;
2512             }
2513 
2514             pDma->subDeviceAddress[sd] = pBase;
2515         }
2516     }
2517     pDma->isBar1Mapping = needBar1Mapping;
2518 
2519     return TRUE;
2520 }
2521 
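/*!
 * Free the ctxdma, CPU mappings, and memory allocated by nvRmAllocEvoDma().
 */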
2522 void nvRmFreeEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma)
2523 {
2524     NvU32 ret;
2525 
2526     if (pDma->ctxHandle != 0) {
2527         ret = nvRmApiFree(nvEvoGlobal.clientHandle,
2528                           pDevEvo->deviceHandle, pDma->ctxHandle);
2529 
2530         if (ret != NVOS_STATUS_SUCCESS) {
2531             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to free DMA context");
2532         }
2533 
2534         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDma->ctxHandle);
2535         pDma->ctxHandle = 0;
2536     }
2537 
2538     if (pDma->memoryHandle != 0) {
2539         if (pDma->isBar1Mapping) {
2540             nvRmEvoUnMapVideoMemory(pDevEvo, pDma->memoryHandle,
2541                                     pDma->subDeviceAddress);
2542         } else {
2543             int sd = 0;
2544             NvBool addressMapped = TRUE;
2545 
2546             /* If pDma->subDeviceAddress[sd] is non-NULL for multiple subdevices,
2547              * assume they are the same. Unmap only one but set all of them to
2548              * NULL. This matches the logic in nvRmAllocEvoDma().
2549              */
2550             for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2551 
2552                 if (addressMapped && pDma->subDeviceAddress[sd] != NULL) {
2553                     ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
2554                                              pDevEvo->deviceHandle,
2555                                              pDma->memoryHandle,
2556                                              pDma->subDeviceAddress[sd],
2557                                              0);
2558 
2559                     if (ret != NVOS_STATUS_SUCCESS) {
2560                         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to unmap memory");
2561                     }
2562 
2563                     addressMapped = FALSE;
2564                 }
2565 
2566                 pDma->subDeviceAddress[sd] = NULL;
2567             }
2568         }
2569 
2570         ret = nvRmApiFree(nvEvoGlobal.clientHandle,
2571                           pDevEvo->deviceHandle, pDma->memoryHandle);
2572 
2573         if (ret != NVOS_STATUS_SUCCESS) {
2574             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to free DMA memory");
2575         }
2576 
2577         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDma->memoryHandle);
2578         pDma->memoryHandle = 0;
2579 
2580         pDma->limit = 0;
2581 
2582         nvkms_memset(pDma->subDeviceAddress, 0, sizeof(pDma->subDeviceAddress));
2583     }
2584 }
2585 
2586 /*****************************************************************************/
2587 /* RmAllocEvoChannel ()
2588  * Allocates the EVO channel and associated notifier surfaces and ctxdmas.
 * Takes which channel to allocate (channelMask and instance) and which
 * class to use.
2591  */
2592 /*****************************************************************************/
2593 static NVEvoChannelPtr
2594 RmAllocEvoChannel(NVDevEvoPtr pDevEvo,
2595                   NVEvoChannelMask channelMask,
2596                   NvV32 instance, NvU32 class)
2597 {
2598     NVEvoChannelPtr pChannel = NULL;
2599     NVDmaBufferEvoPtr buffer = NULL;
2600     int sd;
2601     NvU32 ret;
2602 
2603     /* One 4k page is enough to map PUT and GET */
2604     const NvU64 dmaControlLen = 0x1000;
2605 
2606     nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(channelMask) == 1);
2607 
2608     /* Allocate the channel data structure */
2609     pChannel = nvCalloc(1, sizeof(*pChannel));
2610 
2611     if (pChannel == NULL) {
2612         goto fail;
2613     }
2614 
2615     buffer = &pChannel->pb;
2616 
2617     pChannel->hwclass = class;
2618     pChannel->instance = instance;
2619     pChannel->channelMask = channelMask;
2620 
2621     pChannel->notifiersDma = nvCalloc(pDevEvo->numSubDevices, sizeof(NVEvoDma));
2622 
2623     if (pChannel->notifiersDma == NULL) {
2624         goto fail;
2625     }
2626 
2627     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2628         NVEvoDmaPtr pNotifiersDma = &pChannel->notifiersDma[sd];
2629 
2630         void *pDmaDisplayChannel = NULL;
2631 
2632         // Allocation of the notifiers
2633         if (!nvRmAllocEvoDma(pDevEvo, pNotifiersDma,
2634                              NV_DMA_EVO_NOTIFIER_SIZE - 1,
2635                              DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER),
2636                              1 << sd)) {
2637             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2638                         "Notifier DMA allocation failed");
2639 
2640             goto fail;
2641         }
2642 
2643         nvAssert(pNotifiersDma->subDeviceAddress[sd] != NULL);
2644 
2645         // Only allocate memory for one pushbuffer.
2646         // All subdevices will share (via subdevice mask)
2647         if (sd == 0) {
2648             NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS ChannelAllocParams = { 0 };
2649 
2650             NvU64 limit = NV_DMA_EVO_PUSH_BUFFER_SIZE - 1;
2651             NVEvoDmaPtr pDma = &buffer->dma;
2652 
2653             // Allocation of the push buffer
2654             if (!nvRmAllocEvoDma(pDevEvo, pDma, limit, 0, SUBDEVICE_MASK_ALL)) {
2655                 nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2656                             "Display engine push buffer DMA allocation failed");
2657 
2658                 goto fail;
2659             }
2660 
2661             if (!pDma->isBar1Mapping) {
2662                 buffer->base = pDma->subDeviceAddress[0];
2663             } else {
2664                 /*
2665                  * Allocate memory for a shadow copy in sysmem that we'll copy
2666                  * to vidmem via BAR1 at kickoff time.
2667                  */
2668                 buffer->base = nvCalloc(buffer->dma.limit + 1, 1);
2669                 if (buffer->base == NULL) {
2670                     goto fail;
2671                 }
2672             }
2673 
2674             buffer->channel_handle =
2675                 nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
2676 
2677             // Channel instance (always 0 for CORE - head number otherwise)
2678             ChannelAllocParams.channelInstance = instance;
2679             // PB CtxDMA Handle
2680             ChannelAllocParams.hObjectBuffer   = buffer->dma.ctxHandle;
2681             // Initial offset within the PB
2682             ChannelAllocParams.offset          = 0;
2683 
2684             ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
2685                                pDevEvo->displayHandle,
2686                                buffer->channel_handle,
2687                                class,
2688                                &ChannelAllocParams);
2689             if (ret != NVOS_STATUS_SUCCESS) {
2690                 nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2691                             "Display engine push buffer channel allocation failed: 0x%x (%s)",
2692                             ret, nvstatusToString(ret));
2693 
2694                 nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
2695                                    buffer->channel_handle);
2696                 buffer->channel_handle = 0;
2697 
2698                 goto fail;
2699             }
2700         }
2701 
2702         ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
2703                                pDevEvo->pSubDevices[sd]->handle,
2704                                buffer->channel_handle,
2705                                0,
2706                                dmaControlLen,
2707                                &pDmaDisplayChannel,
2708                                0);
2709         if (ret != NVOS_STATUS_SUCCESS) {
2710             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2711                         "Display engine push buffer DMA mapping failed: 0x%x (%s)",
2712                         ret, nvstatusToString(ret));
2713             goto fail;
2714         }
2715 
2716         buffer->control[sd] = pDmaDisplayChannel;
2717     }
2718 
2719     /* Initialize the rest of the required push buffer information */
2720     buffer->buffer          = buffer->base;
2721     buffer->end             = (NvU32 *)((char *)buffer->base +
2722                               NV_DMA_EVO_PUSH_BUFFER_SIZE - 8);
2723 
2724     /*
2725      * Due to hardware bug 235044, we can not use the last 12 dwords of the
2726      * core channel pushbuffer.  Adjust offset_max appropriately.
2727      *
2728      * This bug is fixed in Volta and newer, so this workaround can be removed
2729      * when Pascal support is dropped. See bug 3116066.
2730      */
2731     buffer->offset_max   = NV_DMA_EVO_PUSH_BUFFER_SIZE -
2732                            NV_DMA_EVO_PUSH_BUFFER_PAD_SIZE;
2733     buffer->fifo_free_count = (buffer->offset_max >> 2) - 2;
2734     buffer->put_offset   = 0;
2735     buffer->num_channels = pDevEvo->numSubDevices;
2736     buffer->pDevEvo      = pDevEvo;
2737     buffer->currentSubDevMask = SUBDEVICE_MASK_ALL;
2738 
2739     pChannel->imm.type = NV_EVO_IMM_CHANNEL_NONE;
2740 
2741     pDevEvo->hal->InitChannel(pDevEvo, pChannel);
2742 
2743     return pChannel;
2744 
2745 fail:
2746 
2747     RmFreeEvoChannel(pDevEvo, pChannel);
2748 
2749     return NULL;
2750 }
2751 
2752 static void FreeImmediateChannelPio(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
2753 {
2754     NVEvoPioChannel *pPio = pChannel->imm.u.pio;
2755     int sd;
2756 
2757     nvAssert(pPio != NULL);
2758 
2759     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2760 
2761         if (!pPio->control[sd]) {
2762             continue;
2763         }
2764 
2765         if (nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
2766                                pDevEvo->pSubDevices[sd]->handle,
2767                                pPio->handle,
2768                                pPio->control[sd],
2769                                0)) {
2770             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
2771                         "Failed to unmap immediate channel");
2772         }
2773         pPio->control[sd] = NULL;
2774     }
2775 
2776     if (pPio->handle) {
2777         if (nvRmApiFree(nvEvoGlobal.clientHandle,
2778                         pDevEvo->displayHandle,
2779                         pPio->handle)) {
2780             nvEvoLogDev(pDevEvo, EVO_LOG_WARN, "Failed to free immediate channel");
2781         }
2782         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
2783                            pPio->handle);
2784         pPio->handle = 0;
2785     }
2786 
2787     nvFree(pPio);
2788     pChannel->imm.u.pio = NULL;
2789 }
2790 
2791 static void FreeImmediateChannelDma(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
2792 {
2793     NVEvoChannelPtr pImmChannel = pChannel->imm.u.dma;
2794 
2795     RmFreeEvoChannel(pDevEvo, pImmChannel);
2796     pChannel->imm.u.dma = NULL;
2797 }
2798 
2799 static void FreeImmediateChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
2800 {
2801     switch (pChannel->imm.type) {
2802         case NV_EVO_IMM_CHANNEL_NONE:
2803             return;
2804         case NV_EVO_IMM_CHANNEL_PIO:
2805             FreeImmediateChannelPio(pDevEvo, pChannel);
2806             break;
2807         case NV_EVO_IMM_CHANNEL_DMA:
2808             FreeImmediateChannelDma(pDevEvo, pChannel);
2809             break;
2810     }
2811     pChannel->imm.type = NV_EVO_IMM_CHANNEL_NONE;
2812 }
2813 
2814 /*****************************************************************************/
2815 /* RmFreeEvoChannel ()
2816  * Frees all of the stuff allocated in RmAllocEvoChannel */
2817 /*****************************************************************************/
2818 static void RmFreeEvoChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
2819 {
2820     int sd;
2821 
2822     if (pChannel == NULL) {
2823         return;
2824     }
2825 
2826     FreeImmediateChannel(pDevEvo, pChannel);
2827 
2828     if (pChannel->completionNotifierEventHandle != 0) {
2829 
2830         nvRmApiFree(nvEvoGlobal.clientHandle,
2831                     pChannel->pb.channel_handle,
2832                     pChannel->completionNotifierEventHandle);
2833 
2834         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
2835                            pChannel->completionNotifierEventHandle);
2836 
2837         pChannel->completionNotifierEventHandle = 0;
2838     }
2839 
2840     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2841         if (pChannel->pb.control[sd]) {
2842             if (nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
2843                                    pDevEvo->pSubDevices[sd]->handle,
2844                                    pChannel->pb.channel_handle,
2845                                    pChannel->pb.control[sd],
2846                                    0) != NVOS_STATUS_SUCCESS) {
2847                 nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
2848                                  "Failed to unmap display engine channel memory");
2849             }
2850             pChannel->pb.control[sd] = NULL;
2851         }
2852     }
2853 
2854     if (pChannel->pb.channel_handle != 0) {
2855         // If NVKMS restored the console successfully, tell RM to leave the
2856         // channels allocated to avoid shutting down the heads we just
2857         // enabled.
2858         //
2859         // On EVO, only leave the core and base channels allocated. The
2860         // other satellite channels shouldn't be active at the console.
2861         //
2862         // On nvdisplay, one or more window channels are also needed. Rather
2863         // than try to figure out which ones are needed, just leave them all
2864         // alone.
2865         const NvBool isCore =
2866             FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE,
2867                            pChannel->channelMask);
2868         const NvBool isBase =
2869             (pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0;
2870         const NvBool isWindow =
2871             (pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0;
2872         if ((isCore || isBase || isWindow) && pDevEvo->skipConsoleRestore) {
2873             NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS params = { };
2874 
2875             params.base.subdeviceIndex = pDevEvo->vtFbInfo.subDeviceInstance;
2876             params.flags = NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW;
2877 
2878             if (nvRmApiControl(nvEvoGlobal.clientHandle,
2879                                pDevEvo->displayHandle,
2880                                NV5070_CTRL_CMD_SET_RMFREE_FLAGS,
2881                                &params, sizeof(params))
2882                 != NVOS_STATUS_SUCCESS) {
2883                 nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
2884                                  "Failed to set the PRESERVE_HW flag");
2885             }
2886         }
2887 
2888         if (nvRmApiFree(nvEvoGlobal.clientHandle,
2889                         pDevEvo->displayHandle,
2890                         pChannel->pb.channel_handle)
2891             != NVOS_STATUS_SUCCESS) {
2892             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2893                         "Failed to tear down display engine channel");
2894         }
2895         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
2896                            pChannel->pb.channel_handle);
2897         pChannel->pb.channel_handle = 0;
2898     }
2899 
2900     if (pChannel->pb.dma.isBar1Mapping) {
2901         /* Pushbuffer is in vidmem. Free shadow copy. */
2902         nvFree(pChannel->pb.base);
2903         pChannel->pb.base = NULL;
2904     }
2905 
2906     nvRmFreeEvoDma(pDevEvo, &pChannel->pb.dma);
2907 
2908     if (pChannel->notifiersDma) {
2909         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2910             nvRmFreeEvoDma(pDevEvo, &pChannel->notifiersDma[sd]);
2911         }
2912     }
2913 
2914     nvFree(pChannel->notifiersDma);
2915     pChannel->notifiersDma = NULL;
2916 
2917     nvFree(pChannel);
2918 }
2919 
2920 static NvBool
2921 AllocImmediateChannelPio(NVDevEvoPtr pDevEvo,
2922                          NVEvoChannelPtr pChannel,
2923                          NvU32 class,
2924                          NvU32 instance,
2925                          NvU32 mapSize)
2926 {
2927     NVEvoPioChannel *pPio = NULL;
2928     NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
2929     NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS params = { 0 };
2930     NvU32 sd;
2931 
2932     pPio = nvCalloc(1, sizeof(*pPio));
2933 
2934     if (!pPio) {
2935         return FALSE;
2936     }
2937 
2938     pChannel->imm.type = NV_EVO_IMM_CHANNEL_PIO;
2939     pChannel->imm.u.pio = pPio;
2940 
2941     params.channelInstance = instance;
2942 
2943     if (nvRmApiAlloc(nvEvoGlobal.clientHandle,
2944                      pDevEvo->displayHandle,
2945                      handle,
2946                      class,
2947                      &params) != NVOS_STATUS_SUCCESS) {
2948         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2949                     "Failed to allocate immediate channel %d", instance);
2950         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
2951         return FALSE;
2952     }
2953 
2954     pPio->handle = handle;
2955 
2956     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2957         void *pImm = NULL;
2958 
2959         if (nvRmApiMapMemory(nvEvoGlobal.clientHandle,
2960                              pDevEvo->pSubDevices[sd]->handle,
2961                              pPio->handle,
2962                              0,
2963                              mapSize,
2964                              &pImm,
2965                              0) != NVOS_STATUS_SUCCESS) {
2966             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2967                         "Failed to map immediate channel %d/%d",
2968                         sd, instance);
2969             return FALSE;
2970         }
2971 
2972         pPio->control[sd] = pImm;
2973     }
2974 
2975     return TRUE;
2976 }
2977 
2978 static NvBool
2979 AllocImmediateChannelDma(NVDevEvoPtr pDevEvo,
2980                          NVEvoChannelPtr pChannel,
2981                          NvU32 immClass)
2982 {
2983     NVEvoChannelPtr pImmChannel = RmAllocEvoChannel(
2984         pDevEvo,
2985         DRF_DEF64(_EVO, _CHANNEL_MASK, _WINDOW_IMM, _ENABLE),
2986         pChannel->instance, immClass);
2987 
2988     if (!pImmChannel) {
2989         return FALSE;
2990     }
2991 
2992     pChannel->imm.type = NV_EVO_IMM_CHANNEL_DMA;
2993     pChannel->imm.u.dma = pImmChannel;
2994 
2995     return TRUE;
2996 }
2997 
2998 NvBool nvRMAllocateBaseChannels(NVDevEvoPtr pDevEvo)
2999 {
3000     int i;
3001     NvU32 baseClass = 0;
3002     NvU32 head;
3003 
3004     static const NvU32 baseChannelDmaClasses[] = {
3005         NV927C_BASE_CHANNEL_DMA,
3006     };
3007 
3008     for (i = 0; i < ARRAY_LEN(baseChannelDmaClasses); i++) {
3009         if (nvRmEvoClassListCheck(pDevEvo, baseChannelDmaClasses[i])) {
3010             baseClass = baseChannelDmaClasses[i];
3011             break;
3012         }
3013     }
3014 
3015     if (!baseClass) {
3016         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported base display class");
3017         return FALSE;
3018     }
3019 
3020     for (head = 0; head < pDevEvo->numHeads; head++) {
3021         pDevEvo->base[head] = RmAllocEvoChannel(
3022             pDevEvo,
3023             DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE),
3024             head, baseClass);
3025 
3026         if (!pDevEvo->base[head]) {
3027             return FALSE;
3028         }
3029     }
3030 
3031     return TRUE;
3032 }
3033 
3034 NvBool nvRMAllocateOverlayChannels(NVDevEvoPtr pDevEvo)
3035 {
3036     NvU32 immMapSize;
3037     NvU32 head;
3038 
3039     if (!nvRmEvoClassListCheck(pDevEvo,
3040                                NV917E_OVERLAY_CHANNEL_DMA)) {
3041         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
3042                     "Unsupported overlay display class");
3043         return FALSE;
3044     }
3045 
3046     nvAssert(nvRmEvoClassListCheck(pDevEvo, NV917B_OVERLAY_IMM_CHANNEL_PIO));
3047 
3048     /*
3049      * EvoSetImmPointOut91() will interpret the PIO mapping as a pointer
3050      * to GK104DispOverlayImmControlPio and access the SetPointOut and
3051      * Update fields, which is safe as long as SetPointOut and Update are
3052      * at consistent offsets.
3053      */
3054     nvAssert(offsetof(GK104DispOverlayImmControlPio, SetPointsOut) ==
3055              NV917B_SET_POINTS_OUT(NVKMS_LEFT));
3056     nvAssert(offsetof(GK104DispOverlayImmControlPio, Update) ==
3057              NV917B_UPDATE);
3058     immMapSize =
3059         NV_MAX(NV917B_SET_POINTS_OUT(NVKMS_LEFT), NV917B_UPDATE) + sizeof(NvV32);
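
    /*
     * Illustrative sketch (not part of this function): given the layout
     * asserted above, EvoSetImmPointOut91() is expected to use each
     * per-subdevice mapping roughly like this:
     *
     *     GK104DispOverlayImmControlPio *pImmPio = pPio->control[sd];
     *     pImmPio->SetPointsOut[NVKMS_LEFT] = ...;  // program the point-out
     *     pImmPio->Update = ...;                    // kick off the update
     */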
3060 
3061     for (head = 0; head < pDevEvo->numHeads; head++) {
3062         pDevEvo->overlay[head] = RmAllocEvoChannel(
3063             pDevEvo,
3064             DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE),
3065             head, NV917E_OVERLAY_CHANNEL_DMA);
3066 
3067         if (!pDevEvo->overlay[head]) {
3068             return FALSE;
3069         }
3070 
3071         if (!AllocImmediateChannelPio(pDevEvo, pDevEvo->overlay[head],
3072                                       NV917B_OVERLAY_IMM_CHANNEL_PIO, head, immMapSize)) {
3073             return FALSE;
3074         }
3075     }
3076 
3077     return TRUE;
3078 }
3079 
3080 /*!
3081  * This allocates a syncpt per channel. This syncpt is dedicated
3082  * to this channel. As NVKMS only supports syncpoints for SOC devices,
3083  * in which there's only one device/sub-device/disp, sd can be 0.
3084  */
3085 static NvBool AllocSyncpt(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel,
3086         NVEvoSyncpt *pEvoSyncptOut)
3087 {
3088     NvU32 hSyncptCtxDma, hSyncpt, id;
3089     NvKmsSyncPtOpParams params = { };
3090     NvBool result;
3091 
3092     if (!pDevEvo->supportsSyncpts) {
3093         return FALSE;
3094     }
3095 
    /*! Set the syncpt id to invalid to avoid an unintended free */
3097     pEvoSyncptOut->id = NVKMS_SYNCPT_ID_INVALID;
3098 
3099     /*
3100      * HW engine on Orin is called HOST1X, all syncpts are in internal RAM of
3101      * HOST1X.
3102      * OP_ALLOC calls into HOST1X driver and allocs a syncpt resource.
3103      */
3104     params.alloc.syncpt_name = "nvkms-fence";
3105     result = nvkms_syncpt_op(NVKMS_SYNCPT_OP_ALLOC, &params);
3106     if (!result) {
3107         return FALSE;
3108     }
3109     id = params.alloc.id;
3110 
    /* The post-syncpt max value is tracked locally. Initialize it here. */
3112     params.read_minval.id = id;
3113     result = nvkms_syncpt_op(NVKMS_SYNCPT_OP_READ_MINVAL, &params);
3114     if (!result) {
3115         goto failed;
3116     }
3117 
3118     result = nvRmEvoAllocAndBindSyncpt(pDevEvo, pChannel, id,
3119                                        &hSyncpt, &hSyncptCtxDma);
3120     if (!result) {
3121         goto failed;
3122     }
3123 
3124     /*! Populate syncpt values to return. */
3125     pEvoSyncptOut->id = id;
3126     pEvoSyncptOut->hCtxDma = hSyncptCtxDma;
3127     pEvoSyncptOut->hSyncpt = hSyncpt;
3128     pEvoSyncptOut->channelMask = pChannel->channelMask;
3129     pEvoSyncptOut->syncptMaxVal = params.read_minval.minval;
3130 
3131     return TRUE;
3132 
3133 failed:
    /*! Put back the syncpt since the operation failed */
3135     params.put.id = id;
3136     nvkms_syncpt_op(NVKMS_SYNCPT_OP_PUT, &params);
3137     return FALSE;
3138 }
3139 
3140 static NvBool AllocPostSyncptPerChannel(NVDevEvoPtr pDevEvo,
3141                                         NVEvoChannelPtr pChannel)
3142 {
3143     if (!pDevEvo->supportsSyncpts) {
3144         return TRUE;
3145     }
3146 
3147     return AllocSyncpt(pDevEvo, pChannel, &pChannel->postSyncpt);
3148 }
3149 
3150 NvBool nvRMAllocateWindowChannels(NVDevEvoPtr pDevEvo)
3151 {
3152     int index;
3153     NvU32 window, sd;
3154 
3155     static const struct {
3156         NvU32 windowClass;
3157         NvU32 immClass;
3158     } windowChannelClasses[] = {
3159         { NVC67E_WINDOW_CHANNEL_DMA,
3160           NVC67B_WINDOW_IMM_CHANNEL_DMA },
3161         { NVC57E_WINDOW_CHANNEL_DMA,
3162           NVC57B_WINDOW_IMM_CHANNEL_DMA },
3163         { NVC37E_WINDOW_CHANNEL_DMA,
3164           NVC37B_WINDOW_IMM_CHANNEL_DMA },
3165     }, *c = NULL;
3166 
3167     for (index = 0; index < ARRAY_LEN(windowChannelClasses); index++) {
3168         if (nvRmEvoClassListCheck(pDevEvo,
3169                     windowChannelClasses[index].windowClass)) {
3170 
3171             c = &windowChannelClasses[index];
3172 
3173             nvAssert(nvRmEvoClassListCheck(pDevEvo, c->immClass));
3174             break;
3175         }
3176     }
3177 
3178     if (index >= ARRAY_LEN(windowChannelClasses)) {
3179         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported window display class");
3180         return FALSE;
3181     }
3182 
3183     nvAssert(pDevEvo->numWindows <= ARRAY_LEN(pDevEvo->window));
3184     for (window = 0; window < pDevEvo->numWindows; window++) {
3185         pDevEvo->window[window] = RmAllocEvoChannel(
3186             pDevEvo,
3187             DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE),
3188             window, c->windowClass);
3189 
3190         if (!pDevEvo->window[window]) {
3191             return FALSE;
3192         }
3193 
3194         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3195             NvU32 ret = nvRmEvoBindDispContextDMA(pDevEvo,
3196                                             pDevEvo->window[window],
3197                                             pDevEvo->window[window]->notifiersDma[sd].ctxHandle);
3198             if (ret != NVOS_STATUS_SUCCESS) {
3199                 nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
3200                         "Failed to bind(window channel) display engine notify context DMA: 0x%x (%s)",
3201                         ret, nvstatusToString(ret));
3202                 return FALSE;
3203             }
3204         }
3205 
3206         if (!AllocImmediateChannelDma(pDevEvo, pDevEvo->window[window],
3207                                       c->immClass)) {
3208             return FALSE;
3209         }
3210 
3211         if (!AllocPostSyncptPerChannel(pDevEvo,
3212                                        pDevEvo->window[window])) {
3213             return FALSE;
3214         }
3215     }
3216 
3217     return TRUE;
3218 }
3219 
3220 static void EvoFreeCoreChannel(NVDevEvoRec *pDevEvo, NVEvoChannel *pChannel)
3221 {
3222     NvU32 sd;
3223 
3224     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3225         NvU32 ret;
3226 
3227         if (!pDevEvo->pSubDevices[sd]->pCoreDma) {
3228             continue;
3229         }
3230 
3231         ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
3232                                  pDevEvo->pSubDevices[sd]->handle,
3233                                  pChannel->pb.channel_handle,
3234                                  pDevEvo->pSubDevices[sd]->pCoreDma,
3235                                  0);
3236 
3237         if (ret != NVOS_STATUS_SUCCESS) {
3238             nvEvoLogDevDebug(
3239                 pDevEvo,
3240                 EVO_LOG_ERROR,
3241                 "Failed to unmap NVDisplay core channel memory mapping for ARMed values");
3242         }
3243         pDevEvo->pSubDevices[sd]->pCoreDma = NULL;
3244     }
3245 
3246     RmFreeEvoChannel(pDevEvo, pChannel);
3247 }
3248 
3249 static NVEvoChannel* EvoAllocateCoreChannel(NVDevEvoRec *pDevEvo)
3250 {
3251     NVEvoChannel *pChannel;
3252     NvU32 sd;
3253 
3254     pChannel =
3255         RmAllocEvoChannel(pDevEvo,
3256                           DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE),
3257                           0,
3258                           pDevEvo->coreChannelDma.coreChannelClass);
3259 
3260     if (pChannel == NULL) {
3261         goto failed;
3262     }
3263 
3264     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3265         NvU32 ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
3266                                      pDevEvo->pSubDevices[sd]->handle,
3267                                      pChannel->pb.channel_handle,
3268                                      pDevEvo->coreChannelDma.dmaArmedOffset,
3269                                      pDevEvo->coreChannelDma.dmaArmedSize,
3270                                      (void**)&pDevEvo->pSubDevices[sd]->pCoreDma,
3271                                      DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY));
3272 
3273         if (ret != NVOS_STATUS_SUCCESS) {
3274             nvEvoLogDev(
3275                 pDevEvo,
3276                 EVO_LOG_ERROR,
3277                 "Core channel memory mapping for ARMed values failed: 0x%x (%s)",
3278                 ret, nvstatusToString(ret));
3279             goto failed;
3280         }
3281     }
3282 
3283     return pChannel;
3284 
3285 failed:
3286     if (pChannel != NULL) {
3287         EvoFreeCoreChannel(pDevEvo, pChannel);
3288     }
3289     return NULL;
3290 }
3291 
/* Pre-allocate the vblank syncpts, and store them in NVDispApiHeadStateEvoRec. */
3293 void nvRmAllocCoreRGSyncpts(NVDevEvoPtr pDevEvo)
3294 {
3295 
3296     NVDispEvoPtr pDispEvo = NULL;
3297     NvU32 syncptIdx = 0;
3298 
3299     if (!pDevEvo->supportsSyncpts ||
3300         !pDevEvo->hal->caps.supportsVblankSyncObjects) {
3301         return;
3302     }
3303 
3304     /* If Syncpts are supported, we're on Orin, which only has one display. */
3305     nvAssert(pDevEvo->nDispEvo == 1);
3306     pDispEvo = pDevEvo->pDispEvo[0];
3307 
3308     /* Initialize all heads' vblank sync object counts to zero. */
3309     for (int i = 0; i < pDevEvo->numApiHeads; i++) {
3310         pDispEvo->apiHeadState[i].numVblankSyncObjectsCreated = 0;
3311     }
3312 
3313     /* For each core RG syncpt index: */
3314     for (syncptIdx = 0; syncptIdx < NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD;
3315          syncptIdx++) {
3316         /* For each Head: */
3317         for (int i = 0; i < pDevEvo->numApiHeads; i++) {
3318             NvBool result = FALSE;
3319             NVDispApiHeadStateEvoRec *pApiHeadState =
3320                 &pDispEvo->apiHeadState[i];
3321 
3322             result =
3323                 AllocSyncpt(pDevEvo, pDevEvo->core,
3324                             &pApiHeadState->vblankSyncObjects[syncptIdx].evoSyncpt);
3325             if (!result) {
3326                 /*
3327                  * Stop trying to allocate more syncpts if none are
3328                  * available.
3329                  */
3330                 nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
3331                              "Failed to allocate Core RG Syncpoint at index %d "
3332                              "on Head %d.", syncptIdx, i);
3333                 return;
3334             }
3335 
3336             /* Populate the index of the syncpt in the NVVblankSyncObjectRec. */
3337             pApiHeadState->vblankSyncObjects[syncptIdx].index = syncptIdx;
3338             /* Update the count. */
3339             pApiHeadState->numVblankSyncObjectsCreated = syncptIdx + 1;
3340         }
3341     }
3342 }
3343 
3344 NvBool nvRMSetupEvoCoreChannel(NVDevEvoPtr pDevEvo)
3345 {
3346     NvU32 sd;
3347 
3348     pDevEvo->core = EvoAllocateCoreChannel(pDevEvo);
3349     if (!pDevEvo->core) {
3350         return FALSE;
3351     }
3352 
3353     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3354         // Bind the core notifier ctxDma
3355         NvU32 ret =
3356             nvRmEvoBindDispContextDMA(pDevEvo, pDevEvo->core,
3357                                       pDevEvo->core->notifiersDma[sd].ctxHandle);
3358         if (ret != NVOS_STATUS_SUCCESS) {
3359             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
3360                         "Failed to bind display engine notify context DMA: 0x%x (%s)",
3361                         ret, nvstatusToString(ret));
3362             nvRMFreeEvoCoreChannel(pDevEvo);
3363             return FALSE;
3364         }
3365     }
3366 
3367     nvInitEvoSubDevMask(pDevEvo);
3368 
3369     /*
3370      * XXX NVKMS TODO: Enable core channel event generation; see bug
3371      * 1671139.
3372      */
3373 
3374     // Query the VBIOS head assignments.  Note that this has to happen after the
3375     // core channel is allocated or else RM will return incorrect information
3376     // about dynamic display IDs it allocates for the boot display on DP MST
3377     // devices.
3378     GetVbiosHeadAssignment(pDevEvo);
3379 
3380     return TRUE;
3381 }
3382 
3383 void nvRMFreeBaseChannels(NVDevEvoPtr pDevEvo)
3384 {
3385     NvU32 head;
3386 
3387     for (head = 0; head < pDevEvo->numHeads; head++) {
3388         RmFreeEvoChannel(pDevEvo, pDevEvo->base[head]);
3389         pDevEvo->base[head] = NULL;
3390     }
3391 }
3392 
3393 void nvRMFreeOverlayChannels(NVDevEvoPtr pDevEvo)
3394 {
3395     NvU32 head;
3396 
3397     for (head = 0; head < pDevEvo->numHeads; head++) {
3398         RmFreeEvoChannel(pDevEvo, pDevEvo->overlay[head]);
3399         pDevEvo->overlay[head] = NULL;
3400     }
3401 }
3402 
3403 void nvRMFreeWindowChannels(NVDevEvoPtr pDevEvo)
3404 {
3405     NvU32 window;
3406 
3407     for (window = 0; window < pDevEvo->numWindows; window++) {
3408         nvRmEvoFreeSyncpt(pDevEvo, &pDevEvo->window[window]->postSyncpt);
3409         RmFreeEvoChannel(pDevEvo, pDevEvo->window[window]);
3410         pDevEvo->window[window] = NULL;
3411     }
3412 }
3413 
3414 /* Frees the Core RG Syncpts. */
3415 void nvRmFreeCoreRGSyncpts(NVDevEvoPtr pDevEvo)
3416 {
3417 
3418     NVDispEvoPtr pDispEvo = NULL;
3419 
3420     if (!pDevEvo->supportsSyncpts ||
3421         !pDevEvo->hal->caps.supportsVblankSyncObjects) {
3422         return;
3423     }
3424 
3425     /* We can get here in teardown cases from alloc failures */
3426     if (pDevEvo->nDispEvo == 0) {
3427         return;
3428     }
3429 
3430     /* If Syncpts are supported, we're on Orin, which only has one display. */
3431     nvAssert(pDevEvo->nDispEvo == 1);
3432     pDispEvo = pDevEvo->pDispEvo[0];
3433 
3434     /* For each Head: */
3435     for (int i = 0; i < pDevEvo->numApiHeads; i++) {
3436         /* Free all core RG syncpts. */
3437         NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[i];
3438         for (int j = 0; j < pApiHeadState->numVblankSyncObjectsCreated; j++) {
3439             nvAssert(!pApiHeadState->vblankSyncObjects[j].inUse);
3440             nvRmEvoFreeSyncpt(pDevEvo,
3441                               &pApiHeadState->vblankSyncObjects[j].evoSyncpt);
3442         }
3443         pApiHeadState->numVblankSyncObjectsCreated = 0;
3444     }
3445 }
3446 
3447 void nvRMFreeEvoCoreChannel(NVDevEvoPtr pDevEvo)
3448 {
3449     if (pDevEvo->core != NULL) {
3450         EvoFreeCoreChannel(pDevEvo, pDevEvo->core);
3451         pDevEvo->core = NULL;
3452     }
3453 }
3454 
/* Poll until an EVO channel on a particular subdevice has processed all of its methods */
3456 static NvBool SyncOneEvoChannel(
3457     NVDevEvoPtr pDevEvo,
3458     NVEvoChannelPtr pChan,
3459     NvU32 sd,
3460     NvU32 errorToken)
3461 {
3462     NvBool isMethodPending;
3463     NvU64 startTime = 0;
3464     const NvU32 timeout = 2000000; // microseconds
3465 
3466     do {
3467         if (!pDevEvo->hal->IsChannelMethodPending(pDevEvo, pChan,
3468                                                   sd, &isMethodPending)) {
3469             return FALSE;
3470         }
3471 
3472         if (!isMethodPending) {
3473             break;
3474         }
3475 
3476         if (!nvIsEmulationEvo(pDevEvo)) {
3477             if (nvExceedsTimeoutUSec(&startTime, timeout)) {
3478                 nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
3479                             "Idling display engine timed out: 0x%08x:%d:%d:%d",
3480                             pChan->hwclass, pChan->instance,
3481                             sd, errorToken);
3482                 return FALSE;
3483             }
3484         }
3485 
3486         nvkms_yield();
3487 
3488     } while (TRUE);
3489 
3490     return TRUE;
3491 }
3492 
3493 /* Sync an EVO channel on all subdevices */
3494 NvBool nvRMSyncEvoChannel(
3495     NVDevEvoPtr pDevEvo,
3496     NVEvoChannelPtr pChannel,
3497     NvU32 errorToken)
3498 {
3499     NvBool ret = TRUE;
3500 
3501     if (pChannel) {
3502         NvU32 sd;
3503 
3504         nvDmaKickoffEvo(pChannel);
3505 
3506         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3507             if (!SyncOneEvoChannel(pDevEvo, pChannel, sd, errorToken)) {
3508                 ret = FALSE;
3509             }
3510         }
3511     }
3512 
3513     return ret;
3514 }
3515 
3516 
3517 /*
3518  * Wait for the requested base channel to be idle (no methods pending), and
3519  * call STOP_BASE if the wait times out.
3520  *
3521  * stoppedBase will be TRUE if calling STOP_BASE was necessary and
3522  * successful.
3523  */
3524 NvBool nvRMIdleBaseChannel(NVDevEvoPtr pDevEvo, NvU32 head, NvU32 sd,
3525                            NvBool *stoppedBase)
3526 {
3527     NVEvoChannelPtr pMainLayerChannel =
3528         pDevEvo->head[head].layer[NVKMS_MAIN_LAYER];
3529     NvU64 startTime = 0;
3530     NvBool idleTimedOut = FALSE;
3531     const NvU32 timeout = 2000000; // 2 seconds
3532     NvBool isMethodPending = TRUE;
3533     NvBool ret = TRUE;
3534 
3535     *stoppedBase = FALSE;
3536 
3537     do {
3538         if (!pDevEvo->hal->IsChannelMethodPending(pDevEvo,
3539                                                   pMainLayerChannel,
3540                                                   sd,
3541                                                   &isMethodPending)) {
3542             break;
3543         }
3544 
3545         if (!isMethodPending) {
3546             break;
3547         }
3548 
3549         if (nvExceedsTimeoutUSec(&startTime, timeout)) {
3550             idleTimedOut = TRUE;
3551             break;
3552         }
3553 
3554         nvkms_yield();
3555 
3556     } while (TRUE);
3557 
3558     if (idleTimedOut) {
3559         NVEvoIdleChannelState idleChannelState = { };
3560 
3561         idleChannelState.subdev[sd].channelMask |= pMainLayerChannel->channelMask;
3562         ret = pDevEvo->hal->ForceIdleSatelliteChannel(pDevEvo, &idleChannelState);
3563 
3564         *stoppedBase = ret;
3565     }
3566 
3567     return ret;
3568 }
3569 
3570 
3571 NvBool nvRmEvoClassListCheck(const NVDevEvoRec *pDevEvo, NvU32 classID)
3572 {
3573     const NvU32 *classes = pDevEvo->supportedClasses;
3574 
3575     int i;
3576 
3577     nvAssert(pDevEvo->numClasses > 0);
3578 
3579     for (i = 0; i < pDevEvo->numClasses; i++) {
3580         if (classes[i] == classID) {
3581             return TRUE;
3582         }
3583     }
3584 
3585     return FALSE;
3586 }
3587 
3588 /*!
3589  * This API used to register syncpt object with RM.
3590  * It involves ->
3591  * 1. Allocate a new NV01_MEMORY_SYNCPOINT syncpt object.
3592  * 2. Allocate a new ctxdma descriptor for the syncpt object.
3593  * 3. Bind the ctxdma entry to the channel.
3594  */
3595 NvBool nvRmEvoAllocAndBindSyncpt(
3596     NVDevEvoRec *pDevEvo,
3597     NVEvoChannel *pChannel,
3598     NvU32 id,
3599     NvU32 *pSyncptHandle,
3600     NvU32 *pSyncptCtxDmaHandle)
3601 {
3602     return FALSE;
3603 }
3604 
3605 static void FreeSyncptHandle(
3606     NVDevEvoRec *pDevEvo,
3607     NVEvoSyncpt *pSyncpt)
3608 {
3609     nvRmApiFree(nvEvoGlobal.clientHandle,
3610                 pDevEvo->deviceHandle,
3611                 pSyncpt->hSyncpt);
3612     nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
3613                        pSyncpt->hSyncpt);
3614     pSyncpt->hSyncpt = 0;
3615 
3616     nvRmApiFree(nvEvoGlobal.clientHandle,
3617                 pDevEvo->deviceHandle,
3618                 pSyncpt->hCtxDma);
3619     nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
3620                        pSyncpt->hCtxDma);
3621     pSyncpt->hCtxDma = 0;
3622 }
3623 
3624 /*!
3625  * This API used to unregister syncpt object with given channel.
3626  * It searches global table, and when finds that for given channel, syncpt
3627  * is registered, then frees it.
3628  */
3629 void nvRmEvoFreePreSyncpt(
3630     NVDevEvoRec *pDevEvo,
3631     NVEvoChannel *pChannel)
3632 {
3633     NvU32 i;
3634     NvBool isChannelIdle = NV_FALSE;
3635 
3636     if (pChannel == NULL) {
3637         return;
3638     }
3639 
3640     if (!pDevEvo->supportsSyncpts) {
3641         return;
3642     }
3643 
3644     if (pChannel->channelMask == 0) {
3645         return;
3646     }
3647 
3648     pDevEvo->hal->IsChannelIdle(
3649         pDevEvo, pChannel, 0, &isChannelIdle);
3650 
3651     if (isChannelIdle == NV_FALSE) {
3652         return;
3653     }
3654 
3655     /*! Find pre-syncpt and free it */
3656     for (i = 0; i < NV_SYNCPT_GLOBAL_TABLE_LENGTH; i++) {
3657 
3658         pDevEvo->preSyncptTable[i].channelMask &= ~pChannel->channelMask;
3659         if (pDevEvo->preSyncptTable[i].channelMask == 0 &&
3660             pDevEvo->preSyncptTable[i].hCtxDma != 0) {
3661 
3662             /*! Free handles */
3663             FreeSyncptHandle(pDevEvo, &pDevEvo->preSyncptTable[i]);
3664         }
3665     }
3666 }
3667 
3668 static NvBool GarbageCollectSyncptHelperOneChannel(
3669     NVDevEvoRec *pDevEvo,
3670     NvU32 sd,
3671     NVEvoChannel *pChannel,
3672     NVEvoSyncpt *pSyncpt,
3673     NVEvoChannelMask *pIdledChannelMask)
3674 {
3675     NvBool isChannelIdle = FALSE;
3676 
3677     if ((pChannel->channelMask & pSyncpt->channelMask) == 0) {
3678         return TRUE;
3679     }
3680 
3681     if ((*pIdledChannelMask) & pChannel->channelMask) {
3682         goto done;
3683     }
3684 
3685     /*! Check whether channel is idle. */
3686     pDevEvo->hal->IsChannelIdle(pDevEvo, pChannel, sd, &isChannelIdle);
3687 
3688     if (!isChannelIdle) {
3689         return FALSE;
3690     }
3691 
    /*! Record the idled channel mask to use in the next check */
3693     *pIdledChannelMask |= pChannel->channelMask;
3694 
3695 done:
3696     pSyncpt->channelMask &= ~pChannel->channelMask;
3697     return TRUE;
3698 }
3699 
3700 static NvBool GarbageCollectSyncptHelperOneSyncpt(
3701     NVDevEvoRec *pDevEvo,
3702     NVEvoSyncpt *pSyncpt,
3703     NVEvoChannelMask *pIdledChannelMask)
3704 {
3705     NvBool ret = TRUE;
3706     NvU32 head, sd;
3707 
3708     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3709 
3710         for (head = 0; head < pDevEvo->numHeads; head++) {
3711             NvU32 layer;
3712 
3713             /*!
3714              * If a given channel isn't idle, continue to check if this syncpt
3715              * is used on other channels which may be idle.
3716              */
3717             for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
3718                 if (!GarbageCollectSyncptHelperOneChannel(
3719                         pDevEvo,
3720                         sd,
3721                         pDevEvo->head[head].layer[layer],
3722                         pSyncpt,
3723                         &pIdledChannelMask[sd])) {
3724                     ret = FALSE;
3725                 }
3726             }
3727         }
3728     }
3729 
3730     return ret;
3731 }
3732 
3733 /*!
3734  * This API is used to unregister the given syncpt object.
3735  */
3736 void nvRmEvoFreeSyncpt(
3737     NVDevEvoRec *pDevEvo,
3738     NVEvoSyncpt *pEvoSyncpt)
3739 {
3740     if ((pEvoSyncpt == NULL) || !pDevEvo->supportsSyncpts ||
3741         (pEvoSyncpt->id == NVKMS_SYNCPT_ID_INVALID)) {
3742         return;
3743     }
3744 
    /*! Put back the nvhost reference on the syncpt id */
3746     NvKmsSyncPtOpParams params = { };
3747     params.put.id = pEvoSyncpt->id;
3748     nvkms_syncpt_op(NVKMS_SYNCPT_OP_PUT, &params);
3749 
3750     /*! Free handles */
3751     FreeSyncptHandle(pDevEvo, pEvoSyncpt);
3752 }
3753 
3754 /*!
3755  * This API try to find free syncpt and then unregisters it.
3756  * It searches global table, and when finds that all channels using this
3757  * syncpt are idle then frees it. It makes sure that syncpt is not part
3758  * of current flip.
3759  */
3760 NvBool nvRmGarbageCollectSyncpts(
3761     NVDevEvoRec *pDevEvo)
3762 {
3763     NvU32 i;
3764     NvBool freedSyncpt = FALSE;
3765     NVEvoChannelMask idledChannelMask[NVKMS_MAX_SUBDEVICES] = { 0 };
3766 
3767     if (!pDevEvo->supportsSyncpts) {
3768         return FALSE;
3769     }
3770 
3771     for (i = 0; i < NV_SYNCPT_GLOBAL_TABLE_LENGTH; i++) {
3772 
3773         NvBool allLayersIdle = NV_TRUE;
3774 
3775         if (pDevEvo->pAllSyncptUsedInCurrentFlip != NULL) {
3776             if (pDevEvo->pAllSyncptUsedInCurrentFlip[i]) {
3777                 /*! syncpt is part of current flip, so skip it */
3778                 continue;
3779             }
3780         }
3781 
3782         if (pDevEvo->preSyncptTable[i].hCtxDma == 0) {
3783             /*! syncpt isn't registered, so skip it */
3784             continue;
3785         }
3786 
3787         allLayersIdle = GarbageCollectSyncptHelperOneSyncpt(
3788                             pDevEvo,
3789                             &pDevEvo->preSyncptTable[i],
3790                             idledChannelMask);
3791 
3792         if (allLayersIdle) {
3793             /*! Free handles */
3794             FreeSyncptHandle(pDevEvo, &pDevEvo->preSyncptTable[i]);
3795             freedSyncpt = TRUE;
3796         }
3797     }
3798 
3799     return freedSyncpt;
3800 }
3801 
3802 NvU32 nvRmEvoBindDispContextDMA(
3803     NVDevEvoPtr pDevEvo,
3804     NVEvoChannelPtr pChannel,
3805     NvU32 hCtxDma)
3806 {
3807     NV0002_CTRL_BIND_CONTEXTDMA_PARAMS params = { };
3808     NvU32 ret;
3809     NvBool retryOnlyOnce = TRUE;
3810 
3811     params.hChannel = pChannel->pb.channel_handle;
3812 
3813 retryOnce:
3814     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3815                          hCtxDma,
3816                          NV0002_CTRL_CMD_BIND_CONTEXTDMA,
3817                          &params, sizeof(params));
3818     if (ret != NVOS_STATUS_SUCCESS) {
3819         /*!
3820          * syncpts (lazily freed) occupy space in the disp ctxDma hash
3821          * table, and therefore may cause bind ctxDma to fail.
3822          * Free any unused syncpts and try again.
3823          */
3824         if (retryOnlyOnce) {
            /*! Try to free syncpts only once */
3826             if (nvRmGarbageCollectSyncpts(pDevEvo)) {
3827                 retryOnlyOnce = FALSE;
3828                 goto retryOnce;
3829             }
3830         }
3831     }
3832     return ret;
3833 }
3834 
3835 
3836 NvU32 nvRmEvoAllocateAndBindDispContextDMA(
3837     NVDevEvoPtr pDevEvo,
3838     NvU32 hMemory,
3839     const enum NvKmsSurfaceMemoryLayout layout,
3840     NvU64 limit)
3841 {
3842     NV_CONTEXT_DMA_ALLOCATION_PARAMS ctxdmaParams = { };
3843     NvU32 hDispCtxDma;
3844     NvU32 flags = DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE);
3845     NvU32 ret;
3846     int h;
3847 
    /* Each surface to be displayed needs its own ctxdma. */
3849     nvAssert(pDevEvo->displayHandle != 0);
3850 
3851     nvAssert(pDevEvo->core);
3852     nvAssert(pDevEvo->core->pb.channel_handle);
3853 
3854     nvAssert(hMemory);
3855     nvAssert(limit);
3856 
3857     switch (layout) {
3858         case NvKmsSurfaceMemoryLayoutBlockLinear:
3859             flags |= DRF_DEF(OS03, _FLAGS, _PTE_KIND, _BL);
3860             break;
3861         case NvKmsSurfaceMemoryLayoutPitch:
3862             flags |= DRF_DEF(OS03, _FLAGS, _PTE_KIND, _PITCH);
3863             break;
3864     }
3865 
3866     hDispCtxDma = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
3867 
3868     ctxdmaParams.hMemory = hMemory;
3869     ctxdmaParams.flags = flags;
3870     ctxdmaParams.offset = 0;
3871     ctxdmaParams.limit = limit;
3872 
3873     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
3874                        pDevEvo->deviceHandle,
3875                        hDispCtxDma,
3876                        NV01_CONTEXT_DMA,
3877                        &ctxdmaParams);
3878 
3879     if (ret != NVOS_STATUS_SUCCESS) {
3880         goto cleanup_this_handle_and_fail;
3881     }
3882 
3883     ret = nvRmEvoBindDispContextDMA(pDevEvo, pDevEvo->core, hDispCtxDma);
3884 
3885     if (ret != NVOS_STATUS_SUCCESS) {
3886         goto free_this_handle_and_fail;
3887     }
3888 
3889     for (h = 0; h < pDevEvo->numHeads; h++) {
3890         NvU32 layer;
3891 
3892         for (layer = 0; layer < pDevEvo->head[h].numLayers; layer++) {
3893             if (pDevEvo->head[h].layer[layer]) {
3894                 nvAssert(pDevEvo->head[h].layer[layer]->pb.channel_handle);
3895 
3896                 ret = nvRmEvoBindDispContextDMA(pDevEvo,
3897                                                 pDevEvo->head[h].layer[layer],
3898                                                 hDispCtxDma);
3899 
3900                 if (ret != NVOS_STATUS_SUCCESS) {
3901                     goto free_this_handle_and_fail;
3902                 }
3903             }
3904         }
3905     }
3906 
3907     return hDispCtxDma;
3908 
3909 free_this_handle_and_fail:
3910 
3911     nvRmApiFree(nvEvoGlobal.clientHandle,
3912                 nvEvoGlobal.clientHandle, hDispCtxDma);
3913 
    /* Fall through */
3915 cleanup_this_handle_and_fail:
3916 
3917     nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hDispCtxDma);
3918 
3919     return 0;
3920 }
3921 
3922 void nvRmEvoFreeDispContextDMA(NVDevEvoPtr pDevEvo,
3923                                NvU32 *hDispCtxDma)
3924 {
3925     if (*hDispCtxDma) {
3926         nvRmApiFree(nvEvoGlobal.clientHandle,
3927                     nvEvoGlobal.clientHandle, *hDispCtxDma);
3928         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, *hDispCtxDma);
3929         *hDispCtxDma = 0;
3930     }
3931 }
3932 
3933 void nvRmEvoUnMapVideoMemory(NVDevEvoPtr pDevEvo, NvU32 memoryHandle,
3934                              void *subDeviceAddress[NVKMS_MAX_SUBDEVICES])
3935 {
3936     unsigned int sd;
3937     NvU32 ret;
3938 
3939     if (memoryHandle == 0) {
3940         return;
3941     }
3942 
3943     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3944         if (subDeviceAddress[sd] != NULL) {
3945             ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
3946                                      pDevEvo->pSubDevices[sd]->handle,
3947                                      memoryHandle,
3948                                      subDeviceAddress[sd],
3949                                      0);
3950 
3951             if (ret != NVOS_STATUS_SUCCESS) {
3952                 nvAssert(!"UnmapMemory() failed");
3953             }
3954         }
3955 
3956         subDeviceAddress[sd] = NULL;
3957     }
3958 }
3959 
3960 NvBool nvRmEvoMapVideoMemory(NVDevEvoPtr pDevEvo,
3961                              NvU32 memoryHandle, NvU64 size,
3962                              void *subDeviceAddress[NVKMS_MAX_SUBDEVICES],
3963                              NvU32 subDeviceMask)
3964 {
3965     NvU32 ret;
3966 
3967     unsigned int sd;
3968 
3969     nvkms_memset(subDeviceAddress, 0, sizeof(void*) * NVKMS_MAX_SUBDEVICES);
3970 
3971     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3972         void *address = NULL;
3973 
3974         if (((1 << sd) & subDeviceMask) == 0) {
3975             continue;
3976         }
3977 
3978         ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
3979                                pDevEvo->pSubDevices[sd]->handle,
3980                                memoryHandle,
3981                                0,
3982                                size,
3983                                &address,
3984                                0);
3985 
3986         if (ret != NVOS_STATUS_SUCCESS) {
3987             nvRmEvoUnMapVideoMemory(pDevEvo, memoryHandle, subDeviceAddress);
3988             return FALSE;
3989         }
3990         subDeviceAddress[sd] = address;
3991     }
3992     return TRUE;
3993 }
3994 
3995 static NvBool GetClassList(NVDevEvoPtr pDevEvo)
3996 {
3997     NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS classListParams = { 0 };
3998     NvU32 ret;
3999 
4000     classListParams.numClasses = 0;
4001     classListParams.classList = NvP64_NULL;
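
    /*
     * Two-pass query: the first call, with a NULL classList, returns only the
     * number of supported classes; storage is then allocated and the second
     * call fills in the actual class IDs.
     */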
4002 
4003     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4004                          pDevEvo->deviceHandle,
4005                          NV0080_CTRL_CMD_GPU_GET_CLASSLIST,
4006                          &classListParams, sizeof(classListParams));
4007 
4008     if (ret != NVOS_STATUS_SUCCESS) {
4009         return FALSE;
4010     }
4011 
4012     pDevEvo->supportedClasses =
4013         nvCalloc(classListParams.numClasses, sizeof(NvU32));
4014 
4015     if (pDevEvo->supportedClasses == NULL) {
4016         return FALSE;
4017     }
4018 
4019     classListParams.classList = NV_PTR_TO_NvP64(pDevEvo->supportedClasses);
4020 
4021     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4022                          pDevEvo->deviceHandle,
4023                          NV0080_CTRL_CMD_GPU_GET_CLASSLIST,
4024                          &classListParams, sizeof(classListParams));
4025 
4026     if (ret != NVOS_STATUS_SUCCESS) {
4027         nvFree(pDevEvo->supportedClasses);
4028         pDevEvo->supportedClasses = NULL;
4029         return FALSE;
4030     }
4031 
4032     pDevEvo->numClasses = classListParams.numClasses;
4033 
4034     return TRUE;
4035 }
4036 
4037 static NvBool GetEngineListOneSubDevice(NVDevEvoPtr pDevEvo, NvU32 sd)
4038 {
4039     NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS engineListParams = { 0 };
4040     NvU32 ret;
4041     NVSubDeviceEvoPtr pSubDevice = pDevEvo->pSubDevices[sd];
4042     size_t length;
4043 
4044     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4045                          pSubDevice->handle,
4046                          NV2080_CTRL_CMD_GPU_GET_ENGINES_V2,
4047                          &engineListParams, sizeof(engineListParams));
4048 
4049     if (ret != NVOS_STATUS_SUCCESS) {
4050         return FALSE;
4051     }
4052 
4053     if (engineListParams.engineCount == 0) {
4054         return TRUE;
4055     }
4056 
4057     length = engineListParams.engineCount * sizeof(NvU32);
4058 
4059     pSubDevice->supportedEngines = nvAlloc(length);
4060 
4061     if (pSubDevice->supportedEngines == NULL) {
4062         return FALSE;
4063     }
4064 
4065     nvkms_memcpy(pSubDevice->supportedEngines,
4066                  engineListParams.engineList,
4067                  length);
4068     pSubDevice->numEngines = engineListParams.engineCount;
4069 
4070     return TRUE;
4071 }
4072 
4073 static NvBool GetEngineList(NVDevEvoPtr pDevEvo)
4074 {
4075     int sd;
4076 
4077     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
4078         if (!GetEngineListOneSubDevice(pDevEvo, sd)) {
4079             return FALSE;
4080         }
4081     }
4082 
4083     return TRUE;
4084 }
4085 
4086 static void FreeSubDevice(NVDevEvoPtr pDevEvo, NVSubDeviceEvoPtr pSubDevice)
4087 {
4088     if (pSubDevice == NULL) {
4089         return;
4090     }
4091 
4092     if (pSubDevice->handle != 0) {
4093         nvRmApiFree(nvEvoGlobal.clientHandle,
4094                     pDevEvo->deviceHandle,
4095                     pSubDevice->handle);
4096         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSubDevice->handle);
4097     }
4098 
4099     if (pSubDevice->gpuString[0] != '\0') {
4100         nvEvoLogDebug(EVO_LOG_INFO, "Freed %s", pSubDevice->gpuString);
4101     }
4102 
4103     nvFree(pSubDevice->supportedEngines);
4104 
4105     nvFree(pSubDevice);
4106 }
4107 
4108 static NVSubDeviceEvoPtr AllocSubDevice(NVDevEvoPtr pDevEvo, const NvU32 sd)
4109 {
4110     NV2080_ALLOC_PARAMETERS subdevAllocParams = { 0 };
4111     NV2080_CTRL_GPU_GET_ID_PARAMS getIdParams = { 0 };
4112     NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidParams = NULL;
4113     NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS pciInfoParams = { 0 };
4114     NvU32 ret;
4115     const char *uuid;
4116 
4117     NVSubDeviceEvoPtr pSubDevice = nvCalloc(1, sizeof(*pSubDevice));
4118 
4119     if (pSubDevice == NULL) {
4120         goto failure;
4121     }
4122 
4123     pSubDevice->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4124 
4125     subdevAllocParams.subDeviceId = sd;
4126 
4127     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
4128                        pDevEvo->deviceHandle,
4129                        pSubDevice->handle,
4130                        NV20_SUBDEVICE_0,
4131                        &subdevAllocParams);
4132 
4133     if (ret != NVOS_STATUS_SUCCESS) {
4134         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize subDevice");
4135         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSubDevice->handle);
4136         pSubDevice->handle = 0;
4137         goto failure;
4138     }
4139 
4140     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4141                          pSubDevice->handle,
4142                          NV2080_CTRL_CMD_GPU_GET_ID,
4143                          &getIdParams,
4144                          sizeof(getIdParams));
4145 
4146     if (ret != NVOS_STATUS_SUCCESS) {
4147         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to identify GPU");
4148         goto failure;
4149     }
4150 
4151     pSubDevice->gpuId = getIdParams.gpuId;
4152 
4153     /* Query the UUID for the gpuString. */
4154 
4155     pGidParams = nvCalloc(1, sizeof(*pGidParams));
4156 
4157     if (pGidParams == NULL) {
4158         goto failure;
4159     }
4160 
4161     pGidParams->flags =
4162         DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _ASCII) |
4163         DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1);
4164 
4165     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4166                          pSubDevice->handle,
4167                          NV2080_CTRL_CMD_GPU_GET_GID_INFO,
4168                          pGidParams,
4169                          sizeof(*pGidParams));
4170 
4171     if (ret != NVOS_STATUS_SUCCESS) {
4172         /* If the query failed, make sure the UUID is cleared out. */
4173         nvkms_memset(pGidParams, 0, sizeof(*pGidParams));
4174     }
4175 
4176     /* Query the PCI bus address for the gpuString. */
4177 
4178     pciInfoParams.gpuId = pSubDevice->gpuId;
4179 
4180     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4181                          nvEvoGlobal.clientHandle,
4182                          NV0000_CTRL_CMD_GPU_GET_PCI_INFO,
4183                          &pciInfoParams, sizeof(pciInfoParams));
4184 
4185     if (ret != NVOS_STATUS_SUCCESS) {
4186         /* If the query failed, make sure the PCI bus address is cleared out. */
4187         nvkms_memset(&pciInfoParams, 0, sizeof(pciInfoParams));
4188     }
4189 
4190     pSubDevice->gpuLogIndex = nvGetGpuLogIndex();
4191 
4192     /*
4193      * Create the gpuString, using this example format:
4194      * GPU:0 (GPU-af2422f5-2719-29de-567f-ac899cf458c4) @ PCI:0000:01:00.0
4195      */
4196     if ((pGidParams->data[0] == '\0') || (pGidParams->length == 0)) {
4197         uuid = "";
4198     } else {
4199         uuid = (const char *) pGidParams->data;
4200     }
4201 
4202     nvkms_snprintf(pSubDevice->gpuString, sizeof(pSubDevice->gpuString),
4203                    "GPU:%d (%s) @ PCI:%04x:%02x:%02x.0",
4204                    pSubDevice->gpuLogIndex, uuid,
4205                    pciInfoParams.domain,
4206                    pciInfoParams.bus,
4207                    pciInfoParams.slot);
4208 
4209     pSubDevice->gpuString[sizeof(pSubDevice->gpuString) - 1] = '\0';
4210 
4211     nvEvoLogDebug(EVO_LOG_INFO, "Allocated %s", pSubDevice->gpuString);
4212     nvFree(pGidParams);
4213 
4214     return pSubDevice;
4215 
4216 failure:
4217     FreeSubDevice(pDevEvo, pSubDevice);
4218     nvFree(pGidParams);
4219 
4220     return NULL;
4221 }
4222 
4223 static void CloseDevice(NVDevEvoPtr pDevEvo)
4224 {
4225     NvU32 i;
4226 
4227     for (i = 0; i < ARRAY_LEN(pDevEvo->openedGpuIds); i++) {
4228         const NvU32 gpuId = pDevEvo->openedGpuIds[i];
4229 
4230         if (gpuId == NV0000_CTRL_GPU_INVALID_ID) {
4231             break;
4232         }
4233 
4234         nvkms_close_gpu(gpuId);
4235         pDevEvo->openedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
4236     }
4237 }
4238 
4239 static NvBool OpenTegraDevice(NVDevEvoPtr pDevEvo)
4240 {
4241     NV0000_CTRL_GPU_GET_ID_INFO_PARAMS params = { 0 };
4242     nv_gpu_info_t *gpu_info = NULL;
4243     NvU32 ret, gpu_count = 0;
4244 
4245     nvAssert(pDevEvo->deviceId == NVKMS_DEVICE_ID_TEGRA);
4246 
4247     gpu_info = nvAlloc(NV_MAX_GPUS * sizeof(*gpu_info));
4248     if (gpu_info == NULL) {
4249         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate GPU ids arrays");
4250         goto fail;
4251     }
4252 
4253     gpu_count = nvkms_enumerate_gpus(gpu_info);
4254     if (gpu_count == 0) {
4255         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "No NVIDIA GPUs found");
4256         goto fail;
4257     }
4258 
4259     if (gpu_count != 1) {
4260         // XXX If the system has both Tegra/iGPU and dGPU, it is not
4261         // guaranteed to find the Tegra, so fail.
4262         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "More than one NVIDIA GPU found "
4263                     "in a Tegra configuration where only Tegra is expected.");
4264         goto fail;
4265     }
4266 
4267     if (!nvkms_open_gpu(gpu_info[0].gpu_id)) {
4268         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to open GPU");
4269         goto fail;
4270     }
4271 
4272     pDevEvo->openedGpuIds[0] = gpu_info[0].gpu_id;
4273     params.gpuId = gpu_info[0].gpu_id;
4274 
4275     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4276                          nvEvoGlobal.clientHandle,
4277                          NV0000_CTRL_CMD_GPU_GET_ID_INFO,
4278                          &params, sizeof(params));
4279 
4280     if (ret != NVOS_STATUS_SUCCESS) {
4281         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to find GPU ID");
4282         goto fail;
4283     }
4284 
4285     pDevEvo->deviceId = params.deviceInstance;
4286 
4287     nvFree(gpu_info);
4288     return TRUE;
4289 
4290 fail:
4291     nvFree(gpu_info);
4292     CloseDevice(pDevEvo);
4293     return FALSE;
4294 }
4295 
4296 static NvBool OpenDevice(NVDevEvoPtr pDevEvo)
4297 {
4298     NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS idParams = { };
4299     NvU32 ret, i, gpuIdIndex = 0;
4300 
4301     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4302                          nvEvoGlobal.clientHandle,
4303                          NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS,
4304                          &idParams, sizeof(idParams));
4305 
4306     if (ret != NVOS_STATUS_SUCCESS) {
4307         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to query attached GPUs");
4308         goto fail;
4309     }
4310 
4311     ct_assert(ARRAY_LEN(pDevEvo->openedGpuIds) >= ARRAY_LEN(idParams.gpuIds));
4312 
4313     for (i = 0; i < ARRAY_LEN(idParams.gpuIds); i++) {
4314         NV0000_CTRL_GPU_GET_ID_INFO_PARAMS params = { 0 };
4315         const NvU32 gpuId = idParams.gpuIds[i];
4316 
4317         if (gpuId == NV0000_CTRL_GPU_INVALID_ID) {
4318             break;
4319         }
4320 
4321         nvAssert(pDevEvo->openedGpuIds[gpuIdIndex] ==
4322                  NV0000_CTRL_GPU_INVALID_ID);
4323 
4324         params.gpuId = gpuId;
4325 
4326         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4327                              nvEvoGlobal.clientHandle,
4328                              NV0000_CTRL_CMD_GPU_GET_ID_INFO,
4329                              &params, sizeof(params));
4330 
4331         if (ret != NVOS_STATUS_SUCCESS) {
4332             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to find GPU ID");
4333             goto fail;
4334         }
4335 
4336         if (pDevEvo->deviceId != params.deviceInstance) {
4337             continue;
4338         }
4339 
4340         if (!nvkms_open_gpu(gpuId)) {
4341             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to open GPU");
4342             goto fail;
4343         }
4344 
4345         pDevEvo->openedGpuIds[gpuIdIndex++] = gpuId;
4346     }
4347 
4348     return TRUE;
4349 
4350 fail:
4351     CloseDevice(pDevEvo);
4352     return FALSE;
4353 }
4354 
4355 static void FreeGpuVASpace(NVDevEvoPtr pDevEvo)
4356 {
4357     if (pDevEvo->nvkmsGpuVASpace != 0) {
4358         nvRmApiFree(nvEvoGlobal.clientHandle,
4359                     pDevEvo->deviceHandle,
4360                     pDevEvo->nvkmsGpuVASpace);
4361         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4362                            pDevEvo->nvkmsGpuVASpace);
4363         pDevEvo->nvkmsGpuVASpace = 0;
4364     }
4365 }
4366 
4367 static NvBool AllocGpuVASpace(NVDevEvoPtr pDevEvo)
4368 {
4369     NvU32 ret;
4370     NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS memoryVirtualParams = { };
4371 
4372     pDevEvo->nvkmsGpuVASpace =
4373         nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4374 
4375     memoryVirtualParams.offset = 0;
4376     memoryVirtualParams.limit = 0;          // no limit on VA space
4377     memoryVirtualParams.hVASpace = 0;       // client's default VA space
4378 
4379     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
4380                        pDevEvo->deviceHandle,
4381                        pDevEvo->nvkmsGpuVASpace,
4382                        NV01_MEMORY_VIRTUAL,
4383                        &memoryVirtualParams);
4384 
4385     if (ret != NVOS_STATUS_SUCCESS) {
4386         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4387                            pDevEvo->nvkmsGpuVASpace);
4388         pDevEvo->nvkmsGpuVASpace = 0;
4389         return FALSE;
4390     }
4391 
4392     return TRUE;
4393 }
4394 
4395 static void NonStallInterruptCallback(
4396     void *arg,
4397     void *pEventDataVoid,
4398     NvU32 hEvent,
4399     NvU32 data,
4400     NV_STATUS status)
4401 {
4402     /*
4403      * We are called within resman's altstack and locks.  Schedule a separate
4404      * callback to execute with the nvkms_lock.
4405      *
4406      * XXX It might be nice to use a lighter-weight lock here to check if any
4407      * requests are pending in any NvKmsDeferredRequestFifo before scheduling
4408      * nvKmsServiceNonStallInterrupt().
4409      */
4410 
4411     (void) nvkms_alloc_timer_with_ref_ptr(
4412         nvKmsServiceNonStallInterrupt, /* callback */
4413         arg, /* argument (this is a ref_ptr to a pDevEvo) */
4414         0,   /* dataU32 */
4415         0);  /* usec */
4416 }
4417 
4418 static void UnregisterNonStallInterruptCallback(NVDevEvoPtr pDevEvo)
4419 {
4420     if (pDevEvo->nonStallInterrupt.handle != 0) {
4421         NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS
4422             eventNotificationParams = { 0 };
4423 
4424         eventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD;
4425         eventNotificationParams.action =
4426             NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
4427         nvRmApiControl(nvEvoGlobal.clientHandle,
4428                        pDevEvo->pSubDevices[0]->handle,
4429                        NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
4430                        &eventNotificationParams,
4431                        sizeof(eventNotificationParams));
4432 
4433         nvRmApiFree(nvEvoGlobal.clientHandle,
4434                     pDevEvo->pSubDevices[0]->handle,
4435                     pDevEvo->nonStallInterrupt.handle);
4436 
4437         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4438                            pDevEvo->nonStallInterrupt.handle);
4439     }
4440 
4441     pDevEvo->nonStallInterrupt.handle = 0;
4442 }
4443 
4444 static NvBool RegisterNonStallInterruptCallback(NVDevEvoPtr pDevEvo)
4445 {
4446     NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS eventNotificationParams = { 0 };
4447 
4448     pDevEvo->nonStallInterrupt.handle =
4449         nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4450 
4451     if (!nvRmRegisterCallback(pDevEvo,
4452                               &pDevEvo->nonStallInterrupt.callback,
4453                               pDevEvo->ref_ptr,
4454                               pDevEvo->pSubDevices[0]->handle,
4455                               pDevEvo->nonStallInterrupt.handle,
4456                               NonStallInterruptCallback,
4457                               NV2080_NOTIFIERS_FIFO_EVENT_MTHD |
4458                               NV01_EVENT_NONSTALL_INTR)) {
4459         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
4460                          "Failed to register nonstall interrupt callback");
4461         goto failure_free_handle;
4462     }
4463 
4464     // Setup event notification
4465     eventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD;
4466     eventNotificationParams.action =
4467         NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
4468 
4469     if (nvRmApiControl(nvEvoGlobal.clientHandle,
4470                        pDevEvo->pSubDevices[0]->handle,
4471                        NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
4472                        &eventNotificationParams,
4473                        sizeof(eventNotificationParams))
4474         != NVOS_STATUS_SUCCESS) {
4475         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
4476                          "Failed to set nonstall interrupt notification");
4477         goto failure_free_callback_and_handle;
4478     }
4479 
4480     return TRUE;
4481 
4482 failure_free_callback_and_handle:
4483     nvRmApiFree(nvEvoGlobal.clientHandle,
4484                 pDevEvo->pSubDevices[0]->handle,
4485                 pDevEvo->nonStallInterrupt.handle);
4486 failure_free_handle:
4487     nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4488                        pDevEvo->nonStallInterrupt.handle);
4489     pDevEvo->nonStallInterrupt.handle = 0;
4490     return FALSE;
4491 }
4492 
4493 NvBool nvRmAllocDeviceEvo(NVDevEvoPtr pDevEvo,
4494                           const struct NvKmsAllocDeviceRequest *pRequest)
4495 {
4496     NV0080_ALLOC_PARAMETERS allocParams = { 0 };
4497     NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS getNumSubDevicesParams = { 0 };
4498     NvU32 ret, sd;
4499 
4500     if (nvEvoGlobal.clientHandle == 0) {
4501         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Client handle not initialized");
4502         goto failure;
4503     }
4504 
4505     /*
4506      * RM deviceIds should be within [0,NV_MAX_DEVICES); check
4507      * that the client provided a value in range, and add one when
4508      * using deviceId as the per-device unique identifier in the
4509      * RM handle allocator: the identifier is expected to be != 0.
4510      */
4511 
4512     if ((pRequest->deviceId >= NV_MAX_DEVICES) &&
4513         (pRequest->deviceId != NVKMS_DEVICE_ID_TEGRA)) {
4514         goto failure;
4515     }
4516 
4517     pDevEvo->dpTimer = nvDPAllocTimer(pDevEvo);
4518     if (!pDevEvo->dpTimer) {
4519         goto failure;
4520     }
4521 
4522     if (!nvInitUnixRmHandleAllocator(&pDevEvo->handleAllocator,
4523                                      nvEvoGlobal.clientHandle,
4524                                      pRequest->deviceId + 1)) {
4525         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize handles");
4526         goto failure;
4527     }
4528 
4529     pDevEvo->deviceHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4530 
4531     pDevEvo->deviceId = pRequest->deviceId;
4532     pDevEvo->sli.mosaic = pRequest->sliMosaic;
4533 
4534     if (pRequest->deviceId == NVKMS_DEVICE_ID_TEGRA) {
4535         /*
4536          * On Tegra, NVKMS client is not desktop RM client, so
4537          * enumerate and open first GPU.
4538          */
4539         if (!OpenTegraDevice(pDevEvo)) {
4540             goto failure;
4541         }
4542 
4543         pDevEvo->usesTegraDevice = TRUE;
4544     } else if (!OpenDevice(pDevEvo)) {
4545         goto failure;
4546     }
4547 
4548     allocParams.deviceId = pDevEvo->deviceId;
4549 
4550     /* Give NVKMS a private GPU virtual address space. */
4551     allocParams.hClientShare = nvEvoGlobal.clientHandle;
4552 
4553     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
4554                        nvEvoGlobal.clientHandle,
4555                        pDevEvo->deviceHandle,
4556                        NV01_DEVICE_0,
4557                        &allocParams);
4558 
4559     if (ret != NVOS_STATUS_SUCCESS) {
4560         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize device");
4561         goto failure;
4562     }
4563 
4564     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4565                          pDevEvo->deviceHandle,
4566                          NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES,
4567                          &getNumSubDevicesParams,
4568                          sizeof(getNumSubDevicesParams));
4569 
4570     if (ret != NVOS_STATUS_SUCCESS) {
4571         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
4572                     "Failed to determine number of GPUs");
4573         goto failure;
4574     }
4575 
4576     ct_assert(NVKMS_MAX_SUBDEVICES == NV_MAX_SUBDEVICES);
4577     if ((getNumSubDevicesParams.numSubDevices == 0) ||
4578         (getNumSubDevicesParams.numSubDevices >
4579          ARRAY_LEN(pDevEvo->pSubDevices))) {
4580         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported number of GPUs: %d",
4581                     getNumSubDevicesParams.numSubDevices);
4582         goto failure;
4583     }
4584 
4585     pDevEvo->numSubDevices = getNumSubDevicesParams.numSubDevices;
4586 
4587     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
4588         pDevEvo->pSubDevices[sd] = AllocSubDevice(pDevEvo, sd);
4589         if (pDevEvo->pSubDevices[sd] == NULL) {
4590             goto failure;
4591         }
4592     }
4593 
4594     pDevEvo->gpuLogIndex = pDevEvo->pSubDevices[0]->gpuLogIndex;
4595 
4596     if (!GetClassList(pDevEvo) || !GetEngineList(pDevEvo)) {
4597         goto failure;
4598     }
4599 
4600     if (!RegisterNonStallInterruptCallback(pDevEvo)) {
4601         goto failure;
4602     }
4603 
4604     if (!AllocGpuVASpace(pDevEvo)) {
4605         goto failure;
4606     }
4607 
4608     if (!nvAllocNvPushDevice(pDevEvo)) {
4609         goto failure;
4610     }
4611 
4612     return TRUE;
4613 
4614 failure:
4615     nvRmFreeDeviceEvo(pDevEvo);
4616     return FALSE;
4617 }
4618 
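/*
 * Tear down everything allocated by nvRmAllocDeviceEvo(), in roughly the
 * reverse order of allocation.  This is also used as the failure path of
 * nvRmAllocDeviceEvo(), so each step must tolerate a partially-initialized
 * pDevEvo (NULL pointers and 0 handles).
 */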
4619 void nvRmFreeDeviceEvo(NVDevEvoPtr pDevEvo)
4620 {
4621     NvU32 sd;
4622 
4623     nvFreeNvPushDevice(pDevEvo);
4624 
4625     FreeGpuVASpace(pDevEvo);
4626 
4627     UnregisterNonStallInterruptCallback(pDevEvo);
4628 
4629     nvFree(pDevEvo->supportedClasses);
4630     pDevEvo->supportedClasses = NULL;
4631 
4632     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
4633         FreeSubDevice(pDevEvo, pDevEvo->pSubDevices[sd]);
4634         pDevEvo->pSubDevices[sd] = NULL;
4635     }
4636 
4637     if (pDevEvo->deviceHandle != 0) {
4638         nvRmApiFree(nvEvoGlobal.clientHandle,
4639                     nvEvoGlobal.clientHandle,
4640                     pDevEvo->deviceHandle);
4641         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->deviceHandle);
4642         pDevEvo->deviceHandle = 0;
4643     }
4644 
4645     nvTearDownUnixRmHandleAllocator(&pDevEvo->handleAllocator);
4646 
4647     nvDPFreeTimer(pDevEvo->dpTimer);
4648     pDevEvo->dpTimer = NULL;
4649 
4650     CloseDevice(pDevEvo);
4651 }
4652 
/*
 * Set up the DIFR notifier listener to drive framebuffer prefetching once the
 * hardware becomes idle enough.
 */
4657 NvBool nvRmRegisterDIFREventHandler(NVDevEvoPtr pDevEvo)
4658 {
4659     pDevEvo->difrPrefetchEventHandler =
4660         nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4661 
4662     if (pDevEvo->difrPrefetchEventHandler != 0) {
4663         NvBool registered;
4664 
4665         /*
4666          * Allocate event callback.
4667          */
4668         registered = nvRmRegisterCallback(
4669             pDevEvo,
4670             &pDevEvo->difrPrefetchCallback,
4671             pDevEvo->ref_ptr,
4672             pDevEvo->pSubDevices[0]->handle,
4673             pDevEvo->difrPrefetchEventHandler,
4674             DifrPrefetchEvent,
4675             NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST);
4676 
4677         /*
4678          * Configure event notification.
4679          */
4680         if (registered) {
4681             NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS prefetchEventParams = { 0 };
4682 
4683             prefetchEventParams.event = NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST;
4684             prefetchEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
4685 
4686             if (nvRmApiControl(nvEvoGlobal.clientHandle,
4687                                pDevEvo->pSubDevices[0]->handle,
4688                                NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
4689                                &prefetchEventParams,
4690                                sizeof(prefetchEventParams))
4691                 == NVOS_STATUS_SUCCESS) {
4692                 return TRUE;
4693 
4694             }
4695         }
4696         nvRmUnregisterDIFREventHandler(pDevEvo);
4697     }
4698     return FALSE;
4699 }
4700 
4701 void nvRmUnregisterDIFREventHandler(NVDevEvoPtr pDevEvo)
4702 {
4703     if (pDevEvo->difrPrefetchEventHandler != 0) {
4704         NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS prefetchEventParams = { 0 };
4705 
4706         prefetchEventParams.event = NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST;
4707         prefetchEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
4708 
4709         nvRmApiControl(nvEvoGlobal.clientHandle,
4710                        pDevEvo->pSubDevices[0]->handle,
4711                        NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
4712                        &prefetchEventParams,
4713                        sizeof(prefetchEventParams));
4714 
4715         nvRmApiFree(nvEvoGlobal.clientHandle,
4716                     pDevEvo->pSubDevices[0]->handle,
4717                     pDevEvo->difrPrefetchEventHandler);
4718 
4719         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4720                            pDevEvo->difrPrefetchEventHandler);
4721         pDevEvo->difrPrefetchEventHandler = 0;
4722     }
4723 }
4724 
4725 
4726 /*!
4727  * Determine whether all the dpys in the dpyIdList can be activated together.
4728  *
 * \param[in]   pDispEvo         The disp on which to test the dpys.
 * \param[in]   dpyIdList        The dpys to test.
 *
 * \return      TRUE if all dpys can be driven simultaneously.
4733  */
4734 NvBool nvRmIsPossibleToActivateDpyIdList(NVDispEvoPtr pDispEvo,
4735                                          const NVDpyIdList dpyIdList)
4736 {
4737     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4738     NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS mapParams = { 0 };
4739     NvU32 ret = 0;
4740 
4741     /* Trivially accept an empty dpyIdList. */
4742 
4743     if (nvDpyIdListIsEmpty(dpyIdList)) {
4744         return TRUE;
4745     }
4746 
4747     /* don't even try if EVO isn't initialized (e.g. during a VT switch) */
4748 
4749     if (!pDevEvo->gpus) {
4750         return FALSE;
4751     }
4752 
4753     /* build a mask of all the displays to use */
4754 
4755     mapParams.subDeviceInstance = pDispEvo->displayOwner;
4756 
4757     mapParams.displayMask = nvDpyIdListToNvU32(dpyIdList);
4758 
4759     /* ask RM for the head routing */
4760 
4761     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4762                          pDevEvo->displayCommonHandle,
4763                          NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP,
4764                          &mapParams,
4765                          sizeof(mapParams));
4766 
4767     if ((ret != NVOS_STATUS_SUCCESS) || (mapParams.displayMask == 0)) {
4768         char *dpyIdListStr = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList);
4769         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
4770                     "The requested configuration of display devices "
4771                     "(%s) is not supported on this GPU.",
4772                     nvSafeString(dpyIdListStr, "unknown"));
4773         nvFree(dpyIdListStr);
4774 
4775         return FALSE;
4776     }
4777 
4778     /* make sure we got everything we asked for */
4779 
4780     if (mapParams.displayMask != nvDpyIdListToNvU32(dpyIdList)) {
4781         char *requestedDpyIdListStr;
4782         char *returnedDpyIdListStr;
4783 
4784         requestedDpyIdListStr =
4785             nvGetDpyIdListStringEvo(pDispEvo, dpyIdList);
4786 
4787         returnedDpyIdListStr =
4788             nvGetDpyIdListStringEvo(pDispEvo,
4789                                     nvNvU32ToDpyIdList(mapParams.displayMask));
4790 
4791         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
4792                     "The requested configuration of display devices "
4793                     "(%s) is not supported on this GPU; "
4794                     "%s is recommended, instead.",
4795                     nvSafeString(requestedDpyIdListStr, "unknown"),
4796                     nvSafeString(returnedDpyIdListStr, "unknown"));
4797 
4798         nvFree(requestedDpyIdListStr);
4799         nvFree(returnedDpyIdListStr);
4800 
4801         return FALSE;
4802     }
4803 
4804     return TRUE;
4805 }
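
/*
 * A hypothetical caller sketch (the real call sites are in the modeset
 * validation paths elsewhere in nvkms; the local name below is illustrative):
 *
 *     if (!nvRmIsPossibleToActivateDpyIdList(pDispEvo, requestedDpys)) {
 *         // reject the requested combination of dpys
 *     }
 */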
4806 
4807 
4808 /*!
4809  * Tell the RM to save or restore the console VT state.
4810  *
 * \param[in]   cmd    The VT switch action to request from RM.
4812  *
4813  * \return      TRUE on success, FALSE on failure.
4814  */
4815 NvBool nvRmVTSwitch(NVDevEvoPtr pDevEvo, NvU32 cmd)
4816 {
4817     NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS params = { 0 };
4818     NvU32 ret;
4819 
4820     params.cmd = cmd;
4821     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4822                          pDevEvo->deviceHandle,
4823                          NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH,
4824                          &params, sizeof(params));
4825 
4826     if (ret != NVOS_STATUS_SUCCESS) {
4827         return FALSE;
4828     }
4829 
4830     return TRUE;
4831 }
4832 
4833 NvBool nvRmGetVTFBInfo(NVDevEvoPtr pDevEvo)
4834 {
4835     NvU32 ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4836                      pDevEvo->deviceHandle,
4837                      NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO,
4838                      &pDevEvo->vtFbInfo, sizeof(pDevEvo->vtFbInfo));
4839 
4840     if (ret != NVOS_STATUS_SUCCESS) {
4841         return FALSE;
4842     }
4843 
4844     return TRUE;
4845 }
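
/*
 * A minimal sketch of the expected console save/import sequence (the actual
 * call sites live elsewhere in nvkms; this only illustrates the ordering):
 *
 *     if (nvRmVTSwitch(pDevEvo,
 *                      NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE) &&
 *         nvRmGetVTFBInfo(pDevEvo)) {
 *         nvRmImportFbConsoleMemory(pDevEvo);
 *     }
 */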
4846 
4847 /*!
4848  * Import the current framebuffer console memory, for later use with NVKMS-based
4849  * console restore.
4850  *
 * Note this relies on pDevEvo->vtFbInfo having been populated by
 * nvRmGetVTFBInfo().
4852  *
4853  * There are several cases in which NVKMS cannot perform console restore:
4854  *
 * - Anything other than linear frame buffer consoles (e.g., VGA text modes,
 *   non-linear or paletted graphical modes, etc.).  For those, resman cannot
 *   query the framebuffer dimensions from the kernel,
 *   NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE returns empty fbInfo
 *   params, and consequently pDevEvo->vtFbInfo.width == 0.
4860  *
4861  * - Linear frame buffer console with an unaligned pitch.  In this case,
4862  *   nvEvoRegisterSurface() will fail: it has to ensure the surface registration
4863  *   satisfies the EVO method interface requirement that PITCH surfaces are
4864  *   multiples of 256 bytes.  Consequently, pDevEvo->fbConsoleSurfaceHandle will
4865  *   be 0.
4866  *
4867  * - Depth 8 frame buffer consoles: these are color index, and cannot be
4868  *   supported by NVKMS console restore because they require the VGA palette,
4869  *   which exists in special RAM in the VGA core, so we can't name it with a
4870  *   ctxdma that we can feed into EVO's LUT.  The pFbInfo->depth switch below
4871  *   will reject depth 8.
4872  */
4873 void nvRmImportFbConsoleMemory(NVDevEvoPtr pDevEvo)
4874 {
4875     NvU32 ret;
4876     struct NvKmsRegisterSurfaceParams registration = { };
4877     const NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pFbInfo = &pDevEvo->vtFbInfo;
4878     NvHandle hMemory;
4879 
4880     nvAssert(pDevEvo->fbConsoleSurfaceHandle == 0);
4881 
4882     if (pFbInfo->width == 0) {
4883         // No console memory to map.
4884         return;
4885     }
4886 
4887     switch (pFbInfo->depth) {
4888     case 15:
4889         registration.request.format = NvKmsSurfaceMemoryFormatX1R5G5B5;
4890         break;
4891     case 16:
4892         registration.request.format = NvKmsSurfaceMemoryFormatR5G6B5;
4893         break;
4894     case 32:
4895         // That's a lie, it's really depth 24. Fall through.
4896     case 24:
4897         registration.request.format = NvKmsSurfaceMemoryFormatX8R8G8B8;
4898         break;
4899     default:
4900         nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN,
4901                          "Unsupported framebuffer console depth %d",
4902                          pFbInfo->depth);
4903         return;
4904     }
4905 
4906     hMemory = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4907     if (hMemory == 0) {
4908         return;
4909     }
4910 
4911     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
4912                        pDevEvo->deviceHandle,
4913                        hMemory,
4914                        NV01_MEMORY_FRAMEBUFFER_CONSOLE,
4915                        NULL);
4916 
4917     if (ret != NVOS_STATUS_SUCCESS) {
4918         nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN,
4919                          "Failed to map framebuffer console memory");
4920         goto done;
4921     }
4922 
4923     registration.request.useFd = FALSE;
4924     registration.request.rmClient = nvEvoGlobal.clientHandle;
4925     registration.request.widthInPixels = pFbInfo->width;
4926     registration.request.heightInPixels = pFbInfo->height;
4927     registration.request.layout = NvKmsSurfaceMemoryLayoutPitch;
4928 
4929     registration.request.planes[0].u.rmObject = hMemory;
4930     registration.request.planes[0].pitch = pFbInfo->pitch;
4931     registration.request.planes[0].rmObjectSizeInBytes =
4932         (NvU64) pFbInfo->height * (NvU64) pFbInfo->pitch;
4933 
4934     nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, &registration,
4935                          NvHsMapPermissionsNone);
4936 
4937     pDevEvo->fbConsoleSurfaceHandle = registration.reply.surfaceHandle;
4938 
4939     // nvEvoRegisterSurface dups the handle, so we can free the one we just
4940     // imported.
4941     nvRmApiFree(nvEvoGlobal.clientHandle,
4942                 nvEvoGlobal.clientHandle,
4943                 hMemory);
4944 done:
4945     nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hMemory);
4946 }
4947 
4948 static void LogAuxPacket(const NVDispEvoRec *pDispEvo, const DPAUXPACKET *pkt)
4949 {
4950     const char *req, *rep;
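    /* Each reply byte is formatted as "xx " (3 chars); +1 for the NUL. */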
4951     char str[DP_MAX_MSG_SIZE * 3 + 1];
4952     char *p = str;
4953     int i;
4954 
4955     switch (DRF_VAL(_DP, _AUXLOGGER, _REQUEST_TYPE, pkt->auxEvents)) {
4956         case NV_DP_AUXLOGGER_REQUEST_TYPE_AUXWR:
4957             req = "auxwr";
4958             break;
4959         case NV_DP_AUXLOGGER_REQUEST_TYPE_AUXRD:
4960             req = "auxrd";
4961             break;
4962         case NV_DP_AUXLOGGER_REQUEST_TYPE_MOTWR:
4963             // MOT is "middle of transaction", which is just another type of i2c
4964             // access.
4965             req = "motwr";
4966             break;
4967         case NV_DP_AUXLOGGER_REQUEST_TYPE_I2CWR:
4968             req = "i2cwr";
4969             break;
4970         case NV_DP_AUXLOGGER_REQUEST_TYPE_MOTRD:
4971             req = "motrd";
4972             break;
4973         case NV_DP_AUXLOGGER_REQUEST_TYPE_I2CRD:
4974             req = "i2crd";
4975             break;
4976         default:
4977             // Only log I2C and AUX transactions.
4978             return;
4979     }
4980 
4981     switch (DRF_VAL(_DP, _AUXLOGGER, _REPLY_TYPE, pkt->auxEvents)) {
4982         case NV_DP_AUXLOGGER_REPLY_TYPE_NULL:
4983             rep = "none";
4984             break;
4985         case NV_DP_AUXLOGGER_REPLY_TYPE_SB_ACK:
4986             rep = "sb_ack";
4987             break;
4988         case NV_DP_AUXLOGGER_REPLY_TYPE_RETRY:
4989             rep = "retry";
4990             break;
4991         case NV_DP_AUXLOGGER_REPLY_TYPE_TIMEOUT:
4992             rep = "timeout";
4993             break;
4994         case NV_DP_AUXLOGGER_REPLY_TYPE_DEFER:
4995             rep = "defer";
4996             break;
4997         case NV_DP_AUXLOGGER_REPLY_TYPE_DEFER_TO:
4998             rep = "defer_to";
4999             break;
5000         case NV_DP_AUXLOGGER_REPLY_TYPE_ACK:
5001             rep = "ack";
5002             break;
5003         case NV_DP_AUXLOGGER_REPLY_TYPE_ERROR:
5004             rep = "error";
5005             break;
5006         default:
5007         case NV_DP_AUXLOGGER_REPLY_TYPE_UNKNOWN:
5008             rep = "unknown";
5009             break;
5010     }
5011 
5012     for (i = 0; i < pkt->auxMessageReplySize; i++) {
5013         p += nvkms_snprintf(p, str + sizeof(str) - p, "%02x ",
5014                             pkt->auxPacket[i]);
5015     }
5016 
5017     nvAssert(p < str + sizeof(str));
5018     *p = '\0';
5019 
5020     nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
5021                  "%04u: port %u @ 0x%05x: [%10u] %s %2u, [%10u] %-8s %s",
5022                  pkt->auxCount, pkt->auxOutPort, pkt->auxPortAddress,
5023                  pkt->auxRequestTimeStamp, req,
5024                  pkt->auxMessageReqSize,
5025                  pkt->auxReplyTimeStamp, rep,
5026                  str);
5027 }
5028 
5029 /*!
 * This "attribute" queries the RM DisplayPort AUX channel log and dumps it to
 * the kernel log.  *pValue is set to TRUE if any RM AUX transactions were
 * logged, and FALSE otherwise.
 *
 * This attribute is intended to be queried in a loop for as long as *pValue
 * reads TRUE.
5035  *
5036  * \return TRUE if the query succeeded (even if no events were logged).
5037  * \return FALSE if the query failed.
5038  */
5039 NvBool nvRmQueryDpAuxLog(NVDispEvoRec *pDispEvo, NvS64 *pValue)
5040 {
5041     NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *pParams =
5042         nvCalloc(sizeof(*pParams), 1);
5043     NvU32 status;
5044     int i;
5045     NvBool ret = FALSE;
5046 
5047     pDispEvo->dpAuxLoggingEnabled = TRUE;
5048     *pValue = FALSE;
5049 
5050     if (!pParams) {
5051         return FALSE;
5052     }
5053 
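    /*
     * RM returns at most MAX_LOGS_PER_POLL entries per query, which is why
     * this attribute is meant to be polled repeatedly until it reports that
     * nothing more was logged.
     */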
5054     pParams->subDeviceInstance = pDispEvo->displayOwner;
5055     pParams->dpAuxBufferReadSize = MAX_LOGS_PER_POLL;
5056 
5057     status = nvRmApiControl(nvEvoGlobal.clientHandle,
5058                             pDispEvo->pDevEvo->displayCommonHandle,
5059                             NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA,
5060                             pParams, sizeof(*pParams));
5061     if (status != NVOS_STATUS_SUCCESS) {
5062         goto done;
5063     }
5064 
5065     nvAssert(pParams->dpNumMessagesRead <= MAX_LOGS_PER_POLL);
5066     for (i = 0; i < pParams->dpNumMessagesRead; i++) {
5067         const DPAUXPACKET *pkt = &pParams->dpAuxBuffer[i];
5068 
5069         switch (DRF_VAL(_DP, _AUXLOGGER, _EVENT_TYPE, pkt->auxEvents)) {
5070             case NV_DP_AUXLOGGER_EVENT_TYPE_AUX:
5071                 LogAuxPacket(pDispEvo, pkt);
5072                 break;
5073             case NV_DP_AUXLOGGER_EVENT_TYPE_HOT_PLUG:
5074                 nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
5075                              "%04u: port %u [%10u] hotplug",
5076                              pkt->auxCount, pkt->auxOutPort,
5077                              pkt->auxRequestTimeStamp);
5078                 break;
5079             case NV_DP_AUXLOGGER_EVENT_TYPE_HOT_UNPLUG:
5080                 nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
5081                              "%04u: port %u [%10u] unplug",
5082                              pkt->auxCount, pkt->auxOutPort,
5083                              pkt->auxRequestTimeStamp);
5084                 break;
5085             case NV_DP_AUXLOGGER_EVENT_TYPE_IRQ:
5086                 nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
5087                              "%04u: port %u [%10u] irq",
5088                              pkt->auxCount, pkt->auxOutPort,
5089                              pkt->auxRequestTimeStamp);
5090                 break;
5091             default:
5092                 nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
5093                              "%04u: port %u [%10u] unknown event",
5094                              pkt->auxCount, pkt->auxOutPort,
5095                              pkt->auxRequestTimeStamp);
5096                 break;
5097         }
5098 
5099         *pValue = TRUE;
5100     }
5101 
5102     ret = TRUE;
5103 
5104 done:
5105     nvFree(pParams);
5106     return ret;
5107 }
5108 
5109 /*!
5110  * Return the GPU's current PTIMER, or 0 if the query fails.
5111  */
5112 NvU64 nvRmGetGpuTime(NVDevEvoPtr pDevEvo)
5113 {
5114     const NvU32 sd = 0;
    NV2080_CTRL_TIMER_GET_TIME_PARAMS params = { 0 };
    NvU32 ret;
5118 
5119     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5120                          pDevEvo->pSubDevices[sd]->handle,
5121                          NV2080_CTRL_CMD_TIMER_GET_TIME,
5122                          &params, sizeof(params));
5123 
5124     if (ret != NVOS_STATUS_SUCCESS) {
5125         nvEvoLogDebug(EVO_LOG_ERROR, "Failed to query GPU time, ret = %d", ret);
5126         return 0;
5127     }
5128 
5129     return params.time_nsec;
5130 }
5131 
5132 NvBool nvRmSetGc6Allowed(NVDevEvoPtr pDevEvo, NvBool allowed)
5133 {
5134     NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params = { };
5135     NvU32 sd;
5136 
5137     if (allowed == pDevEvo->gc6Allowed) {
5138         return TRUE;
5139     }
5140 
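    /*
     * RM keeps the GPU out of GC6 while its blocker refcount is nonzero:
     * disallowing GC6 increments the refcount, allowing it again decrements
     * it.
     */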
5141     params.action = allowed ? NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC :
5142                               NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC;
5143 
5144     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
5145         NvU32 ret = nvRmApiControl(
5146                         nvEvoGlobal.clientHandle,
5147                         pDevEvo->pSubDevices[sd]->handle,
5148                         NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT,
5149                         &params, sizeof(params));
5150         if (ret != NVOS_STATUS_SUCCESS) {
5151             // XXX This is catastrophic, is there a good way to unravel?
5152             nvEvoLogDevDebug(
5153                 pDevEvo, EVO_LOG_ERROR,
5154                 "Failed to modify GC6 blocker refcount, sd = %d, ret = %x",
5155                 sd, ret);
5156             return FALSE;
5157         }
5158     }
5159 
5160     pDevEvo->gc6Allowed = allowed;
5161 
5162     /*
5163      * If we are just now disallowing GC6, it's possible that we previously
5164      * entered GC6 and invalidated display channel state. Re-initialize it here
5165      * to ensure that future modesets are successful.
5166      */
5167     if (!allowed && pDevEvo->core) {
5168         NvU32 channelIdx;
5169 
5170         pDevEvo->hal->InitChannel(pDevEvo, pDevEvo->core);
5171         pDevEvo->coreInitMethodsPending = TRUE;
5172 
5173         for (channelIdx = 0; channelIdx < pDevEvo->numHeads; channelIdx++) {
5174             // XXX We should InitChannel() for all per-head channels when coming
5175             // out of GC6.
5176             pDevEvo->hal->InitChannel(
5177                 pDevEvo, pDevEvo->head[channelIdx].layer[NVKMS_MAIN_LAYER]);
5178         }
5179     }
5180 
5181     return TRUE;
5182 }
5183 
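/*
 * Wrapper around the public NVRgLine1CallbackRec that carries the RM
 * bookkeeping needed to service and later tear down the callback: the
 * NV0092_RG_LINE_CALLBACK object handle, the disp/head the callback was
 * registered on, and the ref_ptr handed to RM as the callback parameter.
 */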
5184 typedef struct _NVRmRgLine1CallbackRec {
5185     NVRgLine1CallbackRec base;
5186     struct nvkms_ref_ptr *ref_ptr;
5187     NvU32 rmHandle;
5188     NVDispEvoRec *pDispEvo;
5189     NvU32 head;
5190 } NVRmRgLine1CallbackRec;
5191 
5192 static void RGLine1ServiceInterrupt(void *dataPtr, NvU32 dataU32)
5193 {
    NVRmRgLine1CallbackRec *pRmCallback = (NVRmRgLine1CallbackRec*)dataPtr;
    pRmCallback->base.pCallbackProc(pRmCallback->pDispEvo, pRmCallback->head,
                                    &pRmCallback->base);
5197 }
5198 
5199 /*!
5200  * Receive RG line 1 interrupt notification from resman.
5201  *
5202  * This function is registered as the kernel callback function from resman when
5203  * the RG line 1 interrupt is generated.
5204  *
 * This function is called within resman's context, so we schedule a zero-delay
 * timer callback to process the swapgroup check and release without holding
 * the resman lock.
5208  */
5209 static void RGLine1InterruptCallback(NvU32 rgIntrLine, void *param1,
5210                                       NvBool bIsIrqlIsr /* unused */)
5211 {
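    /*
     * param1 is the ref_ptr to the NVRmRgLine1CallbackRec registered in
     * nvRmAddRgLine1Callback(); the deferred timer resolves it back to the
     * callback record in RGLine1ServiceInterrupt(), outside of resman's
     * context.
     */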
5212     (void) nvkms_alloc_timer_with_ref_ptr(
5213         RGLine1ServiceInterrupt, /* callback */
5214         param1, /* argument (this is a ref_ptr to a NVRmRgLine1CallbackRec*) */
5215         0,  /* dataU32 */
5216         0); /* usec */
5217 }
5218 
5219 /*!
5220  * Register an RM callback function for the RG line 1 interrupt.
5221  *
5222  * \param[in]   pDispEvo          The display on which to allocate the callback
5223  *
5224  * \param[in]   head              The head on which to allocate the callback
5225  *
 * \param[in]   pCallbackProc     The callback function pointer to be registered
 *
 * \param[in]   pUserData         Opaque pointer passed back to the callback
 *
5228  * \return      Pointer to callback object on success, NULL on failure. This same
5229  *              pointer must be used to unregister the callback.
5230  */
5231 NVRgLine1CallbackPtr
5232 nvRmAddRgLine1Callback(NVDispEvoRec *pDispEvo,
5233                        NvU32 head,
5234                        NVRgLine1CallbackProc pCallbackProc,
5235                        void *pUserData)
5236 {
5237     NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS rgLineParams = { 0 };
5238     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5239     NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
5240     NvU32 ret;
5241     NVRmRgLine1CallbackRec *pRmCallback = nvCalloc(1, sizeof(*pRmCallback));
5242 
5243     if (pRmCallback == NULL) {
5244         goto failed;
5245     }
5246 
5247     pRmCallback->ref_ptr = nvkms_alloc_ref_ptr(pRmCallback);
5248     if (pRmCallback->ref_ptr == NULL) {
5249         goto failed;
5250     }
5251     pRmCallback->base.pCallbackProc = pCallbackProc;
5252     pRmCallback->base.pUserData = pUserData;
5253     pRmCallback->rmHandle = handle;
5254     pRmCallback->pDispEvo = pDispEvo;
5255     pRmCallback->head = head;
5256 
5257     rgLineParams.subDeviceInstance = pDispEvo->displayOwner;
5258     rgLineParams.head = head;
5259     rgLineParams.rgLineNum = 1;
5260     rgLineParams.pCallbkFn = RGLine1InterruptCallback;
5261     rgLineParams.pCallbkParams = pRmCallback->ref_ptr;
5262 
5263     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
5264                        pDevEvo->displayCommonHandle,
5265                        handle,
5266                        NV0092_RG_LINE_CALLBACK,
5267                        &rgLineParams);
5268 
5269     if (ret == NVOS_STATUS_SUCCESS) {
5270         return &pRmCallback->base;
5271     }
5272 
5273     nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
5274                      "Failed to enable RG line interrupt, ret: %d", ret);
5275     /* fall through */
5276 
5277 failed:
5278     if (pRmCallback != NULL) {
5279         nvkms_free_ref_ptr(pRmCallback->ref_ptr);
5280         nvFree(pRmCallback);
5281     }
5282 
5283     nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
5284 
5285     return NULL;
5286 }
5287 
5288 /*!
5289  * Unregister an RM callback function previously registered with
5290  * nvRmAddRgLine1Callback.
5291  *
5292  * \param[in]  pDispEvo    The display on which to unregister the
5293  *                         callback
5294  *
5295  * \param[in]  pCallback   Pointer to the previously allocated
5296  *                         callback object
5297  */
5298 void nvRmRemoveRgLine1Callback(const NVDispEvoRec *pDispEvo,
5299                                NVRgLine1CallbackPtr pCallback)
5300 {
5301     NVRmRgLine1CallbackRec *pRmCallback;
5302     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5303     NvU32 ret;
5304 
5305     if (pCallback == NULL) {
5306         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
5307                          "Failed to disable RG line interrupt, obj pointer NULL");
5308         return;
5309     }
5310     pRmCallback = nv_container_of(pCallback, NVRmRgLine1CallbackRec, base);
5311 
5312     ret = nvRmApiFree(nvEvoGlobal.clientHandle,
5313                       pDevEvo->displayCommonHandle,
5314                       pRmCallback->rmHandle);
5315 
5316     if (ret != NVOS_STATUS_SUCCESS) {
5317         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
5318                          "Failed to disable RG line interrupt, ret: %d", ret);
5319     }
5320 
5321     nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pRmCallback->rmHandle);
5322     nvkms_free_ref_ptr(pRmCallback->ref_ptr);
5323     nvFree(pRmCallback);
5324 }
5325 
5326 /*!
 * Register an RM callback function for the VBlank interrupt.
5328  *
5329  * \param[in]   pDispEvo          The display on which to allocate the callback
5330  *
5331  * \param[in]   head              The head on which to allocate the callback
5332  *
5333  * \param[in]   pCallback         The callback function pointer to be registered
5334  *
5335  * \return      Handle to callback object on success, 0 on failure. This same
5336  *              handle must be used to unregister the callback.
5337  */
5338 NvU32 nvRmAddVBlankCallback(
5339     const NVDispEvoRec *pDispEvo,
5340     NvU32 head,
5341     OSVBLANKCALLBACKPROC pCallback,
5342     void *pParam2)
5343 {
5344     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5345     const NvU32 sd = pDispEvo->displayOwner;
5346     NvU32 ret;
5347     NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
5348 
5349     NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS params = {
5350         .pProc       = pCallback,
5351         .LogicalHead = head,
5352         .pParm1      = pDispEvo->ref_ptr,
5353         .pParm2      = pParam2,
5354     };
5355 
5356     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
5357                        pDevEvo->pSubDevices[sd]->handle,
5358                        handle,
5359                        NV9010_VBLANK_CALLBACK,
5360                        &params);
5361 
5362     if (ret == NVOS_STATUS_SUCCESS) {
5363         return handle;
5364     } else {
5365         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
5366                          "Failed to enable VBlank callback, ret: %d", ret);
5367         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
5368         return 0;
5369     }
5370 }
5371 
5372 /*!
5373  * Unregister an RM callback function previously registered with
5374  * nvRmAddVBlankCallback.
5375  *
5376  * \param[in]  pDispEvo                 The display on which to unregister the
5377  *                                      callback
5378  *
5379  * \param[in]  callbackObjectHandle     Handle to the previously allocated
5380  *                                      callback object
5381  */
5382 void nvRmRemoveVBlankCallback(const NVDispEvoRec *pDispEvo,
5383                               NvU32 callbackObjectHandle)
5384 {
5385     const NvU32 sd = pDispEvo->displayOwner;
5386     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5387     NvU32 ret;
5388 
5389     if (callbackObjectHandle == 0) {
5390         // already removed
5391         return;
5392     }
5393 
5394     ret = nvRmApiFree(nvEvoGlobal.clientHandle,
5395                       pDevEvo->pSubDevices[sd]->handle,
5396                       callbackObjectHandle);
5397 
5398     if (ret != NVOS_STATUS_SUCCESS) {
5399         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
5400                          "Failed to disable VBlank callback, ret: %d", ret);
5401     }
5402 
5403     nvFreeUnixRmHandle(&pDevEvo->handleAllocator, callbackObjectHandle);
5404 }
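
/*
 * A hypothetical pairing of the two entry points above (the real callers live
 * in the vblank handling code elsewhere in nvkms; VBlankCallback is an
 * illustrative name):
 *
 *     NvU32 handle = nvRmAddVBlankCallback(pDispEvo, head, VBlankCallback, NULL);
 *     ...
 *     nvRmRemoveVBlankCallback(pDispEvo, handle);
 */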
5405 
5406 /*!
5407  * Initialize the dynamic display mux on supported systems.
5408  *
5409  * \param[in] pDpyEvo    The dpy on which to initialize the mux.
5410  */
5411 static void MuxInit(const NVDpyEvoRec *pDpyEvo)
5412 {
5413     NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS params = { 0 };
5414     NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
5415     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5416 
5417     NvU32 ret;
5418 
5419     params.subDeviceInstance = pDispEvo->displayOwner;
5420     params.displayId = nvDpyEvoGetConnectorId(pDpyEvo);
5421 
5422     if (pDpyEvo->internal) {
5423         /* Attempt to get the EDID from ACPI. This is required for internal
5424          * displays only, as the internal mux initialization requires data
5425          * from the internal panel's EDID, while the external mux can be
5426          * initialized in the absence of a display, in which case there is
5427          * obviously no EDID present. The EDID read is done via ACPI, in
5428          * order to accommodate mux initialization while the internal panel
5429          * is disconnected from the GPU. */
5430 
5431         /* Map with hard-coded data for systems known to support dynamic mux
5432          * switching. This is a poor-man's alternative to the WDDM driver's
5433          * CDisplayMgr::NVInitializeACPIToDeviceMaskMap() */
5434         NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS acpiMap = {
5435             .mapTable = {
5436                 {.acpiId = 0x8001a420, .displayId = 0x1000, .dodIndex = 0},
5437             }
5438         };
5439         NVEdidRec edid = { };
5440         NVParsedEdidEvoRec *pParsedEdid = NULL;
5441         NVEvoInfoStringRec infoString;
5442 
5443         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5444                              pDevEvo->displayCommonHandle,
5445                              NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING,
5446                              &acpiMap, sizeof(acpiMap));
5447 
5448         if (ret != NVOS_STATUS_SUCCESS) {
5449             nvEvoLogDebug(EVO_LOG_ERROR, "Failed to set ACPI ID map.");
5450             return;
5451         }
5452 
5453         nvInitInfoString(&infoString, NULL, 0);
5454 
5455         /* Retrieve the internal panel's EDID from ACPI */
5456         if (!nvDpyReadAndParseEdidEvo(pDpyEvo, NULL,
5457                                       NVKMS_EDID_READ_MODE_ACPI,
5458                                       &edid, &pParsedEdid,
5459                                       &infoString)) {
5460             /* EDID read is expected to fail on non-dynamic-mux systems. */
5461             goto edid_done;
5462         }
5463 
5464         if (edid.length == 0 || pParsedEdid == NULL || !pParsedEdid->valid) {
5465             goto edid_done;
5466         }
5467 
5468         params.manfId = pParsedEdid->info.manuf_id;
5469         params.productId = pParsedEdid->info.product_id;
5470 
5471 edid_done:
5472         nvFree(edid.buffer);
5473         nvFree(pParsedEdid);
5474 
5475         /* Internal mux initialization will fail without manfId/productId */
5476         if (!params.manfId || !params.productId) {
5477             return;
5478         }
5479     }
5480 
5481     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5482                          pDevEvo->displayCommonHandle,
5483                          NV0073_CTRL_CMD_DFP_INIT_MUX_DATA,
5484                          &params,
5485                          sizeof(params));
5486 
5487     if (ret == NVOS_STATUS_SUCCESS) {
5488         pDispEvo->muxDisplays = nvAddDpyIdToDpyIdList(pDpyEvo->id,
5489                                                       pDispEvo->muxDisplays);
5490     } else {
5491         nvEvoLogDebug(EVO_LOG_ERROR, "Failed to initialize mux on %s.",
5492                       pDpyEvo->name);
5493     }
5494 }
5495 
5496 static NVDpyIdList GetValidMuxDpys(NVDispEvoPtr pDispEvo)
5497 {
5498     NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS params = { 0 };
5499 
5500     params.subDeviceInstance = pDispEvo->displayOwner;
5501 
5502     nvRmApiControl(nvEvoGlobal.clientHandle,
5503                    pDispEvo->pDevEvo->displayCommonHandle,
5504                    NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX,
5505                    &params, sizeof(params));
5506 
5507     return nvNvU32ToDpyIdList(params.muxDisplayMask);
5508 }
5509 
5510 void nvRmMuxInit(NVDevEvoPtr pDevEvo)
5511 {
5512     NVDispEvoPtr pDispEvo;
5513     int i;
5514 
5515     FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
5516         NVDpyIdList validMuxDpys = GetValidMuxDpys(pDispEvo);
5517         NVDpyEvoPtr pDpyEvo;
5518 
5519         FOR_ALL_EVO_DPYS(pDpyEvo, validMuxDpys, pDispEvo) {
5520             MuxInit(pDpyEvo);
5521         }
5522     }
5523 }
5524 
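/*
 * Dynamic mux switches are driven in three phases: nvRmMuxPre() performs the
 * pre-switch operations, nvRmMuxSwitch() performs the switch itself, and
 * nvRmMuxPost() performs the post-switch operations.  nvRmMuxState() queries
 * which GPU the mux currently routes to.
 */
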
5525 /*!
5526  * Perform mux pre-switch operations
5527  *
5528  * \param[in] pDpyEvo             The Dpy of the target mux
5529  * \param[in] state               The target mux state
5530  *
5531  * \return TRUE on success; FALSE on failure
5532  */
5533 NvBool nvRmMuxPre(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
5534 {
5535     NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS params = { 0 };
5536     NVDispEvoPtr pDispEvo;
5537     NVDevEvoPtr pDevEvo;
5538     NvU32 ret;
5539 
5540     pDispEvo = pDpyEvo->pDispEvo;
5541     pDevEvo = pDispEvo->pDevEvo;
5542 
5543     if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
5544         return FALSE;
5545     }
5546 
5547     params.subDeviceInstance = pDispEvo->displayOwner;
5548     params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
5549     params.flags = DRF_DEF(0073_CTRL_DFP, _DISP_MUX_FLAGS, _SR_ENTER_SKIP, _NO);
5550 
    if (state == MUX_STATE_DISCRETE) {
        params.flags |= NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
    } else if (state == MUX_STATE_INTEGRATED) {
        params.flags |= NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
    } else {
        return FALSE;
    }
5558 
5559     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5560                          pDevEvo->displayCommonHandle,
5561                          NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS,
5562                          &params, sizeof(params));
5563 
5564     nvEvoLogDebug(EVO_LOG_INFO, "RmMuxPre status %d", ret);
5565 
5566     return ret == NVOS_STATUS_SUCCESS;
5567 }
5568 
5569 /*!
5570  * Perform mux switch operation
5571  *
5572  * \param[in] pDpyEvo    The Dpy of the target mux
5573  * \param[in] state      The target mux state
5574  *
5575  * \return TRUE on success; FALSE on failure
5576  */
5577 NvBool nvRmMuxSwitch(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
5578 {
5579     NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS params = { 0 };
5580     NVDispEvoPtr pDispEvo;
5581     NVDevEvoPtr pDevEvo;
5582     NvU32 ret;
5583 
5584     pDispEvo = pDpyEvo->pDispEvo;
5585     pDevEvo = pDispEvo->pDevEvo;
5586 
5587     if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
5588         return FALSE;
5589     }
5590 
5591     params.subDeviceInstance = pDispEvo->displayOwner;
5592     params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
5593 
5594     if (state == MUX_STATE_DISCRETE) {
5595         params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
5596     } else if (state == MUX_STATE_INTEGRATED) {
5597         params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
5598     } else {
5599         return FALSE;
5600     }
5601 
5602     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5603                          pDevEvo->displayCommonHandle,
5604                          NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX,
5605                          &params, sizeof(params));
5606 
5607     nvEvoLogDebug(EVO_LOG_INFO, "RmMuxSwitch status %d", ret);
5608 
5609     /*
5610      * Force link training after waiting for the DP AUX link to settle.
5611      * The delay duration comes from DFP_MUX_AUX_SETTLE_DELAY_MS_DEFAULT
5612      * in drivers/resman/kernel/inc/dfpmux.h.
5613      */
5614     nvkms_usleep(100000);
5615 
5616     if (pDpyEvo->internal && state == MUX_STATE_DISCRETE) {
5617         nvAssert(nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo));
5618         nvDPNotifyShortPulse(pDpyEvo->pConnectorEvo->pDpLibConnector);
5619         nvDPFireExpiredTimers(pDevEvo);
5620     }
5621 
5622     return ret == NVOS_STATUS_SUCCESS;
5623 }
5624 
5625 /*!
5626  * Perform mux post-switch operations
5627  *
5628  * \param[in] pDpyEvo                The Dpy of the target mux
5629  * \param[in] state                  The target mux state
5630  *
5631  * \return TRUE on success; FALSE on failure
5632  */
5633 NvBool nvRmMuxPost(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
5634 {
5635     NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS params = { 0 };
5636     NVDispEvoPtr pDispEvo;
5637     NVDevEvoPtr pDevEvo;
5638     NvU32 ret;
5639 
5640     pDispEvo = pDpyEvo->pDispEvo;
5641     pDevEvo = pDispEvo->pDevEvo;
5642 
5643     if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
5644         return FALSE;
5645     }
5646 
5647     params.subDeviceInstance = pDispEvo->displayOwner;
5648     params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
5649     params.flags = DRF_DEF(0073_CTRL_DFP, _DISP_MUX_FLAGS, _SR_ENTER_SKIP, _NO);
5650 
    if (state == MUX_STATE_DISCRETE) {
        params.flags |= NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
    } else if (state == MUX_STATE_INTEGRATED) {
        params.flags |= NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
    } else {
        return FALSE;
    }
5658 
5659     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5660                          pDevEvo->displayCommonHandle,
5661                          NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS,
5662                          &params, sizeof(params));
5663 
5664     nvEvoLogDebug(EVO_LOG_INFO, "RmMuxPost status %d", ret);
5665 
5666     return ret == NVOS_STATUS_SUCCESS;
5667 }
5668 
5669 /*!
5670  * Query the current state of a dynamic mux
5671  *
5672  * \param[in] pDpyEvo    The Dpy of the target mux whose state is to be queried
5673  *
5674  * \return Mux state (either MUX_STATE_INTEGRATED or MUX_STATE_DISCRETE) on
5675  * success; MUX_STATE_UNKNOWN on failure.
5676  */
5677 NvMuxState nvRmMuxState(const NVDpyEvoRec *pDpyEvo)
5678 {
5679     NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS params = { 0 };
5680     NVDispEvoPtr pDispEvo;
5681     NVDevEvoPtr pDevEvo;
5682 
5683     pDispEvo = pDpyEvo->pDispEvo;
5684     pDevEvo = pDispEvo->pDevEvo;
5685 
5686     if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
5687         return MUX_STATE_UNKNOWN;
5688     }
5689 
5690     params.subDeviceInstance = pDispEvo->displayOwner;
5691     params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
5692 
5693     if (NVOS_STATUS_SUCCESS == nvRmApiControl(nvEvoGlobal.clientHandle,
5694                                     pDevEvo->displayCommonHandle,
5695                                     NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS,
5696                                     &params, sizeof(params))) {
5697         if (FLD_TEST_DRF(0073_CTRL_DFP, _DISP_MUX, _STATE, _INTEGRATED_GPU,
5698             params.muxStatus)) {
5699             return MUX_STATE_INTEGRATED;
5700         }
5701         if (FLD_TEST_DRF(0073_CTRL_DFP, _DISP_MUX, _STATE, _DISCRETE_GPU,
5702             params.muxStatus)) {
5703             return MUX_STATE_DISCRETE;
5704         }
5705     }
5706 
5707     return MUX_STATE_UNKNOWN;
5708 }
5709 
5710 void nvRmRegisterBacklight(NVDispEvoRec *pDispEvo)
5711 {
5712     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
5713     NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS dispParams = { 0 };
5714     NvU32 displayMask, displayId;
5715     NvU32 brightness;
5716 
5717     nvAssert(pDispEvo->backlightDevice == NULL);
5718 
5719     dispParams.subDeviceInstance = pDispEvo->displayOwner;
5720 
5721     if (nvRmApiControl(nvEvoGlobal.clientHandle,
5722                        pDevEvo->displayCommonHandle,
5723                        NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS,
5724                        &dispParams, sizeof(dispParams)) != NV_OK) {
5725         return;
5726     }
5727 
5728     /* Find a display with a backlight */
5729     displayMask = dispParams.availableInternalDisplaysMask;
5730     for (; displayMask; displayMask &= ~LOWESTBIT(displayMask))
5731     {
5732         NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 };
5733         NV_STATUS status;
5734 
5735         displayId = LOWESTBIT(displayMask);
5736 
5737         params.subDeviceInstance = pDispEvo->displayOwner;
5738         params.displayId         = displayId;
5739 
5740         status = nvRmApiControl(nvEvoGlobal.clientHandle,
5741                                 pDevEvo->displayCommonHandle,
5742                                 NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
5743                                 &params, sizeof(params));
5744 
5745         if (status == NV_OK)
5746         {
5747             brightness = params.brightness;
5748             break;
5749         }
5750     }
5751 
5752     if (displayMask == 0)
5753     {
5754         /* No internal display has backlight */
5755         return;
5756     }
5757 
5758     pDispEvo->backlightDevice = nvkms_register_backlight(
5759         pDevEvo->pSubDevices[pDispEvo->displayOwner]->gpuId,
5760         displayId, pDispEvo,
5761         brightness);
5762 }
5763 
5764 void nvRmUnregisterBacklight(NVDispEvoRec *pDispEvo)
5765 {
5766     if (pDispEvo->backlightDevice != NULL) {
5767         nvkms_unregister_backlight(pDispEvo->backlightDevice);
5768     }
5769     pDispEvo->backlightDevice = NULL;
5770 }
5771