1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 
25 
26 #include "dp/nvdp-connector.h"
27 #include "dp/nvdp-timer.h"
28 #include "dp/nvdp-device.h"
29 #include "nvkms-rm.h"
30 #include "nvkms-rmapi.h"
31 #include "g_nvkms-evo-states.h"
32 #include "nvkms-event.h"
33 #include "nvkms-dpy.h"
34 #include "nvkms-types.h"
35 #include "nvkms-evo.h"
36 #include "nvkms-dma.h"
37 #include "nvkms-utils.h"
38 #include "nvkms-private.h"
39 #include "nvkms-modeset.h"
40 #include "nvkms-surface.h"
41 #include "nvkms-vrr.h"
42 
43 #include "nvkms-push.h"
44 #include "nvkms-difr.h"
45 
46 #include "class/cl0005.h" /* NV01_EVENT */
47 
48 #include <class/cl0070.h> // NV01_MEMORY_VIRTUAL
49 #include <class/cl0073.h> /* NV04_DISPLAY_COMMON */
50 #include <class/cl003e.h> /* NV01_MEMORY_SYSTEM */
51 #include <class/cl0076.h> /* NV01_MEMORY_FRAMEBUFFER_CONSOLE */
52 #include <class/cl0080.h> /* NV01_DEVICE_0 */
53 #include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
54 #include <class/cl2080.h> /* NV20_SUBDEVICE_0 */
55 
56 #include "class/clc37b.h" /* NVC37B_WINDOW_IMM_CHANNEL_DMA */
57 #include "class/clc37e.h" /* NVC37E_WINDOW_CHANNEL_DMA */
58 #include "class/clc57b.h" /* NVC57B_WINDOW_IMM_CHANNEL_DMA */
59 #include "class/clc57e.h" /* NVC57E_WINDOW_CHANNEL_DMA */
60 #include "class/clc67b.h" /* NVC67B_WINDOW_IMM_CHANNEL_DMA */
61 #include "class/clc67e.h" /* NVC67E_WINDOW_CHANNEL_DMA */
62 
63 #include "class/cl917b.h" /* NV917B_OVERLAY_IMM_CHANNEL_PIO */
64 
65 #include "class/cl927c.h" /* NV927C_BASE_CHANNEL_DMA */
66 
67 #include "class/cl917e.h" /* NV917E_OVERLAY_CHANNEL_DMA */
68 
69 #include <ctrl/ctrl0000/ctrl0000gpu.h> /* NV0000_CTRL_GPU_* */
70 #include <ctrl/ctrl0002.h> /* NV0002_CTRL_CMD_BIND_CONTEXTDMA */
71 #include <ctrl/ctrl0073/ctrl0073dfp.h> /* NV0073_CTRL_CMD_DFP_GET_INFO */
72 #include <ctrl/ctrl0073/ctrl0073dp.h> /* NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID */
73 #include <ctrl/ctrl0073/ctrl0073specific.h> /* NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO */
74 #include <ctrl/ctrl0073/ctrl0073system.h> /* NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED */
75 #include <ctrl/ctrl0080/ctrl0080gpu.h> /* NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER */
76 #include <ctrl/ctrl0080/ctrl0080gr.h> /* NV0080_CTRL_CMD_GR_GET_CAPS_V2 */
77 #include <ctrl/ctrl0080/ctrl0080unix.h> /* NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH */
78 #include <ctrl/ctrl2080/ctrl2080bios.h> /* NV2080_CTRL_CMD_BIOS_GET_NBSI */
79 #include <ctrl/ctrl2080/ctrl2080bus.h> /* NV2080_CTRL_CMD_BUS_GET_INFO */
80 #include <ctrl/ctrl2080/ctrl2080event.h> /* NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION */
81 #include <ctrl/ctrl2080/ctrl2080tmr.h> /* NV2080_CTRL_CMD_TIMER_GET_TIME */
82 #include <ctrl/ctrl2080/ctrl2080unix.h> /* NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT */
83 #include <ctrl/ctrl5070/ctrl5070chnc.h> /* NV5070_CTRL_CMD_SET_RMFREE_FLAGS */
84 #include <ctrl/ctrl5070/ctrl5070or.h> /* NV5070_CTRL_CMD_SET_DAC_PWR */
85 
86 #include "nvos.h"
87 
88 #include "displayport/dpcd.h"
89 
90 #define NVKMS_SYNCPT_ID_INVALID     (0xFFFFFFFF)
91 
92 static NvU32 GetLegacyConnectorType(NVDispEvoPtr pDispEvo, NVDpyId dpyId);
93 
94 static void RmFreeEvoChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel);
95 
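/*!
 * Return TRUE if engineType appears in the subdevice's supported engine list.
 */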
96 static NvBool EngineListCheckOneSubdevice(const NVEvoSubDeviceRec *pSubDevice,
97                                           NvU32 engineType)
98 {
99     const NvU32 *engines = pSubDevice->supportedEngines;
100     int i;
101 
102     for (i = 0; i < pSubDevice->numEngines; i++) {
103         if (engines[i] == engineType) {
104             return TRUE;
105         }
106     }
107 
108     return FALSE;
109 }
110 
111 static NvBool EngineListCheck(const NVDevEvoRec *pDevEvo, NvU32 engineType)
112 {
113     int sd;
114 
115     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
116         if (!EngineListCheckOneSubdevice(pDevEvo->pSubDevices[sd],
117                                          engineType)) {
118             return FALSE;
119         }
120     }
121 
122     return TRUE;
123 }
124 
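/*!
 * Query static GPU capabilities and cache them in pDevEvo: headSurface
 * support (requires a graphics engine), simulation type, mobile/SOC flags,
 * ISO/NISO DMA coherency modes, and syncpt support.
 */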
125 static NvBool QueryGpuCapabilities(NVDevEvoPtr pDevEvo)
126 {
127     NvBool ctxDmaCoherentAllowedDev = FALSE;
128     NvBool ctxDmaNonCoherentAllowedDev = FALSE;
129     NvU32 ret, sd;
130 
131     NV0000_CTRL_GPU_GET_ID_INFO_PARAMS idInfoParams = { 0 };
132 
133     pDevEvo->isHeadSurfaceSupported = FALSE;
134 
135     if (EngineListCheck(pDevEvo, NV2080_ENGINE_TYPE_GRAPHICS)) {
136         NV0080_CTRL_GR_GET_CAPS_V2_PARAMS grCaps = { 0 };
137 
138         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
139                              pDevEvo->deviceHandle,
140                              NV0080_CTRL_CMD_GR_GET_CAPS_V2,
141                              &grCaps,
142                              sizeof(grCaps));
143 
144         if (ret != NVOS_STATUS_SUCCESS) {
145             return FALSE;
146         }
147 
148         /* Assume headSurface is supported if there is a graphics engine
149          * and headSurface support is included in the NVKMS build.
150          */
151         pDevEvo->isHeadSurfaceSupported = NVKMS_INCLUDE_HEADSURFACE;
152     }
153 
154     /* ctxDma{,Non}CoherentAllowed */
155 
156     /* simulationType */
157 
158     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
159 
160         NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS simParams = { 0 };
161 
162         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
163                              pDevEvo->pSubDevices[sd]->handle,
164                              NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO,
165                              &simParams,
166                              sizeof(simParams));
167 
168         if (ret != NVOS_STATUS_SUCCESS) {
169             simParams.type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE;
170         }
171         if (sd == 0) {
172             pDevEvo->simulationType = simParams.type;
173         }
174         nvAssert(pDevEvo->simulationType == simParams.type);
175     }
176 
177     /* mobile */
178 
179     idInfoParams.gpuId = pDevEvo->pSubDevices[0]->gpuId;
180 
181     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
182                          nvEvoGlobal.clientHandle,
183                          NV0000_CTRL_CMD_GPU_GET_ID_INFO,
184                          &idInfoParams, sizeof(idInfoParams));
185 
186     if (ret != NVOS_STATUS_SUCCESS) {
187         pDevEvo->mobile = FALSE;
188         pDevEvo->isSOCDisplay = FALSE;
189     } else {
190         pDevEvo->mobile =
191             FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _MOBILE, _TRUE,
192                          idInfoParams.gpuFlags);
193 
194         pDevEvo->isSOCDisplay =
195             FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE,
196                          idInfoParams.gpuFlags);
197     }
198 
199     /* TODO: This cap bit should be queried from RM */
200     pDevEvo->requiresAllAllocationsInSysmem = pDevEvo->isSOCDisplay;
201 
202     pDevEvo->supportsVblankSemControl =
203         !pDevEvo->isSOCDisplay && nvkms_vblank_sem_control();
204 
205     /* ctxDma{,Non}CoherentAllowed */
206 
207     if (!pDevEvo->isSOCDisplay) {
208         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
209             NV2080_CTRL_BUS_GET_INFO_PARAMS busParams = { 0 };
210             struct {
211                 NV2080_CTRL_BUS_INFO coherentFlags;
212                 NV2080_CTRL_BUS_INFO nonCoherentFlags;
213             } busInfoList = { { 0 } };
214 
215             NvBool ctxDmaCoherentAllowed;
216             NvBool ctxDmaNonCoherentAllowed;
217 
218             busInfoList.coherentFlags.index =
219                 NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS;
220             busInfoList.nonCoherentFlags.index =
221                 NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS;
222 
223             busParams.busInfoListSize =
224                 sizeof(busInfoList) / sizeof(busInfoList.coherentFlags);
225             busParams.busInfoList = NV_PTR_TO_NvP64(&busInfoList);
226 
227             ret = nvRmApiControl(nvEvoGlobal.clientHandle,
228                                  pDevEvo->pSubDevices[sd]->handle,
229                                  NV2080_CTRL_CMD_BUS_GET_INFO,
230                                  &busParams, sizeof(busParams));
231 
232             if (ret != NVOS_STATUS_SUCCESS) {
233                 return FALSE;
234             }
235 
236             ctxDmaCoherentAllowed =
237                 FLD_TEST_DRF(2080_CTRL_BUS_INFO, _COHERENT_DMA_FLAGS,
238                              _CTXDMA, _TRUE, busInfoList.coherentFlags.data);
239 
240             ctxDmaNonCoherentAllowed =
241                 FLD_TEST_DRF(2080_CTRL_BUS_INFO, _NONCOHERENT_DMA_FLAGS,
242                              _CTXDMA, _TRUE, busInfoList.nonCoherentFlags.data);
243 
244             if (sd == 0) {
245                 ctxDmaCoherentAllowedDev = ctxDmaCoherentAllowed;
246                 ctxDmaNonCoherentAllowedDev = ctxDmaNonCoherentAllowed;
247             } else {
248                 ctxDmaCoherentAllowedDev =
249                     ctxDmaCoherentAllowedDev && ctxDmaCoherentAllowed;
250                 ctxDmaNonCoherentAllowedDev =
251                     ctxDmaNonCoherentAllowedDev && ctxDmaNonCoherentAllowed;
252             }
253         }
254         nvAssert(ctxDmaCoherentAllowedDev || ctxDmaNonCoherentAllowedDev);
255 
256         if (ctxDmaCoherentAllowedDev) {
257             pDevEvo->isoIOCoherencyModes.coherent = TRUE;
258             pDevEvo->nisoIOCoherencyModes.coherent = TRUE;
259         }
260 
261         if (ctxDmaNonCoherentAllowedDev) {
262             pDevEvo->isoIOCoherencyModes.noncoherent = TRUE;
263             pDevEvo->nisoIOCoherencyModes.noncoherent = TRUE;
264         }
265     } else {
266         /*
267          * On SOC display, NISO requests are IO-coherent and ISO
268          * requests are non-coherent.
269          */
270         pDevEvo->isoIOCoherencyModes.noncoherent = TRUE;
271         pDevEvo->nisoIOCoherencyModes.coherent = TRUE;
272     }
273 
274     pDevEvo->supportsSyncpts =
275         FALSE;
276 
277     return TRUE;
278 }
279 
280 
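/*!
 * Free one NVDispEvoRec and its ref_ptr; a NULL pDispEvo is a no-op.
 */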
281 static void FreeDisplay(NVDispEvoPtr pDispEvo)
282 {
283     if (pDispEvo == NULL) {
284         return;
285     }
286 
287     nvAssert(pDispEvo->vrrSetTimeoutEventUsageCount == 0);
288     nvAssert(pDispEvo->vrrSetTimeoutEventHandle == 0);
289 
290 #if defined(DEBUG)
291     for (NvU32 apiHead = 0;
292          apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) {
293         nvAssert(pDispEvo->pSwapGroup[apiHead] == NULL);
294     }
295 #endif
296 
297     nvAssert(nvListIsEmpty(&pDispEvo->dpyList));
298 
299     nvkms_free_ref_ptr(pDispEvo->ref_ptr);
300 
301     nvInvalidateRasterLockGroupsEvo();
302     nvFree(pDispEvo);
303 }
304 
305 
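/*!
 * Allocate and minimally initialize one NVDispEvoRec for pDevEvo; returns
 * NULL on allocation failure.
 */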
306 static inline NVDispEvoPtr AllocDisplay(NVDevEvoPtr pDevEvo)
307 {
308     NVDispEvoPtr pDispEvo = nvCalloc(1, sizeof(NVDispEvoRec));
309 
310     if (pDispEvo == NULL) {
311         goto fail;
312     }
313 
314     pDispEvo->pDevEvo = pDevEvo;
315 
316     nvListInit(&pDispEvo->dpyList);
317     nvListInit(&pDispEvo->connectorList);
318 
319     pDispEvo->framelock.server = nvInvalidDpyId();
320     pDispEvo->framelock.clients = nvEmptyDpyIdList();
321     pDispEvo->framelock.currentServerHead = NV_INVALID_HEAD;
322 
323     pDispEvo->ref_ptr = nvkms_alloc_ref_ptr(pDispEvo);
324     if (!pDispEvo->ref_ptr) {
325         goto fail;
326     }
327 
328     return pDispEvo;
329 
330 fail:
331     FreeDisplay(pDispEvo);
332 
333     return NULL;
334 }
335 
336 
337 static void FreeDisplays(NVDevEvoPtr pDevEvo)
338 {
339     unsigned int sd;
340 
341     for (sd = 0; sd < pDevEvo->nDispEvo; sd++) {
342         FreeDisplay(pDevEvo->pDispEvo[sd]);
343         pDevEvo->pDispEvo[sd] = NULL;
344     }
345     pDevEvo->nDispEvo = 0;
346 }
347 
348 
349 /*!
350  * Allocate the NVDispRecs for the given pDev.
351  *
352  * \param[in,out]  pDev  The device for which to allocate Displays.
353  */
354 static NvBool AllocDisplays(NVDevEvoPtr pDevEvo)
355 {
356     unsigned int sd;
357 
358     nvAssert(pDevEvo->nDispEvo == 0);
359 
360     pDevEvo->nDispEvo = pDevEvo->numSubDevices;
361 
362     for (sd = 0; sd < pDevEvo->nDispEvo; sd++) {
363         NVDispEvoPtr pDispEvo = AllocDisplay(pDevEvo);
364 
365         if (pDispEvo == NULL) {
366             goto fail;
367         }
368 
369         pDevEvo->pDispEvo[sd] = pDispEvo;
370 
371         pDispEvo->displayOwner = sd;
372 
373         pDispEvo->gpuLogIndex = pDevEvo->pSubDevices[sd]->gpuLogIndex;
374     }
375 
376     return TRUE;
377 
378 fail:
379     FreeDisplays(pDevEvo);
380     return FALSE;
381 }
382 
383 /*
384  * Get the (id) list of all supported display devices for this pDisp.
385  */
386 static NvBool ProbeValidDisplays(NVDispEvoPtr pDispEvo)
387 {
388     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
389     NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS getSupportedParams = { 0 };
390     NvU32 ret;
391 
392     pDispEvo->connectorIds = nvEmptyDpyIdList();
393     pDispEvo->displayPortMSTIds = nvEmptyDpyIdList();
394     pDispEvo->dynamicDpyIds = nvEmptyDpyIdList();
395     pDispEvo->validDisplays = nvEmptyDpyIdList();
396 
397     getSupportedParams.subDeviceInstance = pDispEvo->displayOwner;
398 
399     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
400                          pDevEvo->displayCommonHandle,
401                          NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED,
402                          &getSupportedParams, sizeof(getSupportedParams));
403 
404     if (ret != NVOS_STATUS_SUCCESS) {
405         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
406                     "Failed to get supported display device(s)");
407     } else {
408         NVDpyIdList dpyIdList;
409         NVDpyId dpyId;
410 
411         // Grab only the static ids from the list.  Dynamic ids are
412         // used to communicate with devices that are connected to
413         // a connector that has a static id.
414         dpyIdList = nvNvU32ToDpyIdList(getSupportedParams.displayMask);
415 
416         FOR_ALL_DPY_IDS(dpyId, dpyIdList) {
417             NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS getOrInfoParams = { 0 };
418             getOrInfoParams.subDeviceInstance = pDispEvo->displayOwner;
419             getOrInfoParams.displayId = nvDpyIdToNvU32(dpyId);
420 
421             ret = nvRmApiControl(nvEvoGlobal.clientHandle,
422                                  pDevEvo->displayCommonHandle,
423                                  NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
424                                  &getOrInfoParams,
425                                  sizeof(getOrInfoParams));
426             if (ret != NVOS_STATUS_SUCCESS) {
427                 nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
428                             "Failed to get supported display device(s)");
429             } else {
430                 if (!getOrInfoParams.bIsDispDynamic) {
431                     pDispEvo->connectorIds =
432                         nvAddDpyIdToDpyIdList(dpyId, pDispEvo->connectorIds);
433                 }
434             }
435         }
436     }
437 
438     pDispEvo->validDisplays = pDispEvo->connectorIds;
439 
440     return TRUE;
441 }
442 
443 /*!
444  * Return TRUE if every pDispEvo on this pDevEvo has an empty validDisplays.
445  */
446 static NvBool NoValidDisplays(NVDevEvoPtr pDevEvo)
447 {
448     NVDispEvoPtr pDispEvo;
449     unsigned int sd;
450 
451     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
452         if (!nvDpyIdListIsEmpty(pDispEvo->validDisplays)) {
453             return FALSE;
454         }
455     }
456 
457     return TRUE;
458 }
459 
460 
461 /*
462  * Find the NvKmsConnectorSignalFormat for the pConnectorEvo.
463  */
464 static NvKmsConnectorSignalFormat
465 GetSignalFormat(const NVConnectorEvoRec *pConnectorEvo)
466 {
467     // SignalFormat represents a weird combination of our OR type and protocol.
468     switch (pConnectorEvo->or.type) {
469     case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC:
470         switch (pConnectorEvo->or.protocol) {
471         default:
472             nvAssert(!"Unexpected OR protocol for DAC");
473             // fall through
474         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT:
475             return NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA;
476         }
477 
478     case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
479         switch (pConnectorEvo->or.protocol) {
480         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM:
481             return NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS;
482 
483         default:
484             nvAssert(!"Unexpected OR protocol for SOR");
485             // fall through
486         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
487         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
488         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
489             return NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS;
490 
491         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
492         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
493             return NVKMS_CONNECTOR_SIGNAL_FORMAT_DP;
494         }
495 
496     case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR:
497         switch (pConnectorEvo->or.protocol) {
498         default:
499             nvAssert(!"Unexpected OR protocol for PIOR");
500             // fall through
501         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC:
502             return NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS;
503         }
504 
505     case NV0073_CTRL_SPECIFIC_OR_TYPE_DSI:
506         switch (pConnectorEvo->or.protocol) {
507         default:
508             nvAssert(!"Unexpected OR protocol for DSI");
509             // fall through
510         case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI:
511             return NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI;
512         }
513 
514     default:
515         nvAssert(!"Unexpected OR type");
516         return NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN;
517     }
518 
519     return NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN;
520 }
521 
522 
523 static NvU32 GetDfpInfo(const NVConnectorEvoRec *pConnectorEvo)
524 {
525     NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
526     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
527 
528     NV0073_CTRL_DFP_GET_INFO_PARAMS params = { 0 };
529     NvU32 ret;
530 
531     if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
532         return 0x0;
533     }
534 
535     params.subDeviceInstance = pDispEvo->displayOwner;
536     params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
537 
538     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
539                          pDevEvo->displayCommonHandle,
540                          NV0073_CTRL_CMD_DFP_GET_INFO,
541                          &params,
542                          sizeof(params));
543 
544     if (ret != NVOS_STATUS_SUCCESS) {
545         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, "Failed to query DFP info");
546         return 0x0;
547     }
548 
549     return params.flags;
550 }
551 
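/*
 * Per-pDisp running counters used by AllocConnector() to assign each
 * connector's legacyTypeIndex and typeIndex; typeIndex is what produces
 * connector names such as "DP-0", "DP-1".
 */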
552 typedef struct _AllocConnectorDispDataRec {
553     NvU32 dfpIndex;
554     NvU32 crtIndex;
555     NvU32 typeIndices[NVKMS_CONNECTOR_TYPE_MAX + 1];
556 } AllocConnectorDispDataRec;
557 
558 /*!
559  * Query and setup information for a connector.
560  */
561 static NvBool AllocConnector(
562     NVDispEvoPtr pDispEvo,
563     NVDpyId dpyId,
564     AllocConnectorDispDataRec *pAllocConnectorDispData)
565 {
566     NVConnectorEvoPtr pConnectorEvo = NULL;
567     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
568     NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS params = { 0 };
569     NvU32 ret;
570     NvBool isDP;
571 
572     pConnectorEvo = nvCalloc(1, sizeof(*pConnectorEvo));
573 
574     if (pConnectorEvo == NULL) {
575         return FALSE;
576     }
577 
578     pConnectorEvo->pDispEvo = pDispEvo;
579     pConnectorEvo->displayId = dpyId;
580     pConnectorEvo->type = NVKMS_CONNECTOR_TYPE_UNKNOWN;
581     pConnectorEvo->physicalIndex = NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION;
582     pConnectorEvo->physicalLocation = NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION;
583     /* Query the output resource configuration */
584     nvRmGetConnectorORInfo(pConnectorEvo, FALSE);
585 
586     isDP =
587         (pConnectorEvo->or.type ==
588          NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
589         (pConnectorEvo->or.protocol ==
590          NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A ||
591          pConnectorEvo->or.protocol ==
592          NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B);
593 
594     /* Determine the connector type. */
595 
596     params.subDeviceInstance = pDispEvo->displayOwner;
597     params.displayId = nvDpyIdToNvU32(dpyId);
598 
599     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
600                          pDevEvo->displayCommonHandle,
601                          NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA,
602                          &params,
603                          sizeof(params));
604 
605     if (ret != NVOS_STATUS_SUCCESS) {
606         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
607                      "Failed to determine connector type for connector "
608                      NV_DPY_ID_PRINT_FORMAT, nvDpyIdToPrintFormat(dpyId));
609         goto fail;
610     } else {
611 
612         static const struct {
613             NvU32 type0073;
614             NvKmsConnectorType typeNvKms;
615         } connectorTypeTable[] = {
616             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_EXT,
617               NVKMS_CONNECTOR_TYPE_DP },
618             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C,
619               NVKMS_CONNECTOR_TYPE_USBC },
620             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_INT,
621               NVKMS_CONNECTOR_TYPE_DP },
622             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_MINI_EXT,
623               NVKMS_CONNECTOR_TYPE_DP },
624             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_1,
625               NVKMS_CONNECTOR_TYPE_DP },
626             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_2,
627               NVKMS_CONNECTOR_TYPE_DP },
628             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VGA_15_PIN,
629               NVKMS_CONNECTOR_TYPE_VGA },
630             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_SVIDEO,
631               NVKMS_CONNECTOR_TYPE_DVI_I },
632             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_COMPOSITE,
633               NVKMS_CONNECTOR_TYPE_DVI_I },
634             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I,
635               NVKMS_CONNECTOR_TYPE_DVI_I },
636             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_D,
637               NVKMS_CONNECTOR_TYPE_DVI_D },
638             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_ADC,
639               NVKMS_CONNECTOR_TYPE_ADC },
640             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_1,
641               NVKMS_CONNECTOR_TYPE_DVI_I },
642             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_2,
643               NVKMS_CONNECTOR_TYPE_DVI_I },
644             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_SPWG,
645               NVKMS_CONNECTOR_TYPE_LVDS },
646             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_OEM,
647               NVKMS_CONNECTOR_TYPE_LVDS },
648             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_A,
649               NVKMS_CONNECTOR_TYPE_HDMI },
650             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_C_MINI,
651               NVKMS_CONNECTOR_TYPE_HDMI },
652             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VIRTUAL_WFD,
653               NVKMS_CONNECTOR_TYPE_UNKNOWN },
654             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DSI,
655               NVKMS_CONNECTOR_TYPE_DSI },
656             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_SERIALIZER,
657               NVKMS_CONNECTOR_TYPE_DP_SERIALIZER },
658             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_STEREO_3PIN_DIN,
659               NVKMS_CONNECTOR_TYPE_UNKNOWN },
660             { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_UNKNOWN,
661               NVKMS_CONNECTOR_TYPE_UNKNOWN },
662         };
663 
664         int i, j;
665 
666         for (i = 0; i < params.count; i++) {
667             for (j = 0; j < ARRAY_LEN(connectorTypeTable); j++) {
668                 if (connectorTypeTable[j].type0073 == params.data[i].type) {
669                     if (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_UNKNOWN) {
670                         pConnectorEvo->type = connectorTypeTable[j].typeNvKms;
671                     } else {
672                         /*
673                          * The only cases where we should see
674                          * params.count > 1 (and thus attempt to
675                          * assign pConnectorEvo->type multiple times)
676                          * should be where all the
677                          * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_*
678                          * values map to the same NvKmsConnectorType;
679                          */
680                         nvAssert(pConnectorEvo->type ==
681                                  connectorTypeTable[j].typeNvKms);
682                     }
683                     break;
684                 }
685             }
686             if (j == ARRAY_LEN(connectorTypeTable)) {
687                 nvAssert(!"Unhandled connector type!");
688             }
689 
690             if (i == 0) {
691                 pConnectorEvo->physicalIndex = params.data[i].index;
692                 pConnectorEvo->physicalLocation = params.data[i].location;
693             } else {
694                 nvAssert(pConnectorEvo->physicalIndex == params.data[i].index);
695                 nvAssert(pConnectorEvo->physicalLocation ==
696                          params.data[i].location);
697             }
698         }
699 
700         pConnectorEvo->ddcPartnerDpyIdsList = nvNvU32ToDpyIdList(params.DDCPartners);
701     }
702 
703     /* If the connector type is unknown, ignore this connector. */
704     if (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_UNKNOWN) {
705         nvFree(pConnectorEvo);
706         return TRUE;
707     }
708 
709     /*
710      * Ignore connectors that use DP protocol, but don't have a
711      * DP-compatible type.
712      */
713     if (isDP &&
714         ((pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_DP) &&
715          !nvConnectorIsDPSerializer(pConnectorEvo) &&
716          (pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_USBC))) {
717         nvFree(pConnectorEvo);
718         return TRUE;
719     }
720 
721     /*
722      * Bind connector to the DP lib if DP capable. Serializer
723      * connector is not managed by DP lib.
724      */
725     if (isDP &&
726         !nvConnectorIsDPSerializer(pConnectorEvo)) {
727         pConnectorEvo->pDpLibConnector = nvDPCreateConnector(pConnectorEvo);
728         if (!pConnectorEvo->pDpLibConnector) {
729             nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
730                          "Failed to initialize DisplayPort support for "
731                          NV_DPY_ID_PRINT_FORMAT, nvDpyIdToPrintFormat(dpyId));
732             goto fail;
733         }
734     }
735 
736     pConnectorEvo->signalFormat = GetSignalFormat(pConnectorEvo);
737 
738     pConnectorEvo->dfpInfo = GetDfpInfo(pConnectorEvo);
739 
740     /* Assign connector indices. */
741 
742     pConnectorEvo->legacyType =
743         GetLegacyConnectorType(pDispEvo, pConnectorEvo->displayId);
744 
745     switch (pConnectorEvo->legacyType) {
746         case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT:
747             pConnectorEvo->legacyTypeIndex =
748                 pAllocConnectorDispData->crtIndex++;
749             break;
750         case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP:
751             pConnectorEvo->legacyTypeIndex =
752                 pAllocConnectorDispData->dfpIndex++;
753             break;
754         default:
755             nvAssert(!"Unknown connector type");
756             break;
757     }
758 
759     nvAssert(pConnectorEvo->type <
760              ARRAY_LEN(pAllocConnectorDispData->typeIndices));
761     pConnectorEvo->typeIndex =
762         pAllocConnectorDispData->typeIndices[pConnectorEvo->type]++;
763 
764     nvListAppend(&pConnectorEvo->connectorListEntry, &pDispEvo->connectorList);
765 
766     nvkms_snprintf(pConnectorEvo->name, sizeof(pConnectorEvo->name), "%s-%u",
767                    NvKmsConnectorTypeString(pConnectorEvo->type),
768                    pConnectorEvo->typeIndex);
769 
770     return TRUE;
771 
772 fail:
773     nvFree(pConnectorEvo);
774     return FALSE;
775 }
776 
777 
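/*!
 * Free every pConnectorEvo on the pDisp, destroying its DisplayPort library
 * connector first.
 */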
778 static void FreeConnectors(NVDispEvoPtr pDispEvo)
779 {
780     NVConnectorEvoPtr pConnectorEvo, pConnectorEvoNext;
781 
782     nvListForEachEntry_safe(pConnectorEvo, pConnectorEvoNext,
783                             &pDispEvo->connectorList, connectorListEntry) {
784         // Unbind DP lib from the connector
785         nvDPDestroyConnector(pConnectorEvo->pDpLibConnector);
786         pConnectorEvo->pDpLibConnector = NULL;
787         nvListDel(&pConnectorEvo->connectorListEntry);
788         nvFree(pConnectorEvo);
789     }
790 }
791 
792 
793 /*!
794  * Allocate and initialize the connector structs for the given pDisp.
795  *
796  * NOTE: Each Display ID in pDispEvo->connectorIds (aka the
797  * NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED mask) is a possible display
798  * connection to the GPU which is static after boot.
799  */
800 static NvBool AllocConnectors(NVDispEvoPtr pDispEvo)
801 {
802     NVDpyId dpyId;
803     NVConnectorEvoPtr pConnectorEvo;
804     AllocConnectorDispDataRec allocConnectorDispData = { };
805 
806     nvAssert(nvListIsEmpty(&pDispEvo->connectorList));
807 
808     if (nvDpyIdListIsEmpty(pDispEvo->connectorIds)) {
809         /* Allow boards with no connectors */
810         return TRUE;
811     }
812 
813     /* Allocate the connectors */
814     FOR_ALL_DPY_IDS(dpyId, pDispEvo->connectorIds) {
815         if (!AllocConnector(pDispEvo, dpyId, &allocConnectorDispData)) {
816             goto fail;
817         }
818     }
819 
820     /*
821      * Reassign pDispEvo->connectorIds, to exclude any connectors ignored above:
822      * AllocConnector() may return TRUE but not actually create a pConnectorEvo
823      * for some connectors reported by resman.
824      */
825     pDispEvo->connectorIds = nvEmptyDpyIdList();
826     FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
827         pDispEvo->connectorIds =
828             nvAddDpyIdToDpyIdList(pConnectorEvo->displayId,
829                                   pDispEvo->connectorIds);
830     }
831 
832     pDispEvo->validDisplays = pDispEvo->connectorIds;
833 
834     return TRUE;
835 
836  fail:
837     FreeConnectors(pDispEvo);
838     return FALSE;
839 }
840 
841 static NvBool IsFlexibleWindowMapping(NvU32 windowHeadMask)
842 {
843     return (windowHeadMask ==
844             NV0073_CTRL_SPECIFIC_FLEXIBLE_HEAD_WINDOW_ASSIGNMENT);
845 }
846 
847 /*!
848  * Query the number of heads and save the result in pDevEvo->numHeads.
849  * Get window head assignment and save it in pDevEvo->headForWindow[win].
850  *
851  * Query the number of heads on each pDisp of the pDev and limit to
852  * the minimum across all pDisps. Query the headMask on each pDisp and
853  * take the intersection across pDisps. Query the window-head assignment
854  * and if it is fully flexible, assign WINDOWs (2N) and (2N + 1) to HEAD N.
855  * Otherwise, use the queried assignment.
856  *
857  * Limit the number of heads to the number of bits in the headMask. Ignore
858  * the heads which don't have any windows assigned to them and heads which
859  * create holes in the headMask. If a head which has assigned windows gets
860  * pruned out, assign NV_INVALID_HEAD to those windows.
861  *
862  * \param[in,out] pDev   This is the device pointer; the pDisps within
863  *                       it are used to query per-GPU information.
864  *                       The result is written to pDevEvo->numHeads.
865  *
866  * \return               Return TRUE if numHeads are correctly queried and
867  *                       window-head assignment is done.
868  *                       Return FALSE if numHeads or window-head assignment
869  *                       could not be queried.
870  */
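/*
 * For illustration: with fully flexible window mapping and four usable heads,
 * the policy described above assigns windows 0,1 -> head 0; 2,3 -> head 1;
 * 4,5 -> head 2; 6,7 -> head 3 (i.e., head = window >> 1).
 */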
871 static NvBool ProbeHeadCountAndWindowAssignment(NVDevEvoPtr pDevEvo)
872 {
873     NvU32 numHeads = 0, headMask = 0;
874     NvU32 headsWithWindowsMask = 0;
875     int sd, head, numBits;
876     NVDispEvoPtr pDispEvo;
877     NvBool first = TRUE;
878     NvBool isFlexibleWindowMapping = NV_TRUE;
879     NvU32 win;
880     NvU32 ret;
881 
882     pDevEvo->numHeads = 0;
883 
884     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
885 
886         NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS numHeadsParams = { 0 };
887         NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS headMaskParams = { 0 };
888         NV0073_CTRL_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT_PARAMS winHeadAssignParams = { };
889 
890         numHeadsParams.subDeviceInstance = sd;
891         numHeadsParams.flags = 0;
892 
893         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
894                              pDevEvo->displayCommonHandle,
895                              NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS,
896                              &numHeadsParams, sizeof(numHeadsParams));
897 
898         if (ret != NVOS_STATUS_SUCCESS) {
899             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
900                         "Failed to get the number of heads");
901             return FALSE;
902         }
903 
904         if (numHeadsParams.numHeads == 0) {
905             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "No heads found on board!");
906             return FALSE;
907         }
908 
909         if (numHeads == 0) {
910             numHeads = numHeadsParams.numHeads;
911         } else {
912             if (numHeads != numHeadsParams.numHeads) {
913                 NvU32 minNumHeads =
914                     NV_MIN(numHeads, numHeadsParams.numHeads);
915                 nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
916                             "Unexpected numbers of heads "
917                             "(%d, %d); clamping to %d",
918                             numHeads, numHeadsParams.numHeads, minNumHeads);
919                 numHeads = minNumHeads;
920             }
921         }
922 
923         headMaskParams.subDeviceInstance = sd;
924 
925         ret = nvRmApiControl(
926                 nvEvoGlobal.clientHandle,
927                 pDevEvo->displayCommonHandle,
928                 NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK,
929                 &headMaskParams, sizeof(headMaskParams));
930 
931         if (ret != NVOS_STATUS_SUCCESS) {
932             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
933                         "Failed to get head configuration");
934             return FALSE;
935         }
936 
937         if (headMask == 0) {
938             headMask = headMaskParams.headMask;
939         } else {
940             if (headMask != headMaskParams.headMask) {
941                 NvU32 intersectedHeadMask =
942                     headMask & headMaskParams.headMask;
943                 nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
944                             "Unexpected head configurations "
945                             "(0x%02x, 0x%02x); limiting to 0x%02x",
946                             headMask, headMaskParams.headMask,
947                             intersectedHeadMask);
948                 headMask = intersectedHeadMask;
949             }
950         }
951 
952         winHeadAssignParams.subDeviceInstance = sd;
953         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
954                              pDevEvo->displayCommonHandle,
955                              NV0073_CTRL_CMD_SPECIFIC_GET_VALID_HEAD_WINDOW_ASSIGNMENT,
956                              &winHeadAssignParams, sizeof(winHeadAssignParams));
957 
958         if (ret == NVOS_STATUS_SUCCESS) {
959             for (win = 0; win < NVKMS_MAX_WINDOWS_PER_DISP; win++) {
960                 NvU32 windowHeadMask = winHeadAssignParams.windowHeadMask[win];
961 
962                 if ((win == 0) && first) {
963                     isFlexibleWindowMapping = IsFlexibleWindowMapping(windowHeadMask);
964                 } else if (isFlexibleWindowMapping) {
965                     /*
966                      * Currently, if one window is completely flexible, then all are.
967                      * In case of fully flexible window mapping, if windowHeadMask is
968                      * zero for a window, then that window is not present in HW.
969                      */
970                     nvAssert(!windowHeadMask || (isFlexibleWindowMapping ==
971                              IsFlexibleWindowMapping(windowHeadMask)));
972                 }
973 
974                 /*
975                  * For custom window mapping, if windowHeadMask is 0, then head
976                  * is not assigned to this window. For flexible window mapping,
977                  * if windowHeadMask is 0, then the window is not present in HW.
978                  */
979                 if (windowHeadMask == 0) {
980                     continue;
981                 }
982 
983                 if (isFlexibleWindowMapping) {
984                     /*
985                      * TODO: For now assign WINDOWs (2N) and (2N + 1) to HEAD N when
986                      * completely flexible window assignment is specified by window
987                      * head assignment mask.
988                      */
989                     head = win >> 1;
990                     windowHeadMask = NVBIT_TYPE(head, NvU8);
991                     nvAssert(head < numHeads);
992                 } else {
993                     // We don't support same window assigned to multiple heads.
994                     nvAssert(ONEBITSET(windowHeadMask));
995 
996                     head = BIT_IDX_32(windowHeadMask);
997                 }
998 
999                 if (first) {
1000                     pDevEvo->headForWindow[win] = head;
1001                     headsWithWindowsMask |= windowHeadMask;
1002                 } else {
1003                     nvAssert(pDevEvo->headForWindow[win] == head);
1004                 }
1005             }
1006         } else if (ret != NVOS_STATUS_ERROR_NOT_SUPPORTED) {
1007             nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
1008                              "Failed to get window-head assignment");
1009             return FALSE;
1010         } else {
1011             // Pre-Volta, we don't need to populate pDevEvo->headForWindow[] and
1012             // each HW head has a window assigned.
1013             headsWithWindowsMask = headMask;
1014         }
1015 
1016         if (first) {
1017             first = FALSE;
1018         }
1019     }
1020 
1021     /* Check whether heads which have windows assigned are actually present in HW */
1022     nvAssert(!(~headMask & headsWithWindowsMask));
1023 
1024     /* Intersect heads present in HW with heads which have windows assigned */
1025     headMask &= headsWithWindowsMask;
1026 
1027     /* clamp numHeads to the number of bits in headMask */
1028 
1029     numBits = nvPopCount32(headMask);
1030 
1031     /* for now, we only support headMask when it is tightly packed at 0 */
1032 
1033     for (head = 0; head < numBits; head++) {
1034         if ((headMask & (1 << head)) == 0) {
1035             NvU32 modifiedHeadMask = (1 << head) - 1;
1036 
1037             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1038                         "The head configuration (0x%02x) "
1039                         "is unexpected; limiting to 0x%02x", headMask,
1040                    modifiedHeadMask);
1041 
1042             headMask = modifiedHeadMask;
1043             numBits = head;
1044             break;
1045         }
1046     }
1047 
1048     /* headMask should never increase numHeads */
1049 
1050     if (numBits > numHeads) {
1051         nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1052                     "The head configuration (0x%02x) "
1053                     "is inconsistent with the number of heads (%d)",
1054                     headMask, numHeads);
1055     } else if (numBits < numHeads) {
1056         nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1057                     "Correcting number of heads for "
1058                     "current head configuration (0x%02x)", headMask);
1059         numHeads = numBits;
1060     }
1061 
1062     pDevEvo->numHeads = numHeads;
1063 
1064     /*
1065      * If a head which has assigned windows gets pruned out, assign
1066      * NV_INVALID_HEAD to those windows.
1067      */
1068     for (win = 0; win < NVKMS_MAX_WINDOWS_PER_DISP; win++) {
1069         if ((pDevEvo->headForWindow[win] == NV_INVALID_HEAD) ||
1070             (pDevEvo->headForWindow[win] < pDevEvo->numHeads)) {
1071             continue;
1072         }
1073         pDevEvo->headForWindow[win] = NV_INVALID_HEAD;
1074     }
1075 
1076     return TRUE;
1077 }
1078 
1079 /*!
1080  * Set a pConnectorEvo's software state based on the boot head assignment.
1081  */
1082 static void MarkConnectorBootHeadActive(NVDispEvoPtr pDispEvo, NvU32 head)
1083 {
1084     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1085     NVDpyId displayId, rootPortId;
1086     NVConnectorEvoPtr pConnectorEvo;
1087     NVDispHeadStateEvoPtr pHeadState;
1088     NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 };
1089     NvU32 ret;
1090 
1091     // Use the first displayId in the boot display list.
1092     //
1093     // TODO: What should we do if more than one dpy ID is listed for a boot
1094     // display?
1095     nvAssert(nvCountDpyIdsInDpyIdList(pDispEvo->vbiosDpyConfig[head]) == 1);
1096     displayId = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(),
1097                                               pDispEvo->vbiosDpyConfig[head]);
1098 
1099     // The displayId reported by RM could be a dynamic one.  Find the root port
1100     // for this ID.
1101     params.subDeviceInstance = pDispEvo->displayOwner;
1102     params.displayId = nvDpyIdToNvU32(displayId);
1103 
1104     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1105                          pDevEvo->displayCommonHandle,
1106                          NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
1107                          &params, sizeof(params));
1108     if (ret != NVOS_STATUS_SUCCESS) {
1109         return;
1110     }
1111 
1112     if (params.bIsDispDynamic) {
1113         rootPortId = nvNvU32ToDpyId(params.rootPortId);
1114     } else {
1115         rootPortId = displayId;
1116     }
1117 
1118     pConnectorEvo = nvGetConnectorFromDisp(pDispEvo, rootPortId);
1119     if (!pConnectorEvo) {
1120         return;
1121     }
1122 
1123     if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
1124             NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
1125 
1126         nvAssert(params.index != NV_INVALID_OR);
1127         if (params.index == NV_INVALID_OR) {
1128             // If RM reported that a head is driving this dpyId, then there
1129             // should be an SOR assigned.  However, due to a bug in the way
1130             // PDB_PROP_GPU_DISABLE_VGA_CONSOLE_RESTORATION_ON_RESUME is
1131             // handled, RM can report an "active" head with no SOR assigned on
1132             // certain specific GPUs.  If that happens, just treat the head as
1133             // disabled.  See bug 1692425.
1134             pDispEvo->vbiosDpyConfig[head] = nvEmptyDpyIdList();
1135             return;
1136         } else {
1137             // Track the SOR assignment for this connector.  See the comment in
1138             // nvRmGetConnectorORInfo() for why this is deferred until now.
1139             nvAssert(pConnectorEvo->or.primary == NV_INVALID_OR);
1140             pConnectorEvo->or.primary = params.index;
1141         }
1142     }
1143     nvAssert(pConnectorEvo->or.primary == params.index);
1144 
1145     pHeadState = &pDispEvo->headState[head];
1146 
1147     nvAssert(!nvHeadIsActive(pDispEvo, head));
1148 
1149     pHeadState->pConnectorEvo = pConnectorEvo;
1150     pHeadState->activeRmId = nvDpyIdToNvU32(displayId);
1151 
1152     // Track the assigned head.
1153     pConnectorEvo->or.ownerHeadMask[params.index] |= NVBIT(head);
1154 
1155     nvEvoStateStartNoLock(&pDispEvo->pDevEvo->gpus[pDispEvo->displayOwner]);
1156 }
1157 
1158 /*!
1159  * Query the vbios assignment of heads to display devices, and cache
1160  * in pDispEvo->vbiosDpyConfig for later use by nvDPResume().
1161  *
1162  * \param[in,out] pDisp  This is the GPU display pointer; the result is
1163  *                       written to pDispEvo->vbiosDpyConfig
1164  */
1165 static void GetVbiosHeadAssignmentOneDisp(NVDispEvoPtr pDispEvo)
1166 {
1167     unsigned int head;
1168     NvU32 ret = NVOS_STATUS_ERROR_GENERIC;
1169     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1170 
1171     nvkms_memset(&pDispEvo->vbiosDpyConfig, 0,
1172                  sizeof(pDispEvo->vbiosDpyConfig));
1173 
1174     /* if there is no display, there is no vbiosDpyConfig */
1175 
1176     nvAssert(pDevEvo->displayCommonHandle != 0);
1177 
1178     /*
1179      * get the vbios assignment of heads within the GPU, so that
1180      * later when we do head assignment, we can try to preserve the
1181      * existing assignment; see bug 208072
1182      */
1183 
1184     for (head = 0; head < pDevEvo->numHeads; head++) {
1185         NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS activeDpysParams = { 0 };
1186 
1187         activeDpysParams.subDeviceInstance = pDispEvo->displayOwner;
1188         activeDpysParams.head = head;
1189         /*
1190          * We want to check for active displays set by any low-level software
1191          * such as VBIOS, not just those set by an RM client
1192          */
1193         activeDpysParams.flags =
1194             DRF_DEF(0073, _CTRL_SYSTEM_GET_ACTIVE_FLAGS, _CLIENT, _DISABLE);
1195 
1196         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1197                              pDevEvo->displayCommonHandle,
1198                              NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE,
1199                              &activeDpysParams, sizeof(activeDpysParams));
1200 
1201         if (ret == NVOS_STATUS_SUCCESS) {
1202             // XXX TODO: If this is a dynamic display ID, it's not necessarily
1203             // correlated with the NVDpyId we'll assign to a dynamic pDpyEvo
1204             // later.  We should instead store this as an NvU32 and assign it as
1205             // the activeRmId for a dynamic pDpyEvo that DPLib reports as being
1206             // driven by the firmware group.  See bug 1656584.
1207             pDispEvo->vbiosDpyConfig[head] =
1208                 nvNvU32ToDpyIdList(activeDpysParams.displayId);
1209             if (activeDpysParams.displayId != 0) {
1210                 MarkConnectorBootHeadActive(pDispEvo, head);
1211             }
1212         }
1213 
1214         nvAssert(ret == NVOS_STATUS_SUCCESS);
1215     }
1216 }
1217 
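/*!
 * Cache the VBIOS/boot head assignments for every pDisp on the device.
 */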
1218 static void GetVbiosHeadAssignment(NVDevEvoPtr pDevEvo)
1219 {
1220     NVDispEvoPtr pDispEvo;
1221     NvU32 dispIndex;
1222 
1223     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1224         GetVbiosHeadAssignmentOneDisp(pDispEvo);
1225     }
1226 }
1227 
1228 /*!
1229  * Query the boot display device(s).
1230  */
1231 static void ProbeBootDisplays(NVDispEvoPtr pDispEvo)
1232 {
1233     NvU32 ret;
1234     NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS bootParams = { 0 };
1235     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1236 
1237     pDispEvo->bootDisplays = nvEmptyDpyIdList();
1238 
1239     bootParams.subDeviceInstance = pDispEvo->displayOwner;
1240 
1241     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1242                          pDevEvo->displayCommonHandle,
1243                          NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS,
1244                          &bootParams, sizeof(bootParams));
1245 
1246     if (ret == NVOS_STATUS_SUCCESS) {
1247         pDispEvo->bootDisplays =
1248             nvNvU32ToDpyIdList(bootParams.bootDisplayMask);
1249     }
1250 }
1251 
1252 /*!
1253  * Query the 0073 display common object capabilities.
1254  */
1255 static NvBool ProbeDisplayCommonCaps(NVDevEvoPtr pDevEvo)
1256 {
1257     NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS capsParams = { };
1258     NvU32 ret;
1259 
1260     ct_assert(sizeof(pDevEvo->commonCapsBits) == sizeof(capsParams.capsTbl));
1261     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1262                          pDevEvo->displayCommonHandle,
1263                          NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2,
1264                          &capsParams, sizeof(capsParams));
1265     if (ret != NVOS_STATUS_SUCCESS) {
1266         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1267                     "Failed to determine display common capabilities");
1268         return FALSE;
1269     }
1270     nvkms_memcpy(pDevEvo->commonCapsBits, capsParams.capsTbl,
1271                  sizeof(pDevEvo->commonCapsBits));
1272 
1273     return TRUE;
1274 }
1275 
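/*!
 * Read a single byte from the sink's DPCD at dpcdAddr over the DP AUX
 * channel; on success, store it in *dpcdData and return TRUE.
 */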
1276 static NvBool ReadDPCDReg(NVConnectorEvoPtr pConnectorEvo,
1277                           NvU32 dpcdAddr,
1278                           NvU8 *dpcdData)
1279 {
1280     NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params = { };
1281     NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;
1282 
1283     params.subDeviceInstance = pConnectorEvo->pDispEvo->displayOwner;
1284     params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
1285 
1286     params.cmd = DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX);
1287     params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _READ);
1288 
1289     params.addr = dpcdAddr;
1290 
1291     /* Requested size is 0-based */
1292     params.size = 0;
1293 
1294     if (nvRmApiControl(nvEvoGlobal.clientHandle,
1295                        pDevEvo->displayCommonHandle,
1296                        NV0073_CTRL_CMD_DP_AUXCH_CTRL,
1297                        &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
1298         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1299                     "AUX read failed for DPCD addr 0x%x",
1300                     dpcdAddr);
1301         return FALSE;
1302     }
1303 
1304     if (params.size != 1U) {
1305         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1306                     "AUX read returned 0 bytes for DPCD addr 0x%x",
1307                     dpcdAddr);
1308         return FALSE;
1309     }
1310 
1311     *dpcdData = params.data[0];
1312 
1313     return TRUE;
1314 }
1315 
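/*!
 * Write a single byte to the sink's DPCD at dpcdAddr over the DP AUX channel.
 */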
1316 NvBool nvWriteDPCDReg(NVConnectorEvoPtr pConnectorEvo,
1317                       NvU32 dpcdAddr,
1318                       NvU8 dpcdData)
1319 {
1320     NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params = { };
1321     NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;
1322 
1323     params.subDeviceInstance = pConnectorEvo->pDispEvo->displayOwner;
1324     params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
1325 
1326     params.cmd = DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX);
1327     params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE);
1328 
1329     params.addr = dpcdAddr;
1330     params.data[0] = dpcdData;
1331 
1332     /* Requested size is 0-based */
1333     params.size = 0;
1334 
1335     if (nvRmApiControl(nvEvoGlobal.clientHandle,
1336                        pDevEvo->displayCommonHandle,
1337                        NV0073_CTRL_CMD_DP_AUXCH_CTRL,
1338                        &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
1339         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1340                     "AUX write failed for DPCD addr 0x%x",
1341                     dpcdAddr);
1342         return FALSE;
1343     }
1344 
1345     if (params.size != 1U) {
1346         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1347                     "Wrote 0 bytes for DPCD addr 0x%x",
1348                     dpcdAddr);
1349         return FALSE;
1350     }
1351 
1352     return TRUE;
1353 }
1354 
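/*!
 * Query the DP serializer's link capabilities (max link bandwidth, max lane
 * count, MST support) directly from DPCD, since serializer connectors are
 * not managed by the DP library.
 */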
1355 static NvBool ReadDPSerializerCaps(NVConnectorEvoPtr pConnectorEvo)
1356 {
1357     NVDpyIdList oneDpyIdList =
1358         nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId);
1359     NVDpyIdList connectedList;
1360     NvU8 dpcdData = 0;
1361 
1362     /*
1363      * This call will not only confirm that the DP serializer is connected, but
1364      * will also power on the corresponding DPAUX pads if the serializer is
1365      * detected via NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE. The DPAUX pads
1366      * need to be enabled for the DPCD reads below.
1367      */
1368     connectedList = nvRmGetConnectedDpys(pConnectorEvo->pDispEvo, oneDpyIdList);
1369     if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedList)) {
1370         nvEvoLogDev(pConnectorEvo->pDispEvo->pDevEvo, EVO_LOG_ERROR,
1371                     "Serializer connector %s is not currently connected!",
1372                     pConnectorEvo->name);
1373         return FALSE;
1374     }
1375 
1376     if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LINK_BANDWIDTH, &dpcdData)) {
1377         return FALSE;
1378     }
1379     pConnectorEvo->dpSerializerCaps.maxLinkBW =
1380         DRF_VAL(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, dpcdData);
1381 
1382     if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LANE_COUNT, &dpcdData)) {
1383         return FALSE;
1384     }
1385     pConnectorEvo->dpSerializerCaps.maxLaneCount =
1386         DRF_VAL(_DPCD, _MAX_LANE_COUNT, _LANE, dpcdData);
1387 
1388     if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MSTM, &dpcdData)) {
1389         return FALSE;
1390     }
1391     pConnectorEvo->dpSerializerCaps.supportsMST =
1392         FLD_TEST_DRF(_DPCD, _MSTM, _CAP, _YES, dpcdData);
1393 
1394     return TRUE;
1395 }
1396 
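/*!
 * For an MST-capable DP serializer connector, create one dynamic dpy per
 * head with addresses "0.1", "0.2", ..., and record each dpy's stream index.
 */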
1397 static NvBool AllocDPSerializerDpys(NVConnectorEvoPtr pConnectorEvo)
1398 {
1399     NvBool supportsMST;
1400     NvU32 numHeads;
1401     NvU32 i;
1402 
1403     if (!nvConnectorIsDPSerializer(pConnectorEvo)) {
1404         return TRUE;
1405     }
1406 
1407     if (!ReadDPSerializerCaps(pConnectorEvo)) {
1408         return FALSE;
1409     }
1410 
1411     supportsMST = pConnectorEvo->dpSerializerCaps.supportsMST;
1412     numHeads = pConnectorEvo->pDispEvo->pDevEvo->numHeads;
1413     for (i = 0; i < numHeads && supportsMST; i++) {
1414         NVDpyEvoPtr pDpyEvo = NULL;
1415         NvBool dynamicDpyCreated = FALSE;
1416         char address[5] = { };
1417 
1418         nvkms_snprintf(address, sizeof(address), "0.%d", i + 1);
1419         pDpyEvo = nvGetDPMSTDpyEvo(pConnectorEvo, address,
1420                                    &dynamicDpyCreated);
1421         if ((pDpyEvo == NULL) || !dynamicDpyCreated) {
1422             return FALSE;
1423         }
1424 
1425         pDpyEvo->dp.serializerStreamIndex = i;
1426     }
1427 
1428     return TRUE;
1429 }
1430 
1431 /*!
1432  * Allocate a pDpyEvo for each connector on this pDispEvo.
1433  */
1434 static NvBool AllocDpys(NVDispEvoPtr pDispEvo)
1435 {
1436     NVConnectorEvoPtr pConnectorEvo;
1437 
1438     // At this point, there should be no DisplayPort multistream devices.
1439     nvAssert(nvDpyIdListsAreEqual(pDispEvo->validDisplays,
1440                                   pDispEvo->connectorIds));
1441     nvAssert(nvDpyIdListIsEmpty(pDispEvo->displayPortMSTIds));
1442     nvAssert(nvDpyIdListIsEmpty(pDispEvo->dynamicDpyIds));
1443 
1444     FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
1445         NVDpyEvoPtr pDpyEvo;
1446 
1447         pDpyEvo = nvAllocDpyEvo(pDispEvo, pConnectorEvo,
1448                                 pConnectorEvo->displayId, NULL);
1449 
1450         if (pDpyEvo == NULL) {
1451             nvAssert(!"Failed to allocate pDpy");
1452             return FALSE;
1453         }
1454 
1455         if (!AllocDPSerializerDpys(pConnectorEvo)) {
1456             nvAssert(!"Failed to allocate non-DPLib-managed dpys");
1457             return FALSE;
1458         }
1459     }
1460 
1461     return TRUE;
1462 }
1463 
1464 static void FreeDpys(NVDispEvoPtr pDispEvo)
1465 {
1466     NVDpyEvoPtr pDpyEvo, pDpyEvoTmp;
1467 
1468     nvListForEachEntry_safe(pDpyEvo, pDpyEvoTmp,
1469                             &pDispEvo->dpyList, dpyListEntry) {
1470         nvFreeDpyEvo(pDispEvo, pDpyEvo);
1471     }
1472 }
1473 
1474 
1475 /*!
1476  * Receive hotplug notification from resman.
1477  *
1478  * This function is registered as the kernel callback function from
1479  * resman when an NV2080_NOTIFIERS_HOTPLUG event is generated.
1480  *
1481  * However, this function is called with resman's context (resman locks held,
1482  * etc).  Schedule deferred work, so that we can process the hotplug event
1483  * without resman's encumbrances.
1484  */
1485 static void ReceiveHotplugEvent(void *arg, void *pEventDataVoid, NvU32 hEvent,
1486                                 NvU32 Data, NV_STATUS Status)
1487 {
1488     (void) nvkms_alloc_timer_with_ref_ptr(
1489         nvHandleHotplugEventDeferredWork, /* callback */
1490         arg, /* argument (this is a ref_ptr to a pDispEvo) */
1491         0,   /* dataU32 */
1492         0);
1493 }
1494 
1495 static void ReceiveDPIRQEvent(void *arg, void *pEventDataVoid, NvU32 hEvent,
1496                               NvU32 Data, NV_STATUS Status)
1497 {
1498     // XXX The displayId of the connector that generated the event should be
1499     // available here somewhere.  We should figure out how to find that and
1500     // plumb it through to nvHandleDPIRQEventDeferredWork.
1501     (void) nvkms_alloc_timer_with_ref_ptr(
1502         nvHandleDPIRQEventDeferredWork, /* callback */
1503         arg, /* argument (this is a ref_ptr to a pDispEvo) */
1504         0,   /* dataU32 */
1505         0);
1506 }
1507 
1508 NvBool nvRmRegisterCallback(const NVDevEvoRec *pDevEvo,
1509                             NVOS10_EVENT_KERNEL_CALLBACK_EX *cb,
1510                             struct nvkms_ref_ptr *ref_ptr,
1511                             NvU32 parentHandle,
1512                             NvU32 eventHandle,
1513                             Callback5ArgVoidReturn func,
1514                             NvU32 event)
1515 {
1516     NV0005_ALLOC_PARAMETERS allocEventParams = { 0 };
1517 
1518     cb->func = func;
1519     cb->arg = ref_ptr;
1520 
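    /*
     * RM stores a pointer to 'cb' (passed via allocEventParams.data below),
     * so the callback structure must remain valid for as long as the event
     * object exists.
     */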
1521     allocEventParams.hParentClient = nvEvoGlobal.clientHandle;
1522     allocEventParams.hClass        = NV01_EVENT_KERNEL_CALLBACK_EX;
1523     allocEventParams.notifyIndex   = event;
1524     allocEventParams.data          = NV_PTR_TO_NvP64(cb);
1525 
1526     return nvRmApiAlloc(nvEvoGlobal.clientHandle,
1527                         parentHandle,
1528                         eventHandle,
1529                         NV01_EVENT_KERNEL_CALLBACK_EX,
1530                         &allocEventParams)
1531         == NVOS_STATUS_SUCCESS;
1532 }
1533 
1534 static NvBool RegisterDispCallback(NVOS10_EVENT_KERNEL_CALLBACK_EX *cb,
1535                                    NVDispEvoPtr pDispEvo,
1536                                    NvU32 handle,
1537                                    Callback5ArgVoidReturn func,
1538                                    NvU32 event)
1539 {
1540     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1541     NvU32 subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle;
1542 
1543     return nvRmRegisterCallback(pDevEvo, cb, pDispEvo->ref_ptr, subDevice,
1544                                 handle, func, event);
1545 }
1546 
1547 static void
1548 DifrPrefetchEventDeferredWork(void *dataPtr, NvU32 dataU32)
1549 {
1550     NVDevEvoPtr pDevEvo = dataPtr;
1551     size_t l2CacheSize = (size_t)dataU32;
1552     NvU32 status;
1553 
1554     nvAssert(pDevEvo->pDifrState);
1555 
1556     status = nvDIFRPrefetchSurfaces(pDevEvo->pDifrState, l2CacheSize);
1557     nvDIFRSendPrefetchResponse(pDevEvo->pDifrState, status);
1558 }
1559 
1560 static void DifrPrefetchEvent(void *arg, void *pEventDataVoid,
1561                               NvU32 hEvent, NvU32 Data, NV_STATUS Status)
1562 {
1563     Nv2080LpwrDifrPrefetchNotification *notif =
1564         (Nv2080LpwrDifrPrefetchNotification *)pEventDataVoid;
1565 
1566     (void)nvkms_alloc_timer_with_ref_ptr(
1567         DifrPrefetchEventDeferredWork, /* callback */
1568         arg, /* argument (this is a ref_ptr to a pDevEvo) */
1569         notif->l2CacheSize, /* dataU32 */
1570         0);  /* timeout: schedule the work immediately */
1571 }
1572 
1573 enum NvKmsAllocDeviceStatus nvRmAllocDisplays(NVDevEvoPtr pDevEvo)
1574 {
1575     NVDispEvoPtr pDispEvo;
1576     unsigned int sd;
1577     enum NvKmsAllocDeviceStatus status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
1578     NvU32 totalDispNumSubDevices = 0;
1579 
1580     pDevEvo->sli.bridge.present = FALSE;
1581 
1582     if (!QueryGpuCapabilities(pDevEvo)) {
1583         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1584                     "Failed to query GPU capabilities");
1585         goto fail;
1586     }
1587 
1588     if (pDevEvo->supportsSyncpts) {
1589         pDevEvo->preSyncptTable =
1590             nvCalloc(1, sizeof(NVEvoSyncpt) * NV_SYNCPT_GLOBAL_TABLE_LENGTH);
1591         if (pDevEvo->preSyncptTable == NULL) {
1592             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1593                     "Failed to allocate memory for pre-syncpt table");
1594             goto fail;
1595         }
1596     }
1597 
1598     if (!AllocDisplays(pDevEvo)) {
1599         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate displays");
1600         goto fail;
1601     }
1602 
1603     /* allocate the display common object for this device */
1604 
1605     if (nvRmEvoClassListCheck(pDevEvo, NV04_DISPLAY_COMMON)) {
1606 
1607         pDevEvo->displayCommonHandle =
1608             nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
1609 
1610         if (nvRmApiAlloc(nvEvoGlobal.clientHandle,
1611                          pDevEvo->deviceHandle,
1612                          pDevEvo->displayCommonHandle,
1613                          NV04_DISPLAY_COMMON, NULL)
1614                 != NVOS_STATUS_SUCCESS) {
1615             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
1616                         "Failed to initialize the display "
1617                         "subsystem for the NVIDIA graphics device!");
1618             goto fail;
1619 
1620         }
1621     } else {
1622         /*
1623          * Not supporting NV04_DISPLAY_COMMON is expected in some
1624          * configurations: e.g., GF117 (an Optimus-only or "coproc" GPU),
1625          * emulation netlists.  Fail with "no hardware".
1626          */
1627         status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1628         goto fail;
1629     }
1630 
1631     if (!ProbeDisplayCommonCaps(pDevEvo)) {
1632         status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1633         goto fail;
1634     }
1635 
1636     if (!ProbeHeadCountAndWindowAssignment(pDevEvo)) {
1637         status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1638         goto fail;
1639     }
1640 
1641     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1642         if (!ProbeValidDisplays(pDispEvo)) {
1643             status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1644             goto fail;
1645         }
1646 
1647         /* Keep track of connectors per pDisp and bind to DP lib if capable */
1648         if (!AllocConnectors(pDispEvo)) {
1649             status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1650             goto fail;
1651         }
1652     }
1653 
1654     /*
1655      * If there are no valid display devices, fail with "no hardware".
1656      */
1657     if (NoValidDisplays(pDevEvo)) {
1658         status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
1659         goto fail;
1660     }
1661 
1662     /*
1663      * The total number of subdevices across all disps should equal the
1664      * device's numSubDevices.
1665      */
1666     totalDispNumSubDevices = 0;
1667     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1668         totalDispNumSubDevices++;
1669     }
1670 
1671     if (totalDispNumSubDevices != pDevEvo->numSubDevices) {
1672         nvAssert(!"Number of disps' subdevices does not match device's");
1673     }
1674 
1675     /*
1676      * Allocate an NV event for each pDispEvo on the corresponding
1677      * subDevice, tied to the pDevEvo's OS event.
1678      */
1679     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1680         NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { };
1681         NvU32 subDevice, ret;
1682 
1683         subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle;
1684 
1685         pDispEvo->hotplugEventHandle =
1686             nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
1687 
1688         if (!RegisterDispCallback(&pDispEvo->rmHotplugCallback, pDispEvo,
1689                                   pDispEvo->hotplugEventHandle,
1690                                   ReceiveHotplugEvent,
1691                                   NV2080_NOTIFIERS_HOTPLUG)) {
1692             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1693                         "Failed to register display hotplug event");
1694         }
1695 
1696         // Enable hotplug notifications from this subdevice.
1697         setEventParams.event = NV2080_NOTIFIERS_HOTPLUG;
1698         setEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1699         if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1700                                   subDevice,
1701                                   NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
1702                                   &setEventParams,
1703                                   sizeof(setEventParams)))
1704                 != NVOS_STATUS_SUCCESS) {
1705             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1706                         "Failed to register display hotplug "
1707                         "handler: 0x%x\n", ret);
1708         }
1709     }
1710 
1711     // Allocate a handler for the DisplayPort "IRQ" event, which is signaled
1712     // when there's a short interruption in the hotplug detect line.
1713     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1714         NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { };
1715         NvU32 subDevice, ret;
1716 
1717         subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle;
1718 
1719         pDispEvo->DPIRQEventHandle =
1720             nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
1721 
1722         if (!RegisterDispCallback(&pDispEvo->rmDPIRQCallback, pDispEvo,
1723                                   pDispEvo->DPIRQEventHandle, ReceiveDPIRQEvent,
1724                                   NV2080_NOTIFIERS_DP_IRQ)) {
1725             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1726                         "Failed to register DisplayPort interrupt event");
1727         }
1728 
1729         // Enable DP IRQ notifications from this subdevice.
1730         setEventParams.event = NV2080_NOTIFIERS_DP_IRQ;
1731         setEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
1732         if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1733                                   subDevice,
1734                                   NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
1735                                   &setEventParams,
1736                                   sizeof(setEventParams)))
1737                 != NVOS_STATUS_SUCCESS) {
1738             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
1739                         "Failed to register DisplayPort interrupt "
1740                         "handler: 0x%x\n", ret);
1741         }
1742     }
1743 
1744     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1745         ProbeBootDisplays(pDispEvo);
1746 
1747         if (!AllocDpys(pDispEvo)) {
1748             goto fail;
1749         }
1750 
1751     }
1752 
1753     nvAllocVrrEvo(pDevEvo);
1754 
1755     return NVKMS_ALLOC_DEVICE_STATUS_SUCCESS;
1756 
1757 fail:
1758     nvRmDestroyDisplays(pDevEvo);
1759     return status;
1760 }
1761 
1762 
1763 void nvRmDestroyDisplays(NVDevEvoPtr pDevEvo)
1764 {
1765     NvU32 ret;
1766     NVDispEvoPtr pDispEvo;
1767     int dispIndex;
1768     NvS64 tmp;
1769 
1770     nvFreeVrrEvo(pDevEvo);
1771 
1772     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1773 
1774         // Before freeing anything, dump anything left in the RM's DisplayPort
1775         // AUX channel log.
1776         if (pDispEvo->dpAuxLoggingEnabled) {
1777             do {
1778                 ret = nvRmQueryDpAuxLog(pDispEvo, &tmp);
1779             } while (ret && tmp);
1780         }
1781 
1782         // Free the DisplayPort IRQ event.
1783         if (pDispEvo->DPIRQEventHandle != 0) {
1784             nvRmApiFree(nvEvoGlobal.clientHandle,
1785                         nvEvoGlobal.clientHandle,
1786                         pDispEvo->DPIRQEventHandle);
1787             nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
1788                                pDispEvo->DPIRQEventHandle);
1789             pDispEvo->DPIRQEventHandle = 0;
1790         }
1791 
1792         // Free the hotplug event.
1793         /*
1794          * XXX I wish I could cancel anything scheduled by
1795          * ReceiveHotplugEvent() and ReceiveDPIRQEvent() for this pDispEvo...
1796          */
1797         if (pDispEvo->hotplugEventHandle != 0) {
1798             nvRmApiFree(nvEvoGlobal.clientHandle,
1799                         nvEvoGlobal.clientHandle,
1800                         pDispEvo->hotplugEventHandle);
1801             nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
1802                                pDispEvo->hotplugEventHandle);
1803             pDispEvo->hotplugEventHandle = 0;
1804         }
1805     }
1806 
1807     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1808         FreeDpys(pDispEvo);
1809         FreeConnectors(pDispEvo);
1810     }
1811 
1812     FreeDisplays(pDevEvo);
1813 
1814     nvFree(pDevEvo->preSyncptTable);
1815     pDevEvo->preSyncptTable = NULL;
1816 
1817     if (pDevEvo->displayCommonHandle != 0) {
1818         ret = nvRmApiFree(nvEvoGlobal.clientHandle,
1819                           pDevEvo->deviceHandle,
1820                           pDevEvo->displayCommonHandle);
1821         if (ret != NVOS_STATUS_SUCCESS) {
1822             nvAssert(!"Free(displayCommonHandle) failed");
1823         }
1824         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
1825                            pDevEvo->displayCommonHandle);
1826         pDevEvo->displayCommonHandle = 0;
1827     }
1828 }
1829 
1830 
1831 /*!
1832  * Allocate a display ID that we use to talk to RM about the dpy(s) on a
1833  * head.
1834  *
1835  * \param[in]  pDisp      The display system on which to allocate the ID.
1836  * \param[in]  dpyList    The list of dpys.
1837  *
1838  * \return  The display ID, or 0 on failure.
1839  */
1840 NvU32 nvRmAllocDisplayId(const NVDispEvoRec *pDispEvo, const NVDpyIdList dpyList)
1841 {
1842     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1843     NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS params = { 0 };
1844     const NVDpyEvoRec *pDpyEvo;
1845     const NVConnectorEvoRec *pConnectorEvo = NULL;
1846     NvBool isDPMST = NV_FALSE;
1847     NvU32 ret;
1848 
1849     FOR_ALL_EVO_DPYS(pDpyEvo, dpyList, pDispEvo) {
1850         if (pConnectorEvo == NULL) {
1851             /* First dpy in the list: record its pConnectorEvo and isDPMST */
1852             pConnectorEvo = pDpyEvo->pConnectorEvo;
1853             isDPMST = nvDpyEvoIsDPMST(pDpyEvo);
1854         }
1855 
1856         if (pConnectorEvo != pDpyEvo->pConnectorEvo ||
1857             isDPMST != nvDpyEvoIsDPMST(pDpyEvo)) {
1858             return 0;
1859         }
1860     }
1861 
1862     nvAssert(nvConnectorUsesDPLib(pConnectorEvo) || !isDPMST);
1863 
1864     if (!isDPMST) {
1865         /* For non-MST dpy(s), simply return static display ID of connector */
1866         return nvDpyIdToNvU32(pConnectorEvo->displayId);
1867     }
1868 
1869     params.subDeviceInstance = pDispEvo->displayOwner;
1870     params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
1871 
1872     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1873                          pDevEvo->displayCommonHandle,
1874                          NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
1875                          &params, sizeof(params));
1876 
1877     if (ret == NVOS_STATUS_SUCCESS) {
1878         return params.displayIdAssigned;
1879     } else {
1880         nvEvoLogDisp(pDispEvo, EVO_LOG_WARN,
1881                      "Failed to allocate display resource.");
1882     }
1883 
1884     return 0;
1885 }
1886 
1887 
1888 /*!
1889  * Send DISPLAY_CHANGE to resman.
1890  *
1891  * This should be called before and after each mode change, with the display
1892  * mask describing the NEW display configuration.
1893  */
1894 void nvRmBeginEndModeset(NVDispEvoPtr pDispEvo,
1895                          enum NvKmsBeginEndModeset beginOrEnd,
1896                          NvU32 mask)
1897 {
1898     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1899     NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS bracketParams = { };
1900     NvU32 ret;
1901 
1902     bracketParams.subDeviceInstance = pDispEvo->displayOwner;
1903     bracketParams.newDevices = mask;
1904     bracketParams.properties = 0; /* this is currently unused */
1905     switch (beginOrEnd) {
1906         case BEGIN_MODESET:
1907             bracketParams.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START;
1908             break;
1909         case END_MODESET:
1910             bracketParams.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END;
1911             break;
1912     }
1913 
1914     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1915                          pDevEvo->displayCommonHandle,
1916                          NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE,
1917                          &bracketParams,
1918                          sizeof(bracketParams));
1919     if (ret != NVOS_STATUS_SUCCESS) {
1920         nvAssert(!"Failed NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE");
1921     }
1922 }
1923 
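/*
 * Minimal usage sketch of the DISPLAY_CHANGE bracket above: both calls take
 * the mask describing the NEW display configuration.  The mask and the
 * programming step in between are hypothetical.
 */
static void ExampleModesetBracket(NVDispEvoPtr pDispEvo, NvU32 newDpyMask)
{
    nvRmBeginEndModeset(pDispEvo, BEGIN_MODESET, newDpyMask);

    /* ... program the new configuration on the display hardware ... */

    nvRmBeginEndModeset(pDispEvo, END_MODESET, newDpyMask);
}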
1924 
1925 /*!
1926  * Free a RM display ID, if it was allocated dynamically.
1927  *
1928  * This function frees a display ID if it was allocated by
1929  * nvRmAllocDisplayId.  If the display ID is static, this function does
1930  * nothing.
1931  *
1932  * From ctrl0073dp.h: You must not call this function while either the ARM
1933  * or ASSEMBLY state cache refers to this display-id.  The head must not be
1934  * attached.
1935  *
1936  * \param[in]  pDisp      The display system on which to free the ID.
1937  * \param[in]  displayId  The display ID to free.
1938  */
1939 void nvRmFreeDisplayId(const NVDispEvoRec *pDispEvo, NvU32 displayId)
1940 {
1941     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1942     NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS params = { 0 };
1943     NVDpyId dpyId = nvNvU32ToDpyId(displayId);
1944     NvU32 ret;
1945 
1946     /* Do nothing if the display ID is a static (connector) ID. */
1947     if (nvDpyIdIsInDpyIdList(dpyId, pDispEvo->connectorIds)) {
1948         return;
1949     }
1950 
1951     params.subDeviceInstance = pDispEvo->displayOwner;
1952     params.displayId = displayId;
1953 
1954     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1955                          pDevEvo->displayCommonHandle,
1956                          NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID,
1957                          &params, sizeof(params));
1958 
1959     if (ret != NVOS_STATUS_SUCCESS) {
1960         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
1961                      "Failed to relinquish display resource.");
1962     }
1963 }
1964 
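/*
 * Minimal usage sketch pairing nvRmAllocDisplayId() with nvRmFreeDisplayId()
 * for a connector's static dpy.  nvRmFreeDisplayId() is a no-op for static
 * IDs, so the pairing is harmless here and required for dynamically allocated
 * MST display IDs.  The work done while the ID is held is hypothetical.
 */
static NvBool ExampleWithDisplayId(const NVDispEvoRec *pDispEvo,
                                   const NVConnectorEvoRec *pConnectorEvo)
{
    NVDpyIdList dpyIdList =
        nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId);
    NvU32 displayId = nvRmAllocDisplayId(pDispEvo, dpyIdList);

    if (displayId == 0) {
        return FALSE;
    }

    /* ... use displayId in RM display control calls ... */

    nvRmFreeDisplayId(pDispEvo, displayId);

    return TRUE;
}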
1965 
1966 /*!
1967  * Query Resman for the (broad) display device type.
1968  */
1969 static NvU32 GetLegacyConnectorType(NVDispEvoPtr pDispEvo, NVDpyId dpyId)
1970 {
1971     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1972     NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS params = { 0 };
1973     NvU32 ret;
1974 
1975     params.subDeviceInstance = pDispEvo->displayOwner;
1976     params.displayId = nvDpyIdToNvU32(dpyId);
1977 
1978     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1979                          pDevEvo->displayCommonHandle,
1980                          NV0073_CTRL_CMD_SPECIFIC_GET_TYPE,
1981                          &params, sizeof(params));
1982 
1983     if (ret != NVOS_STATUS_SUCCESS) {
1984         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
1985                      "Failure getting specific display device type.");
1986         return NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_UNKNOWN;
1987     }
1988 
1989     nvAssert((params.displayType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) ||
1990              (params.displayType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP));
1991 
1992     return params.displayType;
1993 }
1994 
1995 
1996 /*!
1997  * Query RM for the current OR properties of the given connector.
1998  *
1999  * If 'assertOnly' is TRUE, this function will only assert that the OR
2000  * configuration has not changed.
2001  */
2002 void nvRmGetConnectorORInfo(NVConnectorEvoPtr pConnectorEvo, NvBool assertOnly)
2003 {
2004     NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
2005     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2006     NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 };
2007     NvU32 ret;
2008 
2009     params.subDeviceInstance = pDispEvo->displayOwner;
2010     params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
2011 
2012     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
2013                          pDevEvo->displayCommonHandle,
2014                          NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
2015                          &params,
2016                          sizeof(params));
2017     if (ret != NVOS_STATUS_SUCCESS) {
2018         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
2019                      "Failed to determine output resource properties.");
2020 
2021         if (assertOnly) {
2022             return;
2023         }
2024         pConnectorEvo->or.type = NV0073_CTRL_SPECIFIC_OR_TYPE_DAC;
2025         pConnectorEvo->or.primary = NV_INVALID_OR;
2026         pConnectorEvo->or.secondaryMask = 0;
2027         pConnectorEvo->or.protocol =
2028             NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT;
2029         pConnectorEvo->or.ditherType = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF;
2030         pConnectorEvo->or.ditherAlgo =
2031             NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN;
2032         pConnectorEvo->or.location = NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP;
2033 
2034         return;
2035     }
2036 
2037     if (!assertOnly) {
2038         pConnectorEvo->or.type = params.type;
2039         if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
2040                 NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED) &&
2041             params.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
2042             // For the SOR crossbar, RM may report that multiple displayIDs own
2043             // the same SOR.  For example, it may report SOR 2 for both the
2044             // DisplayPort and TMDS halves of a physical connector even though
2045             // they have separate displayIds.
2046             //
2047             // All we really need to know is which SOR is assigned to the boot
2048             // display, so we defer the query to MarkConnectorBootHeadActive().
2049             pConnectorEvo->or.secondaryMask = 0x0;
2050             pConnectorEvo->or.primary = NV_INVALID_OR;
2051         } else {
2052             pConnectorEvo->or.secondaryMask = 0x0;
2053             pConnectorEvo->or.primary = params.index;
2054         }
2055         pConnectorEvo->or.protocol = params.protocol;
2056         pConnectorEvo->or.ditherType = params.ditherType;
2057         pConnectorEvo->or.ditherAlgo = params.ditherAlgo;
2058         pConnectorEvo->or.location = params.location;
2059     } else {
2060         nvAssert(pConnectorEvo->or.type == params.type);
2061         nvAssert(pConnectorEvo->or.primary == params.index);
2062         nvAssert(pConnectorEvo->or.protocol == params.protocol);
2063         nvAssert(pConnectorEvo->or.ditherType == params.ditherType);
2064         nvAssert(pConnectorEvo->or.ditherAlgo == params.ditherAlgo);
2065         nvAssert(pConnectorEvo->or.location == params.location);
2066     }
2067 }
2068 
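/*
 * Minimal usage sketch of the assertOnly semantics described above: populate
 * the connector's OR information once, then re-query later purely to assert
 * that the configuration has not changed.  The call sites are hypothetical.
 */
static void ExampleCheckOrInfo(NVConnectorEvoPtr pConnectorEvo)
{
    /* Initial query: fill in pConnectorEvo->or from RM. */
    nvRmGetConnectorORInfo(pConnectorEvo, FALSE /* assertOnly */);

    /* ... later, e.g. after a modeset ... */

    /* Re-query and assert that nothing changed. */
    nvRmGetConnectorORInfo(pConnectorEvo, TRUE /* assertOnly */);
}
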
2069 /*!
2070  * Query connector state, and retry if necessary.
2071  */
2072 NVDpyIdList nvRmGetConnectedDpys(const NVDispEvoRec *pDispEvo,
2073                                  NVDpyIdList dpyIdList)
2074 {
2075     NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS params = { 0 };
2076     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2077     NvU32 ret;
2078 
2079     params.subDeviceInstance = pDispEvo->displayOwner;
2080     params.displayMask = nvDpyIdListToNvU32(dpyIdList);
2081     params.flags =
2082         (DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_METHOD,_DEFAULT) |
2083          DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_DDC,_DEFAULT) |
2084          DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_LOAD,_DEFAULT));
2085 
2086     do {
2087         params.retryTimeMs = 0;
2088         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
2089                              pDevEvo->displayCommonHandle,
2090                              NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE,
2091                              &params,
2092                              sizeof(params));
2093 
2094         if (ret == NVOS_STATUS_ERROR_NOT_READY &&
2095             params.retryTimeMs == 0) {
2096             // Work around bug 970351: RM returns a zero retry time on platforms
2097             // where the display driver is in user space.  Use a conservative
2098             // default.  This code can be removed once this call is fixed in RM.
2099             params.retryTimeMs = 20;
2100         }
2101 
2102         if (params.retryTimeMs > 0) {
2103             nvkms_usleep(params.retryTimeMs * 1000);
2104         } else {
2105             nvkms_yield();
2106         }
2107     } while(params.retryTimeMs > 0);
2108 
2109     if (ret == NVOS_STATUS_SUCCESS) {
2110         return nvNvU32ToDpyIdList(params.displayMask);
2111     } else {
2112         nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
2113                      "Failed detecting connected display devices");
2114         return nvEmptyDpyIdList();
2115     }
2116 }
2117 
2118 /*!
2119  * Notify the DP library that we are ready to proceed after a suspend/boot, and
2120  * that it should initialize and start handling events.
2121  */
2122 NvBool nvRmResumeDP(NVDevEvoPtr pDevEvo)
2123 {
2124     NVDispEvoPtr pDispEvo;
2125     int i;
2126 
2127     FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
2128         NVConnectorEvoPtr pConnectorEvo;
2129         NVDpyIdList connectedIdsList =
2130             nvRmGetConnectedDpys(pDispEvo, pDispEvo->connectorIds);
2131 
2132         FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
2133             NvBool plugged =
2134                 nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedIdsList);
2135 
2136             if (!pConnectorEvo->pDpLibConnector) {
2137                 continue;
2138             }
2139 
2140             if (!nvDPResume(pConnectorEvo->pDpLibConnector, plugged)) {
2141                 goto failed;
2142             }
2143         }
2144     }
2145 
2146     return TRUE;
2147 
2148 failed:
2149     nvRmPauseDP(pDevEvo);
2150     return FALSE;
2151 }
2152 
2153 
2154 void nvRmPauseDP(NVDevEvoPtr pDevEvo)
2155 {
2156     NVDispEvoPtr pDispEvo;
2157     int i;
2158 
2159     FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
2160         NVConnectorEvoPtr pConnectorEvo;
2161 
2162         FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
2163             if (nvConnectorUsesDPLib(pConnectorEvo)) {
2164                 nvDPPause(pConnectorEvo->pDpLibConnector);
2165             }
2166         }
2167     }
2168 }
2169 
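/*
 * Minimal sketch of how the pause/resume pair above is intended to bracket a
 * power transition: pause the DP library before the GPU goes down and resume
 * it afterwards.  nvRmResumeDP() already pauses again on failure.  The power
 * transition itself is hypothetical.
 */
static NvBool ExampleDpPowerCycle(NVDevEvoPtr pDevEvo)
{
    nvRmPauseDP(pDevEvo);

    /* ... the device suspends and later resumes ... */

    return nvRmResumeDP(pDevEvo);
}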
2170 
2171 /*!
2172  * This function is called whenever the DPMS level changes; On a CRT,
2173  * you set the DPMS level by (dis/en)abling the hsync and vsync
2174  * signals:
2175  *
2176  * Hsync  Vsync  Mode
2177  * =====  =====  ====
2178  * 1      1      Normal (on).
2179  * 0      1      Standby -- RGB guns off, power supply on, tube filaments
2180  *               energized, (screen saver mode).
2181  * 1      0      Suspend -- RGB guns off, power supply off, tube filaments
2182  *               energized.
2183  * 0      0      Power off -- small auxiliary circuit stays on to monitor the
2184  *               hsync/vsync signals to know when to wake up.
2185  */
2186 NvBool nvRmSetDpmsEvo(NVDpyEvoPtr pDpyEvo, NvS64 value)
2187 {
2188     NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
2189     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2190     NvU32 ret;
2191 
2192     if (nvDpyUsesDPLib(pDpyEvo)) {
2193         nvDPDeviceSetPowerState(pDpyEvo,
2194                                 (value == NV_KMS_DPY_ATTRIBUTE_DPMS_ON));
2195         return TRUE;
2196     } else if (pDpyEvo->pConnectorEvo->legacyType !=
2197                NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) {
2198         NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS powerParams = { 0 };
2199 
2200         powerParams.subDeviceInstance = pDispEvo->displayOwner;
2201         powerParams.displayId = nvDpyEvoGetConnectorId(pDpyEvo);
2202 
2203         powerParams.powerState = (value == NV_KMS_DPY_ATTRIBUTE_DPMS_ON) ?
2204             NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_ON :
2205             NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_OFF;
2206 
2207         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
2208                              pDevEvo->displayCommonHandle,
2209                              NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER,
2210                              &powerParams,
2211                              sizeof(powerParams));
2212 
2213         return (ret == NVOS_STATUS_SUCCESS);
2214     } else {
2215         NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
2216         NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS powerParams = { { 0 }, 0 };
2217 
2218         powerParams.base.subdeviceIndex = pDispEvo->displayOwner;
2219         if (pConnectorEvo->or.primary == NV_INVALID_OR) {
2220             nvAssert(pConnectorEvo->or.primary != NV_INVALID_OR);
2221             return FALSE;
2222         }
2223         powerParams.orNumber = pConnectorEvo->or.primary;
2224 
2225         switch (value) {
2226         case NV_KMS_DPY_ATTRIBUTE_DPMS_ON:
2227             powerParams.normalHSync =
2228                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _ENABLE);
2229             powerParams.normalVSync =
2230                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _ENABLE);
2231             break;
2232         case NV_KMS_DPY_ATTRIBUTE_DPMS_STANDBY:
2233             powerParams.normalHSync =
2234                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _LO);
2235             powerParams.normalVSync =
2236                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _ENABLE);
2237             break;
2238         case NV_KMS_DPY_ATTRIBUTE_DPMS_SUSPEND:
2239             powerParams.normalHSync =
2240                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _ENABLE);
2241             powerParams.normalVSync =
2242                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _LO);
2243             break;
2244         case NV_KMS_DPY_ATTRIBUTE_DPMS_OFF:
2245             powerParams.normalHSync =
2246                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _LO);
2247             powerParams.normalVSync =
2248                 DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _LO);
2249             break;
2250         default:
2251             return FALSE;
2252         }
2253         // XXX These could probably be disabled too, in the DPMS_OFF case.
2254         powerParams.normalData =
2255             DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_DATA, _ENABLE);
2256         powerParams.normalPower =
2257             DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_PWR, _ON);
2258 
2259         powerParams.flags =
2260             DRF_DEF(5070, _CTRL_CMD_SET_DAC_PWR_FLAGS, _SPECIFIED_NORMAL, _YES);
2261 
2262         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
2263                              pDevEvo->displayHandle,
2264                              NV5070_CTRL_CMD_SET_DAC_PWR,
2265                              &powerParams,
2266                              sizeof(powerParams));
2267 
2268         return (ret == NVOS_STATUS_SUCCESS);
2269     }
2270 }
2271 
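/*
 * Minimal usage sketch: drive a dpy through two of the DPMS levels described
 * in the table above.  The attribute values are the NV_KMS_DPY_ATTRIBUTE_DPMS_*
 * constants already used by nvRmSetDpmsEvo(); the call site is hypothetical.
 */
static NvBool ExampleDpmsOffOn(NVDpyEvoPtr pDpyEvo)
{
    if (!nvRmSetDpmsEvo(pDpyEvo, NV_KMS_DPY_ATTRIBUTE_DPMS_OFF)) {
        return FALSE;
    }

    /* ... display remains powered off ... */

    return nvRmSetDpmsEvo(pDpyEvo, NV_KMS_DPY_ATTRIBUTE_DPMS_ON);
}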
2272 
2273 NvBool nvRmAllocSysmem(NVDevEvoPtr pDevEvo, NvU32 memoryHandle,
2274                        NvU32 *ctxDmaFlags, void **ppBase, NvU64 size,
2275                        NvKmsMemoryIsoType isoType)
2276 {
2277     NvU32 ret;
2278     NvBool bufferAllocated = FALSE;
2279     NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
2280     const NvKmsDispIOCoherencyModes *pIOCoherencyModes;
2281 
2282     memAllocParams.owner = NVKMS_RM_HEAP_ID;
2283 
2284     memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO);
2285 
2286     memAllocParams.size = size;
2287 
2288     if (isoType == NVKMS_MEMORY_NISO) {
2289         memAllocParams.attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _YES);
2290 
2291         pIOCoherencyModes = &pDevEvo->nisoIOCoherencyModes;
2292     } else {
2293         pIOCoherencyModes = &pDevEvo->isoIOCoherencyModes;
2294     }
2295 
2296     memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) |
2297                           DRF_DEF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS) |
2298                           DRF_DEF(OS32, _ATTR, _FORMAT, _PITCH);
2299 
2300     if (pIOCoherencyModes->noncoherent) {
2301         // Model (3)
2302         // - allocate USWC system memory
2303         // - allocate ctx dma with NVOS03_FLAGS_CACHE_SNOOP_DISABLE
2304         // - to sync CPU and GPU, flush CPU WC buffer
2305 
2306         memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE,
2307                                           memAllocParams.attr);
2308 
2309         ret = nvRmApiAlloc(
2310                   nvEvoGlobal.clientHandle,
2311                   pDevEvo->deviceHandle,
2312                   memoryHandle,
2313                   NV01_MEMORY_SYSTEM,
2314                   &memAllocParams);
2315 
2316         if (ret == NVOS_STATUS_SUCCESS) {
2317             bufferAllocated = TRUE;
2318             if (ctxDmaFlags) {
2319                 *ctxDmaFlags |= DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _DISABLE);
2320             }
2321         } else {
2322             bufferAllocated = FALSE;
2323         }
2324 
2325     }
2326 
2327     if (!bufferAllocated && pIOCoherencyModes->coherent) {
2328         // Model (2b): Similar to existing PCI model
2329         // - allocate cached (or USWC) system memory
2330         // - allocate ctx DMA with NVOS03_FLAGS_CACHE_SNOOP_ENABLE
2331         // ...
2332 
2333         memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK,
2334                                           memAllocParams.attr);
2335 
2336         ret = nvRmApiAlloc(
2337                   nvEvoGlobal.clientHandle,
2338                   pDevEvo->deviceHandle,
2339                   memoryHandle,
2340                   NV01_MEMORY_SYSTEM,
2341                   &memAllocParams);
2342 
2343         if (ret == NVOS_STATUS_SUCCESS) {
2344             bufferAllocated = TRUE;
2345             if (ctxDmaFlags) {
2346                 *ctxDmaFlags |= DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _ENABLE);
2347             }
2348         } else {
2349             bufferAllocated = FALSE;
2350         }
2351     }
2352 
2353     if (bufferAllocated) {
2354         ret = nvRmApiMapMemory(
2355                   nvEvoGlobal.clientHandle,
2356                   pDevEvo->deviceHandle,
2357                   memoryHandle,
2358                   0, /* offset */
2359                   size,
2360                   ppBase,
2361                   0 /* flags */);
2362 
2363         if (ret != NVOS_STATUS_SUCCESS) {
2364             nvRmApiFree(nvEvoGlobal.clientHandle,
2365                         pDevEvo->deviceHandle,
2366                         memoryHandle);
2367 
2368             bufferAllocated = FALSE;
2369         }
2370     }
2371 
2372     return bufferAllocated;
2373 }
2374 
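/*
 * Minimal usage sketch for nvRmAllocSysmem(): allocate and map one page of
 * NISO system memory, then unmap and free it.  The handle bookkeeping follows
 * nvRmAllocEvoDma() below; the buffer size and its use are hypothetical.
 */
static NvBool ExampleAllocNisoPage(NVDevEvoPtr pDevEvo)
{
    NvU32 memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
    NvU32 ctxDmaFlags = 0;
    void *pBase = NULL;
    const NvU64 size = 0x1000;

    if (!nvRmAllocSysmem(pDevEvo, memoryHandle, &ctxDmaFlags, &pBase, size,
                         NVKMS_MEMORY_NISO)) {
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);
        return FALSE;
    }

    /* ... CPU access through pBase; ctxDmaFlags feed a later ctxdma bind ... */

    nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, pDevEvo->deviceHandle,
                       memoryHandle, pBase, 0);
    nvRmApiFree(nvEvoGlobal.clientHandle, pDevEvo->deviceHandle, memoryHandle);
    nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);

    return TRUE;
}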
2375 
2376 /*****************************************************************************/
2377 /* Alloc memory and a context dma, following the rules dictated by the
2378    DMA coherence flags. */
2379 /*****************************************************************************/
2380 
2381 NvBool nvRmAllocEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma,
2382                        NvU64 limit, NvU32 ctxDmaFlags, NvU32 subDeviceMask)
2383 {
2384     NvBool bufferAllocated = FALSE;
2385     NvU32  memoryHandle = 0;
2386     void  *pBase = NULL;
2387 
2388     NvBool needBar1Mapping = FALSE;
2389 
2390     NVSurfaceDescriptor surfaceDesc;
2391     NvU32 localCtxDmaFlags = ctxDmaFlags |
2392         DRF_DEF(OS03, _FLAGS, _ACCESS, _READ_WRITE) |
2393         DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE);
2394 
2395     NvU32  ret;
2396 
2397     nvkms_memset(pDma, 0, sizeof(*pDma));
2398 
2399     memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
2400 
2401     /*
2402      * On certain GPUs (GF100, GF104) there exists a hardware bug that forces
2403      * us to put display NISO surfaces (pushbuffer, semaphores, notifiers
2404      * accessed by EVO) in vidmem instead of sysmem.  See bug 632241 for
2405      * details.
2406      */
2407     if (NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits,
2408             NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY)) {
2409         NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
2410 
2411         memAllocParams.owner = NVKMS_RM_HEAP_ID;
2412         memAllocParams.type = NVOS32_TYPE_DMA;
2413         memAllocParams.size = limit + 1;
2414         memAllocParams.attr = DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) |
2415                               DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM);
2416 
2417         ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
2418                            pDevEvo->deviceHandle,
2419                            memoryHandle,
2420                            NV01_MEMORY_LOCAL_USER,
2421                            &memAllocParams);
2422 
2423         if (ret != NVOS_STATUS_SUCCESS) {
2424             /* We can't fall back to any of the sysmem options below, due to
2425              * the nature of the HW bug forcing us to use vidmem. */
2426             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2427                         "Unable to allocate video memory for display");
2428             return FALSE;
2429         }
2430 
2431         limit = memAllocParams.size - 1;
2432 
2433         /* We'll access these surfaces through IFB */
2434         pBase = NULL;
2435 
2436         bufferAllocated = TRUE;
2437         needBar1Mapping = TRUE;
2438     }
2439 
2440     if (!bufferAllocated) {
2441         /*
2442          * Setting NVKMS_MEMORY_NISO since nvRmAllocEvoDma() is currently only
2443          * called to allocate pushbuffer and notifier memory.
2444          */
2445         bufferAllocated = nvRmAllocSysmem(pDevEvo, memoryHandle,
2446                                           &localCtxDmaFlags, &pBase, limit + 1,
2447                                           NVKMS_MEMORY_NISO);
2448     }
2449 
2450     if (!bufferAllocated) {
2451         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);
2452 
2453         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unable to allocate DMA memory");
2454 
2455         return FALSE;
2456     }
2457 
2458     // Create surface descriptor for this allocation.
2459     ret = pDevEvo->hal->AllocSurfaceDescriptor(pDevEvo, &surfaceDesc, memoryHandle,
2460                                                localCtxDmaFlags, limit);
2461 
2462     if (ret != NVOS_STATUS_SUCCESS) {
2463         if (pBase != NULL) {
2464             nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
2465                                pDevEvo->deviceHandle,
2466                                memoryHandle,
2467                                pBase,
2468                                0);
2469         }
2470         nvRmApiFree(nvEvoGlobal.clientHandle,
2471                     pDevEvo->deviceHandle, memoryHandle);
2472         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);
2473 
2474         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate surface descriptor");
2475 
2476         return FALSE;
2477     }
2478 
2479     pDma->memoryHandle = memoryHandle;
2480 
2481     pDma->surfaceDesc = surfaceDesc;
2482 
2483     pDma->limit = limit;
2484 
2485     if (needBar1Mapping) {
2486         NvBool result;
2487 
2488         result = nvRmEvoMapVideoMemory(pDevEvo, memoryHandle, limit + 1,
2489                                        pDma->subDeviceAddress, subDeviceMask);
2490 
2491         if (!result) {
2492             nvRmFreeEvoDma(pDevEvo, pDma);
2493             return FALSE;
2494         }
2495     } else {
2496         int sd;
2497 
2498         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2499             if (((1 << sd) & subDeviceMask) == 0) {
2500                 continue;
2501             }
2502 
2503             pDma->subDeviceAddress[sd] = pBase;
2504         }
2505     }
2506     pDma->isBar1Mapping = needBar1Mapping;
2507 
2508     return TRUE;
2509 }
2510 
2511 void nvRmFreeEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma)
2512 {
2513     NvU32 ret;
2514 
2515     pDevEvo->hal->FreeSurfaceDescriptor(pDevEvo,
2516                                         pDevEvo->deviceHandle,
2517                                         &pDma->surfaceDesc);
2518 
2519     if (pDma->memoryHandle != 0) {
2520         if (pDma->isBar1Mapping) {
2521             nvRmEvoUnMapVideoMemory(pDevEvo, pDma->memoryHandle,
2522                                     pDma->subDeviceAddress);
2523         } else {
2524             int sd = 0;
2525             NvBool addressMapped = TRUE;
2526 
2527             /* If pDma->subDeviceAddress[sd] is non-NULL for multiple subdevices,
2528              * assume they are the same. Unmap only one but set all of them to
2529              * NULL. This matches the logic in nvRmAllocEvoDma().
2530              */
2531             for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2532 
2533                 if (addressMapped && pDma->subDeviceAddress[sd] != NULL) {
2534                     ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
2535                                              pDevEvo->deviceHandle,
2536                                              pDma->memoryHandle,
2537                                              pDma->subDeviceAddress[sd],
2538                                              0);
2539 
2540                     if (ret != NVOS_STATUS_SUCCESS) {
2541                         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to unmap memory");
2542                     }
2543 
2544                     addressMapped = FALSE;
2545                 }
2546 
2547                 pDma->subDeviceAddress[sd] = NULL;
2548             }
2549         }
2550 
2551         ret = nvRmApiFree(nvEvoGlobal.clientHandle,
2552                           pDevEvo->deviceHandle, pDma->memoryHandle);
2553 
2554         if (ret != NVOS_STATUS_SUCCESS) {
2555             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to free DMA memory");
2556         }
2557 
2558         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDma->memoryHandle);
2559         pDma->memoryHandle = 0;
2560 
2561         pDma->limit = 0;
2562 
2563         nvkms_memset(pDma->subDeviceAddress, 0, sizeof(pDma->subDeviceAddress));
2564     }
2565 }
2566 
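/*
 * Minimal usage sketch for the DMA helpers above: allocate a notifier-sized
 * buffer for a single subdevice, as RmAllocEvoChannel() does below, then free
 * it.  The subdevice index and the use of the buffer are hypothetical.
 */
static NvBool ExampleAllocNotifierDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma,
                                      NvU32 sd)
{
    if (!nvRmAllocEvoDma(pDevEvo, pDma,
                         NV_DMA_EVO_NOTIFIER_SIZE - 1,
                         DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER),
                         1 << sd)) {
        return FALSE;
    }

    /* ... hand pDma->surfaceDesc to a channel as its notifier surface ... */

    nvRmFreeEvoDma(pDevEvo, pDma);

    return TRUE;
}
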
2567 /*****************************************************************************/
2568 /* RmAllocEvoChannel ()
2569  * Allocates the EVO channel and associated notifier surfaces and ctxdmas.
2570  * Takes how big the DMA controls are (varies by class of channel) and which
2571  * class to allocate.
2572  */
2573 /*****************************************************************************/
2574 static NVEvoChannelPtr
2575 RmAllocEvoChannel(NVDevEvoPtr pDevEvo,
2576                   NVEvoChannelMask channelMask,
2577                   NvV32 instance, NvU32 class)
2578 {
2579     NVEvoChannelPtr pChannel = NULL;
2580     NVDmaBufferEvoPtr buffer = NULL;
2581     int sd;
2582     NvU32 ret;
2583 
2584     /* One 4k page is enough to map PUT and GET */
2585     const NvU64 dmaControlLen = 0x1000;
2586 
2587     nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(channelMask) == 1);
2588 
2589     /* Allocate the channel data structure */
2590     pChannel = nvCalloc(1, sizeof(*pChannel));
2591 
2592     if (pChannel == NULL) {
2593         goto fail;
2594     }
2595 
2596     buffer = &pChannel->pb;
2597 
2598     pChannel->hwclass = class;
2599     pChannel->instance = instance;
2600     pChannel->channelMask = channelMask;
2601 
2602     pChannel->notifiersDma = nvCalloc(pDevEvo->numSubDevices, sizeof(NVEvoDma));
2603 
2604     if (pChannel->notifiersDma == NULL) {
2605         goto fail;
2606     }
2607 
2608     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2609         NVEvoDmaPtr pNotifiersDma = &pChannel->notifiersDma[sd];
2610 
2611         void *pDmaDisplayChannel = NULL;
2612 
2613         // Allocation of the notifiers
2614         if (!nvRmAllocEvoDma(pDevEvo, pNotifiersDma,
2615                              NV_DMA_EVO_NOTIFIER_SIZE - 1,
2616                              DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER),
2617                              1 << sd)) {
2618             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2619                         "Notifier DMA allocation failed");
2620 
2621             goto fail;
2622         }
2623 
2624         nvAssert(pNotifiersDma->subDeviceAddress[sd] != NULL);
2625 
2626         // Only allocate memory for one pushbuffer.
2627         // All subdevices will share (via subdevice mask)
2628         if (sd == 0) {
2629             NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS ChannelAllocParams = { 0 };
2630 
2631             NvU64 limit = NV_DMA_EVO_PUSH_BUFFER_SIZE - 1;
2632             NVEvoDmaPtr pDma = &buffer->dma;
2633 
2634             // Allocation of the push buffer
2635             if (!nvRmAllocEvoDma(pDevEvo, pDma, limit, 0, SUBDEVICE_MASK_ALL)) {
2636                 nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2637                             "Display engine push buffer DMA allocation failed");
2638 
2639                 goto fail;
2640             }
2641 
2642             if (!pDma->isBar1Mapping) {
2643                 buffer->base = pDma->subDeviceAddress[0];
2644             } else {
2645                 /*
2646                  * Allocate memory for a shadow copy in sysmem that we'll copy
2647                  * to vidmem via BAR1 at kickoff time.
2648                  */
2649                 buffer->base = nvCalloc(buffer->dma.limit + 1, 1);
2650                 if (buffer->base == NULL) {
2651                     goto fail;
2652                 }
2653             }
2654 
2655             buffer->channel_handle =
2656                 nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
2657 
2658             // Channel instance (always 0 for CORE - head number otherwise)
2659             ChannelAllocParams.channelInstance = instance;
2660             // PB CtxDMA Handle
2661             ChannelAllocParams.hObjectBuffer   = buffer->dma.surfaceDesc.ctxDmaHandle;
2662             // Initial offset within the PB
2663             ChannelAllocParams.offset          = 0;
2664 
2665             ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
2666                                pDevEvo->displayHandle,
2667                                buffer->channel_handle,
2668                                class,
2669                                &ChannelAllocParams);
2670             if (ret != NVOS_STATUS_SUCCESS) {
2671                 nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2672                             "Display engine push buffer channel allocation failed: 0x%x (%s)",
2673                             ret, nvstatusToString(ret));
2674 
2675                 nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
2676                                    buffer->channel_handle);
2677                 buffer->channel_handle = 0;
2678 
2679                 goto fail;
2680             }
2681         }
2682 
2683         ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
2684                                pDevEvo->pSubDevices[sd]->handle,
2685                                buffer->channel_handle,
2686                                0,
2687                                dmaControlLen,
2688                                &pDmaDisplayChannel,
2689                                0);
2690         if (ret != NVOS_STATUS_SUCCESS) {
2691             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2692                         "Display engine push buffer DMA mapping failed: 0x%x (%s)",
2693                         ret, nvstatusToString(ret));
2694             goto fail;
2695         }
2696 
2697         buffer->control[sd] = pDmaDisplayChannel;
2698     }
2699 
2700     /* Initialize the rest of the required push buffer information */
2701     buffer->buffer          = buffer->base;
2702     buffer->end             = (NvU32 *)((char *)buffer->base +
2703                               NV_DMA_EVO_PUSH_BUFFER_SIZE - 8);
2704 
2705     /*
2706      * Due to hardware bug 235044, we can not use the last 12 dwords of the
2707      * core channel pushbuffer.  Adjust offset_max appropriately.
2708      *
2709      * This bug is fixed in Volta and newer, so this workaround can be removed
2710      * when Pascal support is dropped. See bug 3116066.
2711      */
2712     buffer->offset_max   = NV_DMA_EVO_PUSH_BUFFER_SIZE -
2713                            NV_DMA_EVO_PUSH_BUFFER_PAD_SIZE;
2714     buffer->fifo_free_count = (buffer->offset_max >> 2) - 2;
2715     buffer->put_offset   = 0;
2716     buffer->num_channels = pDevEvo->numSubDevices;
2717     buffer->pDevEvo      = pDevEvo;
2718     buffer->currentSubDevMask = SUBDEVICE_MASK_ALL;
2719 
2720     pChannel->imm.type = NV_EVO_IMM_CHANNEL_NONE;
2721 
2722     pDevEvo->hal->InitChannel(pDevEvo, pChannel);
2723 
2724     return pChannel;
2725 
2726 fail:
2727 
2728     RmFreeEvoChannel(pDevEvo, pChannel);
2729 
2730     return NULL;
2731 }
2732 
2733 static void FreeImmediateChannelPio(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
2734 {
2735     NVEvoPioChannel *pPio = pChannel->imm.u.pio;
2736     int sd;
2737 
2738     nvAssert(pPio != NULL);
2739 
2740     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2741 
2742         if (!pPio->control[sd]) {
2743             continue;
2744         }
2745 
2746         if (nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
2747                                pDevEvo->pSubDevices[sd]->handle,
2748                                pPio->handle,
2749                                pPio->control[sd],
2750                                0)) {
2751             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
2752                         "Failed to unmap immediate channel");
2753         }
2754         pPio->control[sd] = NULL;
2755     }
2756 
2757     if (pPio->handle) {
2758         if (nvRmApiFree(nvEvoGlobal.clientHandle,
2759                         pDevEvo->displayHandle,
2760                         pPio->handle)) {
2761             nvEvoLogDev(pDevEvo, EVO_LOG_WARN, "Failed to free immediate channel");
2762         }
2763         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
2764                            pPio->handle);
2765         pPio->handle = 0;
2766     }
2767 
2768     nvFree(pPio);
2769     pChannel->imm.u.pio = NULL;
2770 }
2771 
2772 static void FreeImmediateChannelDma(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
2773 {
2774     NVEvoChannelPtr pImmChannel = pChannel->imm.u.dma;
2775 
2776     RmFreeEvoChannel(pDevEvo, pImmChannel);
2777     pChannel->imm.u.dma = NULL;
2778 }
2779 
2780 static void FreeImmediateChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
2781 {
2782     switch (pChannel->imm.type) {
2783         case NV_EVO_IMM_CHANNEL_NONE:
2784             return;
2785         case NV_EVO_IMM_CHANNEL_PIO:
2786             FreeImmediateChannelPio(pDevEvo, pChannel);
2787             break;
2788         case NV_EVO_IMM_CHANNEL_DMA:
2789             FreeImmediateChannelDma(pDevEvo, pChannel);
2790             break;
2791     }
2792     pChannel->imm.type = NV_EVO_IMM_CHANNEL_NONE;
2793 }
2794 
2795 /*****************************************************************************/
2796 /* RmFreeEvoChannel ()
2797  * Frees all of the stuff allocated in RmAllocEvoChannel */
2798 /*****************************************************************************/
2799 static void RmFreeEvoChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel)
2800 {
2801     int sd;
2802 
2803     if (pChannel == NULL) {
2804         return;
2805     }
2806 
2807     FreeImmediateChannel(pDevEvo, pChannel);
2808 
2809     if (pChannel->completionNotifierEventHandle != 0) {
2810 
2811         nvRmApiFree(nvEvoGlobal.clientHandle,
2812                     pChannel->pb.channel_handle,
2813                     pChannel->completionNotifierEventHandle);
2814 
2815         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
2816                            pChannel->completionNotifierEventHandle);
2817 
2818         pChannel->completionNotifierEventHandle = 0;
2819     }
2820 
2821     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2822         if (pChannel->pb.control[sd]) {
2823             if (nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
2824                                    pDevEvo->pSubDevices[sd]->handle,
2825                                    pChannel->pb.channel_handle,
2826                                    pChannel->pb.control[sd],
2827                                    0) != NVOS_STATUS_SUCCESS) {
2828                 nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
2829                                  "Failed to unmap display engine channel memory");
2830             }
2831             pChannel->pb.control[sd] = NULL;
2832         }
2833     }
2834 
2835     if (pChannel->pb.channel_handle != 0) {
2836         // If NVKMS restored the console successfully, tell RM to leave the
2837         // channels allocated to avoid shutting down the heads we just
2838         // enabled.
2839         //
2840         // On EVO, only leave the core and base channels allocated. The
2841         // other satellite channels shouldn't be active at the console.
2842         //
2843         // On nvdisplay, one or more window channels are also needed. Rather
2844         // than try to figure out which ones are needed, just leave them all
2845         // alone.
2846         const NvBool isCore =
2847             FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE,
2848                            pChannel->channelMask);
2849         const NvBool isBase =
2850             (pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0;
2851         const NvBool isWindow =
2852             (pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0;
2853         if ((isCore || isBase || isWindow) && pDevEvo->skipConsoleRestore) {
2854             NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS params = { };
2855 
2856             params.base.subdeviceIndex = pDevEvo->vtFbInfo.subDeviceInstance;
2857             params.flags = NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW;
2858 
2859             if (nvRmApiControl(nvEvoGlobal.clientHandle,
2860                                pDevEvo->displayHandle,
2861                                NV5070_CTRL_CMD_SET_RMFREE_FLAGS,
2862                                &params, sizeof(params))
2863                 != NVOS_STATUS_SUCCESS) {
2864                 nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
2865                                  "Failed to set the PRESERVE_HW flag");
2866             }
2867         }
2868 
2869         if (nvRmApiFree(nvEvoGlobal.clientHandle,
2870                         pDevEvo->displayHandle,
2871                         pChannel->pb.channel_handle)
2872             != NVOS_STATUS_SUCCESS) {
2873             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2874                         "Failed to tear down display engine channel");
2875         }
2876         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
2877                            pChannel->pb.channel_handle);
2878         pChannel->pb.channel_handle = 0;
2879     }
2880 
2881     if (pChannel->pb.dma.isBar1Mapping) {
2882         /* Pushbuffer is in vidmem. Free shadow copy. */
2883         nvFree(pChannel->pb.base);
2884         pChannel->pb.base = NULL;
2885     }
2886 
2887     nvRmFreeEvoDma(pDevEvo, &pChannel->pb.dma);
2888 
2889     if (pChannel->notifiersDma) {
2890         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2891             nvRmFreeEvoDma(pDevEvo, &pChannel->notifiersDma[sd]);
2892         }
2893     }
2894 
2895     nvFree(pChannel->notifiersDma);
2896     pChannel->notifiersDma = NULL;
2897 
2898     nvFree(pChannel);
2899 }
2900 
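/*
 * Allocate a PIO immediate channel of the given class/instance and map
 * mapSize bytes of its control region on every subdevice.  On failure, any
 * partially constructed state is left on pChannel->imm; it is expected to be
 * released by the caller's channel teardown path via FreeImmediateChannel().
 */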
2901 static NvBool
2902 AllocImmediateChannelPio(NVDevEvoPtr pDevEvo,
2903                          NVEvoChannelPtr pChannel,
2904                          NvU32 class,
2905                          NvU32 instance,
2906                          NvU32 mapSize)
2907 {
2908     NVEvoPioChannel *pPio = NULL;
2909     NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
2910     NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS params = { 0 };
2911     NvU32 sd;
2912 
2913     pPio = nvCalloc(1, sizeof(*pPio));
2914 
2915     if (!pPio) {
2916         return FALSE;
2917     }
2918 
2919     pChannel->imm.type = NV_EVO_IMM_CHANNEL_PIO;
2920     pChannel->imm.u.pio = pPio;
2921 
2922     params.channelInstance = instance;
2923 
2924     if (nvRmApiAlloc(nvEvoGlobal.clientHandle,
2925                      pDevEvo->displayHandle,
2926                      handle,
2927                      class,
2928                      &params) != NVOS_STATUS_SUCCESS) {
2929         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2930                     "Failed to allocate immediate channel %d", instance);
2931         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
2932         return FALSE;
2933     }
2934 
2935     pPio->handle = handle;
2936 
2937     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
2938         void *pImm = NULL;
2939 
2940         if (nvRmApiMapMemory(nvEvoGlobal.clientHandle,
2941                              pDevEvo->pSubDevices[sd]->handle,
2942                              pPio->handle,
2943                              0,
2944                              mapSize,
2945                              &pImm,
2946                              0) != NVOS_STATUS_SUCCESS) {
2947             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
2948                         "Failed to map immediate channel %d/%d",
2949                         sd, instance);
2950             return FALSE;
2951         }
2952 
2953         pPio->control[sd] = pImm;
2954     }
2955 
2956     return TRUE;
2957 }
2958 
2959 static NvBool
2960 AllocImmediateChannelDma(NVDevEvoPtr pDevEvo,
2961                          NVEvoChannelPtr pChannel,
2962                          NvU32 immClass)
2963 {
2964     NVEvoChannelPtr pImmChannel = RmAllocEvoChannel(
2965         pDevEvo,
2966         DRF_DEF64(_EVO, _CHANNEL_MASK, _WINDOW_IMM, _ENABLE),
2967         pChannel->instance, immClass);
2968 
2969     if (!pImmChannel) {
2970         return FALSE;
2971     }
2972 
2973     pChannel->imm.type = NV_EVO_IMM_CHANNEL_DMA;
2974     pChannel->imm.u.dma = pImmChannel;
2975 
2976     return TRUE;
2977 }
2978 
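/*
 * Allocate one base (EVO) channel per head, using the first supported class
 * found in baseChannelDmaClasses.
 */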
2979 NvBool nvRMAllocateBaseChannels(NVDevEvoPtr pDevEvo)
2980 {
2981     int i;
2982     NvU32 baseClass = 0;
2983     NvU32 head;
2984 
2985     static const NvU32 baseChannelDmaClasses[] = {
2986         NV927C_BASE_CHANNEL_DMA,
2987     };
2988 
2989     for (i = 0; i < ARRAY_LEN(baseChannelDmaClasses); i++) {
2990         if (nvRmEvoClassListCheck(pDevEvo, baseChannelDmaClasses[i])) {
2991             baseClass = baseChannelDmaClasses[i];
2992             break;
2993         }
2994     }
2995 
2996     if (!baseClass) {
2997         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported base display class");
2998         return FALSE;
2999     }
3000 
3001     for (head = 0; head < pDevEvo->numHeads; head++) {
3002         pDevEvo->base[head] = RmAllocEvoChannel(
3003             pDevEvo,
3004             DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE),
3005             head, baseClass);
3006 
3007         if (!pDevEvo->base[head]) {
3008             return FALSE;
3009         }
3010     }
3011 
3012     return TRUE;
3013 }
3014 
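/*
 * Allocate one overlay channel per head, each paired with a PIO immediate
 * channel whose mapping is only large enough to reach the SetPointsOut and
 * Update registers.
 */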
3015 NvBool nvRMAllocateOverlayChannels(NVDevEvoPtr pDevEvo)
3016 {
3017     NvU32 immMapSize;
3018     NvU32 head;
3019 
3020     if (!nvRmEvoClassListCheck(pDevEvo,
3021                                NV917E_OVERLAY_CHANNEL_DMA)) {
3022         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
3023                     "Unsupported overlay display class");
3024         return FALSE;
3025     }
3026 
3027     nvAssert(nvRmEvoClassListCheck(pDevEvo, NV917B_OVERLAY_IMM_CHANNEL_PIO));
3028 
3029     /*
3030      * EvoSetImmPointOut91() will interpret the PIO mapping as a pointer
3031      * to GK104DispOverlayImmControlPio and access the SetPointsOut and
3032      * Update fields, which is safe as long as SetPointsOut and Update are
3033      * at consistent offsets.
3034      */
3035     nvAssert(offsetof(GK104DispOverlayImmControlPio, SetPointsOut) ==
3036              NV917B_SET_POINTS_OUT(NVKMS_LEFT));
3037     nvAssert(offsetof(GK104DispOverlayImmControlPio, Update) ==
3038              NV917B_UPDATE);
3039     immMapSize =
3040         NV_MAX(NV917B_SET_POINTS_OUT(NVKMS_LEFT), NV917B_UPDATE) + sizeof(NvV32);
3041 
3042     for (head = 0; head < pDevEvo->numHeads; head++) {
3043         pDevEvo->overlay[head] = RmAllocEvoChannel(
3044             pDevEvo,
3045             DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE),
3046             head, NV917E_OVERLAY_CHANNEL_DMA);
3047 
3048         if (!pDevEvo->overlay[head]) {
3049             return FALSE;
3050         }
3051 
3052         if (!AllocImmediateChannelPio(pDevEvo, pDevEvo->overlay[head],
3053                                       NV917B_OVERLAY_IMM_CHANNEL_PIO, head, immMapSize)) {
3054             return FALSE;
3055         }
3056     }
3057 
3058     return TRUE;
3059 }
3060 
3061 /*!
3062  * Allocate a syncpt dedicated to this channel. Since NVKMS only supports
3063  * syncpts on SOC devices, which have a single device/sub-device/disp,
3064  * sd can be assumed to be 0.
3065  */
3066 static NvBool AllocSyncpt(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel,
3067         NVEvoSyncpt *pEvoSyncptOut)
3068 {
3069     NvU32 id;
3070     NvKmsSyncPtOpParams params = { };
3071     NvBool result;
3072     NVSurfaceDescriptor surfaceDesc;
3073 
3074     if (!pDevEvo->supportsSyncpts) {
3075         return FALSE;
3076     }
3077 
3078     /*! Set the syncpt id to invalid to avoid an unintended free */
3079     pEvoSyncptOut->id = NVKMS_SYNCPT_ID_INVALID;
3080 
3081     /*
3082      * HW engine on Orin is called HOST1X, all syncpts are in internal RAM of
3083      * HOST1X.
3084      * OP_ALLOC calls into HOST1X driver and allocs a syncpt resource.
3085      */
3086     params.alloc.syncpt_name = "nvkms-fence";
3087     result = nvkms_syncpt_op(NVKMS_SYNCPT_OP_ALLOC, &params);
3088     if (!result) {
3089         return FALSE;
3090     }
3091     id = params.alloc.id;
3092 
3093     /* Post syncpt max val is tracked locally. Init the value here. */
3094     params.read_minval.id = id;
3095     result = nvkms_syncpt_op(NVKMS_SYNCPT_OP_READ_MINVAL, &params);
3096     if (!result) {
3097         goto failed;
3098     }
3099 
3100     result = nvRmEvoAllocAndBindSyncpt(pDevEvo, pChannel, id,
3101                                        &surfaceDesc,
3102                                        pEvoSyncptOut);
3103     if (!result) {
3104         goto failed;
3105     }
3106 
3107     /*! Populate syncpt values to return. */
3108     pEvoSyncptOut->channelMask = pChannel->channelMask;
3109     pEvoSyncptOut->syncptMaxVal = params.read_minval.minval;
3110 
3111     return TRUE;
3112 
3113 failed:
3114     /*! put back syncpt as operation failed */
3115     params.put.id = id;
3116     nvkms_syncpt_op(NVKMS_SYNCPT_OP_PUT, &params);
3117     return FALSE;
3118 }
3119 
3120 static NvBool AllocPostSyncptPerChannel(NVDevEvoPtr pDevEvo,
3121                                         NVEvoChannelPtr pChannel)
3122 {
3123     if (!pDevEvo->supportsSyncpts) {
3124         return TRUE;
3125     }
3126 
3127     return AllocSyncpt(pDevEvo, pChannel, &pChannel->postSyncpt);
3128 }
3129 
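/*
 * Allocate one window channel per window: pick the first (newest) supported
 * window class, bind each subdevice's notifier surface descriptor, and attach
 * a DMA window-immediate channel plus a post-syncpt to every window channel.
 */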
3130 NvBool nvRMAllocateWindowChannels(NVDevEvoPtr pDevEvo)
3131 {
3132     int index;
3133     NvU32 window, sd;
3134 
3135     static const struct {
3136         NvU32 windowClass;
3137         NvU32 immClass;
3138     } windowChannelClasses[] = {
3139         { NVC67E_WINDOW_CHANNEL_DMA,
3140           NVC67B_WINDOW_IMM_CHANNEL_DMA },
3141         { NVC57E_WINDOW_CHANNEL_DMA,
3142           NVC57B_WINDOW_IMM_CHANNEL_DMA },
3143         { NVC37E_WINDOW_CHANNEL_DMA,
3144           NVC37B_WINDOW_IMM_CHANNEL_DMA },
3145     }, *c = NULL;
3146 
3147     for (index = 0; index < ARRAY_LEN(windowChannelClasses); index++) {
3148         if (nvRmEvoClassListCheck(pDevEvo,
3149                     windowChannelClasses[index].windowClass)) {
3150 
3151             c = &windowChannelClasses[index];
3152 
3153             nvAssert(nvRmEvoClassListCheck(pDevEvo, c->immClass));
3154             break;
3155         }
3156     }
3157 
3158     if (index >= ARRAY_LEN(windowChannelClasses)) {
3159         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported window display class");
3160         return FALSE;
3161     }
3162 
3163     nvAssert(pDevEvo->numWindows <= ARRAY_LEN(pDevEvo->window));
3164     for (window = 0; window < pDevEvo->numWindows; window++) {
3165         pDevEvo->window[window] = RmAllocEvoChannel(
3166             pDevEvo,
3167             DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE),
3168             window, c->windowClass);
3169 
3170         if (!pDevEvo->window[window]) {
3171             return FALSE;
3172         }
3173 
3174         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3175             NvU32 ret = pDevEvo->hal->BindSurfaceDescriptor(
3176                 pDevEvo,
3177                 pDevEvo->window[window],
3178                 &pDevEvo->window[window]->notifiersDma[sd].surfaceDesc);
3179             if (ret != NVOS_STATUS_SUCCESS) {
3180                 nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
3181                         "Failed to bind(window channel) display engine notify surface descriptor: 0x%x (%s)",
3182                         ret, nvstatusToString(ret));
3183                 return FALSE;
3184             }
3185         }
3186 
3187         if (!AllocImmediateChannelDma(pDevEvo, pDevEvo->window[window],
3188                                       c->immClass)) {
3189             return FALSE;
3190         }
3191 
3192         if (!AllocPostSyncptPerChannel(pDevEvo,
3193                                        pDevEvo->window[window])) {
3194             return FALSE;
3195         }
3196     }
3197 
3198     return TRUE;
3199 }
3200 
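/*
 * Free the core channel: first unmap the per-subdevice read-only mappings of
 * the ARMed method values (pCoreDma), then free the channel itself.
 */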
3201 static void EvoFreeCoreChannel(NVDevEvoRec *pDevEvo, NVEvoChannel *pChannel)
3202 {
3203     NvU32 sd;
3204 
3205     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3206         NvU32 ret;
3207 
3208         if (!pDevEvo->pSubDevices[sd]->pCoreDma) {
3209             continue;
3210         }
3211 
3212         ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
3213                                  pDevEvo->pSubDevices[sd]->handle,
3214                                  pChannel->pb.channel_handle,
3215                                  pDevEvo->pSubDevices[sd]->pCoreDma,
3216                                  0);
3217 
3218         if (ret != NVOS_STATUS_SUCCESS) {
3219             nvEvoLogDevDebug(
3220                 pDevEvo,
3221                 EVO_LOG_ERROR,
3222                 "Failed to unmap NVDisplay core channel memory mapping for ARMed values");
3223         }
3224         pDevEvo->pSubDevices[sd]->pCoreDma = NULL;
3225     }
3226 
3227     RmFreeEvoChannel(pDevEvo, pChannel);
3228 }
3229 
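/*
 * Allocate the core channel and map the region holding its ARMed (committed)
 * method values read-only on each subdevice, so that the armed state can be
 * read back through pCoreDma.
 */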
3230 static NVEvoChannel* EvoAllocateCoreChannel(NVDevEvoRec *pDevEvo)
3231 {
3232     NVEvoChannel *pChannel;
3233     NvU32 sd;
3234 
3235     pChannel =
3236         RmAllocEvoChannel(pDevEvo,
3237                           DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE),
3238                           0,
3239                           pDevEvo->coreChannelDma.coreChannelClass);
3240 
3241     if (pChannel == NULL) {
3242         goto failed;
3243     }
3244 
3245     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3246         NvU32 ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
3247                                      pDevEvo->pSubDevices[sd]->handle,
3248                                      pChannel->pb.channel_handle,
3249                                      pDevEvo->coreChannelDma.dmaArmedOffset,
3250                                      pDevEvo->coreChannelDma.dmaArmedSize,
3251                                      (void**)&pDevEvo->pSubDevices[sd]->pCoreDma,
3252                                      DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY));
3253 
3254         if (ret != NVOS_STATUS_SUCCESS) {
3255             nvEvoLogDev(
3256                 pDevEvo,
3257                 EVO_LOG_ERROR,
3258                 "Core channel memory mapping for ARMed values failed: 0x%x (%s)",
3259                 ret, nvstatusToString(ret));
3260             goto failed;
3261         }
3262     }
3263 
3264     return pChannel;
3265 
3266 failed:
3267     if (pChannel != NULL) {
3268         EvoFreeCoreChannel(pDevEvo, pChannel);
3269     }
3270     return NULL;
3271 }
3272 
3273 /* Pre-allocate the vblank syncpts, store in NVDispHeadStateEvoRec. */
3274 void nvRmAllocCoreRGSyncpts(NVDevEvoPtr pDevEvo)
3275 {
3276 
3277     NVDispEvoPtr pDispEvo = NULL;
3278     NvU32 syncptIdx = 0;
3279 
3280     if (!pDevEvo->supportsSyncpts ||
3281         !pDevEvo->hal->caps.supportsVblankSyncObjects) {
3282         return;
3283     }
3284 
3285     /* If Syncpts are supported, we're on Orin, which only has one display. */
3286     nvAssert(pDevEvo->nDispEvo == 1);
3287     pDispEvo = pDevEvo->pDispEvo[0];
3288 
3289     /* Initialize all heads' vblank sync object counts to zero. */
3290     for (int i = 0; i < pDevEvo->numApiHeads; i++) {
3291         pDispEvo->apiHeadState[i].numVblankSyncObjectsCreated = 0;
3292     }
3293 
3294     /* For each core RG syncpt index: */
3295     for (syncptIdx = 0; syncptIdx < NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD;
3296          syncptIdx++) {
3297         /* For each Head: */
3298         for (int i = 0; i < pDevEvo->numApiHeads; i++) {
3299             NvBool result = FALSE;
3300             NVDispApiHeadStateEvoRec *pApiHeadState =
3301                 &pDispEvo->apiHeadState[i];
3302 
3303             result =
3304                 AllocSyncpt(pDevEvo, pDevEvo->core,
3305                             &pApiHeadState->vblankSyncObjects[syncptIdx].evoSyncpt);
3306             if (!result) {
3307                 /*
3308                  * Stop trying to allocate more syncpts if none are
3309                  * available.
3310                  */
3311                 nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
3312                              "Failed to allocate Core RG Syncpoint at index %d "
3313                              "on Head %d.", syncptIdx, i);
3314                 return;
3315             }
3316 
3317             /* Populate the index of the syncpt in the NVVblankSyncObjectRec. */
3318             pApiHeadState->vblankSyncObjects[syncptIdx].index = syncptIdx;
3319             /* Update the count. */
3320             pApiHeadState->numVblankSyncObjectsCreated = syncptIdx + 1;
3321         }
3322     }
3323 }
3324 
3325 NvBool nvRMSetupEvoCoreChannel(NVDevEvoPtr pDevEvo)
3326 {
3327     NvU32 sd;
3328 
3329     pDevEvo->core = EvoAllocateCoreChannel(pDevEvo);
3330     if (!pDevEvo->core) {
3331         return FALSE;
3332     }
3333 
3334     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3335         // Bind the core notifier surface descriptor
3336         NvU32 ret =
3337             pDevEvo->hal->BindSurfaceDescriptor(
3338                 pDevEvo, pDevEvo->core,
3339                 &pDevEvo->core->notifiersDma[sd].surfaceDesc);
3340         if (ret != NVOS_STATUS_SUCCESS) {
3341             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
3342                         "Failed to bind display engine notify surface descriptor: 0x%x (%s)",
3343                         ret, nvstatusToString(ret));
3344             nvRMFreeEvoCoreChannel(pDevEvo);
3345             return FALSE;
3346         }
3347     }
3348 
3349     nvInitEvoSubDevMask(pDevEvo);
3350 
3351     /*
3352      * XXX NVKMS TODO: Enable core channel event generation; see bug
3353      * 1671139.
3354      */
3355 
3356     // Query the VBIOS head assignments.  Note that this has to happen after the
3357     // core channel is allocated or else RM will return incorrect information
3358     // about dynamic display IDs it allocates for the boot display on DP MST
3359     // devices.
3360     GetVbiosHeadAssignment(pDevEvo);
3361 
3362     return TRUE;
3363 }
3364 
3365 void nvRMFreeBaseChannels(NVDevEvoPtr pDevEvo)
3366 {
3367     NvU32 head;
3368 
3369     for (head = 0; head < pDevEvo->numHeads; head++) {
3370         RmFreeEvoChannel(pDevEvo, pDevEvo->base[head]);
3371         pDevEvo->base[head] = NULL;
3372     }
3373 }
3374 
3375 void nvRMFreeOverlayChannels(NVDevEvoPtr pDevEvo)
3376 {
3377     NvU32 head;
3378 
3379     for (head = 0; head < pDevEvo->numHeads; head++) {
3380         RmFreeEvoChannel(pDevEvo, pDevEvo->overlay[head]);
3381         pDevEvo->overlay[head] = NULL;
3382     }
3383 }
3384 
3385 void nvRMFreeWindowChannels(NVDevEvoPtr pDevEvo)
3386 {
3387     NvU32 window;
3388 
3389     for (window = 0; window < pDevEvo->numWindows; window++) {
3390         nvRmEvoFreeSyncpt(pDevEvo, &pDevEvo->window[window]->postSyncpt);
3391         RmFreeEvoChannel(pDevEvo, pDevEvo->window[window]);
3392         pDevEvo->window[window] = NULL;
3393     }
3394 }
3395 
3396 /* Frees the Core RG Syncpts. */
3397 void nvRmFreeCoreRGSyncpts(NVDevEvoPtr pDevEvo)
3398 {
3399 
3400     NVDispEvoPtr pDispEvo = NULL;
3401 
3402     if (!pDevEvo->supportsSyncpts ||
3403         !pDevEvo->hal->caps.supportsVblankSyncObjects) {
3404         return;
3405     }
3406 
3407     /* We can get here in teardown cases from alloc failures */
3408     if (pDevEvo->nDispEvo == 0) {
3409         return;
3410     }
3411 
3412     /* If Syncpts are supported, we're on Orin, which only has one display. */
3413     nvAssert(pDevEvo->nDispEvo == 1);
3414     pDispEvo = pDevEvo->pDispEvo[0];
3415 
3416     /* For each Head: */
3417     for (int i = 0; i < pDevEvo->numApiHeads; i++) {
3418         /* Free all core RG syncpts. */
3419         NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[i];
3420         for (int j = 0; j < pApiHeadState->numVblankSyncObjectsCreated; j++) {
3421             nvAssert(!pApiHeadState->vblankSyncObjects[j].inUse);
3422             nvRmEvoFreeSyncpt(pDevEvo,
3423                               &pApiHeadState->vblankSyncObjects[j].evoSyncpt);
3424         }
3425         pApiHeadState->numVblankSyncObjectsCreated = 0;
3426     }
3427 }
3428 
3429 void nvRMFreeEvoCoreChannel(NVDevEvoPtr pDevEvo)
3430 {
3431     if (pDevEvo->core != NULL) {
3432         EvoFreeCoreChannel(pDevEvo, pDevEvo->core);
3433         pDevEvo->core = NULL;
3434     }
3435 }
3436 
3437 /* Poll until an EVO channel on a particular subdevice has processed all its methods */
3438 static NvBool SyncOneEvoChannel(
3439     NVDevEvoPtr pDevEvo,
3440     NVEvoChannelPtr pChan,
3441     NvU32 sd,
3442     NvU32 errorToken)
3443 {
3444     NvBool isMethodPending;
3445     NvU64 startTime = 0;
3446     const NvU32 timeout = 2000000; // microseconds
3447 
3448     do {
3449         if (!pDevEvo->hal->IsChannelMethodPending(pDevEvo, pChan,
3450                                                   sd, &isMethodPending)) {
3451             return FALSE;
3452         }
3453 
3454         if (!isMethodPending) {
3455             break;
3456         }
3457 
3458         if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) {
3459             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
3460                         "Idling display engine timed out: 0x%08x:%d:%d:%d",
3461                         pChan->hwclass, pChan->instance,
3462                         sd, errorToken);
3463             return FALSE;
3464         }
3465 
3466         nvkms_yield();
3467 
3468     } while (TRUE);
3469 
3470     return TRUE;
3471 }
3472 
3473 /* Sync an EVO channel on all subdevices */
3474 NvBool nvRMSyncEvoChannel(
3475     NVDevEvoPtr pDevEvo,
3476     NVEvoChannelPtr pChannel,
3477     NvU32 errorToken)
3478 {
3479     NvBool ret = TRUE;
3480 
3481     if (pChannel) {
3482         NvU32 sd;
3483 
3484         nvDmaKickoffEvo(pChannel);
3485 
3486         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3487             if (!SyncOneEvoChannel(pDevEvo, pChannel, sd, errorToken)) {
3488                 ret = FALSE;
3489             }
3490         }
3491     }
3492 
3493     return ret;
3494 }
3495 
3496 
3497 /*
3498  * Wait for the requested base channel to be idle (no methods pending), and
3499  * call STOP_BASE if the wait times out.
3500  *
3501  * stoppedBase will be TRUE if calling STOP_BASE was necessary and
3502  * successful.
3503  */
3504 NvBool nvRMIdleBaseChannel(NVDevEvoPtr pDevEvo, NvU32 head, NvU32 sd,
3505                            NvBool *stoppedBase)
3506 {
3507     NVEvoChannelPtr pMainLayerChannel =
3508         pDevEvo->head[head].layer[NVKMS_MAIN_LAYER];
3509     NvU64 startTime = 0;
3510     NvBool idleTimedOut = FALSE;
3511     const NvU32 timeout = 2000000; // 2 seconds
3512     NvBool isMethodPending = TRUE;
3513     NvBool ret = TRUE;
3514 
3515     *stoppedBase = FALSE;
3516 
3517     do {
3518         if (!pDevEvo->hal->IsChannelMethodPending(pDevEvo,
3519                                                   pMainLayerChannel,
3520                                                   sd,
3521                                                   &isMethodPending)) {
3522             break;
3523         }
3524 
3525         if (!isMethodPending) {
3526             break;
3527         }
3528 
3529         if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) {
3530             idleTimedOut = TRUE;
3531             break;
3532         }
3533 
3534         nvkms_yield();
3535 
3536     } while (TRUE);
3537 
3538     if (idleTimedOut) {
3539         NVEvoIdleChannelState idleChannelState = { };
3540 
3541         idleChannelState.subdev[sd].channelMask |= pMainLayerChannel->channelMask;
3542         ret = pDevEvo->hal->ForceIdleSatelliteChannel(pDevEvo, &idleChannelState);
3543 
3544         *stoppedBase = ret;
3545     }
3546 
3547     return ret;
3548 }
3549 
3550 
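/*
 * Return TRUE if classID appears in the device's supported class list
 * (populated by GetClassList() during device allocation).  For example,
 * probing for a window channel class looks like:
 *
 *   if (nvRmEvoClassListCheck(pDevEvo, NVC37E_WINDOW_CHANNEL_DMA)) {
 *       // the class can be allocated on this device
 *   }
 */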
3551 NvBool nvRmEvoClassListCheck(const NVDevEvoRec *pDevEvo, NvU32 classID)
3552 {
3553     const NvU32 *classes = pDevEvo->supportedClasses;
3554 
3555     int i;
3556 
3557     nvAssert(pDevEvo->numClasses > 0);
3558 
3559     for (i = 0; i < pDevEvo->numClasses; i++) {
3560         if (classes[i] == classID) {
3561             return TRUE;
3562         }
3563     }
3564 
3565     return FALSE;
3566 }
3567 
3568 /*!
3569  * This API is used to register a syncpt object with RM.
3570  * It involves:
3571  * 1. Allocate a new NV01_MEMORY_SYNCPOINT syncpt object.
3572  * 2. Allocate a new ctxdma descriptor for the syncpt object.
3573  * 3. Bind the ctxdma entry to the channel.
3574  */
3575 NvBool nvRmEvoAllocAndBindSyncpt(
3576     NVDevEvoRec *pDevEvo,
3577     NVEvoChannel *pChannel,
3578     NvU32 id,
3579     NVSurfaceDescriptor *pSurfaceDesc,
3580     NVEvoSyncpt *pEvoSyncpt)
3581 {
3582     return FALSE;
3583 }
3584 
3585 void nvRmFreeSyncptHandle(
3586     NVDevEvoRec *pDevEvo,
3587     NVEvoSyncpt *pSyncpt)
3588 {
3589     nvRmApiFree(nvEvoGlobal.clientHandle,
3590                 pDevEvo->deviceHandle,
3591                 pSyncpt->hSyncpt);
3592     nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
3593                        pSyncpt->hSyncpt);
3594     pSyncpt->hSyncpt = 0;
3595 
3596     pDevEvo->hal->FreeSurfaceDescriptor(pDevEvo,
3597                                         pDevEvo->deviceHandle,
3598                                         &pSyncpt->surfaceDesc);
3599     pSyncpt->allocated = FALSE;
3600 }
3601 
3602 /*!
3603  * This API is used to unregister syncpt objects associated with the given
3604  * channel. It clears this channel from the global pre-syncpt table and frees
3605  * any syncpt that is no longer referenced by any channel.
3606  */
3607 void nvRmEvoFreePreSyncpt(
3608     NVDevEvoRec *pDevEvo,
3609     NVEvoChannel *pChannel)
3610 {
3611     NvU32 i;
3612     NvBool isChannelIdle = NV_FALSE;
3613 
3614     if (pChannel == NULL) {
3615         return;
3616     }
3617 
3618     if (!pDevEvo->supportsSyncpts) {
3619         return;
3620     }
3621 
3622     if (pChannel->channelMask == 0) {
3623         return;
3624     }
3625 
3626     pDevEvo->hal->IsChannelIdle(
3627         pDevEvo, pChannel, 0, &isChannelIdle);
3628 
3629     if (isChannelIdle == NV_FALSE) {
3630         return;
3631     }
3632 
3633     /*! Find pre-syncpt and free it */
3634     for (i = 0; i < NV_SYNCPT_GLOBAL_TABLE_LENGTH; i++) {
3635 
3636         pDevEvo->preSyncptTable[i].channelMask &= ~pChannel->channelMask;
3637         if (pDevEvo->preSyncptTable[i].channelMask == 0 &&
3638             pDevEvo->preSyncptTable[i].allocated) {
3639 
3640             /*! Free handles */
3641             nvRmFreeSyncptHandle(pDevEvo, &pDevEvo->preSyncptTable[i]);
3642         }
3643     }
3644 }
3645 
3646 /*!
3647  * This API is used to unregister the given syncpt object.
3648  */
3649 void nvRmEvoFreeSyncpt(
3650     NVDevEvoRec *pDevEvo,
3651     NVEvoSyncpt *pEvoSyncpt)
3652 {
3653     if ((pEvoSyncpt == NULL) || !pDevEvo->supportsSyncpts ||
3654         (pEvoSyncpt->id == NVKMS_SYNCPT_ID_INVALID)) {
3655         return;
3656     }
3657 
3658     /*! Put back the nvhost reference on the syncpt id */
3659     NvKmsSyncPtOpParams params = { };
3660     params.put.id = pEvoSyncpt->id;
3661     nvkms_syncpt_op(NVKMS_SYNCPT_OP_PUT, &params);
3662 
3663     /*! Free handles */
3664     nvRmFreeSyncptHandle(pDevEvo, pEvoSyncpt);
3665 }
3666 
3667 void nvRmEvoUnMapVideoMemory(NVDevEvoPtr pDevEvo, NvU32 memoryHandle,
3668                              void *subDeviceAddress[NVKMS_MAX_SUBDEVICES])
3669 {
3670     unsigned int sd;
3671     NvU32 ret;
3672 
3673     if (memoryHandle == 0) {
3674         return;
3675     }
3676 
3677     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3678         if (subDeviceAddress[sd] != NULL) {
3679             ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
3680                                      pDevEvo->pSubDevices[sd]->handle,
3681                                      memoryHandle,
3682                                      subDeviceAddress[sd],
3683                                      0);
3684 
3685             if (ret != NVOS_STATUS_SUCCESS) {
3686                 nvAssert(!"UnmapMemory() failed");
3687             }
3688         }
3689 
3690         subDeviceAddress[sd] = NULL;
3691     }
3692 }
3693 
3694 NvBool nvRmEvoMapVideoMemory(NVDevEvoPtr pDevEvo,
3695                              NvU32 memoryHandle, NvU64 size,
3696                              void *subDeviceAddress[NVKMS_MAX_SUBDEVICES],
3697                              NvU32 subDeviceMask)
3698 {
3699     NvU32 ret;
3700 
3701     unsigned int sd;
3702 
3703     nvkms_memset(subDeviceAddress, 0, sizeof(void*) * NVKMS_MAX_SUBDEVICES);
3704 
3705     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3706         void *address = NULL;
3707 
3708         if (((1 << sd) & subDeviceMask) == 0) {
3709             continue;
3710         }
3711 
3712         ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
3713                                pDevEvo->pSubDevices[sd]->handle,
3714                                memoryHandle,
3715                                0,
3716                                size,
3717                                &address,
3718                                0);
3719 
3720         if (ret != NVOS_STATUS_SUCCESS) {
3721             nvRmEvoUnMapVideoMemory(pDevEvo, memoryHandle, subDeviceAddress);
3722             return FALSE;
3723         }
3724         subDeviceAddress[sd] = address;
3725     }
3726     return TRUE;
3727 }
3728 
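/*
 * Query the RM class list with the usual two-pass pattern: the first
 * NV0080_CTRL_CMD_GPU_GET_CLASSLIST call (classList == NULL) returns only the
 * count, and the second call fills the freshly allocated array.
 */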
3729 static NvBool GetClassList(NVDevEvoPtr pDevEvo)
3730 {
3731     NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS classListParams = { 0 };
3732     NvU32 ret;
3733 
3734     classListParams.numClasses = 0;
3735     classListParams.classList = NvP64_NULL;
3736 
3737     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3738                          pDevEvo->deviceHandle,
3739                          NV0080_CTRL_CMD_GPU_GET_CLASSLIST,
3740                          &classListParams, sizeof(classListParams));
3741 
3742     if (ret != NVOS_STATUS_SUCCESS) {
3743         return FALSE;
3744     }
3745 
3746     pDevEvo->supportedClasses =
3747         nvCalloc(classListParams.numClasses, sizeof(NvU32));
3748 
3749     if (pDevEvo->supportedClasses == NULL) {
3750         return FALSE;
3751     }
3752 
3753     classListParams.classList = NV_PTR_TO_NvP64(pDevEvo->supportedClasses);
3754 
3755     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3756                          pDevEvo->deviceHandle,
3757                          NV0080_CTRL_CMD_GPU_GET_CLASSLIST,
3758                          &classListParams, sizeof(classListParams));
3759 
3760     if (ret != NVOS_STATUS_SUCCESS) {
3761         nvFree(pDevEvo->supportedClasses);
3762         pDevEvo->supportedClasses = NULL;
3763         return FALSE;
3764     }
3765 
3766     pDevEvo->numClasses = classListParams.numClasses;
3767 
3768     return TRUE;
3769 }
3770 
3771 static NvBool GetEngineListOneSubDevice(NVDevEvoPtr pDevEvo, NvU32 sd)
3772 {
3773     NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS engineListParams = { 0 };
3774     NvU32 ret;
3775     NVSubDeviceEvoPtr pSubDevice = pDevEvo->pSubDevices[sd];
3776     size_t length;
3777 
3778     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3779                          pSubDevice->handle,
3780                          NV2080_CTRL_CMD_GPU_GET_ENGINES_V2,
3781                          &engineListParams, sizeof(engineListParams));
3782 
3783     if (ret != NVOS_STATUS_SUCCESS) {
3784         return FALSE;
3785     }
3786 
3787     if (engineListParams.engineCount == 0) {
3788         return TRUE;
3789     }
3790 
3791     length = engineListParams.engineCount * sizeof(NvU32);
3792 
3793     pSubDevice->supportedEngines = nvAlloc(length);
3794 
3795     if (pSubDevice->supportedEngines == NULL) {
3796         return FALSE;
3797     }
3798 
3799     nvkms_memcpy(pSubDevice->supportedEngines,
3800                  engineListParams.engineList,
3801                  length);
3802     pSubDevice->numEngines = engineListParams.engineCount;
3803 
3804     return TRUE;
3805 }
3806 
3807 static NvBool GetEngineList(NVDevEvoPtr pDevEvo)
3808 {
3809     int sd;
3810 
3811     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
3812         if (!GetEngineListOneSubDevice(pDevEvo, sd)) {
3813             return FALSE;
3814         }
3815     }
3816 
3817     return TRUE;
3818 }
3819 
3820 static void FreeSubDevice(NVDevEvoPtr pDevEvo, NVSubDeviceEvoPtr pSubDevice)
3821 {
3822     if (pSubDevice == NULL) {
3823         return;
3824     }
3825 
3826     if (pSubDevice->handle != 0) {
3827         nvRmApiFree(nvEvoGlobal.clientHandle,
3828                     pDevEvo->deviceHandle,
3829                     pSubDevice->handle);
3830         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSubDevice->handle);
3831     }
3832 
3833     if (pSubDevice->gpuString[0] != '\0') {
3834         nvEvoLogDebug(EVO_LOG_INFO, "Freed %s", pSubDevice->gpuString);
3835     }
3836 
3837     nvFree(pSubDevice->supportedEngines);
3838 
3839     nvFree(pSubDevice);
3840 }
3841 
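/*
 * Allocate an NV20_SUBDEVICE_0 object for subdevice sd and build its
 * gpuString from the GPU id, UUID, and PCI bus address.  The UUID and PCI
 * queries are allowed to fail; the corresponding fields are then left
 * empty/zero in the resulting string.
 */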
3842 static NVSubDeviceEvoPtr AllocSubDevice(NVDevEvoPtr pDevEvo, const NvU32 sd)
3843 {
3844     NV2080_ALLOC_PARAMETERS subdevAllocParams = { 0 };
3845     NV2080_CTRL_GPU_GET_ID_PARAMS getIdParams = { 0 };
3846     NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidParams = NULL;
3847     NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS pciInfoParams = { 0 };
3848     NvU32 ret;
3849     const char *uuid;
3850 
3851     NVSubDeviceEvoPtr pSubDevice = nvCalloc(1, sizeof(*pSubDevice));
3852 
3853     if (pSubDevice == NULL) {
3854         goto failure;
3855     }
3856 
3857     pSubDevice->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
3858 
3859     subdevAllocParams.subDeviceId = sd;
3860 
3861     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
3862                        pDevEvo->deviceHandle,
3863                        pSubDevice->handle,
3864                        NV20_SUBDEVICE_0,
3865                        &subdevAllocParams);
3866 
3867     if (ret != NVOS_STATUS_SUCCESS) {
3868         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize subDevice");
3869         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSubDevice->handle);
3870         pSubDevice->handle = 0;
3871         goto failure;
3872     }
3873 
3874     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3875                          pSubDevice->handle,
3876                          NV2080_CTRL_CMD_GPU_GET_ID,
3877                          &getIdParams,
3878                          sizeof(getIdParams));
3879 
3880     if (ret != NVOS_STATUS_SUCCESS) {
3881         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to identify GPU");
3882         goto failure;
3883     }
3884 
3885     pSubDevice->gpuId = getIdParams.gpuId;
3886 
3887     /* Query the UUID for the gpuString. */
3888 
3889     pGidParams = nvCalloc(1, sizeof(*pGidParams));
3890 
3891     if (pGidParams == NULL) {
3892         goto failure;
3893     }
3894 
3895     pGidParams->flags =
3896         DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _ASCII) |
3897         DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1);
3898 
3899     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3900                          pSubDevice->handle,
3901                          NV2080_CTRL_CMD_GPU_GET_GID_INFO,
3902                          pGidParams,
3903                          sizeof(*pGidParams));
3904 
3905     if (ret != NVOS_STATUS_SUCCESS) {
3906         /* If the query failed, make sure the UUID is cleared out. */
3907         nvkms_memset(pGidParams, 0, sizeof(*pGidParams));
3908     }
3909 
3910     /* Query the PCI bus address for the gpuString. */
3911 
3912     pciInfoParams.gpuId = pSubDevice->gpuId;
3913 
3914     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3915                          nvEvoGlobal.clientHandle,
3916                          NV0000_CTRL_CMD_GPU_GET_PCI_INFO,
3917                          &pciInfoParams, sizeof(pciInfoParams));
3918 
3919     if (ret != NVOS_STATUS_SUCCESS) {
3920         /* If the query failed, make sure the PCI bus address is cleared out. */
3921         nvkms_memset(&pciInfoParams, 0, sizeof(pciInfoParams));
3922     }
3923 
3924     pSubDevice->gpuLogIndex = nvGetGpuLogIndex();
3925 
3926     /*
3927      * Create the gpuString, using this example format:
3928      * GPU:0 (GPU-af2422f5-2719-29de-567f-ac899cf458c4) @ PCI:0000:01:00.0
3929      */
3930     if ((pGidParams->data[0] == '\0') || (pGidParams->length == 0)) {
3931         uuid = "";
3932     } else {
3933         uuid = (const char *) pGidParams->data;
3934     }
3935 
3936     nvkms_snprintf(pSubDevice->gpuString, sizeof(pSubDevice->gpuString),
3937                    "GPU:%d (%s) @ PCI:%04x:%02x:%02x.0",
3938                    pSubDevice->gpuLogIndex, uuid,
3939                    pciInfoParams.domain,
3940                    pciInfoParams.bus,
3941                    pciInfoParams.slot);
3942 
3943     pSubDevice->gpuString[sizeof(pSubDevice->gpuString) - 1] = '\0';
3944 
3945     nvEvoLogDebug(EVO_LOG_INFO, "Allocated %s", pSubDevice->gpuString);
3946     nvFree(pGidParams);
3947 
3948     return pSubDevice;
3949 
3950 failure:
3951     FreeSubDevice(pDevEvo, pSubDevice);
3952     nvFree(pGidParams);
3953 
3954     return NULL;
3955 }
3956 
3957 static void CloseDevice(NVDevEvoPtr pDevEvo)
3958 {
3959     NvU32 i;
3960 
3961     for (i = 0; i < ARRAY_LEN(pDevEvo->openedGpuIds); i++) {
3962         const NvU32 gpuId = pDevEvo->openedGpuIds[i];
3963 
3964         if (gpuId == NV0000_CTRL_GPU_INVALID_ID) {
3965             break;
3966         }
3967 
3968         nvkms_close_gpu(gpuId);
3969         pDevEvo->openedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
3970     }
3971 }
3972 
3973 static NvBool OpenTegraDevice(NVDevEvoPtr pDevEvo)
3974 {
3975     NV0000_CTRL_GPU_GET_ID_INFO_PARAMS params = { 0 };
3976     nv_gpu_info_t *gpu_info = NULL;
3977     NvU32 ret, gpu_count = 0;
3978 
3979     nvAssert(pDevEvo->deviceId == NVKMS_DEVICE_ID_TEGRA);
3980 
3981     gpu_info = nvAlloc(NV_MAX_GPUS * sizeof(*gpu_info));
3982     if (gpu_info == NULL) {
3983         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate GPU ids arrays");
3984         goto fail;
3985     }
3986 
3987     gpu_count = nvkms_enumerate_gpus(gpu_info);
3988     if (gpu_count == 0) {
3989         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "No NVIDIA GPUs found");
3990         goto fail;
3991     }
3992 
3993     if (gpu_count != 1) {
3994         // XXX If the system has both Tegra/iGPU and dGPU, it is not
3995         // guaranteed to find the Tegra, so fail.
3996         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "More than one NVIDIA GPU found "
3997                     "in a Tegra configuration where only Tegra is expected.");
3998         goto fail;
3999     }
4000 
4001     if (!nvkms_open_gpu(gpu_info[0].gpu_id)) {
4002         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to open GPU");
4003         goto fail;
4004     }
4005 
4006     pDevEvo->openedGpuIds[0] = gpu_info[0].gpu_id;
4007     params.gpuId = gpu_info[0].gpu_id;
4008 
4009     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4010                          nvEvoGlobal.clientHandle,
4011                          NV0000_CTRL_CMD_GPU_GET_ID_INFO,
4012                          &params, sizeof(params));
4013 
4014     if (ret != NVOS_STATUS_SUCCESS) {
4015         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to find GPU ID");
4016         goto fail;
4017     }
4018 
4019     pDevEvo->deviceId = params.deviceInstance;
4020 
4021     nvFree(gpu_info);
4022     return TRUE;
4023 
4024 fail:
4025     nvFree(gpu_info);
4026     CloseDevice(pDevEvo);
4027     return FALSE;
4028 }
4029 
4030 static NvBool OpenDevice(NVDevEvoPtr pDevEvo)
4031 {
4032     NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS idParams = { };
4033     NvU32 ret, i, gpuIdIndex = 0;
4034 
4035     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4036                          nvEvoGlobal.clientHandle,
4037                          NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS,
4038                          &idParams, sizeof(idParams));
4039 
4040     if (ret != NVOS_STATUS_SUCCESS) {
4041         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to query attached GPUs");
4042         goto fail;
4043     }
4044 
4045     ct_assert(ARRAY_LEN(pDevEvo->openedGpuIds) >= ARRAY_LEN(idParams.gpuIds));
4046 
4047     for (i = 0; i < ARRAY_LEN(idParams.gpuIds); i++) {
4048         NV0000_CTRL_GPU_GET_ID_INFO_PARAMS params = { 0 };
4049         const NvU32 gpuId = idParams.gpuIds[i];
4050 
4051         if (gpuId == NV0000_CTRL_GPU_INVALID_ID) {
4052             break;
4053         }
4054 
4055         nvAssert(pDevEvo->openedGpuIds[gpuIdIndex] ==
4056                  NV0000_CTRL_GPU_INVALID_ID);
4057 
4058         params.gpuId = gpuId;
4059 
4060         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4061                              nvEvoGlobal.clientHandle,
4062                              NV0000_CTRL_CMD_GPU_GET_ID_INFO,
4063                              &params, sizeof(params));
4064 
4065         if (ret != NVOS_STATUS_SUCCESS) {
4066             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to find GPU ID");
4067             goto fail;
4068         }
4069 
4070         if (pDevEvo->deviceId != params.deviceInstance) {
4071             continue;
4072         }
4073 
4074         if (!nvkms_open_gpu(gpuId)) {
4075             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to open GPU");
4076             goto fail;
4077         }
4078 
4079         pDevEvo->openedGpuIds[gpuIdIndex++] = gpuId;
4080     }
4081 
4082     return TRUE;
4083 
4084 fail:
4085     CloseDevice(pDevEvo);
4086     return FALSE;
4087 }
4088 
4089 static void FreeGpuVASpace(NVDevEvoPtr pDevEvo)
4090 {
4091     if (pDevEvo->nvkmsGpuVASpace != 0) {
4092         nvRmApiFree(nvEvoGlobal.clientHandle,
4093                     pDevEvo->deviceHandle,
4094                     pDevEvo->nvkmsGpuVASpace);
4095         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4096                            pDevEvo->nvkmsGpuVASpace);
4097         pDevEvo->nvkmsGpuVASpace = 0;
4098     }
4099 }
4100 
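/*
 * Give NVKMS its own GPU virtual address space handle by allocating an
 * NV01_MEMORY_VIRTUAL object with no limit, backed by the client's default
 * VA space.
 */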
4101 static NvBool AllocGpuVASpace(NVDevEvoPtr pDevEvo)
4102 {
4103     NvU32 ret;
4104     NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS memoryVirtualParams = { };
4105 
4106     pDevEvo->nvkmsGpuVASpace =
4107         nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4108 
4109     memoryVirtualParams.offset = 0;
4110     memoryVirtualParams.limit = 0;          // no limit on VA space
4111     memoryVirtualParams.hVASpace = 0;       // client's default VA space
4112 
4113     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
4114                        pDevEvo->deviceHandle,
4115                        pDevEvo->nvkmsGpuVASpace,
4116                        NV01_MEMORY_VIRTUAL,
4117                        &memoryVirtualParams);
4118 
4119     if (ret != NVOS_STATUS_SUCCESS) {
4120         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4121                            pDevEvo->nvkmsGpuVASpace);
4122         pDevEvo->nvkmsGpuVASpace = 0;
4123         return FALSE;
4124     }
4125 
4126     return TRUE;
4127 }
4128 
4129 static void NonStallInterruptCallback(
4130     void *arg,
4131     void *pEventDataVoid,
4132     NvU32 hEvent,
4133     NvU32 data,
4134     NV_STATUS status)
4135 {
4136     /*
4137      * We are called within resman's locks.  Schedule a separate callback to
4138      * execute with the nvkms_lock.
4139      *
4140      * XXX It might be nice to use a lighter-weight lock here to check if any
4141      * requests are pending in any NvKmsDeferredRequestFifo before scheduling
4142      * nvKmsServiceNonStallInterrupt().
4143      */
4144 
4145     (void) nvkms_alloc_timer_with_ref_ptr(
4146         nvKmsServiceNonStallInterrupt, /* callback */
4147         arg, /* argument (this is a ref_ptr to a pDevEvo) */
4148         0,   /* dataU32 */
4149         0);  /* usec */
4150 }
4151 
4152 static void UnregisterNonStallInterruptCallback(NVDevEvoPtr pDevEvo)
4153 {
4154     if (pDevEvo->nonStallInterrupt.handle != 0) {
4155         NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS
4156             eventNotificationParams = { 0 };
4157 
4158         eventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD;
4159         eventNotificationParams.action =
4160             NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
4161         nvRmApiControl(nvEvoGlobal.clientHandle,
4162                        pDevEvo->pSubDevices[0]->handle,
4163                        NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
4164                        &eventNotificationParams,
4165                        sizeof(eventNotificationParams));
4166 
4167         nvRmApiFree(nvEvoGlobal.clientHandle,
4168                     pDevEvo->pSubDevices[0]->handle,
4169                     pDevEvo->nonStallInterrupt.handle);
4170 
4171         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4172                            pDevEvo->nonStallInterrupt.handle);
4173     }
4174 
4175     pDevEvo->nonStallInterrupt.handle = 0;
4176 }
4177 
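/*
 * Register NonStallInterruptCallback() for FIFO non-stall interrupt events
 * (NV2080_NOTIFIERS_FIFO_EVENT_MTHD) on subdevice 0 and enable repeated
 * notification; the callback schedules a timer that runs
 * nvKmsServiceNonStallInterrupt() under the nvkms_lock.
 */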
4178 static NvBool RegisterNonStallInterruptCallback(NVDevEvoPtr pDevEvo)
4179 {
4180     NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS eventNotificationParams = { 0 };
4181 
4182     pDevEvo->nonStallInterrupt.handle =
4183         nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4184 
4185     if (!nvRmRegisterCallback(pDevEvo,
4186                               &pDevEvo->nonStallInterrupt.callback,
4187                               pDevEvo->ref_ptr,
4188                               pDevEvo->pSubDevices[0]->handle,
4189                               pDevEvo->nonStallInterrupt.handle,
4190                               NonStallInterruptCallback,
4191                               NV2080_NOTIFIERS_FIFO_EVENT_MTHD |
4192                               NV01_EVENT_NONSTALL_INTR)) {
4193         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
4194                          "Failed to register nonstall interrupt callback");
4195         goto failure_free_handle;
4196     }
4197 
4198     // Setup event notification
4199     eventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD;
4200     eventNotificationParams.action =
4201         NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
4202 
4203     if (nvRmApiControl(nvEvoGlobal.clientHandle,
4204                        pDevEvo->pSubDevices[0]->handle,
4205                        NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
4206                        &eventNotificationParams,
4207                        sizeof(eventNotificationParams))
4208         != NVOS_STATUS_SUCCESS) {
4209         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
4210                          "Failed to set nonstall interrupt notification");
4211         goto failure_free_callback_and_handle;
4212     }
4213 
4214     return TRUE;
4215 
4216 failure_free_callback_and_handle:
4217     nvRmApiFree(nvEvoGlobal.clientHandle,
4218                 pDevEvo->pSubDevices[0]->handle,
4219                 pDevEvo->nonStallInterrupt.handle);
4220 failure_free_handle:
4221     nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4222                        pDevEvo->nonStallInterrupt.handle);
4223     pDevEvo->nonStallInterrupt.handle = 0;
4224     return FALSE;
4225 }
4226 
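/*
 * Top-level device allocation: set up the DP timer and per-device RM handle
 * allocator, open the GPU(s), allocate the NV01_DEVICE_0 object and one
 * subdevice per GPU, fetch the class and engine lists, register the non-stall
 * interrupt callback, create the NVKMS GPU VA space, and allocate the nvpush
 * device.  On any failure, nvRmFreeDeviceEvo() unwinds whatever was set up.
 */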
4227 NvBool nvRmAllocDeviceEvo(NVDevEvoPtr pDevEvo,
4228                           const struct NvKmsAllocDeviceRequest *pRequest)
4229 {
4230     NV0080_ALLOC_PARAMETERS allocParams = { 0 };
4231     NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS getNumSubDevicesParams = { 0 };
4232     NvU32 ret, sd;
4233 
4234     if (nvEvoGlobal.clientHandle == 0) {
4235         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Client handle not initialized");
4236         goto failure;
4237     }
4238 
4239     /*
4240      * RM deviceIds should be within [0,NV_MAX_DEVICES); check
4241      * that the client provided a value in range, and add one when
4242      * using deviceId as the per-device unique identifier in the
4243      * RM handle allocator: the identifier is expected to be != 0.
4244      */
4245 
4246     if ((pRequest->deviceId >= NV_MAX_DEVICES) &&
4247         (pRequest->deviceId != NVKMS_DEVICE_ID_TEGRA)) {
4248         goto failure;
4249     }
4250 
4251     pDevEvo->dpTimer = nvDPAllocTimer(pDevEvo);
4252     if (!pDevEvo->dpTimer) {
4253         goto failure;
4254     }
4255 
4256     if (!nvInitUnixRmHandleAllocator(
4257             &pDevEvo->handleAllocator,
4258             nvEvoGlobal.clientHandle,
4259             NVKMS_RM_HANDLE_SPACE_DEVICE(pRequest->deviceId))) {
4260         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize handles");
4261         goto failure;
4262     }
4263 
4264     pDevEvo->deviceHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4265 
4266     pDevEvo->deviceId = pRequest->deviceId;
4267     pDevEvo->sli.mosaic = pRequest->sliMosaic;
4268 
4269     if (pRequest->deviceId == NVKMS_DEVICE_ID_TEGRA) {
4270         /*
4271          * On Tegra, the NVKMS client is not a desktop RM client, so
4272          * enumerate and open the first GPU.
4273          */
4274         if (!OpenTegraDevice(pDevEvo)) {
4275             goto failure;
4276         }
4277 
4278         pDevEvo->usesTegraDevice = TRUE;
4279     } else if (!OpenDevice(pDevEvo)) {
4280         goto failure;
4281     }
4282 
4283     allocParams.deviceId = pDevEvo->deviceId;
4284 
4285     /* Give NVKMS a private GPU virtual address space. */
4286     allocParams.hClientShare = nvEvoGlobal.clientHandle;
4287 
4288     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
4289                        nvEvoGlobal.clientHandle,
4290                        pDevEvo->deviceHandle,
4291                        NV01_DEVICE_0,
4292                        &allocParams);
4293 
4294     if (ret != NVOS_STATUS_SUCCESS) {
4295         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize device");
4296         goto failure;
4297     }
4298 
4299     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4300                          pDevEvo->deviceHandle,
4301                          NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES,
4302                          &getNumSubDevicesParams,
4303                          sizeof(getNumSubDevicesParams));
4304 
4305     if (ret != NVOS_STATUS_SUCCESS) {
4306         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
4307                     "Failed to determine number of GPUs");
4308         goto failure;
4309     }
4310 
4311     ct_assert(NVKMS_MAX_SUBDEVICES == NV_MAX_SUBDEVICES);
4312     if ((getNumSubDevicesParams.numSubDevices == 0) ||
4313         (getNumSubDevicesParams.numSubDevices >
4314          ARRAY_LEN(pDevEvo->pSubDevices))) {
4315         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported number of GPUs: %d",
4316                     getNumSubDevicesParams.numSubDevices);
4317         goto failure;
4318     }
4319 
4320     pDevEvo->numSubDevices = getNumSubDevicesParams.numSubDevices;
4321 
4322     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
4323         pDevEvo->pSubDevices[sd] = AllocSubDevice(pDevEvo, sd);
4324         if (pDevEvo->pSubDevices[sd] == NULL) {
4325             goto failure;
4326         }
4327     }
4328 
4329     pDevEvo->gpuLogIndex = pDevEvo->pSubDevices[0]->gpuLogIndex;
4330 
4331     if (!GetClassList(pDevEvo) || !GetEngineList(pDevEvo)) {
4332         goto failure;
4333     }
4334 
4335     if (!RegisterNonStallInterruptCallback(pDevEvo)) {
4336         goto failure;
4337     }
4338 
4339     if (!AllocGpuVASpace(pDevEvo)) {
4340         goto failure;
4341     }
4342 
4343     if (!nvAllocNvPushDevice(pDevEvo)) {
4344         goto failure;
4345     }
4346 
4347     return TRUE;
4348 
4349 failure:
4350     nvRmFreeDeviceEvo(pDevEvo);
4351     return FALSE;
4352 }
4353 
4354 void nvRmFreeDeviceEvo(NVDevEvoPtr pDevEvo)
4355 {
4356     NvU32 sd;
4357 
4358     nvFreeNvPushDevice(pDevEvo);
4359 
4360     FreeGpuVASpace(pDevEvo);
4361 
4362     UnregisterNonStallInterruptCallback(pDevEvo);
4363 
4364     nvFree(pDevEvo->supportedClasses);
4365     pDevEvo->supportedClasses = NULL;
4366 
4367     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
4368         FreeSubDevice(pDevEvo, pDevEvo->pSubDevices[sd]);
4369         pDevEvo->pSubDevices[sd] = NULL;
4370     }
4371 
4372     if (pDevEvo->deviceHandle != 0) {
4373         nvRmApiFree(nvEvoGlobal.clientHandle,
4374                     nvEvoGlobal.clientHandle,
4375                     pDevEvo->deviceHandle);
4376         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->deviceHandle);
4377         pDevEvo->deviceHandle = 0;
4378     }
4379 
4380     nvTearDownUnixRmHandleAllocator(&pDevEvo->handleAllocator);
4381 
4382     nvDPFreeTimer(pDevEvo->dpTimer);
4383     pDevEvo->dpTimer = NULL;
4384 
4385     CloseDevice(pDevEvo);
4386 }
4387 
4388 /*
4389  * Set up a DIFR notifier listener to drive framebuffer prefetching once the
4390  * hardware becomes sufficiently idle.
4391  */
4392 NvBool nvRmRegisterDIFREventHandler(NVDevEvoPtr pDevEvo)
4393 {
4394     pDevEvo->difrPrefetchEventHandler =
4395         nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4396 
4397     if (pDevEvo->difrPrefetchEventHandler != 0) {
4398         NvBool registered;
4399 
4400         /*
4401          * Allocate event callback.
4402          */
4403         registered = nvRmRegisterCallback(
4404             pDevEvo,
4405             &pDevEvo->difrPrefetchCallback,
4406             pDevEvo->ref_ptr,
4407             pDevEvo->pSubDevices[0]->handle,
4408             pDevEvo->difrPrefetchEventHandler,
4409             DifrPrefetchEvent,
4410             NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST);
4411 
4412         /*
4413          * Configure event notification.
4414          */
4415         if (registered) {
4416             NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS prefetchEventParams = { 0 };
4417 
4418             prefetchEventParams.event = NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST;
4419             prefetchEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
4420 
4421             if (nvRmApiControl(nvEvoGlobal.clientHandle,
4422                                pDevEvo->pSubDevices[0]->handle,
4423                                NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
4424                                &prefetchEventParams,
4425                                sizeof(prefetchEventParams))
4426                 == NVOS_STATUS_SUCCESS) {
4427                 return TRUE;
4428 
4429             }
4430         }
4431         nvRmUnregisterDIFREventHandler(pDevEvo);
4432     }
4433     return FALSE;
4434 }
4435 
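/*
 * Tear down the DIFR prefetch event handler: disable RM event notification,
 * free the event callback object, and release its handle.
 */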
4436 void nvRmUnregisterDIFREventHandler(NVDevEvoPtr pDevEvo)
4437 {
4438     if (pDevEvo->difrPrefetchEventHandler != 0) {
4439         NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS prefetchEventParams = { 0 };
4440 
4441         prefetchEventParams.event = NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST;
4442         prefetchEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
4443 
4444         nvRmApiControl(nvEvoGlobal.clientHandle,
4445                        pDevEvo->pSubDevices[0]->handle,
4446                        NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
4447                        &prefetchEventParams,
4448                        sizeof(prefetchEventParams));
4449 
4450         nvRmApiFree(nvEvoGlobal.clientHandle,
4451                     pDevEvo->pSubDevices[0]->handle,
4452                     pDevEvo->difrPrefetchEventHandler);
4453 
4454         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4455                            pDevEvo->difrPrefetchEventHandler);
4456         pDevEvo->difrPrefetchEventHandler = 0;
4457     }
4458 }
4459 
4460 
4461 /*!
4462  * Determine whether all the dpys in the dpyIdList can be activated together.
4463  *
4464  * \param[in]   pDispEvo         The disp on which we search for a head.
4465  * \param[in]   dpyIdList        The dpys to test.
4466  *
4467  * \return      Return TRUE if all dpys can be driven simultaneously.
4468  */
4469 NvBool nvRmIsPossibleToActivateDpyIdList(NVDispEvoPtr pDispEvo,
4470                                          const NVDpyIdList dpyIdList)
4471 {
4472     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4473     NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS mapParams = { 0 };
4474     NvU32 ret = 0;
4475 
4476     /* Trivially accept an empty dpyIdList. */
4477 
4478     if (nvDpyIdListIsEmpty(dpyIdList)) {
4479         return TRUE;
4480     }
4481 
4482     /* don't even try if EVO isn't initialized (e.g. during a VT switch) */
4483 
4484     if (!pDevEvo->gpus) {
4485         return FALSE;
4486     }
4487 
4488     /* build a mask of all the displays to use */
4489 
4490     mapParams.subDeviceInstance = pDispEvo->displayOwner;
4491 
4492     mapParams.displayMask = nvDpyIdListToNvU32(dpyIdList);
4493 
4494     /* ask RM for the head routing */
4495 
4496     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4497                          pDevEvo->displayCommonHandle,
4498                          NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP,
4499                          &mapParams,
4500                          sizeof(mapParams));
4501 
4502     if ((ret != NVOS_STATUS_SUCCESS) || (mapParams.displayMask == 0)) {
4503         char *dpyIdListStr = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList);
4504         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
4505                     "The requested configuration of display devices "
4506                     "(%s) is not supported on this GPU.",
4507                     nvSafeString(dpyIdListStr, "unknown"));
4508         nvFree(dpyIdListStr);
4509 
4510         return FALSE;
4511     }
4512 
4513     /* make sure we got everything we asked for */
4514 
4515     if (mapParams.displayMask != nvDpyIdListToNvU32(dpyIdList)) {
4516         char *requestedDpyIdListStr;
4517         char *returnedDpyIdListStr;
4518 
4519         requestedDpyIdListStr =
4520             nvGetDpyIdListStringEvo(pDispEvo, dpyIdList);
4521 
4522         returnedDpyIdListStr =
4523             nvGetDpyIdListStringEvo(pDispEvo,
4524                                     nvNvU32ToDpyIdList(mapParams.displayMask));
4525 
4526         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
4527                     "The requested configuration of display devices "
4528                     "(%s) is not supported on this GPU; "
4529                     "%s is recommended, instead.",
4530                     nvSafeString(requestedDpyIdListStr, "unknown"),
4531                     nvSafeString(returnedDpyIdListStr, "unknown"));
4532 
4533         nvFree(requestedDpyIdListStr);
4534         nvFree(returnedDpyIdListStr);
4535 
4536         return FALSE;
4537     }
4538 
4539     return TRUE;
4540 }
4541 
4542 
4543 /*!
4544  * Tell the RM to save or restore the console VT state.
4545  *
4546  * \param[in]   cmd    The VT state command (save or restore) to send to RM.
4547  *
4548  * \return      TRUE on success, FALSE on failure.
4549  */
4550 NvBool nvRmVTSwitch(NVDevEvoPtr pDevEvo, NvU32 cmd)
4551 {
4552     NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS params = { 0 };
4553     NvU32 ret;
4554 
4555     params.cmd = cmd;
4556     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4557                          pDevEvo->deviceHandle,
4558                          NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH,
4559                          &params, sizeof(params));
4560 
4561     if (ret != NVOS_STATUS_SUCCESS) {
4562         return FALSE;
4563     }
4564 
4565     return TRUE;
4566 }
4567 
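/*!
 * Query the framebuffer console information from RM and cache it in
 * pDevEvo->vtFbInfo.
 *
 * \return      TRUE on success, FALSE on failure.
 */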
4568 NvBool nvRmGetVTFBInfo(NVDevEvoPtr pDevEvo)
4569 {
4570     NvU32 ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4571                      pDevEvo->deviceHandle,
4572                      NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO,
4573                      &pDevEvo->vtFbInfo, sizeof(pDevEvo->vtFbInfo));
4574 
4575     if (ret != NVOS_STATUS_SUCCESS) {
4576         return FALSE;
4577     }
4578 
4579     return TRUE;
4580 }
4581 
4582 /*!
4583  * Import the current framebuffer console memory, for later use with NVKMS-based
4584  * console restore.
4585  *
4586  * Note this relies on pDevEvo->vtFbInfo, populated by nvRmGetVTFBInfo().
4587  *
4588  * There are several cases in which NVKMS cannot perform console restore:
4589  *
4590  * - Anything other than linear frame buffer consoles (e.g., VGA text modes,
4591  *   non-linear or paletted graphical modes, etc.).  For those, resman cannot
4592  *   query the framebuffer dimensions from the kernel,
4593  *   NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE returns empty fbInfo
4594  *   params, and consequently pDevEvo->vtFbInfo.width == 0.
4595  *
4596  * - Linear frame buffer console with an unaligned pitch.  In this case,
4597  *   nvEvoRegisterSurface() will fail: it has to ensure the surface registration
4598  *   satisfies the EVO method interface requirement that PITCH surfaces are
4599  *   multiples of 256 bytes.  Consequently, pDevEvo->fbConsoleSurfaceHandle will
4600  *   be 0.
4601  *
4602  * - Depth 8 frame buffer consoles: these are color index, and cannot be
4603  *   supported by NVKMS console restore because they require the VGA palette,
4604  *   which exists in special RAM in the VGA core, so we can't name it with a
4605  *   ctxdma that we can feed into EVO's LUT.  The pFbInfo->depth switch below
4606  *   will reject depth 8.
4607  */
 */
4608 void nvRmImportFbConsoleMemory(NVDevEvoPtr pDevEvo)
4609 {
4610     NvU32 ret;
4611     struct NvKmsRegisterSurfaceParams registration = { };
4612     const NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pFbInfo = &pDevEvo->vtFbInfo;
4613     NvHandle hMemory;
4614 
4615     nvAssert(pDevEvo->fbConsoleSurfaceHandle == 0);
4616 
4617     if (pFbInfo->width == 0) {
4618         // No console memory to map.
4619         return;
4620     }
4621 
4622     switch (pFbInfo->depth) {
4623     case 15:
4624         registration.request.format = NvKmsSurfaceMemoryFormatX1R5G5B5;
4625         break;
4626     case 16:
4627         registration.request.format = NvKmsSurfaceMemoryFormatR5G6B5;
4628         break;
4629     case 32:
4630         // That's a lie, it's really depth 24. Fall through.
4631     case 24:
4632         registration.request.format = NvKmsSurfaceMemoryFormatX8R8G8B8;
4633         break;
4634     default:
4635         nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN,
4636                          "Unsupported framebuffer console depth %d",
4637                          pFbInfo->depth);
4638         return;
4639     }
4640 
4641     hMemory = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4642     if (hMemory == 0) {
4643         return;
4644     }
4645 
4646     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
4647                        pDevEvo->deviceHandle,
4648                        hMemory,
4649                        NV01_MEMORY_FRAMEBUFFER_CONSOLE,
4650                        NULL);
4651 
4652     if (ret != NVOS_STATUS_SUCCESS) {
4653         nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN,
4654                          "Failed to map framebuffer console memory");
4655         goto done;
4656     }
4657 
4658     registration.request.useFd = FALSE;
4659     registration.request.rmClient = nvEvoGlobal.clientHandle;
4660     registration.request.widthInPixels = pFbInfo->width;
4661     registration.request.heightInPixels = pFbInfo->height;
4662     registration.request.layout = NvKmsSurfaceMemoryLayoutPitch;
4663 
4664     registration.request.planes[0].u.rmObject = hMemory;
4665     registration.request.planes[0].pitch = pFbInfo->pitch;
4666     registration.request.planes[0].rmObjectSizeInBytes =
4667         (NvU64) pFbInfo->height * (NvU64) pFbInfo->pitch;
4668 
4669     nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, &registration,
4670                          NvHsMapPermissionsNone);
4671 
4672     pDevEvo->fbConsoleSurfaceHandle = registration.reply.surfaceHandle;
4673 
4674     // nvEvoRegisterSurface dups the handle, so we can free the one we just
4675     // imported.
4676     nvRmApiFree(nvEvoGlobal.clientHandle,
4677                 nvEvoGlobal.clientHandle,
4678                 hMemory);
4679 done:
4680     nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hMemory);
4681 }
4682 
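/*
 * Decode and log a single DP AUX / I2C-over-AUX transaction reported by the
 * RM AUX logger.
 */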
4683 static void LogAuxPacket(const NVDispEvoRec *pDispEvo, const DPAUXPACKET *pkt)
4684 {
4685     const char *req, *rep;
4686     char str[DP_MAX_MSG_SIZE * 3 + 1];
4687     char *p = str;
4688     int i;
4689 
4690     switch (DRF_VAL(_DP, _AUXLOGGER, _REQUEST_TYPE, pkt->auxEvents)) {
4691         case NV_DP_AUXLOGGER_REQUEST_TYPE_AUXWR:
4692             req = "auxwr";
4693             break;
4694         case NV_DP_AUXLOGGER_REQUEST_TYPE_AUXRD:
4695             req = "auxrd";
4696             break;
4697         case NV_DP_AUXLOGGER_REQUEST_TYPE_MOTWR:
4698             // MOT is "middle of transaction", which is just another type of i2c
4699             // access.
4700             req = "motwr";
4701             break;
4702         case NV_DP_AUXLOGGER_REQUEST_TYPE_I2CWR:
4703             req = "i2cwr";
4704             break;
4705         case NV_DP_AUXLOGGER_REQUEST_TYPE_MOTRD:
4706             req = "motrd";
4707             break;
4708         case NV_DP_AUXLOGGER_REQUEST_TYPE_I2CRD:
4709             req = "i2crd";
4710             break;
4711         default:
4712             // Only log I2C and AUX transactions.
4713             return;
4714     }
4715 
4716     switch (DRF_VAL(_DP, _AUXLOGGER, _REPLY_TYPE, pkt->auxEvents)) {
4717         case NV_DP_AUXLOGGER_REPLY_TYPE_NULL:
4718             rep = "none";
4719             break;
4720         case NV_DP_AUXLOGGER_REPLY_TYPE_SB_ACK:
4721             rep = "sb_ack";
4722             break;
4723         case NV_DP_AUXLOGGER_REPLY_TYPE_RETRY:
4724             rep = "retry";
4725             break;
4726         case NV_DP_AUXLOGGER_REPLY_TYPE_TIMEOUT:
4727             rep = "timeout";
4728             break;
4729         case NV_DP_AUXLOGGER_REPLY_TYPE_DEFER:
4730             rep = "defer";
4731             break;
4732         case NV_DP_AUXLOGGER_REPLY_TYPE_DEFER_TO:
4733             rep = "defer_to";
4734             break;
4735         case NV_DP_AUXLOGGER_REPLY_TYPE_ACK:
4736             rep = "ack";
4737             break;
4738         case NV_DP_AUXLOGGER_REPLY_TYPE_ERROR:
4739             rep = "error";
4740             break;
4741         default:
4742         case NV_DP_AUXLOGGER_REPLY_TYPE_UNKNOWN:
4743             rep = "unknown";
4744             break;
4745     }
4746 
4747     for (i = 0; i < pkt->auxMessageReplySize; i++) {
4748         p += nvkms_snprintf(p, str + sizeof(str) - p, "%02x ",
4749                             pkt->auxPacket[i]);
4750     }
4751 
4752     nvAssert(p < str + sizeof(str));
4753     *p = '\0';
4754 
4755     nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
4756                  "%04u: port %u @ 0x%05x: [%10u] %s %2u, [%10u] %-8s %s",
4757                  pkt->auxCount, pkt->auxOutPort, pkt->auxPortAddress,
4758                  pkt->auxRequestTimeStamp, req,
4759                  pkt->auxMessageReqSize,
4760                  pkt->auxReplyTimeStamp, rep,
4761                  str);
4762 }
4763 
4764 /*!
4765  * This "attribute" queries the RM DisplayPort AUX channel log and dumps it to
4766  * the kernel log. It returns a value of TRUE if any RM AUX transactions were
4767  * logged, and FALSE otherwise.
4768  *
4769  * This attribute is intended to be queried in a loop as long as it reads TRUE.
4770  *
4771  * \return TRUE if the query succeeded (even if no events were logged).
4772  * \return FALSE if the query failed.
4773  */
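/*
 * A minimal polling sketch (hypothetical caller, not part of this file):
 *
 *     NvS64 logged = TRUE;
 *     while (logged && nvRmQueryDpAuxLog(pDispEvo, &logged)) {
 *         // Each successful call dumps any pending AUX log entries.
 *     }
 */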
4774 NvBool nvRmQueryDpAuxLog(NVDispEvoRec *pDispEvo, NvS64 *pValue)
4775 {
4776     NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *pParams =
4777         nvCalloc(sizeof(*pParams), 1);
4778     NvU32 status;
4779     int i;
4780     NvBool ret = FALSE;
4781 
4782     pDispEvo->dpAuxLoggingEnabled = TRUE;
4783     *pValue = FALSE;
4784 
4785     if (!pParams) {
4786         return FALSE;
4787     }
4788 
4789     pParams->subDeviceInstance = pDispEvo->displayOwner;
4790     pParams->dpAuxBufferReadSize = MAX_LOGS_PER_POLL;
4791 
4792     status = nvRmApiControl(nvEvoGlobal.clientHandle,
4793                             pDispEvo->pDevEvo->displayCommonHandle,
4794                             NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA,
4795                             pParams, sizeof(*pParams));
4796     if (status != NVOS_STATUS_SUCCESS) {
4797         goto done;
4798     }
4799 
4800     nvAssert(pParams->dpNumMessagesRead <= MAX_LOGS_PER_POLL);
4801     for (i = 0; i < pParams->dpNumMessagesRead; i++) {
4802         const DPAUXPACKET *pkt = &pParams->dpAuxBuffer[i];
4803 
4804         switch (DRF_VAL(_DP, _AUXLOGGER, _EVENT_TYPE, pkt->auxEvents)) {
4805             case NV_DP_AUXLOGGER_EVENT_TYPE_AUX:
4806                 LogAuxPacket(pDispEvo, pkt);
4807                 break;
4808             case NV_DP_AUXLOGGER_EVENT_TYPE_HOT_PLUG:
4809                 nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
4810                              "%04u: port %u [%10u] hotplug",
4811                              pkt->auxCount, pkt->auxOutPort,
4812                              pkt->auxRequestTimeStamp);
4813                 break;
4814             case NV_DP_AUXLOGGER_EVENT_TYPE_HOT_UNPLUG:
4815                 nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
4816                              "%04u: port %u [%10u] unplug",
4817                              pkt->auxCount, pkt->auxOutPort,
4818                              pkt->auxRequestTimeStamp);
4819                 break;
4820             case NV_DP_AUXLOGGER_EVENT_TYPE_IRQ:
4821                 nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
4822                              "%04u: port %u [%10u] irq",
4823                              pkt->auxCount, pkt->auxOutPort,
4824                              pkt->auxRequestTimeStamp);
4825                 break;
4826             default:
4827                 nvEvoLogDisp(pDispEvo, EVO_LOG_INFO,
4828                              "%04u: port %u [%10u] unknown event",
4829                              pkt->auxCount, pkt->auxOutPort,
4830                              pkt->auxRequestTimeStamp);
4831                 break;
4832         }
4833 
4834         *pValue = TRUE;
4835     }
4836 
4837     ret = TRUE;
4838 
4839 done:
4840     nvFree(pParams);
4841     return ret;
4842 }
4843 
4844 /*!
4845  * Return the GPU's current PTIMER, or 0 if the query fails.
4846  */
4847 NvU64 nvRmGetGpuTime(NVDevEvoPtr pDevEvo)
4848 {
4849     const NvU32 sd = 0;
4850     NV2080_CTRL_TIMER_GET_TIME_PARAMS params = { };
4851 
4852     NvU32 ret;
4853 
4854     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
4855                          pDevEvo->pSubDevices[sd]->handle,
4856                          NV2080_CTRL_CMD_TIMER_GET_TIME,
4857                          &params, sizeof(params));
4858 
4859     if (ret != NVOS_STATUS_SUCCESS) {
4860         nvEvoLogDebug(EVO_LOG_ERROR, "Failed to query GPU time, ret = %d", ret);
4861         return 0;
4862     }
4863 
4864     return params.time_nsec;
4865 }
4866 
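/*!
 * Allow or disallow GC6 entry by adjusting RM's GC6 blocker refcount on each
 * subdevice.  When GC6 is newly disallowed, the core and main layer channels
 * are re-initialized, since a prior GC6 entry may have invalidated display
 * channel state.
 *
 * \return      TRUE on success, FALSE if any refcount update fails.
 */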
4867 NvBool nvRmSetGc6Allowed(NVDevEvoPtr pDevEvo, NvBool allowed)
4868 {
4869     NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params = { };
4870     NvU32 sd;
4871 
4872     if (allowed == pDevEvo->gc6Allowed) {
4873         return TRUE;
4874     }
4875 
4876     params.action = allowed ? NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC :
4877                               NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC;
4878 
4879     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
4880         NvU32 ret = nvRmApiControl(
4881                         nvEvoGlobal.clientHandle,
4882                         pDevEvo->pSubDevices[sd]->handle,
4883                         NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT,
4884                         &params, sizeof(params));
4885         if (ret != NVOS_STATUS_SUCCESS) {
4886             // XXX This is catastrophic, is there a good way to unravel?
4887             nvEvoLogDevDebug(
4888                 pDevEvo, EVO_LOG_ERROR,
4889                 "Failed to modify GC6 blocker refcount, sd = %d, ret = %x",
4890                 sd, ret);
4891             return FALSE;
4892         }
4893     }
4894 
4895     pDevEvo->gc6Allowed = allowed;
4896 
4897     /*
4898      * If we are just now disallowing GC6, it's possible that we previously
4899      * entered GC6 and invalidated display channel state. Re-initialize it here
4900      * to ensure that future modesets are successful.
4901      */
4902     if (!allowed && pDevEvo->core) {
4903         NvU32 channelIdx;
4904 
4905         pDevEvo->hal->InitChannel(pDevEvo, pDevEvo->core);
4906         pDevEvo->coreInitMethodsPending = TRUE;
4907 
4908         for (channelIdx = 0; channelIdx < pDevEvo->numHeads; channelIdx++) {
4909             // XXX We should InitChannel() for all per-head channels when coming
4910             // out of GC6.
4911             pDevEvo->hal->InitChannel(
4912                 pDevEvo, pDevEvo->head[channelIdx].layer[NVKMS_MAIN_LAYER]);
4913         }
4914     }
4915 
4916     return TRUE;
4917 }
4918 
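/*
 * Book-keeping for an RG line 1 callback registered with RM: the RM object
 * handle, the ref_ptr handed to RM as the callback parameter, and the
 * disp/head the callback was registered on.
 */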
4919 typedef struct _NVRmRgLine1CallbackRec {
4920     NVRgLine1CallbackRec base;
4921     struct nvkms_ref_ptr *ref_ptr;
4922     NvU32 rmHandle;
4923     NVDispEvoRec *pDispEvo;
4924     NvU32 head;
4925 } NVRmRgLine1CallbackRec;
4926 
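/*
 * Timer callback scheduled by RGLine1InterruptCallback(); runs outside of
 * resman's context and invokes the client's RG line 1 callback.
 */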
4927 static void RGLine1ServiceInterrupt(void *dataPtr, NvU32 dataU32)
4928 {
4929      NVRmRgLine1CallbackRec *pRmCallback = (NVRmRgLine1CallbackRec*)dataPtr;
4930      pRmCallback->base.pCallbackProc(pRmCallback->pDispEvo, pRmCallback->head,
4931                                      &pRmCallback->base);
4932 }
4933 
4934 /*!
4935  * Receive RG line 1 interrupt notification from resman.
4936  *
4937  * This function is registered as the kernel callback function from resman when
4938  * the RG line 1 interrupt is generated.
4939  *
4940  * This function is called within resman's context, so we schedule a zero-delay
4941  * timer callback to process the swapgroup check and release without holding the
4942  * resman lock.
4943  */
4944 static void RGLine1InterruptCallback(NvU32 rgIntrLine, void *param1,
4945                                       NvBool bIsIrqlIsr /* unused */)
4946 {
4947     (void) nvkms_alloc_timer_with_ref_ptr(
4948         RGLine1ServiceInterrupt, /* callback */
4949         param1, /* argument (this is a ref_ptr to a NVRmRgLine1CallbackRec*) */
4950         0,  /* dataU32 */
4951         0); /* usec */
4952 }
4953 
4954 /*!
4955  * Register an RM callback function for the RG line 1 interrupt.
4956  *
4957  * \param[in]   pDispEvo          The display on which to allocate the callback
4958  *
4959  * \param[in]   head              The head on which to allocate the callback
4960  *
4961  * \param[in]   pCallbackProc     The callback function pointer to be registered
4962  *
4963  * \return      Pointer to callback object on success, NULL on failure. This same
4964  *              pointer must be used to unregister the callback.
4965  */
4966 NVRgLine1CallbackPtr
4967 nvRmAddRgLine1Callback(NVDispEvoRec *pDispEvo,
4968                        NvU32 head,
4969                        NVRgLine1CallbackProc pCallbackProc,
4970                        void *pUserData)
4971 {
4972     NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS rgLineParams = { 0 };
4973     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4974     NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4975     NvU32 ret;
4976     NVRmRgLine1CallbackRec *pRmCallback = nvCalloc(1, sizeof(*pRmCallback));
4977 
4978     if (pRmCallback == NULL) {
4979         goto failed;
4980     }
4981 
4982     pRmCallback->ref_ptr = nvkms_alloc_ref_ptr(pRmCallback);
4983     if (pRmCallback->ref_ptr == NULL) {
4984         goto failed;
4985     }
4986     pRmCallback->base.pCallbackProc = pCallbackProc;
4987     pRmCallback->base.pUserData = pUserData;
4988     pRmCallback->rmHandle = handle;
4989     pRmCallback->pDispEvo = pDispEvo;
4990     pRmCallback->head = head;
4991 
4992     rgLineParams.subDeviceInstance = pDispEvo->displayOwner;
4993     rgLineParams.head = head;
4994     rgLineParams.rgLineNum = 1;
4995     rgLineParams.pCallbkFn = RGLine1InterruptCallback;
4996     rgLineParams.pCallbkParams = pRmCallback->ref_ptr;
4997 
4998     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
4999                        pDevEvo->displayCommonHandle,
5000                        handle,
5001                        NV0092_RG_LINE_CALLBACK,
5002                        &rgLineParams);
5003 
5004     if (ret == NVOS_STATUS_SUCCESS) {
5005         return &pRmCallback->base;
5006     }
5007 
5008     nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
5009                      "Failed to enable RG line interrupt, ret: %d", ret);
5010     /* fall through */
5011 
5012 failed:
5013     if (pRmCallback != NULL) {
5014         nvkms_free_ref_ptr(pRmCallback->ref_ptr);
5015         nvFree(pRmCallback);
5016     }
5017 
5018     nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
5019 
5020     return NULL;
5021 }
5022 
5023 /*!
5024  * Unregister an RM callback function previously registered with
5025  * nvRmAddRgLine1Callback.
5026  *
5027  * \param[in]  pDispEvo    The display on which to unregister the
5028  *                         callback
5029  *
5030  * \param[in]  pCallback   Pointer to the previously allocated
5031  *                         callback object
5032  */
5033 void nvRmRemoveRgLine1Callback(const NVDispEvoRec *pDispEvo,
5034                                NVRgLine1CallbackPtr pCallback)
5035 {
5036     NVRmRgLine1CallbackRec *pRmCallback;
5037     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5038     NvU32 ret;
5039 
5040     if (pCallback == NULL) {
5041         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
5042                          "Failed to disable RG line interrupt, obj pointer NULL");
5043         return;
5044     }
5045     pRmCallback = nv_container_of(pCallback, NVRmRgLine1CallbackRec, base);
5046 
5047     ret = nvRmApiFree(nvEvoGlobal.clientHandle,
5048                       pDevEvo->displayCommonHandle,
5049                       pRmCallback->rmHandle);
5050 
5051     if (ret != NVOS_STATUS_SUCCESS) {
5052         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
5053                          "Failed to disable RG line interrupt, ret: %d", ret);
5054     }
5055 
5056     nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pRmCallback->rmHandle);
5057     nvkms_free_ref_ptr(pRmCallback->ref_ptr);
5058     nvFree(pRmCallback);
5059 }
5060 
5061 /*!
5062  * Register an RM callback function for the VBlank interrupt.
5063  *
5064  * \param[in]   pDispEvo          The display on which to allocate the callback
5065  *
5066  * \param[in]   head              The head on which to allocate the callback
5067  *
5068  * \param[in]   pCallback         The callback function pointer to be registered
5069  *
5070  * \return      Handle to callback object on success, 0 on failure. This same
5071  *              handle must be used to unregister the callback.
5072  */
5073 NvU32 nvRmAddVBlankCallback(
5074     const NVDispEvoRec *pDispEvo,
5075     NvU32 head,
5076     OSVBLANKCALLBACKPROC pCallback,
5077     void *pParam2)
5078 {
5079     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5080     const NvU32 sd = pDispEvo->displayOwner;
5081     NvU32 ret;
5082     NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
5083 
5084     NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS params = {
5085         .pProc       = pCallback,
5086         .LogicalHead = head,
5087         .pParm1      = pDispEvo->ref_ptr,
5088         .pParm2      = pParam2,
5089     };
5090 
5091     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
5092                        pDevEvo->pSubDevices[sd]->handle,
5093                        handle,
5094                        NV9010_VBLANK_CALLBACK,
5095                        &params);
5096 
5097     if (ret == NVOS_STATUS_SUCCESS) {
5098         return handle;
5099     } else {
5100         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
5101                          "Failed to enable VBlank callback, ret: %d", ret);
5102         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
5103         return 0;
5104     }
5105 }
5106 
5107 /*!
5108  * Unregister an RM callback function previously registered with
5109  * nvRmAddVBlankCallback.
5110  *
5111  * \param[in]  pDispEvo                 The display on which to unregister the
5112  *                                      callback
5113  *
5114  * \param[in]  callbackObjectHandle     Handle to the previously allocated
5115  *                                      callback object
5116  */
5117 void nvRmRemoveVBlankCallback(const NVDispEvoRec *pDispEvo,
5118                               NvU32 callbackObjectHandle)
5119 {
5120     const NvU32 sd = pDispEvo->displayOwner;
5121     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5122     NvU32 ret;
5123 
5124     if (callbackObjectHandle == 0) {
5125         // already removed
5126         return;
5127     }
5128 
5129     ret = nvRmApiFree(nvEvoGlobal.clientHandle,
5130                       pDevEvo->pSubDevices[sd]->handle,
5131                       callbackObjectHandle);
5132 
5133     if (ret != NVOS_STATUS_SUCCESS) {
5134         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
5135                          "Failed to disable VBlank callback, ret: %d", ret);
5136     }
5137 
5138     nvFreeUnixRmHandle(&pDevEvo->handleAllocator, callbackObjectHandle);
5139 }
5140 
5141 /*!
5142  * Initialize the dynamic display mux on supported systems.
5143  *
5144  * \param[in] pDpyEvo    The dpy on which to initialize the mux.
5145  */
5146 static void MuxInit(const NVDpyEvoRec *pDpyEvo)
5147 {
5148     NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS params = { 0 };
5149     NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
5150     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5151 
5152     NvU32 ret;
5153 
5154     params.subDeviceInstance = pDispEvo->displayOwner;
5155     params.displayId = nvDpyEvoGetConnectorId(pDpyEvo);
5156 
5157     if (pDpyEvo->internal) {
5158         /* Attempt to get the EDID from ACPI. This is required for internal
5159          * displays only, as the internal mux initialization requires data
5160          * from the internal panel's EDID, while the external mux can be
5161          * initialized in the absence of a display, in which case there is
5162          * obviously no EDID present. The EDID read is done via ACPI, in
5163          * order to accommodate mux initialization while the internal panel
5164          * is disconnected from the GPU. */
5165 
5166         /* Map with hard-coded data for systems known to support dynamic mux
5167          * switching. This is a poor-man's alternative to the WDDM driver's
5168          * CDisplayMgr::NVInitializeACPIToDeviceMaskMap() */
5169         NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS acpiMap = {
5170             .mapTable = {
5171                 {.acpiId = 0x8001a420, .displayId = 0x1000, .dodIndex = 0},
5172             }
5173         };
5174         NVEdidRec edid = { };
5175         NVParsedEdidEvoRec *pParsedEdid = NULL;
5176         NVEvoInfoStringRec infoString;
5177 
5178         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5179                              pDevEvo->displayCommonHandle,
5180                              NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING,
5181                              &acpiMap, sizeof(acpiMap));
5182 
5183         if (ret != NVOS_STATUS_SUCCESS) {
5184             nvEvoLogDebug(EVO_LOG_ERROR, "Failed to set ACPI ID map.");
5185             return;
5186         }
5187 
5188         nvInitInfoString(&infoString, NULL, 0);
5189 
5190         /* Retrieve the internal panel's EDID from ACPI */
5191         if (!nvDpyReadAndParseEdidEvo(pDpyEvo, NULL,
5192                                       NVKMS_EDID_READ_MODE_ACPI,
5193                                       &edid, &pParsedEdid,
5194                                       &infoString)) {
5195             /* EDID read is expected to fail on non-dynamic-mux systems. */
5196             goto edid_done;
5197         }
5198 
5199         if (edid.length == 0 || pParsedEdid == NULL || !pParsedEdid->valid) {
5200             goto edid_done;
5201         }
5202 
5203         params.manfId = pParsedEdid->info.manuf_id;
5204         params.productId = pParsedEdid->info.product_id;
5205 
5206 edid_done:
5207         nvFree(edid.buffer);
5208         nvFree(pParsedEdid);
5209 
5210         /* Internal mux initialization will fail without manfId/productId */
5211         if (!params.manfId || !params.productId) {
5212             return;
5213         }
5214     }
5215 
5216     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5217                          pDevEvo->displayCommonHandle,
5218                          NV0073_CTRL_CMD_DFP_INIT_MUX_DATA,
5219                          &params,
5220                          sizeof(params));
5221 
5222     if (ret == NVOS_STATUS_SUCCESS) {
5223         pDispEvo->muxDisplays = nvAddDpyIdToDpyIdList(pDpyEvo->id,
5224                                                       pDispEvo->muxDisplays);
5225     } else {
5226         nvEvoLogDebug(EVO_LOG_ERROR, "Failed to initialize mux on %s.",
5227                       pDpyEvo->name);
5228     }
5229 }
5230 
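/*
 * Query RM for the set of dpys on this disp that are behind a dynamic mux.
 */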
5231 static NVDpyIdList GetValidMuxDpys(NVDispEvoPtr pDispEvo)
5232 {
5233     NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS params = { 0 };
5234 
5235     params.subDeviceInstance = pDispEvo->displayOwner;
5236 
5237     nvRmApiControl(nvEvoGlobal.clientHandle,
5238                    pDispEvo->pDevEvo->displayCommonHandle,
5239                    NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX,
5240                    &params, sizeof(params));
5241 
5242     return nvNvU32ToDpyIdList(params.muxDisplayMask);
5243 }
5244 
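/*!
 * Initialize the dynamic mux for every mux-capable dpy on each disp of the
 * device.
 */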
5245 void nvRmMuxInit(NVDevEvoPtr pDevEvo)
5246 {
5247     NVDispEvoPtr pDispEvo;
5248     int i;
5249 
5250     FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
5251         NVDpyIdList validMuxDpys = GetValidMuxDpys(pDispEvo);
5252         NVDpyEvoPtr pDpyEvo;
5253 
5254         FOR_ALL_EVO_DPYS(pDpyEvo, validMuxDpys, pDispEvo) {
5255             MuxInit(pDpyEvo);
5256         }
5257     }
5258 }
5259 
5260 /*!
5261  * Perform mux pre-switch operations
5262  *
5263  * \param[in] pDpyEvo             The Dpy of the target mux
5264  * \param[in] state               The target mux state
5265  *
5266  * \return TRUE on success; FALSE on failure
5267  */
5268 NvBool nvRmMuxPre(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
5269 {
5270     NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS params = { 0 };
5271     NVDispEvoPtr pDispEvo;
5272     NVDevEvoPtr pDevEvo;
5273     NvU32 ret;
5274 
5275     pDispEvo = pDpyEvo->pDispEvo;
5276     pDevEvo = pDispEvo->pDevEvo;
5277 
5278     if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
5279         return FALSE;
5280     }
5281 
5282     params.subDeviceInstance = pDispEvo->displayOwner;
5283     params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
5284     params.flags = DRF_DEF(0073_CTRL_DFP, _DISP_MUX_FLAGS, _SR_ENTER_SKIP, _NO);
5285 
5286     if (state == MUX_STATE_DISCRETE) {
5287         params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
5288     } else if (state == MUX_STATE_INTEGRATED) {
5289         params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
5290     } else {
5291         return FALSE;
5292     }
5293 
5294     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5295                          pDevEvo->displayCommonHandle,
5296                          NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS,
5297                          &params, sizeof(params));
5298 
5299     nvEvoLogDebug(EVO_LOG_INFO, "RmMuxPre status %d", ret);
5300 
5301     return ret == NVOS_STATUS_SUCCESS;
5302 }
5303 
5304 /*!
5305  * Perform mux switch operation
5306  *
5307  * \param[in] pDpyEvo    The Dpy of the target mux
5308  * \param[in] state      The target mux state
5309  *
5310  * \return TRUE on success; FALSE on failure
5311  */
5312 NvBool nvRmMuxSwitch(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
5313 {
5314     NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS params = { 0 };
5315     NVDispEvoPtr pDispEvo;
5316     NVDevEvoPtr pDevEvo;
5317     NvU32 ret;
5318 
5319     pDispEvo = pDpyEvo->pDispEvo;
5320     pDevEvo = pDispEvo->pDevEvo;
5321 
5322     if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
5323         return FALSE;
5324     }
5325 
5326     params.subDeviceInstance = pDispEvo->displayOwner;
5327     params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
5328 
5329     if (state == MUX_STATE_DISCRETE) {
5330         params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
5331     } else if (state == MUX_STATE_INTEGRATED) {
5332         params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
5333     } else {
5334         return FALSE;
5335     }
5336 
5337     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5338                          pDevEvo->displayCommonHandle,
5339                          NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX,
5340                          &params, sizeof(params));
5341 
5342     nvEvoLogDebug(EVO_LOG_INFO, "RmMuxSwitch status %d", ret);
5343 
5344     /*
5345      * Force link training after waiting for the DP AUX link to settle.
5346      * The delay duration comes from DFP_MUX_AUX_SETTLE_DELAY_MS_DEFAULT
5347      * in drivers/resman/kernel/inc/dfpmux.h.
5348      */
5349     nvkms_usleep(100000);
5350 
5351     if (pDpyEvo->internal && state == MUX_STATE_DISCRETE) {
5352         nvAssert(nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo));
5353         nvDPNotifyShortPulse(pDpyEvo->pConnectorEvo->pDpLibConnector);
5354         nvDPFireExpiredTimers(pDevEvo);
5355     }
5356 
5357     return ret == NVOS_STATUS_SUCCESS;
5358 }
5359 
5360 /*!
5361  * Perform mux post-switch operations
5362  *
5363  * \param[in] pDpyEvo                The Dpy of the target mux
5364  * \param[in] state                  The target mux state
5365  *
5366  * \return TRUE on success; FALSE on failure
5367  */
5368 NvBool nvRmMuxPost(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
5369 {
5370     NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS params = { 0 };
5371     NVDispEvoPtr pDispEvo;
5372     NVDevEvoPtr pDevEvo;
5373     NvU32 ret;
5374 
5375     pDispEvo = pDpyEvo->pDispEvo;
5376     pDevEvo = pDispEvo->pDevEvo;
5377 
5378     if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
5379         return FALSE;
5380     }
5381 
5382     params.subDeviceInstance = pDispEvo->displayOwner;
5383     params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
5384     params.flags = DRF_DEF(0073_CTRL_DFP, _DISP_MUX_FLAGS, _SR_ENTER_SKIP, _NO);
5385 
5386     if (state == MUX_STATE_DISCRETE) {
5387         params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
5388     } else if (state == MUX_STATE_INTEGRATED) {
5389         params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
5390     } else {
5391         return FALSE;
5392     }
5393 
5394     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5395                          pDevEvo->displayCommonHandle,
5396                          NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS,
5397                          &params, sizeof(params));
5398 
5399     nvEvoLogDebug(EVO_LOG_INFO, "RmMuxPost status %d", ret);
5400 
5401     return ret == NVOS_STATUS_SUCCESS;
5402 }
5403 
5404 /*!
5405  * Query the current state of a dynamic mux
5406  *
5407  * \param[in] pDpyEvo    The Dpy of the target mux whose state is to be queried
5408  *
5409  * \return Mux state (either MUX_STATE_INTEGRATED or MUX_STATE_DISCRETE) on
5410  * success; MUX_STATE_UNKNOWN on failure.
5411  */
5412 NvMuxState nvRmMuxState(const NVDpyEvoRec *pDpyEvo)
5413 {
5414     NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS params = { 0 };
5415     NVDispEvoPtr pDispEvo;
5416     NVDevEvoPtr pDevEvo;
5417 
5418     pDispEvo = pDpyEvo->pDispEvo;
5419     pDevEvo = pDispEvo->pDevEvo;
5420 
5421     if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
5422         return MUX_STATE_UNKNOWN;
5423     }
5424 
5425     params.subDeviceInstance = pDispEvo->displayOwner;
5426     params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
5427 
5428     if (NVOS_STATUS_SUCCESS == nvRmApiControl(nvEvoGlobal.clientHandle,
5429                                     pDevEvo->displayCommonHandle,
5430                                     NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS,
5431                                     &params, sizeof(params))) {
5432         if (FLD_TEST_DRF(0073_CTRL_DFP, _DISP_MUX, _STATE, _INTEGRATED_GPU,
5433             params.muxStatus)) {
5434             return MUX_STATE_INTEGRATED;
5435         }
5436         if (FLD_TEST_DRF(0073_CTRL_DFP, _DISP_MUX, _STATE, _DISCRETE_GPU,
5437             params.muxStatus)) {
5438             return MUX_STATE_DISCRETE;
5439         }
5440     }
5441 
5442     return MUX_STATE_UNKNOWN;
5443 }
5444 
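/*!
 * Find an internal display that reports backlight brightness support and
 * register a backlight device for it.
 */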
5445 void nvRmRegisterBacklight(NVDispEvoRec *pDispEvo)
5446 {
5447     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
5448     NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS dispParams = { 0 };
5449     NvU32 displayMask, displayId;
5450     NvU32 brightness;
5451 
5452     nvAssert(pDispEvo->backlightDevice == NULL);
5453 
5454     dispParams.subDeviceInstance = pDispEvo->displayOwner;
5455 
5456     if (nvRmApiControl(nvEvoGlobal.clientHandle,
5457                        pDevEvo->displayCommonHandle,
5458                        NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS,
5459                        &dispParams, sizeof(dispParams)) != NV_OK) {
5460         return;
5461     }
5462 
5463     /* Find a display with a backlight */
5464     displayMask = dispParams.availableInternalDisplaysMask;
5465     for (; displayMask; displayMask &= ~LOWESTBIT(displayMask))
5466     {
5467         NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 };
5468         NV_STATUS status;
5469 
5470         displayId = LOWESTBIT(displayMask);
5471 
5472         params.subDeviceInstance = pDispEvo->displayOwner;
5473         params.displayId         = displayId;
5474         params.brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100;
5475 
5476         status = nvRmApiControl(nvEvoGlobal.clientHandle,
5477                                 pDevEvo->displayCommonHandle,
5478                                 NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
5479                                 &params, sizeof(params));
5480 
5481         if (status == NV_OK)
5482         {
5483             brightness = params.brightness;
5484             break;
5485         }
5486     }
5487 
5488     if (displayMask == 0)
5489     {
5490         /* No internal display has backlight */
5491         return;
5492     }
5493 
5494     pDispEvo->backlightDevice = nvkms_register_backlight(
5495         pDevEvo->pSubDevices[pDispEvo->displayOwner]->gpuId,
5496         displayId, pDispEvo,
5497         brightness);
5498 }
5499 
5500 void nvRmUnregisterBacklight(NVDispEvoRec *pDispEvo)
5501 {
5502     if (pDispEvo->backlightDevice != NULL) {
5503         nvkms_unregister_backlight(pDispEvo->backlightDevice);
5504     }
5505     pDispEvo->backlightDevice = NULL;
5506 }
5507 
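/*!
 * Allocate a surface descriptor for hMemory and bind it to the core channel
 * and to every allocated layer channel, so that the described surface can be
 * displayed from any head.
 *
 * \return      NVOS_STATUS_SUCCESS on success, or an RM error status on
 *              failure.
 */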
5508 NvU32 nvRmAllocAndBindSurfaceDescriptor(
5509     NVDevEvoPtr pDevEvo,
5510     NvU32 hMemory,
5511     const enum NvKmsSurfaceMemoryLayout layout,
5512     NvU64 limit,
5513     NVSurfaceDescriptor *pSurfaceDesc)
5514 {
5515     NVSurfaceDescriptor surfaceDesc;
5516     NvU32 flags = DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE);
5517     NvU32 head, layer;
5518     NvU32 ret;
5519 
5520     switch (layout) {
5521         case NvKmsSurfaceMemoryLayoutBlockLinear:
5522             flags |= DRF_DEF(OS03, _FLAGS, _PTE_KIND, _BL);
5523             break;
5524         case NvKmsSurfaceMemoryLayoutPitch:
5525             flags |= DRF_DEF(OS03, _FLAGS, _PTE_KIND, _PITCH);
5526             break;
5527     }
5528 
5529      /* Each surface to be displayed needs its own surface descriptor */
5530     nvAssert(pDevEvo->displayHandle != 0);
5531     nvAssert(pDevEvo->core);
5532     nvAssert(pDevEvo->core->pb.channel_handle);
5533     nvAssert(hMemory);
5534     nvAssert(limit);
5535 
5536     ret =
5537         pDevEvo->hal->AllocSurfaceDescriptor(pDevEvo, &surfaceDesc,
5538                                              hMemory, flags, limit);
5539 
5540     if (ret != NVOS_STATUS_SUCCESS) {
5541         return ret;
5542     }
5543 
5544     ret =
5545         pDevEvo->hal->BindSurfaceDescriptor(pDevEvo,
5546                                             pDevEvo->core,
5547                                             &surfaceDesc);
5548     if (ret != NVOS_STATUS_SUCCESS) {
5549         goto free_this_handle_and_fail;
5550     }
5551 
5552     for (head = 0; head < pDevEvo->numHeads; head++) {
5553         for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
5554             if (pDevEvo->head[head].layer[layer]) {
5555                  nvAssert(pDevEvo->head[head].layer[layer]->pb.channel_handle);
5556 
5557                  ret = pDevEvo->hal->BindSurfaceDescriptor(pDevEvo,
5558                          pDevEvo->head[head].layer[layer],
5559                          &surfaceDesc);
5560                  if (ret != NVOS_STATUS_SUCCESS) {
5561                      goto free_this_handle_and_fail;
5562                  }
5563             }
5564         }
5565     }
5566 
5567     *pSurfaceDesc = surfaceDesc;
5568 
5569     return NVOS_STATUS_SUCCESS;
5570 
5571 free_this_handle_and_fail:
5572     pDevEvo->hal->FreeSurfaceDescriptor(pDevEvo,
5573                                         nvEvoGlobal.clientHandle,
5574                                         &surfaceDesc);
5575     return ret;
5576 }
5577