/*
 * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvkms-types.h"

#include "nvkms-evo-states.h"
#include "dp/nvdp-connector.h"
#include "dp/nvdp-device.h"
#include "nvkms-console-restore.h"
#include "nvkms-rm.h"
#include "nvkms-dpy.h"
#include "nvkms-cursor.h"
#include "nvkms-hal.h"
#include "nvkms-hdmi.h"
#include "nvkms-modepool.h"
#include "nvkms-evo.h"
#include "nvkms-flip.h"
#include "nvkms-hw-flip.h"
#include "nvkms-dma.h"
#include "nvkms-framelock.h"
#include "nvkms-utils.h"
#include "nvkms-lut.h"
#include "nvkms-modeset.h"
#include "nvkms-prealloc.h"
#include "nvkms-rmapi.h"
#include "nvkms-surface.h"
#include "nvkms-headsurface.h"
#include "nvkms-difr.h"
#include "nvkms-vrr.h"
#include "nvkms-ioctl.h"

#include "nvctassert.h"

#include <ctrl/ctrl0073/ctrl0073dfp.h> // NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS
#include <ctrl/ctrl0073/ctrl0073system.h> // NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH
#include <ctrl/ctrl0080/ctrl0080gpu.h> // NV0080_CTRL_CMD_GPU_*
#include <ctrl/ctrl0080/ctrl0080unix.h> // NV0080_CTRL_OS_UNIX_VT_SWITCH_*
#include <ctrl/ctrl30f1.h> // NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_*
#include <ctrl/ctrl5070/ctrl5070rg.h> // NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS
#include <ctrl/ctrl5070/ctrl5070system.h> // NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2
#include <ctrl/ctrl5070/ctrl5070or.h> // NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE
#include <ctrl/ctrl0073/ctrl0073dp.h> // NV0073_CTRL_DP_CTRL

#include "nvkms.h"
#include "nvkms-private.h"
#include "nvos.h"

#include "displayport/dpcd.h"

#define EVO_RASTER_LOCK     1
#define EVO_FLIP_LOCK       2

#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_HEAD                     7:0
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT                 8:8
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT_DISABLE           0
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT_ENABLE            1
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT               9:9
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT_DISABLE         0
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT_ENABLE          1
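
/*
 * Illustrative sketch of how these fields are meant to be used (an
 * assumption based on the standard DRF helpers, not code from this file):
 * the head index and the two LUT enables are packed into the single NvU32
 * payload of a deferred LUT-update timer, e.g.
 *
 *     NvU32 data =
 *         DRF_NUM(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, apiHead) |
 *         DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _BASE_LUT, _ENABLE) |
 *         DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _OUTPUT_LUT, _ENABLE);
 *
 * and the timer callback would unpack it with DRF_VAL()/FLD_TEST_DRF().
 */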

/*
 * This struct is used to describe a single set of GPUs to lock together by
 * GetRasterLockGroups().
 */
typedef struct _NVEvoRasterLockGroup {
    NvU32 numDisps;
    NVDispEvoPtr pDispEvoOrder[NVKMS_MAX_SUBDEVICES];
} RasterLockGroup;

/*
 * These are used to hold additional state for each DispEvo during building
 * of RasterLockGroups.
 */
typedef struct
{
    NVDispEvoPtr pDispEvo;
    NvU32 gpuId;
    RasterLockGroup *pRasterLockGroup;
} DispEntry;

typedef struct
{
    /* Array of DispEvos and their assigned RasterLockGroups. */
    NvU32 numDisps;
    DispEntry disps[NVKMS_MAX_SUBDEVICES];
} DispEvoList;

struct _NVLockGroup {
    RasterLockGroup rasterLockGroup;
    NvBool flipLockEnabled;
};

static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, NvU32 head,
                                  NvU16 x, NvU16 y,
                                  NVEvoUpdateState *updateState);
static void GetRasterLockPin(NVDispEvoPtr pDispEvo0, NvU32 head0,
                             NVDispEvoPtr pDispEvo1, NvU32 head1,
                             NVEvoLockPin *serverPin, NVEvoLockPin *clientPin);
static NvBool EvoWaitForLock(const NVDevEvoRec *pDevEvo, const NvU32 sd,
                             const NvU32 head, const NvU32 type,
                             NvU64 *pStartTime);
static void EvoUpdateHeadParams(const NVDispEvoRec *pDispEvo, NvU32 head,
                                NVEvoUpdateState *updateState);

static void SetRefClk(NVDevEvoPtr pDevEvo,
                      NvU32 sd, NvU32 head, NvBool external,
                      NVEvoUpdateState *updateState);
static NvBool ApplyLockActionIfPossible(NVDispEvoPtr pDispEvo,
                                        NVEvoSubDevPtr pEvoSubDev,
                                        NVEvoLockAction action);
static void FinishModesetOneDev(NVDevEvoRec *pDevEvo);
static void FinishModesetOneGroup(RasterLockGroup *pRasterLockGroup);
static void EnableFlipLockIfRequested(NVLockGroup *pLockGroup);

static void SyncEvoLockState(void);
static void UpdateEvoLockState(void);

static void ScheduleLutUpdate(NVDispEvoRec *pDispEvo,
                              const NvU32 apiHead, const NvU32 data,
                              const NvU64 usec);

static NvBool DowngradeColorBpc(
    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
    enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
    enum NvKmsDpyAttributeColorRangeValue *pColorRange);

NVEvoGlobal nvEvoGlobal = {
    .clientHandle = 0,
    .frameLockList = NV_LIST_INIT(&nvEvoGlobal.frameLockList),
    .devList = NV_LIST_INIT(&nvEvoGlobal.devList),
#if defined(DEBUG)
    .debugMemoryAllocationList =
        NV_LIST_INIT(&nvEvoGlobal.debugMemoryAllocationList),
#endif /* DEBUG */
};

static RasterLockGroup *globalRasterLockGroups = NULL;
static NvU32 numGlobalRasterLockGroups = 0;

/*
 * Keep track of groups of HW heads which the modeset owner has requested to be
 * fliplocked together.  All of the heads specified here are guaranteed to be
 * active.  A given head can only be in one group at a time.  Fliplock is not
 * guaranteed to be enabled in the hardware for these groups.
 */
typedef struct _FlipLockRequestedGroup {
    struct {
        NVDispEvoPtr pDispEvo;
        NvU32 flipLockHeads;
    } disp[NV_MAX_SUBDEVICES];

    NVListRec listEntry;
} FlipLockRequestedGroup;

static NVListRec requestedFlipLockGroups =
    NV_LIST_INIT(&requestedFlipLockGroups);
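
/*
 * A minimal sketch of walking the requested fliplock groups (assuming the
 * nvkms list helpers; illustrative only):
 *
 *     FlipLockRequestedGroup *pRequestGroup;
 *     nvListForEachEntry(pRequestGroup, &requestedFlipLockGroups, listEntry) {
 *         // examine pRequestGroup->disp[sd].flipLockHeads for each disp
 *     }
 */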

/*
 * The dummy infoString should be used in paths that take an
 * NVEvoInfoStringPtr where we don't need to log to a
 * string.  By setting the 's' field to NULL, nothing will be printed
 * to the infoString buffer.
 */
NVEvoInfoStringRec dummyInfoString = {
    .length = 0,
    .totalLength = 0,
    .s = NULL,
};
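
/*
 * For example (illustrative), a caller that has no use for the logged text
 * can pass &dummyInfoString wherever an NVEvoInfoStringPtr is required:
 *
 *     nvEvoLogInfoString(&dummyInfoString, "this text is discarded");
 */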

/*!
 * Return the NVDevEvoPtr, if any, that matches deviceId.
 */
NVDevEvoPtr nvFindDevEvoByDeviceId(NvU32 deviceId)
{
    NVDevEvoPtr pDevEvo;

    FOR_ALL_EVO_DEVS(pDevEvo) {
        if (pDevEvo->usesTegraDevice &&
            (deviceId == NVKMS_DEVICE_ID_TEGRA)) {
            return pDevEvo;
        } else if (pDevEvo->deviceId == deviceId) {
            return pDevEvo;
        }
    };

    return NULL;
}

/*!
 * Find the first unused gpuLogIndex.
 */
NvU8 nvGetGpuLogIndex(void)
{
    NVDevEvoPtr pDevEvo;
    NvU8 gpuLogIndex = 0;

 tryAgain:
    FOR_ALL_EVO_DEVS(pDevEvo) {
        NvU32 sd;
        for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
            if (pDevEvo->pSubDevices[sd] == NULL) {
                continue;
            }
            if (gpuLogIndex == pDevEvo->pSubDevices[sd]->gpuLogIndex) {
                gpuLogIndex++;
                if (gpuLogIndex == 0xFF) {
                    nvAssert(!"Too many GPUs");
                    return NV_INVALID_GPU_LOG_INDEX;
                }
                goto tryAgain;
            }
        }
    }

    return gpuLogIndex;
}

/*!
 * Return whether there are active heads on this pDispEvo.
 */
static NvBool HasActiveHeads(NVDispEvoPtr pDispEvo)
{
    return nvGetActiveHeadMask(pDispEvo) != 0;
}

static void BlankHeadEvo(NVDispEvoPtr pDispEvo, const NvU32 head,
                         NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    struct NvKmsCompositionParams emptyCursorCompParams =
        nvDefaultCursorCompositionParams(pDevEvo);

    /*
     * If the core channel surface is supported, ->SetSurface() disables the
     * LUT along with the core channel surface.  Otherwise, the LUT needs to
     * be disabled explicitly.
     */
    if (!pDevEvo->hal->caps.supportsCoreChannelSurface) {
        pDevEvo->hal->SetLUTContextDma(pDispEvo,
                                       head,
                                       NULL /* pSurfEvo */,
                                       FALSE /* baseLutEnabled */,
                                       FALSE /* outputLutEnabled */,
                                       updateState,
                                       pHeadState->bypassComposition);
    }

    nvPushEvoSubDevMaskDisp(pDispEvo);

    pDevEvo->hal->SetCursorImage(pDevEvo,
                                 head,
                                 NULL /* pSurfaceEvo */,
                                 updateState,
                                 &emptyCursorCompParams);

    {
        NVFlipChannelEvoHwState hwState = { { 0 } };
        NvU32 layer;

        for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
            pDevEvo->hal->Flip(pDevEvo,
                               pDevEvo->head[head].layer[layer],
                               &hwState,
                               updateState,
                               FALSE /* bypassComposition */);
        }
    }

    nvPopEvoSubDevMask(pDevEvo);
}

void nvEvoDetachConnector(NVConnectorEvoRec *pConnectorEvo, const NvU32 head,
                          NVEvoModesetUpdateState *pModesetUpdateState)
{
    NVEvoUpdateState *updateState = &pModesetUpdateState->updateState;
    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NvU32 orIndex;

    for (orIndex = 0;
            orIndex < ARRAY_LEN(pConnectorEvo->or.ownerHeadMask); orIndex++) {
        if ((pConnectorEvo->or.ownerHeadMask[orIndex] & NVBIT(head)) != 0x0) {
            break;
        }
    }

    if (orIndex >= ARRAY_LEN(pConnectorEvo->or.ownerHeadMask)) {
        nvAssert(!"Attached OR not found");
        return;
    }

    pConnectorEvo->or.ownerHeadMask[orIndex] &= ~NVBIT(head);

    /* Disable the palette, cursor, and ISO ctxDma on this head. */
    BlankHeadEvo(pDispEvo, head, updateState);

    // Only tear down the actual output for SLI primary.
    nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);

    pDevEvo->hal->ORSetControl(pDevEvo,
                               pConnectorEvo,
                               pTimings->protocol,
                               orIndex,
                               pConnectorEvo->or.ownerHeadMask[orIndex],
                               updateState);

    /*
     * Tell RM that no DisplayID is associated with this head anymore.
     */
    pDevEvo->hal->HeadSetDisplayId(pDevEvo, head, 0x0, updateState);

    nvPopEvoSubDevMask(pDevEvo);

    pModesetUpdateState->connectorIds =
        nvAddDpyIdToDpyIdList(pHeadState->pConnectorEvo->displayId,
                              pModesetUpdateState->connectorIds);
}

static
NvU32 GetSorIndexToAttachConnector(const NVConnectorEvoRec *pConnectorEvo,
                                   const NvBool isPrimaryHead)
{
    NvU32 orIndex = NV_INVALID_OR;

    nvAssert(isPrimaryHead ||
                (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR));

    if (isPrimaryHead ||
            (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR)) {
        orIndex = pConnectorEvo->or.primary;
    } else {
        NvU32 i;

        FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.secondaryMask) {
            if (pConnectorEvo->or.ownerHeadMask[i] == 0x0) {
                orIndex = i;
                break;
            }
        } FOR_EACH_INDEX_IN_MASK_END;
    }

    return orIndex;
}

void nvEvoAttachConnector(NVConnectorEvoRec *pConnectorEvo,
                          const NvU32 head,
                          const NvU32 isPrimaryHead,
                          NVDPLibModesetStatePtr pDpLibModesetState,
                          NVEvoModesetUpdateState *pModesetUpdateState)
{
    NVEvoUpdateState *updateState = &pModesetUpdateState->updateState;
    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NvU32 orIndex =
        GetSorIndexToAttachConnector(pConnectorEvo, isPrimaryHead);
    NvU32 i;

    nvAssert(orIndex != NV_INVALID_OR);
    nvAssert(!(pConnectorEvo->or.ownerHeadMask[orIndex] & NVBIT(head)));
    nvAssert(pHeadState->activeRmId != 0);

    FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.ownerHeadMask[orIndex]) {
        nvAssert(pTimings->protocol ==
                 pDispEvo->headState[i].timings.protocol);
    } FOR_EACH_INDEX_IN_MASK_END;

    pConnectorEvo->or.ownerHeadMask[orIndex] |= NVBIT(head);

    // Only set up the actual output for SLI primary.
    nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);

    pDevEvo->hal->ORSetControl(pDevEvo,
                               pConnectorEvo,
                               pTimings->protocol,
                               orIndex,
                               pConnectorEvo->or.ownerHeadMask[orIndex],
                               updateState);


    /* Tell RM which DisplayID is associated with the head. */
    pDevEvo->hal->HeadSetDisplayId(pDevEvo,
                                   head, pHeadState->activeRmId,
                                   updateState);

    nvPopEvoSubDevMask(pDevEvo);

    pModesetUpdateState->connectorIds =
        nvAddDpyIdToDpyIdList(pConnectorEvo->displayId,
                              pModesetUpdateState->connectorIds);
    pModesetUpdateState->pDpLibModesetState[head] = pDpLibModesetState;
}

void nvSetViewPortPointInEvo(NVDispEvoPtr pDispEvo,
                             const NvU32 head,
                             const NvU16 x,
                             NvU16 y,
                             NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    NVEvoSubDevHeadStateRec *pSdHeadState =
        &pDevEvo->gpus[pDispEvo->displayOwner].headState[head];

    pSdHeadState->viewPortPointIn.x = x;
    pSdHeadState->viewPortPointIn.y = y;

    EvoSetViewportPointIn(pDispEvo, head, x, y, updateState);
}

//
// Set the Update method, which causes all the other methods in the pushbuffer
// to take effect.
//
static void EvoUpdateAndKickOffWithNotifier(
    const NVDispEvoRec *pDispEvo,
    NvBool notify,
    NvBool sync, int notifier,
    NVEvoUpdateState *updateState,
    NvBool releaseElv)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    // Calling code should reject operations that send updates while the console
    // is active.
    nvAssert(!pDevEvo->coreInitMethodsPending);

    // It doesn't make sense to request sync without requesting a notifier.
    nvAssert(!sync || notify);

    if (notify) {
        // Clear the completion notifier.
        pDevEvo->hal->InitCompNotifier(pDispEvo, notifier);
    }

    nvPushEvoSubDevMaskDisp(pDispEvo);
    pDevEvo->hal->SetNotifier(pDevEvo, notify, sync, notifier,
                              updateState);
    pDevEvo->hal->Update(pDevEvo, updateState, releaseElv);
    nvPopEvoSubDevMask(pDevEvo);

    // Wait for completion.
    if (sync) {
        pDevEvo->hal->WaitForCompNotifier(pDispEvo, notifier);
    }

    if (notify) {
        const NVDispEvoRec *pDispEvoTmp;
        NVEvoUpdateState coreUpdateState = { };
        NvU32 sd;

        // To work around HW bug 1945716 and to prevent subsequent core updates
        // from triggering unwanted notifier writes, set the core channel
        // completion notifier control and context DMA disables when
        // notification is not requested.

        nvPushEvoSubDevMaskDisp(pDispEvo);
        pDevEvo->hal->SetNotifier(pDevEvo,
                                  FALSE /* notify */,
                                  FALSE /* awaken */,
                                  0     /* notifier */,
                                  &coreUpdateState);
        nvPopEvoSubDevMask(pDevEvo);

        // SetCoreNotifier is only expected to push core channel methods.
        FOR_ALL_EVO_DISPLAYS(pDispEvoTmp, sd, pDevEvo) {
            if (pDispEvoTmp == pDispEvo) {
                nvAssert(coreUpdateState.subdev[sd].channelMask ==
                         DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE));
            } else {
                nvAssert(coreUpdateState.subdev[sd].channelMask == 0x0);
            }
        }

        // We don't really need to kick off here, but might as well to keep the
        // state cache up to date.  Note that we intentionally don't use
        // pDevEvo->hal->Update since we don't want another Update.
        nvDmaKickoffEvo(pDevEvo->core);
    }

    return;
}

void nvEvoUpdateAndKickOff(const NVDispEvoRec *pDispEvo, NvBool sync,
                           NVEvoUpdateState *updateState, NvBool releaseElv)
{
    EvoUpdateAndKickOffWithNotifier(pDispEvo, sync, sync, 0, updateState,
                                    releaseElv);
}

void nvDoIMPUpdateEvo(NVDispEvoPtr pDispEvo,
                      NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    // IMP pre-modeset
    pDevEvo->hal->PrePostIMP(pDispEvo, TRUE /* isPre */);

    // Do the update
    nvEvoUpdateAndKickOff(pDispEvo, TRUE, updateState, TRUE /* releaseElv */);

    // IMP post-modeset
    pDevEvo->hal->PrePostIMP(pDispEvo, FALSE /* isPre */);
}

void nvEvoFlipUpdate(NVDispEvoPtr pDispEvo,
                     NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    int notifier = -1;

    if (nvEvoLUTNotifiersNeedCommit(pDispEvo)) {
        notifier = nvEvoCommitLUTNotifiers(pDispEvo);
    }

    if (notifier >= 0) {
        EvoUpdateAndKickOffWithNotifier(pDispEvo,
                                        TRUE /* notify */,
                                        FALSE /* sync */,
                                        notifier,
                                        updateState,
                                        TRUE /* releaseElv */);
    } else {
        pDevEvo->hal->Update(pDevEvo, updateState, TRUE /* releaseElv */);
    }
}

/*!
 * Tell RM not to expect anything other than a stall lock change during the next
 * update.
 */
void nvEvoArmLightweightSupervisor(NVDispEvoPtr pDispEvo,
                                   const NvU32 head,
                                   NvBool isVrr,
                                   NvBool enable)
{
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS params = { };
    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;

    if (!nvHeadIsActive(pDispEvo, head)) {
        return;
    }

    nvAssert(!pTimings->interlaced && !pTimings->doubleScan);

    params.subDeviceInstance = pDispEvo->displayOwner;
    params.displayId = pHeadState->activeRmId;
    params.bArmLWSV = enable;
    params.bVrrState = isVrr;
    params.vActive = nvEvoVisibleHeight(pTimings);
    params.vfp = pTimings->rasterSize.y -
                 pTimings->rasterBlankStart.y;

    if (nvRmApiControl(nvEvoGlobal.clientHandle,
                       pDispEvo->pDevEvo->displayCommonHandle,
                       NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR,
                       &params, sizeof(params))
            != NVOS_STATUS_SUCCESS) {
        nvAssert(!"ARM_LIGHTWEIGHT_SUPERVISOR failed");
    }
}

/*
 * Convert from NVHwModeTimingsEvoPtr to NvModeTimingsPtr.
 *
 * Note that converting from NvModeTimingsPtr to
 * NVHwModeTimingsEvoPtr (via
 * ConstructHwModeTimingsFromNvModeTimings()) and converting back from
 * NVHwModeTimingsEvoPtr to NvModeTimingsPtr (via
 * nvConstructNvModeTimingsFromHwModeTimings()) can lose precision in
 * the case of interlaced modes due to the division by 2.  This
 * function should only be used for reporting purposes.
 */

void
nvConstructNvModeTimingsFromHwModeTimings(const NVHwModeTimingsEvo *pTimings,
                                          NvModeTimingsPtr pModeTimings)
{
    NvU32 rasterBlankEndY, rasterSyncEndY;

    if (!pTimings || !pModeTimings) {
        nvAssert(!"Null params");
        return;
    }

    pModeTimings->pixelClockHz  = KHzToHz(pTimings->pixelClock);
    pModeTimings->hVisible      = nvEvoVisibleWidth(pTimings);
    pModeTimings->hSyncStart    = pTimings->rasterSize.x -
                                  pTimings->rasterBlankEnd.x - 1;
    pModeTimings->hSyncEnd      = pTimings->rasterSize.x -
                                  pTimings->rasterBlankEnd.x +
                                  pTimings->rasterSyncEnd.x;
    pModeTimings->hTotal        = pTimings->rasterSize.x;
    pModeTimings->vVisible      = nvEvoVisibleHeight(pTimings);
    rasterBlankEndY             = pTimings->rasterBlankEnd.y + 1;
    rasterSyncEndY              = pTimings->rasterSyncEnd.y + 1;

    if (pTimings->interlaced) {
        rasterBlankEndY *= 2;
        rasterSyncEndY *= 2;
    }

    /*
     * The real pixel clock and width values for modes using YUV 420 emulation
     * are half of the incoming values parsed from the EDID. This conversion is
     * performed here, so NvModeTimings will have the user-visible (full width)
     * values, and NVHwModeTimingsEvo will have the real (half width) values.
     */
    if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) {
        pModeTimings->pixelClockHz *= 2;
        pModeTimings->hVisible *= 2;
        pModeTimings->hSyncStart *= 2;
        pModeTimings->hSyncEnd *= 2;
        pModeTimings->hTotal *= 2;
    }

    pModeTimings->vSyncStart    = pTimings->rasterSize.y - rasterBlankEndY;
    pModeTimings->vSyncEnd      = pTimings->rasterSize.y - rasterBlankEndY +
                                  rasterSyncEndY;
    pModeTimings->vTotal        = pTimings->rasterSize.y;
    pModeTimings->interlaced    = pTimings->interlaced;
    pModeTimings->doubleScan    = pTimings->doubleScan;
    pModeTimings->hSyncNeg      = pTimings->hSyncPol;
    pModeTimings->hSyncPos      = !pTimings->hSyncPol;
    pModeTimings->vSyncNeg      = pTimings->vSyncPol;
    pModeTimings->vSyncPos      = !pTimings->vSyncPol;
    pModeTimings->RRx1k         = (pModeTimings->pixelClockHz /
                                   (pModeTimings->hTotal *
                                    pModeTimings->vTotal));

    if (pModeTimings->doubleScan) {
        pModeTimings->vVisible /= 2;
        pModeTimings->vSyncStart /= 2;
        pModeTimings->vSyncEnd /= 2;
        pModeTimings->vTotal /= 2;
    }

    pModeTimings->hdmi3D = pTimings->hdmi3D;
    pModeTimings->yuv420Mode = pTimings->yuv420Mode;
}
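
/*
 * A worked example of the horizontal conversion above, using a standard
 * CEA-861 1080p mode (illustrative only): with rasterSize.x = 2200,
 * rasterBlankEnd.x = 191, and rasterSyncEnd.x = 43, the code computes
 * hSyncStart = 2200 - 191 - 1 = 2008, hSyncEnd = 2200 - 191 + 43 = 2052,
 * and hTotal = 2200, i.e. a 1920-pixel active width with an 88-pixel
 * front porch and a 44-pixel sync pulse.
 */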



/*
 * Tweak pTimings to be compatible with gsync.
 */

static void TweakTimingsForGsync(const NVDpyEvoRec *pDpyEvo,
                                 NVHwModeTimingsEvoPtr pTimings,
                                 NVEvoInfoStringPtr pInfoString,
                                 const enum NvKmsStereoMode stereo)
{
    NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS gsyncOptTimingParams = { 0 };
    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
    NvModeTimings modeTimings;
    NvU32 ret;

    /*
     * if 3D Vision Stereo is enabled, do not actually
     * tweak the modetimings; WAR for bug 692266
     */

    if (nvIs3DVisionStereoEvo(stereo)) {

        nvEvoLogInfoString(pInfoString,
                           "Not adjusting mode timings of %s for Quadro Sync "
                           "compatibility since 3D Vision Stereo is enabled.",
                           pDpyEvo->name);
        return;
    }

    gsyncOptTimingParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);

    if (pDpyEvo->pConnectorEvo->legacyType ==
        NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) {

        gsyncOptTimingParams.output =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_SOR;
        gsyncOptTimingParams.adjust =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_DFP;

    } else if (pDpyEvo->pConnectorEvo->legacyType ==
               NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) {

        gsyncOptTimingParams.output =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_DAC;
        gsyncOptTimingParams.adjust =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_CRT;
    }

    gsyncOptTimingParams.pixelClockHz = KHzToHz(pTimings->pixelClock);

    if (pTimings->interlaced) {
        gsyncOptTimingParams.structure =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED;
    } else {
        gsyncOptTimingParams.structure =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_PROGRESSIVE;
    }

    gsyncOptTimingParams.hDeltaStep =
        NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_STEP_USE_DEFAULTS;
    gsyncOptTimingParams.vDeltaStep =
        NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_STEP_USE_DEFAULTS;
    gsyncOptTimingParams.hDeltaMax =
        NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_MAX_USE_DEFAULTS;
    gsyncOptTimingParams.vDeltaMax =
        NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_MAX_USE_DEFAULTS;

    gsyncOptTimingParams.hSyncEnd       = pTimings->rasterSyncEnd.x + 1;
    gsyncOptTimingParams.hBlankEnd      = pTimings->rasterBlankEnd.x + 1;
    gsyncOptTimingParams.hBlankStart    = pTimings->rasterBlankStart.x + 1;
    gsyncOptTimingParams.hTotal         = pTimings->rasterSize.x;

    gsyncOptTimingParams.vSyncEnd       = pTimings->rasterSyncEnd.y + 1;
    gsyncOptTimingParams.vBlankEnd      = pTimings->rasterBlankEnd.y + 1;
    gsyncOptTimingParams.vBlankStart    = pTimings->rasterBlankStart.y + 1;
    gsyncOptTimingParams.vTotal         = pTimings->rasterSize.y;

    gsyncOptTimingParams.vInterlacedBlankEnd = pTimings->rasterVertBlank2End;
    gsyncOptTimingParams.vInterlacedBlankStart =
        pTimings->rasterVertBlank2Start;

    switch (pTimings->protocol) {
        case NVKMS_PROTOCOL_DAC_RGB:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_DAC_RGB_CRT;
            break;
        case NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC:
            nvAssert(!"GSYNC_GET_OPTIMIZED_TIMING doesn't handle external TMDS.");
            // fallthrough
        case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_A;
            break;
        case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_B;
            break;
        case NVKMS_PROTOCOL_SOR_DUAL_TMDS:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DUAL_TMDS;
            break;
        case NVKMS_PROTOCOL_SOR_DP_A:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_A;
            break;
        case NVKMS_PROTOCOL_SOR_DP_B:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_B;
            break;
        case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_LVDS_CUSTOM;
            break;
        case NVKMS_PROTOCOL_SOR_HDMI_FRL:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_HDMI_FRL;
            break;
        case NVKMS_PROTOCOL_DSI:
            nvAssert(!"GSYNC_GET_OPTIMIZED_TIMING doesn't handle DSI.");
            return;
    }

    nvEvoLogInfoString(pInfoString,
            "Adjusting Mode Timings for Quadro Sync Compatibility");

    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
                         pDispEvo->pFrameLockEvo->device,
                         NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING,
                         &gsyncOptTimingParams,
                         sizeof(gsyncOptTimingParams));

    if (ret != NVOS_STATUS_SUCCESS) {
        nvAssert(!"Failed to convert to Quadro Sync safe timing");
        /* do not apply the timings returned by RM if the call failed */
        return;
    } else if (!gsyncOptTimingParams.bOptimized) {
        nvEvoLogInfoString(pInfoString, " Timings Unchanged.");
        return;
    }

    nvConstructNvModeTimingsFromHwModeTimings(pTimings, &modeTimings);

    nvEvoLogInfoString(pInfoString, " Old Timings:");
    nvEvoLogModeValidationModeTimings(pInfoString, &modeTimings);

    pTimings->rasterSyncEnd.x           = gsyncOptTimingParams.hSyncEnd - 1;
    pTimings->rasterSyncEnd.y           = gsyncOptTimingParams.vSyncEnd - 1;
    pTimings->rasterBlankEnd.x          = gsyncOptTimingParams.hBlankEnd - 1;
    pTimings->rasterBlankEnd.y          = gsyncOptTimingParams.vBlankEnd - 1;
    pTimings->rasterBlankStart.x        = gsyncOptTimingParams.hBlankStart - 1;
    pTimings->rasterBlankStart.y        = gsyncOptTimingParams.vBlankStart - 1;
    pTimings->rasterSize.x              = gsyncOptTimingParams.hTotal;
    pTimings->rasterSize.y              = gsyncOptTimingParams.vTotal;

    if (gsyncOptTimingParams.structure ==
        NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED) {
        pTimings->rasterVertBlank2Start =
            gsyncOptTimingParams.vInterlacedBlankStart;
        pTimings->rasterVertBlank2End =
            gsyncOptTimingParams.vInterlacedBlankEnd;
    }

    pTimings->pixelClock = HzToKHz(gsyncOptTimingParams.pixelClockHz); // Hz to KHz

    nvConstructNvModeTimingsFromHwModeTimings(pTimings, &modeTimings);

    nvEvoLogInfoString(pInfoString, " New Timings:");
    nvEvoLogModeValidationModeTimings(pInfoString, &modeTimings);
}

static NvBool HeadStateIsHdmiTmdsDeepColor(const NVDispHeadStateEvoRec *pHeadState)
{
    nvAssert(pHeadState->pConnectorEvo != NULL);

    // Check for HDMI TMDS.
    if (pHeadState->pConnectorEvo->isHdmiEnabled &&
        (pHeadState->timings.protocol != NVKMS_PROTOCOL_SOR_HDMI_FRL)) {
        // Check for pixelDepth >= 30.
        switch (pHeadState->pixelDepth) {
            case NVKMS_PIXEL_DEPTH_18_444:
            case NVKMS_PIXEL_DEPTH_24_444:
            case NVKMS_PIXEL_DEPTH_20_422:
            case NVKMS_PIXEL_DEPTH_16_422:
                return FALSE;
            case NVKMS_PIXEL_DEPTH_30_444:
                return TRUE;
        }
    }

    return FALSE;
}

/*!
 * Check whether rasterlock is possible between the two head states.
 * Note that we don't compare viewports, but I don't believe the viewport size
 * affects whether it is possible to rasterlock.
 */

static NvBool RasterLockPossible(const NVDispHeadStateEvoRec *pHeadState1,
                                 const NVDispHeadStateEvoRec *pHeadState2)
{
    const NVHwModeTimingsEvo *pTimings1 = &pHeadState1->timings;
    const NVHwModeTimingsEvo *pTimings2 = &pHeadState2->timings;

    /*
     * XXX Bug 4235728: With HDMI TMDS signaling >= 10 BPC, display requires a
     * higher VPLL clock multiplier varying by pixel depth, which can cause
     * rasterlock to fail between heads with differing multipliers. So, if a
     * head is using HDMI TMDS >= 10 BPC, it can only rasterlock with heads
     * that are using HDMI TMDS with the same pixel depth.
     */

    // If either head is HDMI TMDS DeepColor (10+ BPC)...
    if (HeadStateIsHdmiTmdsDeepColor(pHeadState1) ||
        HeadStateIsHdmiTmdsDeepColor(pHeadState2)) {
        // The other head must also be HDMI TMDS DeepColor.
        if (!HeadStateIsHdmiTmdsDeepColor(pHeadState1) ||
            !HeadStateIsHdmiTmdsDeepColor(pHeadState2)) {
            return FALSE;
        }

        // Both heads must have identical pixel depth.
        if (pHeadState1->pixelDepth != pHeadState2->pixelDepth) {
            return FALSE;
        }
    }

    return ((pTimings1->rasterSize.x       == pTimings2->rasterSize.x) &&
            (pTimings1->rasterSize.y       == pTimings2->rasterSize.y) &&
            (pTimings1->rasterSyncEnd.x    == pTimings2->rasterSyncEnd.x) &&
            (pTimings1->rasterSyncEnd.y    == pTimings2->rasterSyncEnd.y) &&
            (pTimings1->rasterBlankEnd.x   == pTimings2->rasterBlankEnd.x) &&
            (pTimings1->rasterBlankEnd.y   == pTimings2->rasterBlankEnd.y) &&
            (pTimings1->rasterBlankStart.x == pTimings2->rasterBlankStart.x) &&
            (pTimings1->rasterBlankStart.y == pTimings2->rasterBlankStart.y) &&
            (pTimings1->rasterVertBlank2Start ==
             pTimings2->rasterVertBlank2Start) &&
            (pTimings1->rasterVertBlank2End ==
             pTimings2->rasterVertBlank2End) &&
            (pTimings1->pixelClock         == pTimings2->pixelClock) &&
            (pTimings1->hSyncPol           == pTimings2->hSyncPol) &&
            (pTimings1->vSyncPol           == pTimings2->vSyncPol) &&
            (pTimings1->interlaced         == pTimings2->interlaced) &&
            (pTimings1->doubleScan         == pTimings2->doubleScan));

}

/*!
 * Fill the overscan color struct to be passed to SetRasterParams based on
 * whether or not SW yuv420 is enabled.
 *
 * \param[out] pOverscanColor     The overscan color struct to be filled
 * \param[in] yuv420              Whether or not SW yuv420 is enabled
 */
static void SetOverscanColor(NVEvoColorPtr pOverscanColor, NvBool yuv420)
{
    // Black in RGB format.
    // If we're using an emulated YUV 4:2:0 mode, set the equivalent in
    // YUV ITU-R BT.709 (64/64/512).
    if (yuv420) {
        pOverscanColor->red = 64;
        pOverscanColor->green = 64;
        pOverscanColor->blue = 512;
    } else {
        pOverscanColor->red = 0;
        pOverscanColor->green = 0;
        pOverscanColor->blue = 0;
    }

#if defined(DEBUG)
    // Override the overscan color to red in debug builds.
    // XXX This will look different for YUV 4:2:0
    pOverscanColor->red = 1023;
    pOverscanColor->green = 0;
    pOverscanColor->blue = 0;
#endif
}

void nvEvoDisableHwYUV420Packer(const NVDispEvoRec *pDispEvo,
                                const NvU32 head,
                                NVEvoUpdateState *pUpdateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hwYuv420 = FALSE;
    EvoUpdateHeadParams(pDispEvo, head, pUpdateState);
}

/*
 * Send the raster timings for the pDpyEvo to EVO.
 */
void nvEvoSetTimings(NVDispEvoPtr pDispEvo,
                     const NvU32 head,
                     NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
    const NVDscInfoEvoRec *pDscInfo = &pHeadState->dscInfo;
    const enum nvKmsPixelDepth pixelDepth = pHeadState->pixelDepth;
    NVEvoColorRec overscanColor;

    nvPushEvoSubDevMaskDisp(pDispEvo);
    SetOverscanColor(&overscanColor, (pTimings->yuv420Mode ==
                                      NV_YUV420_MODE_SW));

    pDevEvo->hal->SetRasterParams(pDevEvo, head,
                                  pTimings, pHeadState->tilePosition,
                                  pDscInfo, &overscanColor, updateState);

    // Set the head parameters
    pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].interlaced =
        pTimings->interlaced;
    pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hdmi3D =
        pTimings->hdmi3D;

    /*
     * Current HW does not support the combination of HW YUV420 and DSC.
     * HW YUV420 is currently only supported with HDMI, so we should never see
     * the combination of DP DSC and HW YUV420.
     * The combination of HDMI FRL DSC and HW YUV420 should be disallowed by
     * the HDMI library.
     */
    nvAssert(!((pTimings->yuv420Mode == NV_YUV420_MODE_HW) &&
               (pDscInfo->type != NV_DSC_INFO_EVO_TYPE_DISABLED)));

    pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hwYuv420 =
        (pTimings->yuv420Mode == NV_YUV420_MODE_HW);

    EvoUpdateHeadParams(pDispEvo, head, updateState);

    pDevEvo->hal->SetDscParams(pDispEvo, head, pDscInfo, pixelDepth);

    nvPopEvoSubDevMask(pDevEvo);
}

/*
 * Increase the size of the provided raster lock group by 1.
 *
 * This involves incrementing *pNumRasterLockGroups, reallocating the
 * pRasterLockGroups array, and initializing the new entry.
 */
static RasterLockGroup *GrowRasterLockGroup(RasterLockGroup *pRasterLockGroups,
                                            unsigned int *pNumRasterLockGroups)
{
    RasterLockGroup *pNewRasterLockGroups, *pRasterLockGroup;
    unsigned int numRasterLockGroups;

    numRasterLockGroups = *pNumRasterLockGroups;

    numRasterLockGroups++;
    pNewRasterLockGroups =
        nvRealloc(pRasterLockGroups,
                  numRasterLockGroups * sizeof(RasterLockGroup));
    if (!pNewRasterLockGroups) {
        nvFree(pRasterLockGroups);
        *pNumRasterLockGroups = 0;
        return NULL;
    }

    pRasterLockGroup = &pNewRasterLockGroups[numRasterLockGroups - 1];
    pRasterLockGroup->numDisps = 0;

    *pNumRasterLockGroups = numRasterLockGroups;

    return pNewRasterLockGroups;
}

static RasterLockGroup *CopyAndAppendRasterLockGroup(
    RasterLockGroup *pRasterLockGroups,
    unsigned int *pNumRasterLockGroups,
    const RasterLockGroup *source)
{
    RasterLockGroup *dest;

    pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups,
                                            pNumRasterLockGroups);
    if (pRasterLockGroups) {
        dest = &pRasterLockGroups[*pNumRasterLockGroups - 1];
        nvkms_memcpy(dest, source, sizeof(RasterLockGroup));
    }

    return pRasterLockGroups;
}

static void AddDispEvoIntoRasterLockGroup(RasterLockGroup *pRasterLockGroup,
                                          NVDispEvoPtr pDispEvo)
{
    NvU32 i;

    /*
     * The extent of a RasterLockGroup is the largest number of GPUs that can
     * be linked together.
     */
    nvAssert(pRasterLockGroup->numDisps < NVKMS_MAX_SUBDEVICES);

    /* Caller should keep track of not adding duplicate entries. */
    for (i = 0; i < pRasterLockGroup->numDisps; i++) {
        nvAssert(pRasterLockGroup->pDispEvoOrder[i] != pDispEvo);
    }

    /* Add to the end of the array. */
    pRasterLockGroup->pDispEvoOrder[pRasterLockGroup->numDisps] = pDispEvo;
    pRasterLockGroup->numDisps++;
}

static const RasterLockGroup *FindRasterLockGroupForDispEvo(
    const RasterLockGroup *pRasterLockGroups,
    unsigned int numRasterLockGroups,
    const NVDispEvoPtr pDispEvo)
{
    const RasterLockGroup *pRasterLockGroup;
    NvU32 i;

    for (pRasterLockGroup = pRasterLockGroups;
         pRasterLockGroup < pRasterLockGroups + numRasterLockGroups;
         pRasterLockGroup++) {
        for (i = 0; i < pRasterLockGroup->numDisps; i++) {
            if (pRasterLockGroup->pDispEvoOrder[i] == pDispEvo) {
                return pRasterLockGroup;
            }
        }
    }

    return NULL;
}

static DispEntry *DispEvoListFindDispByGpuId(DispEvoList *list, NvU32 gpuId)
{
    NvU32 i;

    for (i = 0; i < list->numDisps; i++) {
        if (list->disps[i].gpuId == gpuId) {
            return &list->disps[i];
        }
    }

    return NULL;
}

static void DispEvoListInit(DispEvoList *list)
{
    list->numDisps = 0;
}

static void DispEvoListAppend(DispEvoList *list, NVDispEvoPtr pDispEvo)
{
    nvAssert(DispEvoListFindDispByGpuId(
                 list, nvGpuIdOfDispEvo(pDispEvo)) == NULL);

    nvAssert(list->numDisps < ARRAY_LEN(list->disps));
    list->disps[list->numDisps].pDispEvo = pDispEvo;
    list->disps[list->numDisps].gpuId = nvGpuIdOfDispEvo(pDispEvo);
    list->disps[list->numDisps].pRasterLockGroup = NULL;
    list->numDisps++;
}

/*
 * Helper function to look up, for a gpuId, the list of connected GPUs in
 * NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS.
 */
static NV0000_CTRL_GPU_VIDEO_LINKS *FindLinksForGpuId(
    NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS *vidLinksParams,
    NvU32 gpuId)
{
    NvU32 i;

    for (i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++) {
        if (vidLinksParams->links[i].gpuId == NV0000_CTRL_GPU_INVALID_ID) {
            break;
        }

        if (vidLinksParams->links[i].gpuId == gpuId) {
            return &vidLinksParams->links[i];
        }
    }

    return NULL;
}

static void BuildRasterLockGroupFromVideoLinks(
    DispEvoList *list,
    RasterLockGroup *pRasterLockGroup,
    NvU32 gpuId,
    NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS *vidLinksParams)
{
    DispEntry *dispEntry;
    NV0000_CTRL_GPU_VIDEO_LINKS *links;
    NvU32 i;

    /* Find the correct DispEntry for the gpuId. If we can't find one, the
     * gpuId must belong to a DevEvo that was not listed in our DispEvoList;
     * ignore these links at this point. */
    dispEntry = DispEvoListFindDispByGpuId(list, gpuId);
    if (!dispEntry) {
        return;
    }

    /*
     * Unless we've seen this gpuId already, add it into the current
     * RasterLockGroup and try to discover bridged GPUs.
     */
    if (!dispEntry->pRasterLockGroup) {
        /* Assign in the current RasterLockGroup. */
        AddDispEvoIntoRasterLockGroup(pRasterLockGroup, dispEntry->pDispEvo);
        dispEntry->pRasterLockGroup = pRasterLockGroup;

        /* First, get the links for this gpuId. */
        links = FindLinksForGpuId(vidLinksParams, gpuId);

        /* Recurse into connected GPUs. */
        if (links) {
            for (i = 0; i < NV0000_CTRL_GPU_MAX_VIDEO_LINKS; i++) {
                if (links->connectedGpuIds[i] == NV0000_CTRL_GPU_INVALID_ID) {
                    break;
                }

                BuildRasterLockGroupFromVideoLinks(list,
                                                   pRasterLockGroup,
                                                   links->connectedGpuIds[i],
                                                   vidLinksParams);
            }
        }
    }
}

/*
 * Stateless (RM SLI/client SLI agnostic) discovery of bridged GPUs: build
 * RasterLockGroups for all non-RM SLI devices based on the found GPU links.
 *
 * This function and BuildRasterLockGroupFromVideoLinks() implement a simple
 * algorithm that puts clusters of bridged GPUs into distinct RasterLockGroups.
 * Here's an outline of how we generate the final RasterLockGroups:
 *
 * 1. Create a DispEvoList array to hold RasterLockGroup state for all the
 *    DispEvo objects in the system.
 *
 * 2. Query RM for an array of video links for each GPU.
 *
 * 3. As long as the DispEvoList contains DispEvos of the given pDevEvo
 *    without a group, find the first such DispEvo, create a new group, and
 *    populate it by recursively adding the DispEvo and all its connected
 *    DispEvos into the new group.
 *
 * 4. Once all known DispEvos are assigned, the result will be a list of
 *    global RasterLockGroups, each of which hosts <N> DispEvos that are
 *    connected together.
 *
 * The result of this function should be cached once and later used to
 * cheaply look up the appropriate, immutable RasterLockGroup for a DispEvo.
 */
GetRasterLockGroupsStateless(unsigned int * pNumRasterLockGroups)1218 static RasterLockGroup *GetRasterLockGroupsStateless(
1219     unsigned int *pNumRasterLockGroups)
1220 {
1221     RasterLockGroup *pRasterLockGroups = NULL;
1222     RasterLockGroup *pRasterLockGroup;
1223     DispEvoList evoList;
1224     NVDevEvoPtr pCurDev;
1225     NVDispEvoPtr pCurDisp;
1226     NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS *vidLinksParams;
1227     NvU32 sd;
1228     NvU32 i;
1229 
1230     DispEvoListInit(&evoList);
1231 
1232     /*
1233      * First create an array of DispEntries to hold some state for all the
1234      * DispEvos in the system.
1235      */
1236     FOR_ALL_EVO_DEVS(pCurDev) {
1237         /*
1238          * Only include non RM SLI devices so as to not clash with multi-GPU
1239          * RM SLI devices.
1240          */
1241         if (pCurDev->numSubDevices == 1) {
1242             FOR_ALL_EVO_DISPLAYS(pCurDisp, sd, pCurDev) {
1243                 DispEvoListAppend(&evoList, pCurDisp);
1244             }
1245         }
1246     }
1247 
1248     /*
1249      * Ask RM about the currently known video links.
1250      */
1251     vidLinksParams = nvCalloc(1, sizeof(*vidLinksParams));
1252     if (!vidLinksParams) {
1253         return NULL;
1254     }
1255 
1256     if (nvRmApiControl(nvEvoGlobal.clientHandle,
1257                        nvEvoGlobal.clientHandle,
1258                        NV0000_CTRL_CMD_GPU_GET_VIDEO_LINKS,
1259                        vidLinksParams,
1260                        sizeof(*vidLinksParams)) == NVOS_STATUS_SUCCESS) {
1261 
1262         for (i = 0; i < evoList.numDisps; i++) {
1263             /*
1264              * Create a new group starting from the first DispEvo not yet
1265              * assigned into a RasterLockGroup, and all GPUs possibly reachable
1266              * from it through bridges.
1267              *
1268              * TODO: Consider if we should only ever start a new
1269              * RasterLockGroup with a GPU that has only one connection and not
1270              * two. Then the group's pDispEvoOrder would always start from a
1271              * "leaf" GPU of a linkage graph. But will the GPU links always be
1272              * linear and non-branching? NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS
1273              * makes it possible to represent GPUs with any number of links.
1274              * Either FinishModesetOneGroup() must be able to handle that
1275              * (in which case this is not a concern) or we must be able to
1276              * trust that only 0-2 links will be reported per GPU.
1277              */
1278             if (evoList.disps[i].pRasterLockGroup) {
1279                 continue;
1280             }
1281 
1282             pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups,
1283                                                     pNumRasterLockGroups);
1284             if (!pRasterLockGroups) {
1285                 nvFree(vidLinksParams);
1286                 return NULL;
1287             }
1288 
1289             pRasterLockGroup = &pRasterLockGroups[*pNumRasterLockGroups - 1];
1290 
1291             BuildRasterLockGroupFromVideoLinks(&evoList,
1292                                                pRasterLockGroup,
1293                                                evoList.disps[i].gpuId,
1294                                                vidLinksParams);
1295         }
1296 
1297         nvFree(vidLinksParams);
1298         nvAssert(*pNumRasterLockGroups > 0);
1299         return pRasterLockGroups;
1300     }
1301 
1302     nvFree(vidLinksParams);
1303     nvFree(pRasterLockGroups);
1304     return NULL;
1305 }
1306 
1307 /*
1308  * GetRasterLockGroups() - Determine which GPUs to consider for locking (or
1309  * unlocking) displays.  This is one of the following:
1310  * 1. SLI video bridge order, if SLI is enabled;
1311  * 2. GPUs linked through rasterlock pins, no SLI (like in clientSLI);
1312  * 3. A single GPU,
1313  * in that order.
1314  *
1315  * Note that we still go through the same codepaths for the last degenerate
1316  * case, in order to potentially lock heads on the same GPU together.
1317  */
GetRasterLockGroups(NVDevEvoPtr pDevEvo,unsigned int * pNumRasterLockGroups)1318 static RasterLockGroup *GetRasterLockGroups(
1319     NVDevEvoPtr pDevEvo,
1320     unsigned int *pNumRasterLockGroups)
1321 {
1322     unsigned int i;
1323     RasterLockGroup *pRasterLockGroups = NULL;
1324 
1325     *pNumRasterLockGroups = 0;
1326 
1327     if (pDevEvo->numSubDevices > 1 && pDevEvo->sli.bridge.present) {
1328         NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS params = { 0 };
1329         NvU32 ret;
1330 
1331         /* In SLI, with a video bridge.  Get the video bridge order from RM. */
1332 
1333         if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1334                                   pDevEvo->deviceHandle,
1335                                   NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER,
1336                                   &params, sizeof(params)))
1337                 != NVOS_STATUS_SUCCESS) {
1338             nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
1339                              "NvRmControl(GET_VIDLINK_ORDER) failed; "
1340                              "ret: %d\n", ret);
1341             return NULL;
1342         }
1343 
1344         if (params.ConnectionCount > 0) {
1345             RasterLockGroup *pRasterLockGroup;
1346             pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups,
1347                                                     pNumRasterLockGroups);
1348 
1349             if (!pRasterLockGroups) {
1350                 return NULL;
1351             }
1352 
1353             pRasterLockGroup = &pRasterLockGroups[*pNumRasterLockGroups - 1];
1354 
1355             /*
1356              * For some reason this interface returns a mask instead of an
1357              * index, so we have to convert it here.
1358              */
1359             for (i = 0; i < pDevEvo->numSubDevices; i++) {
1360                 NvU32 subDeviceMask = params.Order[i];
1361                 NvU32 sd = 0;
1362 
1363                 nvAssert(nvPopCount32(subDeviceMask) == 1);
1364 
1365                 if (!subDeviceMask) continue;
1366 
1367                 while (!(subDeviceMask & (1 << sd))) sd++;
1368 
1369                 nvAssert(sd < NVKMS_MAX_SUBDEVICES);
1370                 nvAssert(pDevEvo->pDispEvo[sd] != NULL);
1371 
1372                 /* SLI Mosaic. */
1373                 AddDispEvoIntoRasterLockGroup(pRasterLockGroup,
1374                                               pDevEvo->pDispEvo[sd]);
1375             }
1376         }
1377 
1378         if (*pNumRasterLockGroups > 0) {
1379             return pRasterLockGroups;
1380         }
1381     }
1382 
1383     /*
1384      * Client SLI: Create a RasterLockGroup from pDevEvo's only DispEvo
1385      * and other DispEvos potentially bridged to that.
1386      */
1387 
1388     if (pDevEvo->numSubDevices == 1) {
1389         /* Get-or-create cached RasterLockGroup for this device. */
1390         if (!globalRasterLockGroups) {
1391             globalRasterLockGroups =
1392                 GetRasterLockGroupsStateless(&numGlobalRasterLockGroups);
1393         }
1394 
1395         /* Look for a cached group containing this device's DispEvo. */
1396         if (globalRasterLockGroups && numGlobalRasterLockGroups > 0) {
1397             const RasterLockGroup *pRasterLockGroup =
1398                 FindRasterLockGroupForDispEvo(globalRasterLockGroups,
1399                                               numGlobalRasterLockGroups,
1400                                               pDevEvo->pDispEvo[0]);
1401 
1402             /* Make a copy of it and add to 'pRasterLockGroups'. */
1403             if (pRasterLockGroup) {
1404                 pRasterLockGroups =
1405                     CopyAndAppendRasterLockGroup(pRasterLockGroups,
1406                                                  pNumRasterLockGroups,
1407                                                  pRasterLockGroup);
1408             }
1409         }
1410 
1411         if (*pNumRasterLockGroups > 0) {
1412             return pRasterLockGroups;
1413         }
1414     }
1415 
1416     /*
1417      * Single GPU or bridgeless SLI. We create a group for each
1418      * individual DispEvo.
1419      */
1420 
1421     NVDispEvoPtr pDispEvo;
1422     unsigned int sd;
1423 
1424     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1425         RasterLockGroup *pRasterLockGroup;
1426         pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups,
1427                                                 pNumRasterLockGroups);
1428 
1429         if (!pRasterLockGroups) {
1430             return NULL;
1431         }
1432 
1433         pRasterLockGroup = &pRasterLockGroups[*pNumRasterLockGroups - 1];
1434 
1435         AddDispEvoIntoRasterLockGroup(pRasterLockGroup, pDispEvo);
1436     }
1437 
1438     return pRasterLockGroups;
1439 }
1440 
1441 /*
1442  * ApplyLockActionIfPossible() - Check if the given action is a valid
1443  * transition for this pEvoSubDev's state, and apply it if so.
1444  * Return TRUE if any hardware state needs to be updated, FALSE otherwise.
1445  */
1446 static NvBool ApplyLockActionIfPossible(NVDispEvoPtr pDispEvo,
1447                                         NVEvoSubDevPtr pEvoSubDev,
1448                                         NVEvoLockAction action)
1449 {
1450     if (!pEvoSubDev) {
1451         return FALSE;
1452     }
1453 
1454     if (pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
1455                                   action, NULL)) {
1456         unsigned int i = 0;
1457         NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
1458         NvU32 head;
1459 
1460         for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
1461             if (nvHeadIsActive(pDispEvo, head)) {
1462                 pHeads[i++] = head;
1463             }
1464         }
1465         nvAssert(i <= NVKMS_MAX_HEADS_PER_DISP);
1466         pHeads[i] = NV_INVALID_HEAD;
1467 
1468         pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, pHeads);
1469 
1470         return TRUE;
1471     }
1472 
1473     return FALSE;
1474 
1475 } // ApplyLockActionIfPossible()
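/*
 * Illustrative sketch (not part of the driver): callers bracket one or more
 * ApplyLockActionIfPossible() attempts between SyncEvoLockState(), which
 * refreshes the assembly state, and UpdateEvoLockState(), which pushes any
 * accumulated changes to hardware.  For example, the pattern used by
 * UnlockRasterLockOneDisp() below:
 *
 *     SyncEvoLockState();
 *     if (ApplyLockActionIfPossible(pDispEvo, pEvoSubDev, action)) {
 *         UpdateEvoLockState();
 *     }
 */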
1476 
1477 /*
1478  * Disable any intra-GPU lock state set up in FinishModesetOneDisp().
1479  * This assumes that any cross-GPU locking which may have been set up on this
1480  * GPU was already torn down.
1481  */
1482 static void UnlockRasterLockOneDisp(NVDispEvoPtr pDispEvo)
1483 {
1484     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1485     NvU32 sd = pDispEvo->displayOwner;
1486     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
1487     NvBool changed = FALSE;
1488 
1489     /* Initialize the assembly state */
1490     SyncEvoLockState();
1491 
1492     /* We want to evaluate all of these, so don't use || */
1493     changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev,
1494                                          NV_EVO_PROHIBIT_LOCK_DISABLE);
1495     changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev,
1496                                          NV_EVO_UNLOCK_HEADS);
1497 
1498     /* Update the hardware if anything has changed */
1499     if (changed) {
1500         UpdateEvoLockState();
1501     }
1502 
1503     pDispEvo->rasterLockPossible = FALSE;
1504 }
1505 
1506 /*
1507  * Call UnlockRasterLockOneDisp() for each disp on this device to tear down
1508  * intra-GPU locking on each.
1509  */
1510 static void UnlockRasterLockOneDev(NVDevEvoPtr pDevEvo)
1511 {
1512     NVDispEvoPtr pDispEvo;
1513     NvU32 sd;
1514 
1515     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1516         UnlockRasterLockOneDisp(pDispEvo);
1517     }
1518 }
1519 
1520 static void DisableLockGroupFlipLock(NVLockGroup *pLockGroup)
1521 {
1522 
1523     const RasterLockGroup *pRasterLockGroup = &pLockGroup->rasterLockGroup;
1524     NvU32 i;
1525 
1526     if (!pLockGroup->flipLockEnabled) {
1527         return;
1528     }
1529 
1530     for (i = 0; i < pRasterLockGroup->numDisps; i++) {
1531         NVEvoUpdateState updateState = { };
1532         NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
1533         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1534         NvU32 sd = pDispEvo->displayOwner;
1535         NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
1536         NvU32 head;
1537         NvBool changed = FALSE;
1538 
1539         for (head = 0; head < pDevEvo->numHeads; head++) {
1540             NvBool headChanged = FALSE;
1541             if (!nvHeadIsActive(pDispEvo, head)) {
1542                 continue;
1543             }
1544 
1545             /*
1546              * scanLockState transitions (such as nvEvoLockHWStateLockHeads)
1547              * will update headControlAssy values for all heads, so we should
1548              * update flipLock and flipLockPin for all heads as well.
1549              */
1550             NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head];
1551             /*
1552              * Reset the fliplock pin, if it's not in use for framelock,
1553              * and unregister our use of the fliplock pin
1554              */
1555             if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockPinSetForFrameLockHeadMask,
1556                                  head)) {
1557                 if (pHC->flipLockPin != NV_EVO_LOCK_PIN_INTERNAL(0)) {
1558                     pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
1559                     headChanged = TRUE;
1560                 }
1561             }
1562             pEvoSubDev->flipLockPinSetForSliHeadMask =
1563                 HEAD_MASK_UNSET(pEvoSubDev->flipLockPinSetForSliHeadMask,
1564                                 head);
1565 
1566             /*
1567              * Disable fliplock, if it's not in use for framelock, and
1568              * unregister our need for fliplock to be enabled
1569              */
1570             if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForFrameLockHeadMask,
1571                                  head)) {
1572                 if (pHC->flipLock) {
1573                     pHC->flipLock = FALSE;
1574                     headChanged = TRUE;
1575                 }
1576             }
1577             pEvoSubDev->flipLockEnabledForSliHeadMask =
1578                 HEAD_MASK_UNSET(pEvoSubDev->flipLockEnabledForSliHeadMask,
1579                                 head);
1580             if (headChanged) {
1581                 changed = TRUE;
1582                 EvoUpdateHeadParams(pDispEvo, head, &updateState);
            }
1583         }
1584         if (changed) {
1585             nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
1586                                   TRUE /* releaseElv */);
1587         }
1588     }
1589 
1590     pLockGroup->flipLockEnabled = FALSE;
1591 }
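/*
 * Illustrative note (not part of the driver): the HEAD_MASK_* macros used
 * above are assumed to have the usual one-bit-per-head semantics, roughly:
 *
 *     HEAD_MASK_QUERY(mask, head)  ->  ((mask) & NVBIT(head)) != 0
 *     HEAD_MASK_SET(mask, head)    ->  ((mask) | NVBIT(head))
 *     HEAD_MASK_UNSET(mask, head)  ->  ((mask) & ~NVBIT(head))
 *
 * DisableLockGroupFlipLock() only tears down the fliplock pin and enable
 * state that SLI owns: heads whose bits remain set in the *ForFrameLock*
 * masks are deliberately left untouched so framelock keeps working.
 */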
1592 
1593 /*
1594  * Unlock cross-GPU locking in the given lock group.
1595  */
1596 static void UnlockLockGroup(NVLockGroup *pLockGroup)
1597 {
1598     RasterLockGroup *pRasterLockGroup;
1599     int i;
1600 
1601     if (pLockGroup == NULL) {
1602         return;
1603     }
1604 
1605     pRasterLockGroup = &pLockGroup->rasterLockGroup;
1606 
1607     DisableLockGroupFlipLock(pLockGroup);
1608 
1609     for (i = (int)pRasterLockGroup->numDisps - 1; i >= 0; i--) {
1610         NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
1611         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1612         NvU32 sd = pDispEvo->displayOwner;
1613         NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
1614 
1615         /* Initialize the assembly state */
1616         SyncEvoLockState();
1617 
1618         if (ApplyLockActionIfPossible(pDispEvo, pEvoSubDev,
1619                                       NV_EVO_REM_SLI)) {
1620             /* Update the hardware if anything has changed */
1621             UpdateEvoLockState();
1622         }
1623 
1624         pEvoSubDev->flipLockProhibitedHeadMask = 0x0;
1625 
1626         nvAssert(pDispEvo->pLockGroup == pLockGroup);
1627         pDispEvo->pLockGroup = NULL;
1628     }
1629 
1630     /*
1631      * Disable any SLI video bridge features we may have enabled for locking.
1632      * This is a separate loop from the above in order to handle both cases:
1633      *
1634      * a) Multiple pDispEvos on the same pDevEvo (linked RM-SLI): all disps in
1635      *    the lock group will share the same pDevEvo.  In that case we should
1636      *    not call RM to disable the video bridge power across the entire
1637  *    device until we've disabled locking on all GPUs.  This loop will
1638      *    call nvEvoUpdateSliVideoBridge() redundantly for the same pDevEvo,
1639      *    but those calls will be filtered out.  (If we did this in the loop
1640      *    above, RM would broadcast the video bridge disable call to all pDisps
1641      *    on the first call, even before we've disabled locking on them.)
1642      *
1643      * b) Each pDispEvo on a separate pDevEvo (client-side SLI or no SLI, when
1644      *    a video bridge is present): in that case each pDispEvo has a separate
1645      *    pDevEvo, and we need to call nvEvoUpdateSliVideoBridge() on each.
1646      *    (It would be okay in this case to call nvEvoUpdateSliVideoBridge() in
1647      *    the loop above since it will only disable the video bridge power for
1648      *    one GPU at a time.)
1649      */
1650     for (i = (int)pRasterLockGroup->numDisps - 1; i >= 0; i--) {
1651         NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
1652         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1653 
1654         pDevEvo->sli.bridge.powerNeededForRasterLock = FALSE;
1655         nvEvoUpdateSliVideoBridge(pDevEvo);
1656     }
1657 
1658     nvFree(pLockGroup);
1659 }
1660 
1661 /*
1662  * Unlock any cross-GPU locking in the rasterlock group(s) associated with
1663  * the given device.
1664  */
1665 static void UnlockLockGroupsForDevice(NVDevEvoPtr pDevEvo)
1666 {
1667     NVDispEvoPtr pDispEvo;
1668     NvU32 sd;
1669 
1670     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1671         UnlockLockGroup(pDispEvo->pLockGroup);
1672         nvAssert(pDispEvo->pLockGroup == NULL);
1673     }
1674 }
1675 
1676 void nvAssertAllDpysAreInactive(NVDevEvoPtr pDevEvo)
1677 {
1678     NVDispEvoPtr pDispEvo;
1679     int i;
1680 
1681     FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
1682         NvU32 head;
1683         for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
1684             nvAssert(!nvHeadIsActive(pDispEvo, head));
1685         }
1686     }
1687 }
1688 
1689 /*!
1690  * Disable locking-related state.
1691  */
1692 static void DisableLockState(NVDevEvoPtr pDevEvo)
1693 {
1694     NvU32 dispIndex;
1695     NVDispEvoPtr pDispEvo;
1696 
1697     /* Disable flip lock as requested by swap groups/framelock. */
1698 
1699     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1700         nvToggleFlipLockPerDisp(pDispEvo,
1701                                 nvGetActiveHeadMask(pDispEvo),
1702                                 FALSE /* enable */);
1703     }
1704 
1705     /* Disable any locking across GPUs. */
1706 
1707     UnlockLockGroupsForDevice(pDevEvo);
1708 
1709     /* Disable intra-GPU rasterlock on this pDevEvo. */
1710     UnlockRasterLockOneDev(pDevEvo);
1711 
1712     /* Reset the EVO locking state machine. */
1713 
1714     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1715         nvEvoStateAssertNoLock(&pDevEvo->gpus[pDispEvo->displayOwner]);
1716         nvEvoStateStartNoLock(&pDevEvo->gpus[pDispEvo->displayOwner]);
1717     }
1718 }
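/*
 * Note on ordering in DisableLockState() above: cross-GPU lock groups are
 * torn down (UnlockLockGroupsForDevice()) before intra-GPU rasterlock
 * (UnlockRasterLockOneDev()), because UnlockRasterLockOneDisp() assumes any
 * cross-GPU locking on the GPU is already gone.
 */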
1719 
1720 void nvEvoLockStatePreModeset(NVDevEvoPtr pDevEvo)
1721 {
1722     DisableLockState(pDevEvo);
1723 }
1724 
1725 /*!
1726  * Set up raster lock between GPUs, if applicable.
1727  */
1728 void nvEvoLockStatePostModeset(NVDevEvoPtr pDevEvo, const NvBool doRasterLock)
1729 {
1730     RasterLockGroup *pRasterLockGroups, *pRasterLockGroup;
1731     unsigned int numRasterLockGroups;
1732 
1733     if (!doRasterLock) {
1734         return;
1735     }
1736 
1737     FinishModesetOneDev(pDevEvo);
1738 
1739     pRasterLockGroups = GetRasterLockGroups(pDevEvo, &numRasterLockGroups);
1740     if (!pRasterLockGroups) {
1741         return;
1742     }
1743 
1744     for (pRasterLockGroup = pRasterLockGroups;
1745          pRasterLockGroup < pRasterLockGroups + numRasterLockGroups;
1746          pRasterLockGroup++) {
1747         FinishModesetOneGroup(pRasterLockGroup);
1748     }
1749 
1750     nvFree(pRasterLockGroups);
1751 }
1752 
1753 /*!
1754  * Updates the hardware based on software needs tracked in pDevEvo->sli.bridge.
1755  * Call this function after changing any of those flags.
1756  */
1757 void nvEvoUpdateSliVideoBridge(NVDevEvoPtr pDevEvo)
1758 {
1759     NV0080_CTRL_GPU_SET_VIDLINK_PARAMS params = { 0 };
1760     const NvBool enable = pDevEvo->sli.bridge.powerNeededForRasterLock;
1761     NvU32 status;
1762 
1763     if (pDevEvo->sli.bridge.powered == enable) {
1764         return;
1765     }
1766 
1767     if (enable) {
1768         /* SLI should be prohibited earlier if no bridge is present. */
1769         nvAssert(pDevEvo->sli.bridge.present);
1770     }
1771 
1772     params.enable = enable ?
1773         NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_TRUE :
1774         NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_FALSE;
1775 
1776     status = nvRmApiControl(nvEvoGlobal.clientHandle,
1777                             pDevEvo->deviceHandle,
1778                             NV0080_CTRL_CMD_GPU_SET_VIDLINK,
1779                             &params, sizeof(params));
1780     if (status != NVOS_STATUS_SUCCESS) {
1781         nvAssert(!"NV0080_CTRL_CMD_GPU_SET_VIDLINK failed");
1782     }
1783 
1784     pDevEvo->sli.bridge.powered = enable;
1785 }
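/*
 * Illustrative usage (the pattern used by FinishModesetOneGroup() and
 * UnlockLockGroup()): record the new software need first, then let
 * nvEvoUpdateSliVideoBridge() reconcile the hardware:
 *
 *     pDevEvo->sli.bridge.powerNeededForRasterLock = TRUE;
 *     nvEvoUpdateSliVideoBridge(pDevEvo);
 *
 * Redundant calls are cheap: the function returns early when the cached
 * 'powered' state already matches the requested state.
 */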
1786 
1787 /*
1788  * Check if VRR or MergeMode are enabled; if so, go into the special "prohibit
1789  * lock" mode which prevents other scanlock states from being reached.
1790  *
1791  * Return TRUE iff VRR or MergeMode is in use on this GPU.
1792  */
1793 static NvBool ProhibitLockIfNecessary(NVDispEvoRec *pDispEvo)
1794 {
1795     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1796     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
1797     NvU32 activeHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
1798     NvBool prohibitLock = FALSE;
1799     NvU32 numActiveHeads = 0;
1800     NvU32 head;
1801 
1802     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
1803         if (nvHeadIsActive(pDispEvo, head)) {
1804             activeHeads[numActiveHeads++] = head;
1805             if ((pDispEvo->headState[head].timings.vrr.type !=
1806                  NVKMS_DPY_VRR_TYPE_NONE)) {
1807                 prohibitLock = TRUE;
1808             }
1809 
1810             if (pDispEvo->headState[head].mergeMode !=
1811                     NV_EVO_MERGE_MODE_DISABLED) {
1812                 prohibitLock = TRUE;
1813             }
1814         }
1815     }
1816 
1817 
1818     if (prohibitLock) {
1819         activeHeads[numActiveHeads] = NV_INVALID_HEAD;
1820 
1821         SyncEvoLockState();
1822 
1823         if (!pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
1824                                        NV_EVO_PROHIBIT_LOCK,
1825                                        activeHeads)) {
1826             nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
1827                               "Failed to prohibit lock");
1828             return FALSE;
1829         }
1830 
1831         UpdateEvoLockState();
1832 
1833         return TRUE;
1834     }
1835     return FALSE;
1836 }
1837 
1838 
1839 /*
1840  * Prohibit locking if necessary for the active configuration.
1841  *
1842  * Set up rasterlock between heads on a single GPU, if certain conditions are met:
1843  * - Locking is not prohibited due to the active configuration
1844  * - Opportunistic display sync is not disabled via kernel module parameter
1845  * - All active heads have identical mode timings
1846  *
1847  * Set pDispEvo->rasterLockPossible to indicate whether rasterlock is possible
1848  * on this GPU, which will be used to determine if rasterlock is possible
1849  * between this GPU and other GPUs.
1850  * Note that this isn't the same as whether heads were locked: if fewer than
1851  * two heads were active, heads will not be locked but rasterlock with other
1852  * GPUs may still be possible.
1853  */
1854 static void FinishModesetOneDisp(
1855     NVDispEvoRec *pDispEvo)
1856 {
1857     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1858     NVEvoSubDevPtr pEvoSubDev;
1859     const NVDispHeadStateEvoRec *pPrevHeadState = NULL;
1860     NvU32 head, usedHeads = 0;
1861     NvU32 headsToLock[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
1862 
1863     if (pDevEvo->gpus == NULL) {
1864         return;
1865     }
1866 
1867     pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
1868 
1869     pDispEvo->rasterLockPossible = FALSE;
1870 
1871     if (ProhibitLockIfNecessary(pDispEvo)) {
1872         /* If all locking is prohibited, do not attempt rasterlock. */
1873         return;
1874     }
1875 
1876     if (!nvkms_opportunistic_display_sync()) {
1877         /* If opportunistic display sync is disabled, do not attempt rasterlock. */
1878         return;
1879     }
1880 
1881     /*
1882      * Determine if rasterlock is possible: check each active display for
1883      * rasterlock compatibility with the previous one we looked at.  If any of
1884      * them aren't compatible, rasterlock is not possible.
1885      */
1886     pDispEvo->rasterLockPossible = TRUE;
1887     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
1888         const NVDispHeadStateEvoRec *pHeadState =
1889             &pDispEvo->headState[head];
1890 
1891         if (!nvHeadIsActive(pDispEvo, head)) {
1892             continue;
1893         }
1894 
1895         if (pPrevHeadState &&
1896             !RasterLockPossible(pHeadState, pPrevHeadState)) {
1897             pDispEvo->rasterLockPossible = FALSE;
1898             break;
1899         }
1900 
1901         pPrevHeadState = pHeadState;
1902 
1903         headsToLock[usedHeads] = head;
1904         usedHeads++;
1905     }
1906 
1907     if (!pDispEvo->rasterLockPossible) {
1908         return;
1909     }
1910 
1911     if (usedHeads > 1) {
1912         /* Terminate array */
1913         headsToLock[usedHeads] = NV_INVALID_HEAD;
1914 
1915         /* Initialize the assembly state */
1916         SyncEvoLockState();
1917 
1918         /* Set up rasterlock between heads on this disp. */
1919         nvAssert(headsToLock[0] != NV_INVALID_HEAD);
1920         if (!pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
1921                                        NV_EVO_LOCK_HEADS,
1922                                        headsToLock)) {
1923             nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
1924                               "Unable to lock heads");
1925             pDispEvo->rasterLockPossible = FALSE;
1926         }
1927 
1928         /* Update the hardware with the new state */
1929         UpdateEvoLockState();
1930     }
1931 }
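/*
 * Illustrative note: because rasterlock here requires identical mode timings
 * (an equivalence relation), FinishModesetOneDisp() only needs to compare
 * each active head against the previous active head.  E.g. with active heads
 * 0, 2 and 3, it checks (2 vs 0) and (3 vs 2); if both pairs are compatible,
 * all three heads share compatible timings.
 */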
1932 
1933 /* Call FinishModesetOneDisp() for each disp on this device to set up intra-GPU
1934  * locking on each. */
1935 static void FinishModesetOneDev(
1936     NVDevEvoRec *pDevEvo)
1937 {
1938     NVDispEvoPtr pDispEvo;
1939     NvU32 sd;
1940 
1941     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1942         FinishModesetOneDisp(pDispEvo);
1943     }
1944 }
1945 
1946 /*
1947  * Enable fliplock for the specified pLockGroup.
1948  * This assumes that rasterlock was already enabled.
1949  */
1950 static void EnableLockGroupFlipLock(NVLockGroup *pLockGroup)
1951 {
1952     const RasterLockGroup *pRasterLockGroup = &pLockGroup->rasterLockGroup;
1953     NvU32 i;
1954 
1955     if (pRasterLockGroup->numDisps < 2) {
1956         /* TODO: enable fliplock for single GPUs */
1957         return;
1958     }
1959 
1960     pLockGroup->flipLockEnabled = TRUE;
1961 
1962     for (i = 0; i < pRasterLockGroup->numDisps; i++) {
1963         NVEvoUpdateState updateState = { };
1964         NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
1965         NvU32 sd = pDispEvo->displayOwner;
1966         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1967         NvU32 head;
1968 
1969         for (head = 0; head < pDevEvo->numHeads; head++) {
1970             NvU64 startTime = 0;
1971 
1972             if (!nvHeadIsActive(pDispEvo, head)) {
1973                 continue;
1974             }
1975 
1976             NVEvoLockPin pin =
1977                 nvEvoGetPinForSignal(pDispEvo, &pDevEvo->gpus[sd],
1978                                      NV_EVO_LOCK_SIGNAL_FLIP_LOCK);
1979 
1980             /* Wait for the raster lock to sync in.. */
1981             if (pin == NV_EVO_LOCK_PIN_ERROR ||
1982                 !EvoWaitForLock(pDevEvo, sd, head, EVO_RASTER_LOCK,
1983                                 &startTime)) {
1984                 nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
1985                     "Timed out waiting for rasterlock; not enabling fliplock.");
1986                 goto fail;
1987             }
1988 
1989             /*
1990              * Enable fliplock, and register that we've enabled
1991              * fliplock for SLI to ensure it doesn't get disabled
1992              * later.
1993              */
1994             pDevEvo->gpus[sd].headControl[head].flipLockPin = pin;
1995             pDevEvo->gpus[sd].flipLockPinSetForSliHeadMask =
1996                 HEAD_MASK_SET(pDevEvo->gpus[sd].flipLockPinSetForSliHeadMask, head);
1997 
1998             pDevEvo->gpus[sd].headControl[head].flipLock = TRUE;
1999             pDevEvo->gpus[sd].flipLockEnabledForSliHeadMask =
2000                 HEAD_MASK_SET(pDevEvo->gpus[sd].flipLockEnabledForSliHeadMask, head);
2001 
2002             EvoUpdateHeadParams(pDispEvo, head, &updateState);
2003         }
2004 
2005         /*
2006          * This must be synchronous as EVO reports lock success if
2007          * locking isn't enabled, so we could race through the
2008          * WaitForLock check below otherwise.
2009          */
2010         nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
2011                               TRUE /* releaseElv */);
2012 
2013         /*
2014          * Wait for flip lock sync.  I'm not sure this is really
2015          * necessary, but the docs say to do this before attempting any
2016          * flips in the base channel.
2017          */
2018         for (head = 0; head < pDevEvo->numHeads; head++) {
2019             NvU64 startTime = 0;
2020 
2021             if (!nvHeadIsActive(pDispEvo, head)) {
2022                 continue;
2023             }
2024 
2025             if (!EvoWaitForLock(pDevEvo, sd, head, EVO_FLIP_LOCK,
2026                                 &startTime)) {
2027                 nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
2028                     "Timed out waiting for fliplock.");
2029                 goto fail;
2030             }
2031         }
2032     }
2033 
2034     return;
2035 fail:
2036     DisableLockGroupFlipLock(pLockGroup);
2037 }
2038 
2039 /*
2040  * FinishModesetOneGroup() - Set up raster lock between GPUs, if applicable,
2041  * for one RasterLockGroup.  Called in a loop from nvEvoLockStatePostModeset().
2042  */
2043 
2044 static void FinishModesetOneGroup(RasterLockGroup *pRasterLockGroup)
2045 {
2046     NVDispEvoPtr *pDispEvoOrder = pRasterLockGroup->pDispEvoOrder;
2047     NvU32 numUsedGpus = 0;
2048     const NVDispHeadStateEvoRec *pPrevHeadState = NULL;
2049     NvBool headInUse[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP];
2050     NvBool rasterLockPossible = TRUE, foundUnused = FALSE;
2051     unsigned int i, j;
2052     NVLockGroup *pLockGroup = NULL;
2053 
2054     /* Don't attempt locking across GPUs if, on any individual GPU, rasterlock
2055      * isn't possible. */
2056     for (i = 0; i < pRasterLockGroup->numDisps; i++) {
2057         NVDispEvoPtr pDispEvo = pDispEvoOrder[i];
2058 
2059         if (!pDispEvo->rasterLockPossible) {
2060             return;
2061         }
2062     }
2063 
2064     nvkms_memset(headInUse, 0, sizeof(headInUse));
2065 
2066     /*
2067      * Next, figure out if we can perform cross-GPU locking and which
2068      * GPUs/heads we can use.  Only attempt locking if all heads across GPUs
2069      * have compatible timings and are consecutive in the video bridge order.
2070      */
2071     for (i = 0; i < pRasterLockGroup->numDisps; i++) {
2072         NVDispEvoPtr pDispEvo = pDispEvoOrder[i];
2073         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2074         NvU32 head;
2075 
2076         /*
2077          * We can't lock if there is an unused GPU between two used GPUs on the
2078          * video bridge chain.
2079          * We must check whether pDevEvo->gpus is NULL, in case we haven't
2080          * been through AllocDeviceObject for this pDev (yet?).
2081          */
2082         if (!HasActiveHeads(pDispEvo) ||
2083             !pDevEvo->gpus) {
2084             foundUnused = TRUE;
2085             continue;
2086         } else {
2087             if (foundUnused) {
2088                 rasterLockPossible = FALSE;
2089                 break;
2090             }
2091 
2092             numUsedGpus++;
2093         }
2094 
2095         /*
2096          * Compare modetimings for each active display with the previous one we
2097          * looked at.  If any of them don't match, punt on locking.
2098          */
2099         for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
2100             const NVDispHeadStateEvoRec *pHeadState =
2101                 &pDispEvo->headState[head];
2102 
2103             if (!nvHeadIsActive(pDispEvo, head)) {
2104                 continue;
2105             }
2106 
2107             if (pPrevHeadState &&
2108                 !RasterLockPossible(pHeadState, pPrevHeadState)) {
2109                 rasterLockPossible = FALSE;
2110                 goto exitHeadLoop;
2111             }
2112 
2113             headInUse[i][head] = TRUE;
2114 
2115             pPrevHeadState = pHeadState;
2116         }
2117 
2118 exitHeadLoop:
2119         if (!rasterLockPossible) {
2120             break;
2121         }
2122     }
2123 
2124     if (!rasterLockPossible || numUsedGpus == 0) {
2125         return;
2126     }
2127 
2128     /* Create a new lock group to store the current configuration */
2129     pLockGroup = nvCalloc(1, sizeof(*pLockGroup));
2130 
2131     if (pLockGroup == NULL) {
2132         return;
2133     }
2134 
2135     pLockGroup->rasterLockGroup = *pRasterLockGroup;
2136 
2137     /*
2138      * Finally, actually set up locking: go through the video bridge order
2139      * setting it up.
2140      */
2141     for (i = 0; i < pRasterLockGroup->numDisps; i++) {
2142         NVDispEvoPtr pDispEvo = pDispEvoOrder[i];
2143         NvU32 sd = pDispEvo->displayOwner;
2144         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2145         NvU32 head[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
2146         unsigned int usedHeads = 0;
2147         NvBool gpusLocked = FALSE;
2148 
2149         /* Remember that we've enabled this lock group on this GPU. */
2150         nvAssert(pDispEvo->pLockGroup == NULL);
2151         pDispEvo->pLockGroup = pLockGroup;
2152 
2153         /* If we're past the end of the chain, stop applying locking below, but
2154          * continue this loop to assign pDispEvo->pLockGroup above. */
2155         if (i >= numUsedGpus) {
2156             continue;
2157         }
2158 
2159         /* Initialize the assembly state */
2160         SyncEvoLockState();
2161 
2162         for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) {
2163             if (headInUse[i][j]) {
2164 
2165                 head[usedHeads] = j;
2166 
2167                 usedHeads++;
2168             }
2169         }
2170         head[usedHeads] = NV_INVALID_HEAD;
2171 
2172         /* Then set up cross-GPU locking, if we have enough active GPUs */
2173         if (numUsedGpus > 1) {
2174             NVEvoLockAction action;
2175             NVEvoLockPin *pServerPin = &pDevEvo->gpus[sd].sliServerLockPin;
2176             NVEvoLockPin *pClientPin = &pDevEvo->gpus[sd].sliClientLockPin;
2177 
2178             *pServerPin = NV_EVO_LOCK_PIN_ERROR;
2179             *pClientPin = NV_EVO_LOCK_PIN_ERROR;
2180 
2181             if (i == 0) {
2182                 action = NV_EVO_ADD_SLI_PRIMARY;
2183             } else {
2184                 if (i == (numUsedGpus - 1)) {
2185                     action = NV_EVO_ADD_SLI_LAST_SECONDARY;
2186                 } else {
2187                     action = NV_EVO_ADD_SLI_SECONDARY;
2188                 }
2189             }
2190 
2191             if (action == NV_EVO_ADD_SLI_PRIMARY ||
2192                 action == NV_EVO_ADD_SLI_SECONDARY) {
2193                 /* Find pin for server to next */
2194                 NVDispEvoPtr pDispEvoNext = pDispEvoOrder[i + 1];
2195                 NvU32 headNext = 0;
2196 
2197                 for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) {
2198                     if (headInUse[i + 1][j]) {
2199                         headNext = j;
2200                         break;
2201                     }
2202                 }
2203 
2204                 GetRasterLockPin(pDispEvo, head[0],
2205                                  pDispEvoNext, headNext,
2206                                  pServerPin, NULL);
2207             }
2208 
2209             if (action == NV_EVO_ADD_SLI_SECONDARY ||
2210                 action == NV_EVO_ADD_SLI_LAST_SECONDARY) {
2211 
2212                 /* Find pin for client to prev */
2213                 NVDispEvoPtr pDispEvoPrev = pDispEvoOrder[i - 1];
2214                 NvU32 headPrev = 0;
2215 
2216                 for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) {
2217                     if (headInUse[i - 1][j]) {
2218                         headPrev = j;
2219                         break;
2220                     }
2221                 }
2222 
2223                 GetRasterLockPin(pDispEvo, head[0],
2224                                  pDispEvoPrev, headPrev,
2225                                  NULL, pClientPin);
2226             }
2227 
2228             if (!pDevEvo->gpus[sd].scanLockState(pDispEvo, &pDevEvo->gpus[sd],
2229                                                  action, head)) {
2230                 nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
2231                                   "Unable to set up SLI locking");
2232             } else {
2233                 gpusLocked = TRUE;
2234             }
2235         }
2236 
2237         /*
2238          * On certain GPUs, we need to enable the video bridge (MIO pads) when
2239          * enabling rasterlock.  Note that we don't disable in this function,
2240          * so if gpusLocked is true for any iteration of these loops, this bit
2241          * will be on.
2242          */
2243         if (gpusLocked && NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
2244                 NV0073_CTRL_SYSTEM_CAPS_RASTER_LOCK_NEEDS_MIO_POWER)) {
2245             pDevEvo->sli.bridge.powerNeededForRasterLock = TRUE;
2246             nvEvoUpdateSliVideoBridge(pDevEvo);
2247         }
2248 
2249         /* If anything changed, update the hardware */
2250         if (gpusLocked) {
2251             UpdateEvoLockState();
2252         }
2253     }
2254 
2255     /* Enable fliplock, if we can */
2256     EnableFlipLockIfRequested(pLockGroup);
2257 }
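/*
 * Illustrative example (hypothetical three-GPU bridge chain A -> B -> C):
 * A receives NV_EVO_ADD_SLI_PRIMARY with its server lock pin routed to B;
 * B receives NV_EVO_ADD_SLI_SECONDARY with its client pin routed back to A
 * and its server pin routed on to C; C receives NV_EVO_ADD_SLI_LAST_SECONDARY
 * with only a client pin routed back to B.
 */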
2258 
2259 /*
2260  * Check if the given LockGroup matches the given FlipLockRequestedGroup.
2261  * This is true if the flip lock heads match the currently-active
2262  * heads on all pDispEvos.
2263  */
2264 static NvBool CheckLockGroupMatchFlipLockRequestedGroup(
2265     const NVLockGroup *pLockGroup,
2266     const FlipLockRequestedGroup *pFLRG)
2267 {
2268     const RasterLockGroup *pRasterLockGroup = &pLockGroup->rasterLockGroup;
2269     NvU32 disp, requestedDisp;
2270 
2271     /* Verify the number of disps is the same. */
2272     NvU32 numRequestedDisps = 0;
2273     for (requestedDisp = 0;
2274          requestedDisp < ARRAY_LEN(pFLRG->disp);
2275          requestedDisp++) {
2276         const NVDispEvoRec *pRequestedDispEvo =
2277             pFLRG->disp[requestedDisp].pDispEvo;
2278         if (pRequestedDispEvo == NULL) {
2279             break;
2280         }
2281         numRequestedDisps++;
2282     }
2283     if (numRequestedDisps != pRasterLockGroup->numDisps) {
2284         return FALSE;
2285     }
2286 
2287     /*
2288      * For each disp in the rasterlock group:
2289      * - If there is no matching disp in the pFLRG, no match
2290      * - If the disp's active head mask doesn't match the pFLRG's requested
2291      *   head mask for that disp, no match
2292      * If none of the conditions above failed, then we have a match.
2293      */
2294     for (disp = 0; disp < pRasterLockGroup->numDisps; disp++) {
2295         const NVDispEvoRec *pDispEvo = pRasterLockGroup->pDispEvoOrder[disp];
2296         NvBool found = FALSE;
2297         for (requestedDisp = 0;
2298              requestedDisp < ARRAY_LEN(pFLRG->disp);
2299              requestedDisp++) {
2300             const NVDispEvoRec *pRequestedDispEvo =
2301                 pFLRG->disp[requestedDisp].pDispEvo;
2302             if (pRequestedDispEvo == NULL) {
2303                 break;
2304             }
2305             if (pRequestedDispEvo == pDispEvo) {
2306                 if (pFLRG->disp[requestedDisp].flipLockHeads !=
2307                     nvGetActiveHeadMask(pDispEvo)) {
2308                     return FALSE;
2309                 }
2310                 found = TRUE;
2311                 break;
2312             }
2313         }
2314         if (!found) {
2315             return FALSE;
2316         }
2317     }
2318 
2319     return TRUE;
2320 }
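/*
 * Illustrative example (hypothetical topology): suppose a lock group spans
 * pDispA (active head mask 0x3) and pDispB (active head mask 0x1).  A
 * FlipLockRequestedGroup matches only if it lists exactly { pDispA, pDispB }
 * and requests flipLockHeads of exactly 0x3 on pDispA and 0x1 on pDispB; an
 * extra disp, a missing disp, or a differing head mask all yield FALSE.
 */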
2321 
2322 /*
2323  * Check if any requested fliplock groups match this lockgroup; if so, enable
2324  * fliplock on the lockgroup.
2325  */
2326 static void EnableFlipLockIfRequested(NVLockGroup *pLockGroup)
2327 {
2328     FlipLockRequestedGroup *pFLRG;
2329     nvListForEachEntry(pFLRG, &requestedFlipLockGroups, listEntry) {
2330         if (CheckLockGroupMatchFlipLockRequestedGroup(pLockGroup, pFLRG)) {
2331             EnableLockGroupFlipLock(pLockGroup);
2332             break;
2333         }
2334     }
2335 }
2336 
2337 /*
2338  * Check if there is an active NVLockGroup that matches the given
2339  * FlipLockRequestedGroup.
2340  * "Matches" means that the NVLockGroup extends to the exact same GPUs as the
2341  * FlipLockRequestedGroup, and that the *active* heads on those GPUs exactly
2342  * match the heads requested in the FlipLockRequestedGroup.
2343  */
2344 static NVLockGroup *FindMatchingLockGroup(const FlipLockRequestedGroup *pFLRG)
2345 {
2346     /* If there is an active lock group that matches this pFLRG, it must also
2347      * be active on the first disp, so we don't need to bother looping over
2348      * all disps. */
2349     NVLockGroup *pLockGroup = pFLRG->disp[0].pDispEvo->pLockGroup;
2350 
2351     if (pLockGroup != NULL &&
2352         CheckLockGroupMatchFlipLockRequestedGroup(pLockGroup, pFLRG)) {
2353         return pLockGroup;
2354     }
2355     return NULL;
2356 }
2357 
2358 /* Disable any currently-active lock groups that match the given pFLRG */
2359 static void
2360 DisableRequestedFlipLockGroup(const FlipLockRequestedGroup *pFLRG)
2361 {
2362     NVLockGroup *pLockGroup = FindMatchingLockGroup(pFLRG);
2363     if (pLockGroup != NULL) {
2364         DisableLockGroupFlipLock(pLockGroup);
2365 
2366         nvAssert(!pLockGroup->flipLockEnabled);
2367     }
2368 }
2369 
2370 /*
2371  * Check if there is a currently-active rasterlock group that matches the
2372  * disps/heads of this FlipLockRequestedGroup.  If so, enable flip lock between
2373  * those heads.
2374  */
2375 static void
2376 EnableRequestedFlipLockGroup(const FlipLockRequestedGroup *pFLRG)
2377 {
2378     NVLockGroup *pLockGroup = FindMatchingLockGroup(pFLRG);
2379     if (pLockGroup != NULL) {
2380         EnableLockGroupFlipLock(pLockGroup);
2381     }
2382 }
2383 
2384 /*
2385  * Convert the given API head mask to a HW head mask, using the
2386  * currently-active API head->HW head mapping.
2387  */
2388 static NvU32 ApiHeadMaskToHwHeadMask(
2389     const NVDispEvoRec *pDispEvo,
2390     const NvU32 apiHeadMask)
2391 {
2392     const NvU32 numHeads = pDispEvo->pDevEvo->numHeads;
2393     NvU32 apiHead;
2394     NvU32 hwHeadMask = 0;
2395 
2396     for (apiHead = 0; apiHead < numHeads; apiHead++) {
2397         if ((apiHeadMask & (1 << apiHead)) != 0) {
2398             const NVDispApiHeadStateEvoRec *pApiHeadState =
2399                 &pDispEvo->apiHeadState[apiHead];
2400             if (nvApiHeadIsActive(pDispEvo, apiHead)) {
2401                 hwHeadMask |= pApiHeadState->hwHeadsMask;
2402             }
2403         }
2404     }
2405 
2406     return hwHeadMask;
2407 }
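/*
 * Illustrative example (hypothetical mapping): if API head 0 is active and
 * drives HW heads 0 and 1 (hwHeadsMask 0x3, e.g. in a 2-head-1-OR merge
 * configuration) while API head 2 is inactive, then
 * ApiHeadMaskToHwHeadMask(pDispEvo, 0x5) returns 0x3: the inactive API head
 * contributes no HW heads.
 */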
2408 
2409 /*
2410  * Return true if all main channels are idle on the heads specified in the
2411  * FlipLockRequestedGroup.
2412  */
2413 static NvBool CheckFlipLockGroupIdle(
2414     const FlipLockRequestedGroup *pFLRG)
2415 {
2416     NvU32 i;
2417 
2418     for (i = 0; i < ARRAY_LEN(pFLRG->disp); i++) {
2419         NVDispEvoPtr pDispEvo = pFLRG->disp[i].pDispEvo;
2420         if (pDispEvo != NULL) {
2421             NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2422             const NvU32 sd = pDispEvo->displayOwner;
2423             const NvU32 numHeads = pDevEvo->numHeads;
2424             NvU32 head;
2425 
2426             for (head = 0; head < numHeads; head++) {
2427                 NvBool isMethodPending;
2428                 if (!nvHeadIsActive(pDispEvo, head)) {
2429                     continue;
2430                 }
2431                 if (!pDevEvo->hal->IsChannelMethodPending(
2432                         pDevEvo,
2433                         pDevEvo->head[head].layer[NVKMS_MAIN_LAYER],
2434                         sd,
2435                         &isMethodPending) || isMethodPending) {
2436                     return FALSE;
2437                 }
2438             }
2439         }
2440     }
2441 
2442     return TRUE;
2443 }
2444 
2445 /*
2446  * Return true if all main channels are idle on each head in overlapping flip
2447  * lock groups.
2448  */
2449 static NvBool CheckOverlappingFlipLockRequestGroupsIdle(
2450     NVDevEvoRec *pDevEvo[NV_MAX_SUBDEVICES],
2451     const struct NvKmsSetFlipLockGroupRequest *pRequest)
2452 {
2453     NvU32 dev;
2454 
2455     /* Loop over the GPUs specified in this FlipLockGroupRequest */
2456     for (dev = 0; dev < NV_MAX_SUBDEVICES && pDevEvo[dev] != NULL; dev++) {
2457         NVDispEvoPtr pDispEvo;
2458         NvU32 sd;
2459 
2460         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo[dev]) {
2461             FlipLockRequestedGroup *pFLRG;
2462 
2463             if ((pRequest->dev[dev].requestedDispsBitMask & (1 << sd)) == 0) {
2464                 continue;
2465             }
2466 
2467             /*
2468              * For each specified GPU, search through existing requested
2469              * fliplock groups and find any that overlap with heads in this
2470              * request.
2471              *
2472              * Return FALSE if any overlapping fliplock groups are not idle.
2473              */
2474             nvListForEachEntry(pFLRG, &requestedFlipLockGroups, listEntry) {
2475                 NvU32 i;
2476                 for (i = 0; i < ARRAY_LEN(pFLRG->disp); i++) {
2477                     if (pFLRG->disp[i].pDispEvo == NULL) {
2478                         break;
2479                     }
2480                     if (pFLRG->disp[i].pDispEvo == pDispEvo) {
2481                         /* API heads requested for this disp by the client */
2482                         const NvU32 requestedApiHeadMask =
2483                             pRequest->dev[dev].disp[sd].requestedHeadsBitMask;
2484                         const NvU32 requestedHwHeadMask =
2485                             ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeadMask);
2486 
2487                         if ((requestedHwHeadMask &
2488                              pFLRG->disp[i].flipLockHeads) != 0) {
2489                             /* Match */
2490                             if (!CheckFlipLockGroupIdle(pFLRG)) {
2491                                 return FALSE;
2492                             }
2493                         }
2494                         break;
2495                     }
2496                 }
2497             }
2498         }
2499     }
2500 
2501     return TRUE;
2502 }
2503 
2504 /*
2505  * Disable and remove any FlipLockRequestGroups that contain any of the heads
2506  * in 'hwHeadsMask' on the given pDispEvo.
2507  */
2508 static void
2509 RemoveOverlappingFlipLockRequestGroupsOneDisp(
2510     NVDispEvoRec *pDispEvo,
2511     NvU32 hwHeadMask)
2512 {
2513     FlipLockRequestedGroup *pFLRG, *tmp;
2514 
2515     /*
2516      * For each specified GPU, search through existing requested
2517      * fliplock groups and find any that overlap with heads in this
2518      * request.
2519      *
2520      * For any that are found, disable fliplock and remove the
2521      * requested flip lock group.
2522      */
2523     nvListForEachEntry_safe(pFLRG, tmp, &requestedFlipLockGroups, listEntry) {
2524         NvU32 i;
2525 
2526         for (i = 0; i < ARRAY_LEN(pFLRG->disp); i++) {
2527             if (pFLRG->disp[i].pDispEvo == NULL) {
2528                 break;
2529             }
2530             if (pFLRG->disp[i].pDispEvo == pDispEvo) {
2531 
2532                 if ((hwHeadMask &
2533                      pFLRG->disp[i].flipLockHeads) != 0) {
2534                     /* Match */
2535                     DisableRequestedFlipLockGroup(pFLRG);
2536 
2537                     /* Remove from global list */
2538                     nvListDel(&pFLRG->listEntry);
2539                     nvFree(pFLRG);
2540                 }
2541                 break;
2542             }
2543         }
2544     }
2545 }
2546 
2547 /*
2548  * Disable and remove any FlipLockRequestGroups that contain any of the heads
2549  * specified in 'pRequest'.
2550  */
2551 static void
2552 RemoveOverlappingFlipLockRequestGroups(
2553     NVDevEvoRec *pDevEvo[NV_MAX_SUBDEVICES],
2554     const struct NvKmsSetFlipLockGroupRequest *pRequest)
2555 {
2556     NvU32 dev;
2557 
2558     /* Loop over the GPUs specified in this FlipLockGroupRequest */
2559     for (dev = 0; dev < NV_MAX_SUBDEVICES && pDevEvo[dev] != NULL; dev++) {
2560         NVDispEvoPtr pDispEvo;
2561         NvU32 sd;
2562 
2563         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo[dev]) {
2564             NvU32 requestedApiHeadMask, requestedHwHeadMask;
2565 
2566             if ((pRequest->dev[dev].requestedDispsBitMask & (1 << sd)) == 0) {
2567                 continue;
2568             }
2569 
2570             /* API heads requested for this disp by the client */
2571             requestedApiHeadMask =
2572                 pRequest->dev[dev].disp[sd].requestedHeadsBitMask;
2573             requestedHwHeadMask =
2574                 ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeadMask);
2575 
2576             RemoveOverlappingFlipLockRequestGroupsOneDisp(pDispEvo,
2577                                                           requestedHwHeadMask);
2578         }
2579     }
2580 }
2581 
2582 /*
2583  * Disable and remove any FlipLockRequestGroups that contain any of the heads
2584  * specified in 'pRequest'.
2585  */
2586 void nvEvoRemoveOverlappingFlipLockRequestGroupsForModeset(
2587     NVDevEvoPtr pDevEvo,
2588     const struct NvKmsSetModeRequest *pRequest)
2589 {
2590     NVDispEvoPtr pDispEvo;
2591     NvU32 sd;
2592 
2593     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
2594         NvU32 requestedApiHeadMask, requestedHwHeadMask;
2595 
2596         if ((pRequest->requestedDispsBitMask & (1 << sd)) == 0) {
2597             continue;
2598         }
2599 
2600         /* API heads requested for this disp by the client */
2601         requestedApiHeadMask =
2602             pRequest->disp[sd].requestedHeadsBitMask;
2603         requestedHwHeadMask =
2604             ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeadMask);
2605 
2606         RemoveOverlappingFlipLockRequestGroupsOneDisp(pDispEvo,
2607                                                       requestedHwHeadMask);
2608     }
2609 }
2610 
2611 /*!
2612  * Handle an NVKMS_IOCTL_SET_FLIPLOCK_GROUP request.  This assumes that the
2613  * request was already validated by nvkms.c:SetFlipLockGroup().
2614  *
2615  * param[in]  pDevEvo  Array of NVDevEvoPtr pointers, in the same order as
2616  *                     the deviceHandles were specified in the request.
2617  * param[in]  pRequest The ioctl request.
2618  */
2619 NvBool
2620 nvSetFlipLockGroup(NVDevEvoRec *pDevEvo[NV_MAX_SUBDEVICES],
2621                    const struct NvKmsSetFlipLockGroupRequest *pRequest)
2622 {
2623     FlipLockRequestedGroup *pFLRG = NULL;
2624 
2625     /* Construct the new FlipLockRequestedGroup first, so if it fails we can
2626      * return before removing overlapping groups. */
2627     if (pRequest->enable) {
2628         NvU32 dev, disp;
2629 
2630         pFLRG = nvCalloc(1, sizeof(*pFLRG));
2631         if (pFLRG == NULL) {
2632             goto fail;
2633         }
2634 
2635         disp = 0;
2636         for (dev = 0; dev < NV_MAX_SUBDEVICES && pDevEvo[dev] != NULL; dev++) {
2637             NVDispEvoPtr pDispEvo;
2638             NvU32 sd;
2639 
2640             FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo[dev]) {
2641                 const NvU32 requestedApiHeads =
2642                     pRequest->dev[dev].disp[sd].requestedHeadsBitMask;
2643 
2644                 if ((pRequest->dev[dev].requestedDispsBitMask & (1 << sd)) == 0) {
2645                     continue;
2646                 }
2647 
2648                 if (disp >= ARRAY_LEN(pFLRG->disp)) {
2649                     nvAssert(!"FlipLockRequestedGroup::disp too short?");
2650                     goto fail;
2651                 }
2652 
2653                 pFLRG->disp[disp].pDispEvo = pDispEvo;
2654                 pFLRG->disp[disp].flipLockHeads =
2655                     ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeads);
2656                 disp++;
2657             }
2658         }
2659 
2660         if (!CheckFlipLockGroupIdle(pFLRG)) {
2661             nvEvoLogDebug(EVO_LOG_ERROR,
2662                           "Failed to request flip lock: group not idle");
2663             goto fail;
2664         }
2665     }
2666 
2667     if (!CheckOverlappingFlipLockRequestGroupsIdle(pDevEvo, pRequest)) {
2668         nvEvoLogDebug(EVO_LOG_ERROR,
2669                       "Failed to request flip lock: overlapping group(s) not idle");
2670         goto fail;
2671     }
2672 
2673     RemoveOverlappingFlipLockRequestGroups(pDevEvo, pRequest);
2674 
2675     if (pFLRG) {
2676         nvListAdd(&pFLRG->listEntry, &requestedFlipLockGroups);
2677 
2678         EnableRequestedFlipLockGroup(pFLRG);
2679     }
2680 
2681     return TRUE;
2682 
2683 fail:
2684     nvFree(pFLRG);
2685     return FALSE;
2686 }
2687 
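/*!
 * Program the usage bounds for the given head of the given subdevice through
 * the HAL, and cache them in the subdevice head state.  Returns TRUE if the
 * HAL reported that a core channel update is needed to apply them.
 */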
2688 NvBool nvSetUsageBoundsEvo(
2689     NVDevEvoPtr pDevEvo,
2690     const NvU32 sd,
2691     const NvU32 head,
2692     const struct NvKmsUsageBounds *pUsage,
2693     NVEvoUpdateState *updateState)
2694 {
2695     NvBool needCoreUpdate;
2696 
2697     nvPushEvoSubDevMask(pDevEvo, NVBIT(sd));
2698 
2699     needCoreUpdate = pDevEvo->hal->SetUsageBounds(pDevEvo, sd, head, pUsage,
2700                                                   updateState);
2701 
2702     nvPopEvoSubDevMask(pDevEvo);
2703 
2704     pDevEvo->gpus[sd].headState[head].usage = *pUsage;
2705 
2706     return needCoreUpdate;
2707 }
2708 
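/*!
 * Record whether the MID_FRAME and DWCF watermarks should be disabled for the
 * given head, and program the setting through the HAL if the current hardware
 * implements the EnableMidFrameAndDWCFWatermark hook.
 */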
2709 void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo,
2710                                       NvU32 sd,
2711                                       NvU32 head,
2712                                       NvBool enable,
2713                                       NVEvoUpdateState *pUpdateState)
2714 {
2715     pDevEvo->gpus[sd].headState[head].
2716         disableMidFrameAndDWCFWatermark = !enable;
2717 
2718     if (pDevEvo->hal->EnableMidFrameAndDWCFWatermark == NULL) {
2719         nvEvoLogDev(pDevEvo,
2720                     EVO_LOG_ERROR,
2721                     "EnableMidFrameAndDWCFWatermark() is not defined");
2722         return;
2723     }
2724 
2725     pDevEvo->hal->EnableMidFrameAndDWCFWatermark(pDevEvo,
2726                                                  sd,
2727                                                  head,
2728                                                  enable,
2729                                                  pUpdateState);
2730 }
2731 
2732 NvBool nvGetDefaultColorSpace(
2733     const NVColorFormatInfoRec *pColorFormatsInfo,
2734     enum NvKmsDpyAttributeCurrentColorSpaceValue *pColorSpace,
2735     enum NvKmsDpyAttributeColorBpcValue *pColorBpc)
2736 {
2737     if (pColorFormatsInfo->rgb444.maxBpc !=
2738             NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
2739         *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
2740         *pColorBpc = pColorFormatsInfo->rgb444.maxBpc;
2741         return TRUE;
2742     }
2743 
2744     if (pColorFormatsInfo->yuv444.maxBpc !=
2745             NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
2746         *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444;
2747         *pColorBpc = pColorFormatsInfo->yuv444.maxBpc;
2748         return TRUE;
2749     }
2750 
2751     if (pColorFormatsInfo->yuv422.maxBpc !=
2752             NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
2753         *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422;
2754         *pColorBpc = pColorFormatsInfo->yuv422.maxBpc;
2755         return TRUE;
2756     }
2757 
2758     return FALSE;
2759 }
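/*
 * Illustrative example: nvGetDefaultColorSpace() prefers RGB 4:4:4, then
 * YCbCr 4:4:4, then YCbCr 4:2:2.  For a (hypothetical) dpy reporting
 * rgb444.maxBpc == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN but
 * yuv444.maxBpc == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8, it returns
 * YCbCr444 at 8 bpc; if no format reports a known bpc, it returns FALSE and
 * the caller must pick its own fallback.
 */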
2760 
2761 NvBool nvChooseColorRangeEvo(
2762     enum NvKmsOutputColorimetry colorimetry,
2763     const enum NvKmsDpyAttributeColorRangeValue requestedColorRange,
2764     const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
2765     const enum NvKmsDpyAttributeColorBpcValue colorBpc,
2766     enum NvKmsDpyAttributeColorRangeValue *pColorRange)
2767 {
2768     /* Hardware supports BPC_6 only for RGB */
2769     nvAssert((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) ||
2770                 (colorBpc != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6));
2771 
2772     if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) &&
2773             (colorBpc == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6)) {
2774         /* At depth 18 only RGB and full range are allowed */
2775         if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
2776             /* BT2100 requires limited color range */
2777             return FALSE;
2778         }
2779         *pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
2780     } else if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) ||
2781                (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) ||
2782                (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420) ||
2783                (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100)) {
2784         /* Both YUV and BT2100 colorimetry require limited color range. */
2785         *pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED;
2786     } else {
2787         *pColorRange = requestedColorRange;
2788     }
2789 
2790     return TRUE;
2791 }
2792 
2793 /*!
2794  * Choose current colorSpace and colorRange for the given dpy based on
2795  * the dpy's color format capabilities, the given modeset parameters (YUV420
2796  * mode and output transfer function) and the requested color space and range.
2797  *
2798  * This needs to be called during a modeset as well as when the requested color
2799  * space or range have changed.
2800  *
2801  * If SW YUV420 mode is enabled, EVO HW is programmed with default (RGB color
2802  * space, FULL color range) values, and the real values are used in a
2803  * headSurface composite shader.
2804  */
2805 NvBool nvChooseCurrentColorSpaceAndRangeEvo(
2806     const NVDpyEvoRec *pDpyEvo,
2807     const NVHwModeTimingsEvo *pHwTimings,
2808     NvU8 hdmiFrlBpc,
2809     enum NvKmsOutputColorimetry colorimetry,
2810     const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace,
2811     const enum NvKmsDpyAttributeColorRangeValue requestedColorRange,
2812     enum NvKmsDpyAttributeCurrentColorSpaceValue *pCurrentColorSpace,
2813     enum NvKmsDpyAttributeColorBpcValue *pCurrentColorBpc,
2814     enum NvKmsDpyAttributeColorRangeValue *pCurrentColorRange)
2815 {
2816     enum NvKmsDpyAttributeCurrentColorSpaceValue newColorSpace =
2817         NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
2818     enum NvKmsDpyAttributeColorBpcValue newColorBpc =
2819         NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10;
2820     enum NvKmsDpyAttributeColorRangeValue newColorRange =
2821         NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
2822     const NVColorFormatInfoRec colorFormatsInfo =
2823         nvGetColorFormatInfo(pDpyEvo);
2824 
2825     // XXX HDR TODO: Handle other colorimetries
2826     // XXX HDR TODO: Handle YUV
2827     if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
2828         /*
2829          * If the head currently has BT2100 colorimetry, we override the
2830          * requested color space with RGB.  We cannot support yuv420Mode in
2831          * that configuration, so fail in that case.
2832          */
2833         if (pHwTimings->yuv420Mode != NV_YUV420_MODE_NONE) {
2834             return FALSE;
2835         }
2836 
2837         newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
2838         newColorBpc = colorFormatsInfo.rgb444.maxBpc;
2839     } else if (pHwTimings->yuv420Mode != NV_YUV420_MODE_NONE) {
2840         /*
2841          * If the current mode timing requires YUV420 compression, we override the
2842          * requested color space with YUV420.
2843          */
2844         newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420;
2845         newColorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
2846 
2847         nvAssert(colorFormatsInfo.rgb444.maxBpc >=
2848                     NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8);
2849     } else {
2850         /*
2851          * Note this is an assignment between different enum types: we check
2852          * the requested colorSpace value and explicitly assign the matching
2853          * current colorSpace value, to avoid warnings about cross-enum assignment.
2854          */
2855         switch (requestedColorSpace) {
2856         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB:
2857             newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
2858             newColorBpc = colorFormatsInfo.rgb444.maxBpc;
2859             break;
2860         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422:
2861             newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422;
2862             newColorBpc = colorFormatsInfo.yuv422.maxBpc;
2863             break;
2864         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444:
2865             newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444;
2866             newColorBpc = colorFormatsInfo.yuv444.maxBpc;
2867             break;
2868         default:
2869             nvAssert(!"Invalid Requested ColorSpace");
2870         }
2871 
2872         if ((newColorBpc ==
2873                 NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) &&
2874             !nvGetDefaultColorSpace(&colorFormatsInfo, &newColorSpace,
2875                                     &newColorBpc)) {
2876             return FALSE;
2877         }
2878     }
2879 
2880     /*
2881      * Downgrade BPC if HDMI configuration does not support current selection
2882      * with TMDS or FRL.
2883      */
2884     if (nvDpyIsHdmiEvo(pDpyEvo) &&
2885         nvHdmiTimingsNeedFrl(pDpyEvo, pHwTimings, newColorBpc) &&
2886         (newColorBpc > hdmiFrlBpc)) {
2887 
2888         newColorBpc =
2889             hdmiFrlBpc ? hdmiFrlBpc : NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
2890         nvAssert(newColorBpc >= NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8);
2891     }
2892 
2893     // 10 BPC required for HDR
2894     // XXX HDR TODO: Handle other colorimetries
2895     // XXX HDR TODO: Handle YUV
2896     if ((colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) &&
2897         (newColorBpc < NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10)) {
2898         return FALSE;
2899     }
2900 
2901     if (!nvChooseColorRangeEvo(colorimetry, requestedColorRange, newColorSpace,
2902                                newColorBpc, &newColorRange)) {
2903         return FALSE;
2903     }
2904 
2905     *pCurrentColorSpace = newColorSpace;
2906     *pCurrentColorRange = newColorRange;
2907     *pCurrentColorBpc = newColorBpc;
2908 
2909     return TRUE;
2910 }
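
/*
 * Illustrative sketch (assumed values, not driver state): how the selection
 * above behaves for a hypothetical HDMI sink whose FRL link is limited to
 * 8 bpc while its EDID advertises RGB at 10 bpc:
 *
 *   requestedColorSpace            = ..._REQUESTED_COLOR_SPACE_RGB
 *   colorFormatsInfo.rgb444.maxBpc = ..._COLOR_BPC_10
 *   hdmiFrlBpc                     = 8
 *
 *   => newColorSpace = ..._CURRENT_COLOR_SPACE_RGB
 *   => newColorBpc   = 10, then downgraded to 8 by the FRL check
 *   => newColorRange = requestedColorRange (RGB, non-BT2100 path)
 *
 * With colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100, the same sink would
 * fail instead, since BT2100 requires at least 10 bpc.
 */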
2911 
2912 void nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
2913     NVDispEvoPtr pDispEvo,
2914     const NvU32 head,
2915     enum NvKmsOutputColorimetry colorimetry,
2916     const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
2917     const enum NvKmsDpyAttributeColorRangeValue colorRange,
2918     NVEvoUpdateState *pUpdateState)
2919 {
2920     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2921     NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
2922     const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo;
2923 
2924     nvAssert(pConnectorEvo != NULL);
2925 
2926     // XXX HDR TODO: Support more output colorimetries
2927     if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
2928         nvAssert(pHeadState->timings.yuv420Mode == NV_YUV420_MODE_NONE);
2929         nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB);
2930         nvAssert(colorRange == NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED);
2931 
2932         pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_BT2020RGB;
2933         pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_LIMITED;
2934         pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB;
2935     } else if ((pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) &&
2936         (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)) {
2937         /*
2938          * In SW YUV420 mode, HW is programmed with RGB color space and full
2939          * color range.  The color space conversion and color range compression
2940          * happen in a headSurface composite shader.
2941          */
2942         pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
2943         pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL;
2944         pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB;
2945     } else {
2946 
2947         // Set default colorimetry to RGB and default color range to full
2948         pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
2949         pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL;
2950 
2951         // Set color format
2952         switch (colorSpace) {
2953         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
2954             pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB;
2955             break;
2956         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
2957             pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr444;
2958             break;
2959         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
2960             pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr422;
2961             break;
2962         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
2963             pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr420;
2964             break;
2965         default:
2966             nvAssert(!"unrecognized colorSpace");
2967         }
2968 
2969         switch (pConnectorEvo->legacyType) {
2970         case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP:
2971             // program HW with RGB/YCbCr
2972             switch (colorSpace) {
2973             case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
2974                 pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
2975                 break;
2976             case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
2977             case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
2978             case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
2979                 if (nvEvoIsHDQualityVideoTimings(&pHeadState->timings)) {
2980                     pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_709;
2981                 } else {
2982                     pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_601;
2983                 }
2984                 break;
2985             default:
2986                 nvAssert(!"unrecognized colorSpace");
2987             }
2988             break;
2989         case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT:
2990             // colorSpace isn't used for DEVICE_TYPE_CRT and
2991             // hence should be set to the "unchanged" value
2992             // (i.e. the default - RGB)
2993             nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB);
2994 
2995             // program HW with RGB only
2996             pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
2997             break;
2998         default:
2999             nvAssert(!"ERROR: invalid pDpyEvo->type");
3000         }
3001 
3002         /* YCbCr444 should be advertised only for DisplayPort and HDMI */
3003         nvAssert((colorSpace != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) ||
3004                     nvConnectorUsesDPLib(pConnectorEvo) ||
3005                     pConnectorEvo->isHdmiEnabled);
3006 
3007         /* YCbCr422 should be advertised only for HDMI and DP on supported GPUs */
3008         nvAssert((colorSpace != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) ||
3009                      (((pDevEvo->caps.hdmiYCbCr422MaxBpc != 0) &&
3010                        pConnectorEvo->isHdmiEnabled)) ||
3011                       ((pDevEvo->caps.dpYCbCr422MaxBpc != 0) &&
3012                        nvConnectorUsesDPLib(pConnectorEvo)));
3013 
3014         switch (colorRange) {
3015         case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL:
3016             pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL;
3017             break;
3018         case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED:
3019             pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_LIMITED;
3020             break;
3021         default:
3022             nvAssert(!"Invalid colorRange");
3023             break;
3024         }
3025     }
3026 
3027     // In YUV colorimetry, only limited color range is allowed.
3028     nvAssert(!((pHeadState->procAmp.colorimetry != NVT_COLORIMETRY_RGB) &&
3029                (pHeadState->procAmp.colorRange != NVT_COLOR_RANGE_LIMITED)));
3030 
3031     // Limited color range is not allowed with 18bpp mode
3032     nvAssert(!((pHeadState->pixelDepth == NVKMS_PIXEL_DEPTH_18_444) &&
3033                (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED)));
3034 
3035     nvPushEvoSubDevMaskDisp(pDispEvo);
3036 
3037     // Set the procamp head method
3038     pDevEvo->hal->SetProcAmp(pDispEvo, head, pUpdateState);
3039 
3040     // Clean up
3041     nvPopEvoSubDevMask(pDevEvo);
3042 }
3043 
3044 void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo,
3045                            const NvU32 head, NVEvoUpdateState *pUpdateState)
3046 {
3047     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
3048     const NVDispHeadStateEvoPtr pHeadState = &pDispEvo->headState[head];
3049     const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
3050     const enum nvKmsPixelDepth pixelDepth = pHeadState->pixelDepth;
3051     NvBool colorSpaceOverride = FALSE;
3052 
3053     /*
3054      * Determine whether or not this dpy will need its color space
3055      * overridden.
3056      *
3057      * This is currently only used for DP 1.3 YUV420 mode, where the
3058      * HW's normal support for carrying color space information
3059      * together with the frame is insufficient.
3060      */
3061     if ((pTimings->yuv420Mode == NV_YUV420_MODE_SW) &&
3062         nvConnectorUsesDPLib(pHeadState->pConnectorEvo)) {
3063 
3064         nvAssert(pDispEvo->pDevEvo->caps.supportsDP13);
3065         colorSpaceOverride = TRUE;
3066     }
3067 
3068     // Only set up the actual output for SLI primary.
3069     nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);
3070 
3071     pDevEvo->hal->HeadSetControlOR(pDevEvo, head, pTimings, pixelDepth,
3072                                    colorSpaceOverride,
3073                                    pUpdateState);
3074 
3075     nvPopEvoSubDevMask(pDevEvo);
3076 }
3077 
3078 static const struct {
3079     NvU32 algo;
3080     enum NvKmsDpyAttributeCurrentDitheringModeValue nvKmsDitherMode;
3081 } ditherModeTable[] = {
3082     { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2,
3083       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2 },
3084     { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2,
3085       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2 },
3086     { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL,
3087       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL },
3088     { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN,
3089       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE }
3090 };
3091 
3092 static const struct {
3093     NvU32 type;
3094     enum NvKmsDpyAttributeCurrentDitheringDepthValue nvKmsDitherDepth;
3095 } ditherDepthTable[] = {
3096     { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS,
3097       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS },
3098     { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS,
3099       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS },
3100     { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF,
3101       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE }
3102 };
3103 
3104 /*!
3105  * Choose dithering based on the requested dithering config
3106  * NVConnectorEvo::or::dither.
3107  */
3108 void nvChooseDitheringEvo(
3109     const NVConnectorEvoRec *pConnectorEvo,
3110     enum NvKmsDpyAttributeColorBpcValue bpc,
3111     const NVDpyAttributeRequestedDitheringConfig *pReqDithering,
3112     NVDpyAttributeCurrentDitheringConfig *pCurrDithering)
3113 {
3114     NvU32 i;
3115     NVDpyAttributeCurrentDitheringConfig currDithering = {
3116         .enabled = FALSE,
3117         .mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE,
3118         .depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE,
3119     };
3120 
3121     currDithering.enabled = (pConnectorEvo->or.ditherType !=
3122                                 NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF);
3123 
3124     for (i = 0; i < ARRAY_LEN(ditherDepthTable); i++) {
3125         if (ditherDepthTable[i].type == pConnectorEvo->or.ditherType) {
3126             currDithering.depth = ditherDepthTable[i].nvKmsDitherDepth;
3127             break;
3128         }
3129     }
3130 
3131     for (i = 0; i < ARRAY_LEN(ditherModeTable); i++) {
3132         if (ditherModeTable[i].algo == pConnectorEvo->or.ditherAlgo) {
3133             currDithering.mode = ditherModeTable[i].nvKmsDitherMode;
3134             break;
3135         }
3136     }
3137 
3138     switch (pReqDithering->state) {
3139     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED:
3140         currDithering.enabled = TRUE;
3141         break;
3142     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED:
3143         currDithering.enabled = FALSE;
3144         break;
3145     default:
3146         nvAssert(!"Unknown Dithering configuration");
3147         // Fall through
3148     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO:
3149         /*
3150          * Leave it initialized
3151          * based on the value of NVDpyEvoRec::or::dither::init::enabled.
3152          */
3153         break;
3154     }
3155 
3156     switch (pReqDithering->depth) {
3157     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS:
3158         currDithering.depth =
3159             NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS;
3160         break;
3161     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS:
3162         currDithering.depth =
3163             NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS;
3164         break;
3165     default:
3166         nvAssert(!"Unknown Dithering Depth");
3167         // Fall through
3168     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO:
3169         /*
3170          * Leave it initialized
3171          * based on the value of NVDpyEvoRec::or::dither::init::type.
3172          */
3173         break;
3174     }
3175 
3176 
3177     if (nvConnectorUsesDPLib(pConnectorEvo) &&
3178         (pReqDithering->state !=
3179             NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED)) {
3180         NvU32 lutBits = 11;
3181 
3182         /* If we are using a DisplayPort panel with bandwidth constraints
3183          * that lower the color depth, take that into account when applying
3184          * dithering effects.
3185          */
3186         if (bpc == 0) {
3187             nvAssert(!"Unknown dpBits");
3188             bpc = 8;
3189         }
3190 
3191         /*
3192          * If fewer than 8 DP bits are available, dither.  Ideally we'd
3193          * dither from lutBits > 10 to 10 bpc, but EVO doesn't have an
3194          * option for that.
3195          *
3196          * XXX TODO: nvdisplay can dither to 10 bpc.
3197          */
3198         if ((bpc <= 8) && (lutBits > bpc)) {
3199             if (pReqDithering->state ==
3200                     NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO) {
3201                 currDithering.enabled = TRUE;
3202             }
3203         }
3204 
3205         if (pReqDithering->depth ==
3206                 NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO) {
3207             if (bpc <= 6) {
3208                 currDithering.depth =
3209                     NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS;
3210             } else if (bpc <= 8) {
3211                 currDithering.depth =
3212                     NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS;
3213             }
3214         }
3215     }
3216 
3217     if (currDithering.enabled) {
3218         switch (pReqDithering->mode) {
3219         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL:
3220             currDithering.mode =
3221                 NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL;
3222             break;
3223         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2:
3224             currDithering.mode =
3225                 NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2;
3226             break;
3227         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2:
3228             currDithering.mode =
3229                 NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2;
3230             break;
3231         default:
3232             nvAssert(!"Unknown Dithering Mode");
3233             // Fall through
3234         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO:
3235             /*
3236              * Leave it initialized
3237              * based on the value of NVDpyEvoRec::or::dither::init::algo.
3238              */
3239             break;
3240         }
3241     } else {
3242         currDithering.depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE;
3243         currDithering.mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE;
3244     }
3245 
3246     *pCurrDithering = currDithering;
3247 }
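
/*
 * Illustrative sketch (assumed panel, not driver state): for a DisplayPort
 * panel limited to 6 bpc with state, depth, and mode all requested as AUTO,
 * the logic above yields:
 *
 *   bpc = 6, lutBits = 11
 *   => currDithering.enabled = TRUE  (bpc <= 8 and lutBits > bpc)
 *   => currDithering.depth   = ..._CURRENT_DITHERING_DEPTH_6_BITS
 *   => currDithering.mode    = whatever the connector's init algo maps to
 */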
3248 
3249 void nvSetDitheringEvo(
3250     NVDispEvoPtr pDispEvo,
3251     const NvU32 head,
3252     const NVDpyAttributeCurrentDitheringConfig *pCurrDithering,
3253     NVEvoUpdateState *pUpdateState)
3254 {
3255     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
3256     NvU32 i;
3257     NvU32 algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN;
3258     NvU32 type = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF;
3259     NvU32 enabled = pCurrDithering->enabled;
3260 
3261     for (i = 0; i < ARRAY_LEN(ditherModeTable); i++) {
3262         if (ditherModeTable[i].nvKmsDitherMode == pCurrDithering->mode) {
3263             algo = ditherModeTable[i].algo;
3264             break;
3265         }
3266     }
3267     nvAssert(i < ARRAY_LEN(ditherModeTable));
3268 
3269     for (i = 0; i < ARRAY_LEN(ditherDepthTable); i++) {
3270         if (ditherDepthTable[i].nvKmsDitherDepth == pCurrDithering->depth) {
3271             type = ditherDepthTable[i].type;
3272             break;
3273         }
3274     }
3275     nvAssert(i < ARRAY_LEN(ditherDepthTable));
3276 
3277     /*
3278      * Make sure algo is a recognizable value that we will be able to program
3279      * in hardware.
3280      */
3281     if (algo == NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN) {
3282         algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2;
3283     }
3284 
3285     nvPushEvoSubDevMaskDisp(pDispEvo);
3286     pDevEvo->hal->SetDither(pDispEvo, head, enabled, type, algo,
3287                             pUpdateState);
3288     nvPopEvoSubDevMask(pDevEvo);
3289 }
3290 
3291 /*
3292  * HeadCanStereoLock() - Return whether or not this head can use stereo lock
3293  * mode.  This can only be called from UpdateEvoLockState, when the pending
3294  * interlaced/locked values are still in the head control assembly structure.
3295  */
3296 static NvBool HeadCanStereoLock(NVDevEvoPtr pDevEvo, int sd, int head)
3297 {
3298     NVEvoHeadControlPtr pHC = &pDevEvo->gpus[sd].headControlAssy[head];
3299 
3300     return (!pHC->interlaced && !pHC->mergeMode &&
3301             ((pHC->serverLock != NV_EVO_NO_LOCK) ||
3302              (pHC->clientLock != NV_EVO_NO_LOCK)));
3303 }
3304 
3305 /*
3306  * SetStereoLockMode() - For stereo lock mode, we need to notify
3307  * the gsync board that this GPU requires stereo lock mode.
3308  */
3309 static NvBool SetStereoLockMode(NVDispEvoPtr pDispEvo, NvBool stereoLocked)
3310 {
3311     NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS
3312         statusParams = { 0 };
3313     NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
3314 
3315     if (!pFrameLockEvo ||
3316         ((pFrameLockEvo->boardId != NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2060) &&
3317         (pFrameLockEvo->boardId != NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061))) {
3318         return TRUE;
3319     }
3320 
3321     statusParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
3322     statusParams.enable = stereoLocked ? 1 : 0;
3323 
3324     if (nvRmApiControl(nvEvoGlobal.clientHandle,
3325                        pFrameLockEvo->device,
3326                        NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE,
3327                        &statusParams,
3328                        sizeof(statusParams)) != NVOS_STATUS_SUCCESS) {
3329         nvAssert(!"Failed to set stereo lock mode");
3330         return FALSE;
3331     }
3332 
3333     return TRUE;
3334 }
3335 
3336 /*
3337  * SyncEvoLockState()
3338  *
3339  * Set the Assembly state based on the current Armed state.  This should be
3340  * called before transitioning between states in the EVO state machine.
3341  */
3342 static void SyncEvoLockState(void)
3343 {
3344     NVDispEvoPtr pDispEvo;
3345     unsigned int sd;
3346     NVDevEvoPtr pDevEvo;
3347 
3348     FOR_ALL_EVO_DEVS(pDevEvo) {
3349 
3350         if (!pDevEvo->gpus) {
3351             continue;
3352         }
3353 
3354         if (pDevEvo->displayHandle == 0) {
3355             continue;
3356         }
3357 
3358         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
3359             NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
3360             NvU32 updateHeadMask = nvGetActiveHeadMask(pDispEvo);
3361             unsigned int head;
3362 
3363             /* Update the cached HEAD_SET_CONTROL EVO method state */
3364             FOR_ALL_HEADS(head, updateHeadMask) {
3365                 pEvoSubDev->headControlAssy[head] =
3366                     pEvoSubDev->headControl[head];
3367 
3368                 /*
3369                  * The following are probably not necessary, since no other
3370                  * code touches them (as opposed to headControl above which
3371                  * is updated beyond the scope of the state machine).  But
3372                  * update them here anyway to be consistent.
3373                  */
3374                 pEvoSubDev->frameLockClientMaskAssy =
3375                     pEvoSubDev->frameLockClientMaskArmed;
3376                 pEvoSubDev->frameLockServerMaskAssy =
3377                     pEvoSubDev->frameLockServerMaskArmed;
3378                 pEvoSubDev->frameLockExtRefClkMaskAssy =
3379                     pEvoSubDev->frameLockExtRefClkMaskArmed;
3380             }
3381         }
3382     }
3383 }
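
/*
 * A minimal sketch of the assembly/armed pattern used by this state machine
 * (illustrative, not driver code): SyncEvoLockState() copies armed state into
 * the assembly state, the state machine mutates only the assembly copy, and
 * UpdateEvoLockState() later diffs the two and pushes the differences to
 * hardware, e.g.:
 *
 *   needsDisableMask = armedMask & ~assyMask;
 *   needsEnableMask  = assyMask  & ~armedMask;
 */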
3384 
3385 /*
3386  * Determine a unique index for the given (pDevEvo, sd) tuple.
3387  * This is used to index into an array of size NV_MAX_DEVICES.
3388  *
3389  * It would be more straightforward to use a two-dimensional array of
3390  * NV_MAX_DEVICES x NV_MAX_SUBDEVICES and index by (devIndex, sd), but
3391  * that makes the array too large to fit on the stack.  This is safe because
3392  * we should only ever have at most NV_MAX_DEVICES GPUs in the system
3393  * total, although at any given time they may be split into many single-GPU
3394  * devices or a small number of many-GPU SLI devices.
3395  */
3396 static NvU32 GpuIndex(const NVDevEvoRec *pDevEvo, NvU32 sd)
3397 {
3398     const NVDevEvoRec *pDevEvoIter;
3399     NvU32 index = 0;
3400 
3401     nvAssert(sd < pDevEvo->numSubDevices);
3402 
3403     FOR_ALL_EVO_DEVS(pDevEvoIter) {
3404         if (pDevEvoIter == pDevEvo) {
3405             index += sd;
3406             nvAssert(index < NV_MAX_DEVICES);
3407             return index;
3408         }
3409         index += pDevEvoIter->numSubDevices;
3410     }
3411 
3412     nvAssert(!"Failed to look up GPU index");
3413     return 0;
3414 }
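
/*
 * Worked example (hypothetical configuration): with two devices enumerated by
 * FOR_ALL_EVO_DEVS, the first having 2 subdevices, GpuIndex(secondDev, 1)
 * first walks past the first device (index += 2), then adds sd on the match,
 * returning 3.
 */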
3415 
3416 NvU32 nvGetRefreshRate10kHz(const NVHwModeTimingsEvo *pTimings)
3417 {
3418     const NvU32 totalPixels = pTimings->rasterSize.x * pTimings->rasterSize.y;
3419 
3420     /*
3421      * pTimings->pixelClock is in 1000/s
3422      * we want 0.0001/s
3423      * factor = 1000/0.0001 = 10000000.
3424      */
3425     NvU32 factor = 10000000;
3426 
3427     if (pTimings->doubleScan) factor /= 2;
3428     if (pTimings->interlaced) factor *= 2;
3429 
3430     if (totalPixels == 0) {
3431         return 0;
3432     }
3433 
3434     return axb_div_c(pTimings->pixelClock, factor, totalPixels);
3435 }
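
/*
 * Worked example (standard 1080p60 CEA-861 timings, for illustration only):
 * pixelClock = 148500 (kHz) and rasterSize = 2200 x 1125, so
 *
 *   axb_div_c(148500, 10000000, 2475000) = 600000
 *
 * i.e. 60.0000 Hz in 0.0001 Hz units.  doubleScan halves the result and
 * interlaced doubles it via the 'factor' adjustments above.
 */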
3436 
3437 /*!
3438  * Get the current refresh rate for the heads in headMask, in 0.0001 Hz units.
3439  * All heads in headMask are expected to have the same refresh rate.
3440  */
3441 static NvU32 GetRefreshRateHeadMask10kHz(const NVDispEvoRec *pDispEvo,
3442                                          NvU32 headMask)
3443 {
3444     const NVHwModeTimingsEvo *pTimings = NULL;
3445     NvU32 head;
3446 
3447     FOR_ALL_HEADS(head, headMask) {
3448         const NVDispHeadStateEvoRec *pHeadState =
3449             &pDispEvo->headState[head];
3450 
3451         if (head >= pDispEvo->pDevEvo->numHeads ||
3452                 pHeadState->activeRmId == 0x0) {
3453             continue;
3454         }
3455 
3456         if (pTimings == NULL) {
3457             pTimings = &pHeadState->timings;
3458         } else {
3459             nvAssert(pTimings->rasterSize.x ==
3460                         pHeadState->timings.rasterSize.x);
3461             nvAssert(pTimings->rasterSize.y ==
3462                         pHeadState->timings.rasterSize.y);
3463             nvAssert(pTimings->doubleScan == pHeadState->timings.doubleScan);
3464             nvAssert(pTimings->interlaced == pHeadState->timings.interlaced);
3465             nvAssert(pTimings->pixelClock == pHeadState->timings.pixelClock);
3466         }
3467     }
3468 
3469     if (pTimings == NULL) {
3470         return 0;
3471     }
3472 
3473     return nvGetRefreshRate10kHz(pTimings);
3474 }
3475 
3476 /*!
3477  * Return the mask of RM display IDs for the heads in the given head mask.
3478  */
3479 static NvU32 HeadMaskToActiveRmIdMask(const NVDispEvoRec *pDispEvo,
3480                                       const NvU32 headMask)
3481 {
3482     NvU32 head;
3483     NvU32 rmDisplayMask = 0;
3484 
3485     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
3486         if ((NVBIT(head) & headMask) != 0x0) {
3487             rmDisplayMask |=
3488                 pDispEvo->headState[head].activeRmId;
3489         }
3490     }
3491 
3492     return rmDisplayMask;
3493 }
3494 
3495 static NvBool FramelockSetControlSync(NVDispEvoPtr pDispEvo, const NvU32 headMask,
3496                                   NvBool server)
3497 {
3498     NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
3499     NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS gsyncSetControlSyncParams = { 0 };
3500     NvU32 ret;
3501 
3502     /* There can only be one server. */
3503 
3504     nvAssert(!server || (nvPopCount32(headMask) == 1));
3505 
3506     gsyncSetControlSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
3507     gsyncSetControlSyncParams.master = server;
3508     gsyncSetControlSyncParams.displays =
3509         HeadMaskToActiveRmIdMask(pDispEvo, headMask);
3510 
3511     if (gsyncSetControlSyncParams.displays == 0x0) {
3512         return FALSE;
3513     }
3514 
3515     gsyncSetControlSyncParams.refresh =
3516         GetRefreshRateHeadMask10kHz(pDispEvo, headMask);
3517 
3518     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3519                          pFrameLockEvo->device,
3520                          NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC,
3521                          &gsyncSetControlSyncParams,
3522                          sizeof(gsyncSetControlSyncParams));
3523 
3524     if (ret != NVOS_STATUS_SUCCESS) {
3525         return FALSE;
3526     }
3527 
3528     return TRUE;
3529 }
3530 
3531 NvBool nvFramelockSetControlUnsyncEvo(NVDispEvoPtr pDispEvo, const NvU32 headMask,
3532                                       NvBool server)
3533 {
3534     NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
3535     NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS
3536         gsyncSetControlUnsyncParams = { 0 };
3537     NvU32 ret;
3538 
3539     gsyncSetControlUnsyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
3540     gsyncSetControlUnsyncParams.master = server;
3541     gsyncSetControlUnsyncParams.displays =
3542         HeadMaskToActiveRmIdMask(pDispEvo, headMask);
3543 
3544     if (gsyncSetControlUnsyncParams.displays == 0x0) {
3545         return FALSE;
3546     }
3547 
3548     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3549                          pFrameLockEvo->device,
3550                          NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC,
3551                          &gsyncSetControlUnsyncParams,
3552                          sizeof(gsyncSetControlUnsyncParams));
3553 
3554     if (ret != NVOS_STATUS_SUCCESS) {
3555         return FALSE;
3556     }
3557 
3558     return TRUE;
3559 }
3560 
3561 /*
3562  * UpdateEvoLockState()
3563  *
3564  * Update the hardware based on the Assembly state, if it is different from the
3565  * current Armed state.  This should be called after transitioning through
3566  * states in the EVO state machine to propagate all of the necessary values to
3567  * HW.
3568  */
3569 static void UpdateEvoLockState(void)
3570 {
3571     NVDispEvoPtr pDispEvo;
3572     NVFrameLockEvoPtr pFrameLockEvo;
3573     unsigned int sd;
3574     NVDevEvoPtr pDevEvo;
3575     NvBool ret;
3576     enum {
3577         FIRST_ITERATION,
3578         DISABLE_UNNEEDED_CLIENTS = FIRST_ITERATION,
3579         DISABLE_UNNEEDED_SERVER,
3580         COMPUTE_HOUSE_SYNC,
3581         UPDATE_HOUSE_SYNC,
3582         ENABLE_SERVER,
3583         ENABLE_CLIENTS,
3584         LAST_ITERATION = ENABLE_CLIENTS,
3585     } iteration;
3586     struct {
3587         unsigned char disableServer:1;
3588         unsigned char disableClient:1;
3589         unsigned char enableServer:1;
3590         unsigned char enableClient:1;
3591     } cache[NV_MAX_DEVICES][NVKMS_MAX_HEADS_PER_DISP];
3592 
3593     nvkms_memset(cache, 0, sizeof(cache));
3594 
3595     /* XXX NVKMS TODO: idle base channel, first? */
3596 
3597     /*
3598      * Stereo lock mode is enabled if all heads are either raster locked or
3599      * frame locked, and no head is driving an interlaced mode.
3600      */
3601     FOR_ALL_EVO_DEVS(pDevEvo) {
3602         if (!pDevEvo->gpus) {
3603             continue;
3604         }
3605         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
3606             NvBool gpuCanStereoLock = TRUE;
3607             NvBool testedOneHead = FALSE;
3608 
3609             /*
3610              * If at least one head is not locked or driving an interlaced
3611              * mode, then no heads on this GPU will use stereo lock mode.
3612              */
3613             NvU32 head;
3614             for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
3615                 NVEvoHeadControlPtr pHC = &pDevEvo->gpus[sd].headControlAssy[head];
3616 
3617                 if (!nvHeadIsActive(pDispEvo, head) ||
3618                     ((pHC->serverLock == NV_EVO_NO_LOCK) &&
3619                      (pHC->clientLock == NV_EVO_NO_LOCK))) {
3620                     /*
3621                      * If the heads aren't scan locked then we should skip
3622                      * them as if they aren't connected. NOTE this
3623                      * conservative approach means that we won't disable
3624                      * StereoLockMode when frameLock is turned off. This
3625                      * should be harmless.
3626                      */
3627                     continue;
3628                 }
3629                 testedOneHead = TRUE;
3630                 if (!HeadCanStereoLock(pDevEvo, sd, head)) {
3631                     gpuCanStereoLock = FALSE;
3632                 }
3633             }
3634             /*
3635              * Don't set StereoLockMode for screenless GPUs. As above we'll also
3636              * count heads that can't stereoLock as unconnected.
3637              */
3638             if (!testedOneHead) {
3639                 continue;
3640             }
3641 
3642             /*
3643              * Notify the framelock board whether or not we will use stereo
3644              * lock mode.  If that fails, don't enable stereo lock mode on
3645              * the GPU.
3646              */
3647             if (!SetStereoLockMode(pDispEvo, gpuCanStereoLock)) {
3648                 gpuCanStereoLock = FALSE;
3649             }
3650 
3651             /*
3652              * Cache whether or not we can use stereo lock mode, so we know
3653              * whether or not to enable stereo lock mode on the GPU during
3654              * SetHeadControl
3655              */
3656             for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
3657                 if (nvHeadIsActive(pDispEvo, head)) {
3658                     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
3659                     pEvoSubDev->headControlAssy[head].stereoLocked =
3660                         gpuCanStereoLock;
3661                 }
3662             }
3663         }
3664     }
3665 
3666     /*
3667      * Go through every GPU on the system, making its framelock state match the
3668      * assembly state that we've saved.
3669      *
3670      * We do this in six steps, in order to keep the overall system state sane
3671      * throughout:
3672      * 1. Disable any clients we no longer need
3673      * 2. Disable server we no longer need
3674      * 3. Compute which framelock devices need house sync
3675      * 4. Update framelock devices with new house sync info
3676      * 5. Enable new server
3677      * 6. Enable new clients
3678      */
3679     for (iteration = FIRST_ITERATION;
3680          iteration <= LAST_ITERATION;
3681          iteration++) {
3682 
3683         if (iteration == COMPUTE_HOUSE_SYNC) {
3684             /* First, clear assy state */
3685             FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) {
3686                 pFrameLockEvo->houseSyncAssy = FALSE;
3687             }
3688         }
3689 
3690         if (iteration == UPDATE_HOUSE_SYNC) {
3691             FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) {
3692                 /*
3693                  * Since nvFrameLockSetUseHouseSyncEvo sets house sync
3694                  * output mode in addition to house sync input mode and
3695                  * input polarity, this needs to be done unconditionally,
3696                  * even if a house sync state transition hasn't occurred.
3697                  */
3698                 if (!nvFrameLockSetUseHouseSyncEvo(
3699                         pFrameLockEvo, pFrameLockEvo->houseSyncAssy)) {
3700                     nvAssert(!"Setting house sync failed");
3701                 } else {
3702                     pFrameLockEvo->houseSyncArmed =
3703                         pFrameLockEvo->houseSyncAssy;
3704                 }
3705             }
3706 
3707             continue;
3708         }
3709 
3710         FOR_ALL_EVO_DEVS(pDevEvo) {
3711 
3712             if (!pDevEvo->gpus) {
3713                 continue;
3714             }
3715 
3716             if (pDevEvo->displayHandle == 0) {
3717                 /*
3718                  * This may happen during init, when setting initial modes on
3719                  * one device while other devices have not yet been allocated.
3720                  * Skip these devices for now; we'll come back later when
3721                  * they've been brought up.
3722                  */
3723                 continue;
3724             }
3725 
3726             FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
3727                 NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
3728                 NvBool server = FALSE;
3729                 NvU32 needsEnableMask = 0, needsDisableMask = 0;
3730                 unsigned int head;
3731 
3732                 switch (iteration) {
3733                 case COMPUTE_HOUSE_SYNC:
3734                     /* Accumulate house sync across pDisps */
3735                     if (pEvoSubDev->frameLockHouseSync) {
3736                         pDispEvo->pFrameLockEvo->houseSyncAssy = TRUE;
3737                     }
3738                     break;
3739                 case DISABLE_UNNEEDED_CLIENTS:
3740                     needsDisableMask = pEvoSubDev->frameLockClientMaskArmed &
3741                                        ~pEvoSubDev->frameLockClientMaskAssy;
3742                     server = FALSE;
3743                     break;
3744                 case DISABLE_UNNEEDED_SERVER:
3745                     needsDisableMask = pEvoSubDev->frameLockServerMaskArmed &
3746                                        ~pEvoSubDev->frameLockServerMaskAssy;
3747                     server = TRUE;
3748                     break;
3749                 case ENABLE_SERVER:
3750                     needsEnableMask = pEvoSubDev->frameLockServerMaskAssy &
3751                                       ~pEvoSubDev->frameLockServerMaskArmed;
3752                     server = TRUE;
3753                     break;
3754                 case ENABLE_CLIENTS:
3755                     needsEnableMask = pEvoSubDev->frameLockClientMaskAssy &
3756                                       ~pEvoSubDev->frameLockClientMaskArmed;
3757                     server = FALSE;
3758                     break;
3759                 case UPDATE_HOUSE_SYNC:
3760                     nvAssert(!"Shouldn't reach here");
3761                     break;
3762                 }
3763 
3764                 if (needsDisableMask) {
3765                     ret = nvFramelockSetControlUnsyncEvo(pDispEvo,
3766                                                          needsDisableMask,
3767                                                          server);
3768                     nvAssert(ret);
3769 
3770                     if (ret) {
3771                         if (server) {
3772                             pEvoSubDev->frameLockServerMaskArmed &=
3773                                 ~needsDisableMask;
3774 
3775                             FOR_ALL_HEADS(head, needsDisableMask) {
3776                                 cache[GpuIndex(pDevEvo, sd)][head].disableServer = TRUE;
3777                             }
3778                         } else {
3779                             pEvoSubDev->frameLockClientMaskArmed &=
3780                                 ~needsDisableMask;
3781 
3782                             FOR_ALL_HEADS(head, needsDisableMask) {
3783                                 cache[GpuIndex(pDevEvo, sd)][head].disableClient = TRUE;
3784                             }
3785                         }
3786                     }
3787                 }
3788                 if (needsEnableMask) {
3789                     ret = FramelockSetControlSync(pDispEvo,
3790                                                   needsEnableMask,
3791                                                   server);
3792 
3793                     nvAssert(ret);
3794 
3795                     if (ret) {
3796                         if (server) {
3797                             pEvoSubDev->frameLockServerMaskArmed |=
3798                                 needsEnableMask;
3799 
3800                             FOR_ALL_HEADS(head, needsEnableMask) {
3801                                 cache[GpuIndex(pDevEvo, sd)][head].enableServer = TRUE;
3802                             }
3803                         } else {
3804                             pEvoSubDev->frameLockClientMaskArmed |=
3805                                 needsEnableMask;
3806 
3807                             FOR_ALL_HEADS(head, needsEnableMask) {
3808                                 cache[GpuIndex(pDevEvo, sd)][head].enableClient = TRUE;
3809                             }
3810                         }
3811                     }
3812                 }
3813 
3814                 /* After the above process, we should have "promoted" assy
3815                  * to armed */
3816                 if (iteration == LAST_ITERATION) {
3817                     nvAssert(pEvoSubDev->frameLockServerMaskArmed ==
3818                              pEvoSubDev->frameLockServerMaskAssy);
3819                     nvAssert(pEvoSubDev->frameLockClientMaskArmed ==
3820                              pEvoSubDev->frameLockClientMaskAssy);
3821                 }
3822             }
3823         }
3824     }
3825 
3826     /*
3827      * Update the EVO HW state.  Use a separate set of loops so as not to
3828      * confuse the bookkeeping in the loops above.
3829      */
3830     FOR_ALL_EVO_DEVS(pDevEvo) {
3831 
3832         if (!pDevEvo->gpus) {
3833             continue;
3834         }
3835 
3836         if (pDevEvo->displayHandle == 0) {
3837             continue;
3838         }
3839 
3840         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
3841             NvBool needUpdate = FALSE;
3842             NVEvoUpdateState updateState = { };
3843             NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
3844             NvU32 extRefClkMaskAssy, extRefClkUpdateMask;
3845             NvU32 possibleHeadMask;
3846             NvBool refClkChanged[NVKMS_MAX_HEADS_PER_DISP] = { FALSE };
3847             unsigned int head;
3848 
3849             extRefClkMaskAssy = pEvoSubDev->frameLockExtRefClkMaskAssy;
3850 
3851             /* Set the external reference clock, if different */
3852             extRefClkUpdateMask = extRefClkMaskAssy ^
3853                 pEvoSubDev->frameLockExtRefClkMaskArmed;
3854 
3855             FOR_ALL_HEADS(head, extRefClkUpdateMask) {
3856                 NvBool extRefClkNeeded =
3857                     !!(extRefClkMaskAssy & (1 << head));
3858 
3859                 SetRefClk(pDevEvo, sd, head, extRefClkNeeded, &updateState);
3860                 refClkChanged[head] = TRUE;
3861 
3862                 /* Update armed state for this head */
3863                 pEvoSubDev->frameLockExtRefClkMaskArmed =
3864                     (pEvoSubDev->frameLockExtRefClkMaskArmed &
3865                      (~(1 << head))) |
3866                     (extRefClkMaskAssy & (1 << head));
3867             }
3868             /* After the above process, the armed state should match
3869              * assembly state */
3870             nvAssert(extRefClkMaskAssy ==
3871                      pEvoSubDev->frameLockExtRefClkMaskArmed);
3872 
3873             /* Update the HEAD_SET_CONTROL EVO method state */
3874 
3875             possibleHeadMask = nvGetActiveHeadMask(pDispEvo);
3876 
3877             FOR_ALL_HEADS(head, possibleHeadMask) {
3878                 if (nvkms_memcmp(&pEvoSubDev->headControl[head],
3879                                  &pEvoSubDev->headControlAssy[head],
3880                                  sizeof(NVEvoHeadControl))) {
3881 
3882                     nvPushEvoSubDevMask(pDevEvo, 1 << sd);
3883 
3884                     pEvoSubDev->headControl[head] =
3885                         pEvoSubDev->headControlAssy[head];
3886                     pDevEvo->hal->SetHeadControl(pDevEvo, sd, head,
3887                                                  &updateState);
3888                     needUpdate = TRUE;
3889 
3890                     nvPopEvoSubDevMask(pDevEvo);
3891                 } else if (refClkChanged[head]) {
3892                     needUpdate = TRUE;
3893                 }
3894             }
3895 
3896             if (needUpdate) {
3897                 nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
3898                                       TRUE /* releaseElv */);
3899             }
3900         }
3901     }
3902 
3903     /*
3904      * Inform GLS of framelock changes.  It uses this information to do things
3905      * like enable fake stereo to get stereo sync when stereo apps start
3906      * without flickering the displays.
3907      */
3908     for (iteration = FIRST_ITERATION;
3909          iteration <= LAST_ITERATION;
3910          iteration++) {
3911 
3912         FOR_ALL_EVO_DEVS(pDevEvo) {
3913 
3914             if (!pDevEvo->gpus) {
3915                 continue;
3916             }
3917 
3918             if (pDevEvo->displayHandle == 0) {
3919                 continue;
3920             }
3921 
3922             FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
3923                 NvU32 head;
3924                 for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
3925                     NvBool sendEvent = FALSE;
3926                     NvBool enable = FALSE, server = FALSE;
3927 
3928                     if (!nvHeadIsActive(pDispEvo, head)) {
3929                         continue;
3930                     }
3931 
3932                     switch (iteration) {
3933                     case DISABLE_UNNEEDED_CLIENTS:
3934                         if (cache[GpuIndex(pDevEvo, sd)][head].disableClient) {
3935                             enable = FALSE;
3936                             server = FALSE;
3937                             sendEvent = TRUE;
3938                         }
3939                         break;
3940                     case DISABLE_UNNEEDED_SERVER:
3941                         if (cache[GpuIndex(pDevEvo, sd)][head].disableServer) {
3942                             enable = FALSE;
3943                             server = TRUE;
3944                             sendEvent = TRUE;
3945                         }
3946                         break;
3947                     case ENABLE_SERVER:
3948                         if (cache[GpuIndex(pDevEvo, sd)][head].enableServer) {
3949                             enable = TRUE;
3950                             server = TRUE;
3951                             sendEvent = TRUE;
3952                         }
3953                         break;
3954                     case ENABLE_CLIENTS:
3955                         if (cache[GpuIndex(pDevEvo, sd)][head].enableClient) {
3956                             enable = TRUE;
3957                             server = FALSE;
3958                             sendEvent = TRUE;
3959                         }
3960                         break;
3961                     case UPDATE_HOUSE_SYNC:
3962                     case COMPUTE_HOUSE_SYNC:
3963                         sendEvent = FALSE;
3964                         break;
3965                     }
3966 
3967                     if (sendEvent) {
3968                         nvUpdateGLSFramelock(pDispEvo, head, enable, server);
3969                     }
3970                 }
3971             }
3972         }
3973     }
3974 }
3975 
3976 /*
3977  * For every head in the headMask on pDispEvo, construct a prioritized
3978  * list of heads and call into the EVO locking state machine to
3979  * perform the given transition.
3980  *
3981  * Return the list of heads that actually succeeded.
3982  */
3983 static NvU32 applyActionForHeads(NVDispEvoPtr pDispEvo,
3984                                  const NvU32 headMask,
3985                                  NVEvoLockAction action)
3986 {
3987     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
3988     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
3989     NvU32 appliedHeadMask = 0;
3990     NvU32 head;
3991 
3992     FOR_ALL_HEADS(head, headMask) {
3993         NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
3994         unsigned int i = 0;
3995         NvU32 tmpHead, usedHeadMask = 0;
3996 
3997         /* Fill in the array starting with this head, then with the others in
3998          * the list, and finally any other active heads */
3999         pHeads[i++] = head;
4000         usedHeadMask |= (1 << head);
4001 
4002         FOR_ALL_HEADS(tmpHead, headMask) {
4003             if (usedHeadMask & (1 << tmpHead)) {
4004                 continue;
4005             }
4006             pHeads[i++] = tmpHead;
4007             usedHeadMask |= (1 << tmpHead);
4008         }
4009 
4010         for (tmpHead = 0; tmpHead < NVKMS_MAX_HEADS_PER_DISP; tmpHead++) {
4011             if (!nvHeadIsActive(pDispEvo, tmpHead)) {
4012                 continue;
4013             }
4014             if (usedHeadMask & (1 << tmpHead)) {
4015                 continue;
4016             }
4017             pHeads[i++] = tmpHead;
4018             usedHeadMask |= (1 << tmpHead);
4019         }
4020 
4021         nvAssert(i <= NVKMS_MAX_HEADS_PER_DISP);
4022         pHeads[i] = NV_INVALID_HEAD;
4023 
4024         if (pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, pHeads)) {
4025             appliedHeadMask |= (1 << head);
4026         }
4027     }
4028 
4029     return appliedHeadMask;
4030 }
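
/*
 * Illustrative sketch (assumed head configuration): with headMask = 0x6
 * (heads 1 and 2) and head 0 also active, the iteration for head 1 builds
 *
 *   pHeads = { 1, 2, 0, NV_INVALID_HEAD }
 *
 * i.e. the head being acted on first, then the other heads in the mask, then
 * any remaining active heads, terminated by NV_INVALID_HEAD.
 */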
4031 
4032 //
4033 // Set up raster lock and frame lock for external frame lock
4034 //
4035 
4036 NvBool nvEnableFrameLockEvo(NVDispEvoPtr pDispEvo)
4037 {
4038     NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
4039     NvU32 serverHead = nvGetFramelockServerHead(pDispEvo);
4040     NvU32 clientHeadsMask = nvGetFramelockClientHeadsMask(pDispEvo);
4041     NvU32 appliedHeadMask;
4042     NvU32 activeClientHeadsMask;
4043     NvBool useHouseSync = FALSE;
4044     NvU32 head;
4045 
4046     nvAssert(pDispEvo->framelock.currentServerHead == NV_INVALID_HEAD);
4047     nvAssert(pDispEvo->framelock.currentClientHeadsMask == 0x0);
4048 
4049     if (serverHead != NV_INVALID_HEAD &&
4050         (pFrameLockEvo->houseSyncMode ==
4051          NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT)) {
4052 
4053         NvS64 houseSync;
4054 
4055         /*
4056          * Only use house sync if present.
4057          * XXX what happens when house sync is unplugged?  why not enable it
4058          * now and let the FPGA decide?
4059          */
4060         if (!nvFrameLockGetStatusEvo(pFrameLockEvo,
4061                                      NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS,
4062                                      &houseSync)) {
4063             return FALSE;
4064         }
4065 
4066         useHouseSync = (houseSync != 0);
4067     }
4068 
4069     /* Initialize the assembly state */
4070     SyncEvoLockState();
4071 
4072     /* Enable the server */
4073     if ((serverHead != NV_INVALID_HEAD) &&
4074             nvHeadIsActive(pDispEvo, serverHead)) {
4075         NvU32 serverHeadMask;
4076 
4077         serverHeadMask = (1 << serverHead);
4078         appliedHeadMask = applyActionForHeads(pDispEvo, serverHeadMask,
4079                                               NV_EVO_ADD_FRAME_LOCK_SERVER);
4080 
4081         nvAssert(appliedHeadMask == serverHeadMask);
4082         pDispEvo->framelock.currentServerHead = serverHead;
4083 
4084         /* Enable house sync, if requested */
4085         if (useHouseSync) {
4086             appliedHeadMask =
4087                 applyActionForHeads(pDispEvo, serverHeadMask,
4088                                     NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC);
4089 
4090             if (appliedHeadMask == serverHeadMask) {
4091                 pDispEvo->framelock.currentHouseSync = TRUE;
4092             }
4093         }
4094     }
4095 
4096     /* Enable the clients */
4097     activeClientHeadsMask = 0;
4098     FOR_ALL_HEADS(head, clientHeadsMask) {
4099         if (nvHeadIsActive(pDispEvo, head)) {
4100             activeClientHeadsMask |= (1 << head);
4101         }
4102     }
4103     appliedHeadMask = applyActionForHeads(pDispEvo, activeClientHeadsMask,
4104                                           NV_EVO_ADD_FRAME_LOCK_CLIENT);
4105 
4106     nvAssert(appliedHeadMask == activeClientHeadsMask);
4107     pDispEvo->framelock.currentClientHeadsMask = activeClientHeadsMask;
4108 
4109     /* Finally, update the hardware */
4110     UpdateEvoLockState();
4111 
4112     return TRUE;
4113 }
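
/*
 * Enable/disable ordering sketch (illustrative): the enable path above runs
 * SyncEvoLockState(), then applies NV_EVO_ADD_FRAME_LOCK_SERVER, optionally
 * NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC, then NV_EVO_ADD_FRAME_LOCK_CLIENT, and
 * finally UpdateEvoLockState().  nvDisableFrameLockEvo() below applies the
 * inverse actions in the reverse order (clients, house sync, server).
 */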
4114 
4115 //
4116 // Disable raster lock and frame lock
4117 //
4118 
4119 NvBool nvDisableFrameLockEvo(NVDispEvoPtr pDispEvo)
4120 {
4121     NvU32 serverHead = nvGetFramelockServerHead(pDispEvo);
4122     NvU32 clientHeadsMask = nvGetFramelockClientHeadsMask(pDispEvo);
4123     NvU32 activeClientHeadsMask;
4124     NvU32 appliedHeadMask;
4125     NvU32 head;
4126 
4127     /* Initialize the assembly state */
4128     SyncEvoLockState();
4129 
4130     /* Disable the clients */
4131     activeClientHeadsMask = 0;
4132     FOR_ALL_HEADS(head, clientHeadsMask) {
4133         if (nvHeadIsActive(pDispEvo, head)) {
4134             activeClientHeadsMask |= (1 << head);
4135         }
4136     }
4137     appliedHeadMask = applyActionForHeads(pDispEvo,
4138                                           activeClientHeadsMask,
4139                                           NV_EVO_REM_FRAME_LOCK_CLIENT);
4140 
4141     nvAssert(appliedHeadMask == activeClientHeadsMask);
4142     pDispEvo->framelock.currentClientHeadsMask &= ~activeClientHeadsMask;
4143 
4144     /* Disable house sync */
4145     if (serverHead != NV_INVALID_HEAD &&
4146             nvHeadIsActive(pDispEvo, serverHead)) {
4147         NvU32 serverHeadMask = (1 << serverHead);
4148 
4149         if (pDispEvo->framelock.currentHouseSync) {
4150             appliedHeadMask =
4151                 applyActionForHeads(pDispEvo, serverHeadMask,
4152                                     NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC);
4153 
4154             nvAssert(appliedHeadMask == serverHeadMask);
4155             pDispEvo->framelock.currentHouseSync = FALSE;
4156         }
4157 
4158         /* Disable the server */
4159         appliedHeadMask = applyActionForHeads(pDispEvo, serverHeadMask,
4160                                               NV_EVO_REM_FRAME_LOCK_SERVER);
4161         nvAssert(appliedHeadMask == serverHeadMask);
4162         if (appliedHeadMask == serverHeadMask) {
4163             pDispEvo->framelock.currentServerHead = NV_INVALID_HEAD;
4164         }
4165     }
4166 
4167     /* Finally, update the hardware */
4168     UpdateEvoLockState();
4169 
4170     return TRUE;
4171 }
4172 
4173 //
4174 // Enable/Disable External Reference Clock Sync
4175 //
4176 // This function is used by frame lock to make the GPU sync to
4177 // the external device's reference clock.
4178 //
SetRefClk(NVDevEvoPtr pDevEvo,NvU32 sd,NvU32 head,NvBool external,NVEvoUpdateState * updateState)4179 static void SetRefClk(NVDevEvoPtr pDevEvo,
4180                       NvU32 sd, NvU32 head, NvBool external,
4181                       NVEvoUpdateState *updateState)
4182 {
4183     nvPushEvoSubDevMask(pDevEvo, 1 << sd);
4184 
4185     pDevEvo->hal->SetHeadRefClk(pDevEvo, head, external, updateState);
4186 
4187     nvPopEvoSubDevMask(pDevEvo);
4188 }
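
/*
 * Illustrative (hypothetical) caller sketch: to switch an active head on
 * subdevice sd over to the external reference clock and commit the change:
 *
 *     NVEvoUpdateState updateState = { };
 *     SetRefClk(pDevEvo, sd, head, TRUE, &updateState);   // external = TRUE
 *     nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
 *                           TRUE);                        // releaseElv
 *
 * The real call sites tie this to the frame lock enable/disable paths.
 */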

//
// Query raster lock state
//

NvBool nvQueryRasterLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val)
{
    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoSubDevPtr pEvoSubDev;
    const NvU32 apiHead = pDpyEvo->apiHead;
    const NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead);
    NVEvoHeadControlPtr pHC;

    /*
     * XXX[2Heads1OR] The EVO lock state machine is not currently supported
     * with 2Heads1OR; when 2Heads1OR is not active and the EVO lock state
     * machine is in use, the api head is expected to be mapped onto a single
     * hardware head (the primary hardware head).
     */
    if ((apiHead == NV_INVALID_HEAD) ||
            (nvPopCount32(pDispEvo->apiHeadState[apiHead].hwHeadsMask) != 1)) {
        return FALSE;
    }

    if ((head == NV_INVALID_HEAD) || (pDevEvo->gpus == NULL)) {
        return FALSE;
    }

    pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
    pHC = &pEvoSubDev->headControl[head];

    *val = pHC->serverLock == NV_EVO_RASTER_LOCK ||
           pHC->clientLock == NV_EVO_RASTER_LOCK;

    return TRUE;
}

void nvInvalidateRasterLockGroupsEvo(void)
{
    if (globalRasterLockGroups) {
        nvFree(globalRasterLockGroups);

        globalRasterLockGroups = NULL;
        numGlobalRasterLockGroups = 0;
    }
}

/*
 * Return the surface format usage bounds that NVKMS will program for the
 * requested format.
 *
 * For an RGB XBPP format, this function will return a bitmask of all RGB YBPP
 * formats, where Y <= X.
 *
 * For a YUV format, this function will return a bitmask of all YUV formats
 * that:
 * - Have the same number of planes as the requested format
 * - Have the same chroma decimation factors as the requested format
 * - Have the same or lower effective fetch bpp as the requested format
 *
 * For example, if the requested format is YUV420 12-bit SP, this function will
 * include all YUV420 8/10/12-bit SP formats.
 */
NvU64 nvEvoGetFormatsWithEqualOrLowerUsageBound(
    const enum NvKmsSurfaceMemoryFormat format,
    NvU64 supportedFormatsCapMask)
{
    const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
        nvKmsGetSurfaceMemoryFormatInfo(format);
    NvU64 supportedFormatsUsageBound = 0;
    NvU8 formatIdx;

    FOR_EACH_INDEX_IN_MASK(64, formatIdx, supportedFormatsCapMask) {

        const NvKmsSurfaceMemoryFormatInfo *pOtherFormatInfo =
            nvKmsGetSurfaceMemoryFormatInfo(formatIdx);

        if ((pFormatInfo->isYUV != pOtherFormatInfo->isYUV) ||
            (pFormatInfo->numPlanes != pOtherFormatInfo->numPlanes)) {
            continue;
        }

        if (pFormatInfo->isYUV) {
            if ((pFormatInfo->yuv.horizChromaDecimationFactor !=
                 pOtherFormatInfo->yuv.horizChromaDecimationFactor) ||
                (pFormatInfo->yuv.vertChromaDecimationFactor !=
                 pOtherFormatInfo->yuv.vertChromaDecimationFactor) ||
                (pFormatInfo->yuv.depthPerComponent <
                 pOtherFormatInfo->yuv.depthPerComponent)) {
                continue;
            }
        } else {
            if (pFormatInfo->rgb.bitsPerPixel <
                pOtherFormatInfo->rgb.bitsPerPixel) {
                continue;
            }
        }

        supportedFormatsUsageBound |= NVBIT64(formatIdx);

    } FOR_EACH_INDEX_IN_MASK_END;

    return supportedFormatsUsageBound;
}
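
/*
 * Illustrative usage sketch (the cap-mask source shown here is hypothetical;
 * the real caps field names vary by call site): given a layer's surface
 * format and the hardware's supported-formats cap mask, compute the
 * usage-bound mask to program:
 *
 *     NvU64 usageBoundMask =
 *         nvEvoGetFormatsWithEqualOrLowerUsageBound(
 *             pSurfaceEvo->format,
 *             layerSupportedSurfaceMemoryFormats);
 *
 * Every format set in the returned mask fetches no more data per pixel than
 * the requested format, per the rules in the comment above.
 */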

//
// Enable or disable flip lock (or query state)
//

NvBool nvUpdateFlipLockEvoOneHead(NVDispEvoPtr pDispEvo, const NvU32 head,
                                  NvU32 *val, NvBool set,
                                  NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
    NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head];

    if (set) {
        // make sure we're dealing with a bool
        NvBool setVal = !!*val;

        if (setVal ^ pHC->flipLock) {
            NvBool isMethodPending;
            NvBool changed = FALSE;

            if (!pDevEvo->hal->
                    IsChannelMethodPending(pDevEvo,
                                           pDevEvo->head[head].layer[NVKMS_MAIN_LAYER],
                                           pDispEvo->displayOwner,
                                           &isMethodPending) ||
                isMethodPending) {
                nvAssert(!"Base channel not idle");
                return FALSE;
            }

            if (setVal) {
                /* Make sure flip lock is not prohibited and raster lock is
                 * enabled.
                 *
                 * XXX: [2Heads1OR] If the head is locked in merge mode then
                 * its flip-lock state cannot be changed.
                 */
                if ((pHC->serverLock == NV_EVO_NO_LOCK &&
                     pHC->clientLock == NV_EVO_NO_LOCK) ||
                    HEAD_MASK_QUERY(pEvoSubDev->flipLockProhibitedHeadMask,
                                    head) ||
                    pHC->mergeMode) {
                    return FALSE;
                }
                pHC->flipLock = TRUE;
                changed = TRUE;
            } else {
                /* Only actually disable fliplock if it's not needed for SLI.
                 *
                 * XXX: [2Heads1OR] If the head is locked in merge mode then
                 * its flip-lock state cannot be changed.
                 */
                if (!pHC->mergeMode &&
                    !HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForSliHeadMask,
                                     head)) {
                    pHC->flipLock = FALSE;
                    changed = TRUE;
                }
            }

            if (changed) {
                EvoUpdateHeadParams(pDispEvo, head, updateState);
            }
        }

        /* Remember if we currently need fliplock enabled for framelock */
        pEvoSubDev->flipLockEnabledForFrameLockHeadMask =
            setVal ?
                HEAD_MASK_SET(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, head) :
                HEAD_MASK_UNSET(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, head);
    }

    /*
     * XXX should the query return the cached "enabled for framelock" state
     * instead?
     */
    *val = pHC->flipLock;

    return TRUE;
}


static NvBool UpdateFlipLock50(const NVDpyEvoRec *pDpyEvo,
                               NvU32 *val, NvBool set)
{
    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
    const NvU32 apiHead = pDpyEvo->apiHead;
    const NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead);
    NVEvoUpdateState updateState = { };
    NvBool ret;

    if (head == NV_INVALID_HEAD) {
        return FALSE;
    }

    ret = nvUpdateFlipLockEvoOneHead(pDispEvo, head, val, set,
                                     &updateState);

    if (set && ret) {
        nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
                              TRUE /* releaseElv */);
    }

    return ret;
}

NvBool nvSetFlipLockEvo(NVDpyEvoPtr pDpyEvo, NvS64 value)
{
    NvU32 val32 = !!value;
    return UpdateFlipLock50(pDpyEvo, &val32, TRUE /* set */);
}

NvBool nvGetFlipLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue)
{
    NvBool ret;
    NvU32 val32 = 0;
    ret = UpdateFlipLock50(pDpyEvo, &val32, FALSE /* set */);

    if (ret) {
        *pValue = !!val32;
    }

    return ret;
}

static void ProhibitFlipLock50(NVDispEvoPtr pDispEvo)
{
    NvU32 head;
    NvBool needUpdate = FALSE;
    NVEvoUpdateState updateState = { };

    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];

    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
        NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head];
        /*
         * XXX: [2Heads1OR] If the head is locked in merge mode then its
         * flip-lock state cannot be changed.
         */
        if (!nvHeadIsActive(pDispEvo, head) || pHC->mergeMode) {
            continue;
        }

        if (HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForFrameLockHeadMask,
                            head)) {
            nvAssert(!"Cannot prohibit flip lock "
                      "because it is already enabled for frame lock");
            continue;
        }

        if (pHC->flipLock) {
            needUpdate = TRUE;

            pHC->flipLock = FALSE;
            EvoUpdateHeadParams(pDispEvo, head, &updateState);
        }

        pEvoSubDev->flipLockProhibitedHeadMask =
            HEAD_MASK_SET(pEvoSubDev->flipLockProhibitedHeadMask, head);
    }

    if (needUpdate) {
        nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
                              TRUE /* releaseElv */);
    }
}

static void AllowFlipLock50(NVDispEvoPtr pDispEvo)
{
    NvU32 head;
    NvBool needUpdate = FALSE;
    NVEvoUpdateState updateState = { };
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];

    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
        NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head];

        /*
         * XXX: [2Heads1OR] If the head is locked in merge mode then its
         * flip-lock state cannot be changed.
         */
        if (!nvHeadIsActive(pDispEvo, head) || pHC->mergeMode) {
            continue;
        }

        if (!pHC->flipLock &&
            HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForSliHeadMask,
                            head)) {
            needUpdate = TRUE;

            nvAssert(pHC->serverLock != NV_EVO_NO_LOCK ||
                     pHC->clientLock != NV_EVO_NO_LOCK);

            pHC->flipLock = TRUE;
            EvoUpdateHeadParams(pDispEvo, head, &updateState);
        }

        pEvoSubDev->flipLockProhibitedHeadMask =
            HEAD_MASK_UNSET(pEvoSubDev->flipLockProhibitedHeadMask, head);
    }

    if (needUpdate) {
        nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
                              TRUE /* releaseElv */);
    }
}

NvBool nvAllowFlipLockEvo(NVDispEvoPtr pDispEvo, NvS64 value)
{
    if (value == 0) {
        ProhibitFlipLock50(pDispEvo);
    } else {
        AllowFlipLock50(pDispEvo);
    }
    return TRUE;
}
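
/*
 * nvAllowFlipLockEvo() is attribute-style: a value of 0 prohibits flip lock
 * on all heads of the disp (forcing it off wherever it is not needed for
 * frame lock), and any non-zero value allows it again (re-enabling it on
 * heads where SLI requires it).  A hypothetical caller sketch:
 *
 *     nvAllowFlipLockEvo(pDispEvo, 0);   // prohibit while reconfiguring
 *     ...
 *     nvAllowFlipLockEvo(pDispEvo, 1);   // allow again afterwards
 */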

/*!
 * Enable or disable stereo.
 *
 * XXX SLI+Stereo For now, just set stereo on the display owner.
 */
NvBool nvSetStereoEvo(
    const NVDispEvoRec *pDispEvo,
    const NvU32 head,
    NvBool enable)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
    NVEvoHeadControlPtr pHC;
    NVEvoLockPin pin;

    nvAssert(head != NV_INVALID_HEAD);

    pHC = &pEvoSubDev->headControl[head];
    pin = NV_EVO_LOCK_PIN_INTERNAL(head);

    // make sure we're dealing with a bool
    NvBool stereo = !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin);

    if (enable ^ stereo) {
        NVEvoUpdateState updateState = { };

        if (enable) {
            NvU32 otherHead;
            NvU32 signalPin;

            // If any other head is already driving stereo, fail
            for (otherHead = 0; otherHead < NVKMS_MAX_HEADS_PER_DISP;
                 otherHead++) {
                if (!nvHeadIsActive(pDispEvo, otherHead)) {
                    continue;
                }
                if (head == otherHead) {
                    continue;
                }

                const NVEvoHeadControl *pOtherHC =
                    &pEvoSubDev->headControl[otherHead];

                if (!NV_EVO_LOCK_PIN_IS_INTERNAL(pOtherHC->stereoPin)) {
                    return FALSE;
                }
            }

            signalPin = nvEvoGetPinForSignal(pDispEvo,
                                             pEvoSubDev,
                                             NV_EVO_LOCK_SIGNAL_STEREO);
            if (signalPin != NV_EVO_LOCK_PIN_ERROR) {
                pin = signalPin;
            }
        }

        pHC->stereoPin = pin;

        EvoUpdateHeadParams(pDispEvo, head, &updateState);

        // Make method take effect.
        nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
                              TRUE /* releaseElv */);
    }

    return TRUE;
}
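
/*
 * Illustrative (hypothetical) usage sketch: stereo is toggled per head on
 * the display owner, and only one head per subdevice may drive the stereo
 * pin at a time:
 *
 *     if (!nvSetStereoEvo(pDispEvo, head, TRUE)) {
 *         // some other active head already owns the stereo signal
 *     }
 */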

/*!
 * Query stereo state.
 *
 * XXX SLI+Stereo For now, just get stereo on the display owner.
 */
NvBool nvGetStereoEvo(const NVDispEvoRec *pDispEvo, const NvU32 head)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
    NVEvoHeadControlPtr pHC;

    nvAssert(head != NV_INVALID_HEAD);

    pHC = &pEvoSubDev->headControl[head];

    return !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin);
}

void nvSetViewPortsEvo(NVDispEvoPtr pDispEvo,
                       const NvU32 head, NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    const NVHwModeViewPortEvo *pViewPort = &pHeadState->timings.viewPort;

    nvPushEvoSubDevMaskDisp(pDispEvo);
    pDevEvo->hal->SetViewportInOut(pDevEvo, head,
                                   pViewPort, pViewPort, pViewPort,
                                   updateState);
    nvPopEvoSubDevMask(pDevEvo);

    /*
     * Specify safe default values of 0 for viewPortPointIn x and y; these
     * may be changed when panning, out of band of a modeset.
     */
    EvoSetViewportPointIn(pDispEvo, head, 0 /* x */, 0 /* y */, updateState);
}


static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, const NvU32 head,
                                  NvU16 x, NvU16 y,
                                  NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    nvPushEvoSubDevMaskDisp(pDispEvo);
    pDevEvo->hal->SetViewportPointIn(pDevEvo, head, x, y, updateState);
    nvPopEvoSubDevMask(pDevEvo);
}

void nvEvoSetLUTContextDma(NVDispEvoPtr pDispEvo,
                           const NvU32 head, NVEvoUpdateState *pUpdateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];

    pDevEvo->hal->SetLUTContextDma(pDispEvo,
                                   head,
                                   pHeadState->lut.pCurrSurface,
                                   pHeadState->lut.baseLutEnabled,
                                   pHeadState->lut.outputLutEnabled,
                                   pUpdateState,
                                   pHeadState->bypassComposition);
}

static void EvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo, const NvU32 apiHead)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVDispApiHeadStateEvoRec *pApiHeadState =
                              &pDispEvo->apiHeadState[apiHead];
    const int dispIndex = pDispEvo->displayOwner;
    NvU32 head;
    NVEvoUpdateState updateState = { };

    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
        nvEvoSetLUTContextDma(pDispEvo, head, &updateState);
    }

    /*
     * EVO2 does not set the LUT context DMA if the core channel doesn't have
     * a scanout surface set; in that case there is no update state to kick
     * off.
     */
    if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) {
        int notifier;
        NvBool notify;

        nvEvoStageLUTNotifier(pDispEvo, apiHead);
        notifier = nvEvoCommitLUTNotifiers(pDispEvo);

        nvAssert(notifier >= 0);

        /*
         * XXX: The notifier index returned by nvEvoCommitLUTNotifiers here
         * shouldn't be < 0, because this function shouldn't have been called
         * while a previous LUT update is still outstanding. If
         * nvEvoCommitLUTNotifiers ever returns -1 for one reason or another,
         * using notify and setting notifier to 0 in this manner avoids
         * setting an invalid notifier in the following Update call, which
         * prevents potential kernel panics and Xids.
         */
        notify = notifier >= 0;
        if (!notify) {
            notifier = 0;
        }

        // Clear the completion notifier and kick off an update.  Wait for it
        // here if NV_CTRL_SYNCHRONOUS_PALETTE_UPDATES is enabled.  Otherwise,
        // don't wait for the notifier -- it'll be checked the next time a LUT
        // change request comes in.
        EvoUpdateAndKickOffWithNotifier(pDispEvo,
                                        notify, /* notify */
                                        FALSE, /* sync */
                                        notifier,
                                        &updateState,
                                        TRUE /* releaseElv */);
        pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate |= notify;
    }
}

static void UpdateMaxPixelClock(NVDevEvoPtr pDevEvo)
{
    NVDispEvoPtr pDispEvo;
    NVDpyEvoPtr pDpyEvo;
    int i;

    FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
        FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) {
            nvDpyProbeMaxPixelClock(pDpyEvo);
        }
    }
}

static NvBool AllocEvoSubDevs(NVDevEvoPtr pDevEvo)
{
    NVDispEvoPtr pDispEvo;
    NvU32 sd;

    pDevEvo->gpus = nvCalloc(pDevEvo->numSubDevices, sizeof(NVEvoSubDevRec));

    if (pDevEvo->gpus == NULL) {
        return FALSE;
    }

    /* Assign the pDispEvo for each evoSubDevice */

    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
        pDevEvo->gpus[sd].pDispEvo = pDispEvo;
    }
    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
        nvAssert(pDevEvo->gpus[sd].pDispEvo != NULL);
    }

    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
        NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
        NvU32 head;

        pDevEvo->gpus[sd].subDeviceInstance = sd;
        // Initialize the lock state.
        nvEvoStateStartNoLock(pEvoSubDev);

        for (head = 0; head < pDevEvo->numHeads; head++) {
            NVEvoSubDevHeadStateRec *pSdHeadState =
                &pDevEvo->gpus[sd].headState[head];
            NvU32 i;

            for (i = 0; i < ARRAY_LEN(pSdHeadState->layer); i++) {
                pSdHeadState->layer[i].cscMatrix = NVKMS_IDENTITY_CSC_MATRIX;
            }

            pSdHeadState->cursor.cursorCompParams =
                nvDefaultCursorCompositionParams(pDevEvo);
        }
    }

    return TRUE;
}


// Return the default cursor composition params, replacing the zeroed-out
// values when they are unsupported by the hardware.
struct NvKmsCompositionParams nvDefaultCursorCompositionParams(const NVDevEvoRec *pDevEvo)
{
    const struct NvKmsCompositionCapabilities *pCaps =
        &pDevEvo->caps.cursorCompositionCaps;
    const NvU32 supportedBlendMode =
        pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE].supportedBlendModes[1];

    struct NvKmsCompositionParams params = { };

    if ((supportedBlendMode & NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)) != 0x0) {
        params.blendingMode[1] = NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE;
    } else {
        params.blendingMode[1] = NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA;
    }

    return params;
}
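
/*
 * The defaults above are consumed when the per-subdevice head state is
 * initialized in AllocEvoSubDevs() earlier in this file:
 *
 *     pSdHeadState->cursor.cursorCompParams =
 *         nvDefaultCursorCompositionParams(pDevEvo);
 *
 * Opaque blending is preferred when the hardware supports it; otherwise the
 * cursor falls back to premultiplied alpha.
 */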

static NvBool ValidateConnectorTypes(const NVDevEvoRec *pDevEvo)
{
    const NVDispEvoRec *pDispEvo;
    const NVConnectorEvoRec *pConnectorEvo;
    NvU32 dispIndex;

    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        const NVEvoSubDevRec *pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
        const NVEvoCapabilities *pEvoCaps = &pEvoSubDev->capabilities;
        const NVEvoMiscCaps *pMiscCaps = &pEvoCaps->misc;

        FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
            if (!pMiscCaps->supportsDSI &&
                pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) {
                nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                            "DSI connectors are unsupported!");
                return FALSE;
            }
        }
    }
    return TRUE;
}

static void UnregisterFlipOccurredEventOneHead(NVDispEvoRec *pDispEvo,
                                               const NvU32 head)
{
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    NvU32 layer;

    /* XXX NVKMS TODO: need disp-scope in event */
    if (pDispEvo->displayOwner != 0) {
        return;
    }

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer];

        nvAssert((pChannel->completionNotifierEventHandle == 0) ||
                    (pChannel->completionNotifierEventRefPtr != NULL));

        if (pChannel->completionNotifierEventHandle != 0) {
            nvRmApiFree(nvEvoGlobal.clientHandle,
                        pChannel->pb.channel_handle,
                        pChannel->completionNotifierEventHandle);
            nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
                               pChannel->completionNotifierEventHandle);
            pChannel->completionNotifierEventHandle = 0;
            pChannel->completionNotifierEventRefPtr = NULL;
        }
    }
}

static void ClearApiHeadStateOneDisp(NVDispEvoRec *pDispEvo)
{
    NvU32 apiHead;

    /*
     * Unregister all the flip-occurred event callbacks which are
     * registered with the (api-head, layer) pair event data,
     * before destroying the api-head states.
     */
    for (NvU32 head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
        UnregisterFlipOccurredEventOneHead(pDispEvo, head);
    }

    for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->apiHeadState); apiHead++) {
        NvU32 layer;
        NVDispApiHeadStateEvoRec *pApiHeadState =
            &pDispEvo->apiHeadState[apiHead];
        nvAssert(nvListIsEmpty(&pApiHeadState->vblankCallbackList));
        for (layer = 0; layer < ARRAY_LEN(pApiHeadState->flipOccurredEvent); layer++) {
            if (pApiHeadState->flipOccurredEvent[layer].ref_ptr != NULL) {
                nvkms_free_ref_ptr(pApiHeadState->flipOccurredEvent[layer].ref_ptr);
                pApiHeadState->flipOccurredEvent[layer].ref_ptr = NULL;
            }
        }
    }

    nvkms_memset(pDispEvo->apiHeadState, 0, sizeof(pDispEvo->apiHeadState));
}

static void ClearApiHeadState(NVDevEvoRec *pDevEvo)
{
    NvU32 dispIndex;
    NVDispEvoRec *pDispEvo;

    nvRmFreeCoreRGSyncpts(pDevEvo);

    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        ClearApiHeadStateOneDisp(pDispEvo);
    }

    nvkms_memset(pDevEvo->apiHead, 0, sizeof(pDevEvo->apiHead));
}

static NvBool InitApiHeadStateOneDisp(NVDispEvoRec *pDispEvo)
{
    NvU32 usedApiHeadsMask = 0x0;
    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;

    for (NvU32 apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->apiHeadState); apiHead++) {
        NvU32 layer;
        NVDispApiHeadStateEvoRec *pApiHeadState =
            &pDispEvo->apiHeadState[apiHead];

        pApiHeadState->activeDpys = nvEmptyDpyIdList();
        pApiHeadState->attributes = NV_EVO_DEFAULT_ATTRIBUTES_SET;

        nvListInit(&pApiHeadState->vblankCallbackList);

        for (layer = 0; layer < ARRAY_LEN(pApiHeadState->flipOccurredEvent); layer++) {
            pApiHeadState->flipOccurredEvent[layer].ref_ptr =
                nvkms_alloc_ref_ptr(&pApiHeadState->flipOccurredEvent[layer].data);
            if (pApiHeadState->flipOccurredEvent[layer].ref_ptr == NULL) {
                goto failed;
            }

            pApiHeadState->flipOccurredEvent[layer].data =
                (NVDispFlipOccurredEventDataEvoRec) {
                .pDispEvo = pDispEvo,
                .apiHead = apiHead,
                .layer = layer,
            };
        }
    }

    for (NvU32 head = 0; head < pDevEvo->numHeads; head++) {
        if (pDispEvo->headState[head].pConnectorEvo != NULL) {
            NvU32 apiHead;
            const NVConnectorEvoRec *pConnectorEvo =
                pDispEvo->headState[head].pConnectorEvo;

            /* Find an unused api-head which supports the same number of
             * layers */
            for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
                if ((NVBIT(apiHead) & usedApiHeadsMask) != 0x0) {
                    continue;
                }

                if (pDevEvo->apiHead[apiHead].numLayers ==
                        pDevEvo->head[head].numLayers) {
                    usedApiHeadsMask |= NVBIT(apiHead);
                    break;
                }
            }
            nvAssert(apiHead < pDevEvo->numApiHeads);

            /*
             * Use the pDpyEvo for the connector, since we may not have one
             * for the display id if it's a dynamic one.
             */
            NVDpyEvoRec *pDpyEvo = nvGetDpyEvoFromDispEvo(pDispEvo,
                pConnectorEvo->displayId);

            nvAssert(pDpyEvo->apiHead == NV_INVALID_HEAD);

            pDpyEvo->apiHead = apiHead;
            nvAssignHwHeadsMaskApiHeadState(
                &pDispEvo->apiHeadState[apiHead],
                NVBIT(head));
            pDispEvo->apiHeadState[apiHead].activeDpys =
                nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId);
        }
    }

    return TRUE;

failed:
    ClearApiHeadStateOneDisp(pDispEvo);

    return FALSE;
}

static void
CompletionNotifierEventDeferredWork(void *dataPtr, NvU32 dataU32)
{
    NVDispFlipOccurredEventDataEvoRec *pEventData = dataPtr;

    nvSendFlipOccurredEventEvo(pEventData->pDispEvo, pEventData->apiHead,
                               pEventData->layer);
}

static void CompletionNotifierEvent(void *arg, void *pEventDataVoid,
                                    NvU32 hEvent, NvU32 Data, NV_STATUS Status)
{
    (void) nvkms_alloc_timer_with_ref_ptr(
        CompletionNotifierEventDeferredWork, /* callback */
        arg, /* argument (this is a ref_ptr to NVDispFlipOccurredEventDataEvoRec) */
        0,   /* dataU32 */
        0);  /* timeout: schedule the work immediately */
}
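
/*
 * Note: CompletionNotifierEvent() is invoked from the RM event callback
 * path, so it defers the actual event delivery to a zero-timeout NVKMS
 * timer.  Because the timer takes the ref_ptr rather than a raw pointer,
 * the deferred work can be safely skipped if the underlying
 * NVDispFlipOccurredEventDataEvoRec has already been freed by the time the
 * timer fires.
 */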

void nvEvoPreModesetRegisterFlipOccurredEvent(NVDispEvoRec *pDispEvo,
                                              const NvU32 head,
                                              const NVEvoModesetUpdateState
                                                  *pModesetUpdate)
{
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    NvU32 layer;

    /* XXX NVKMS TODO: need disp-scope in event */
    if (pDispEvo->displayOwner != 0) {
        return;
    }

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer];
        const struct _NVEvoModesetUpdateStateOneLayer *pLayer =
             &pModesetUpdate->flipOccurredEvent[head].layer[layer];

        if (!pLayer->changed ||
                (pLayer->ref_ptr == NULL) ||
                (pLayer->ref_ptr == pChannel->completionNotifierEventRefPtr)) {
            continue;
        }

        nvAssert((pChannel->completionNotifierEventHandle == 0) &&
                    (pChannel->completionNotifierEventRefPtr == NULL));

        pChannel->completionNotifierEventHandle =
            nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);

        if (!nvRmRegisterCallback(pDevEvo,
                                  &pChannel->completionNotifierEventCallback,
                                  pLayer->ref_ptr,
                                  pChannel->pb.channel_handle,
                                  pChannel->completionNotifierEventHandle,
                                  CompletionNotifierEvent,
                                  0)) {
            nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
                               pChannel->completionNotifierEventHandle);
            pChannel->completionNotifierEventHandle = 0;
        } else {
            pChannel->completionNotifierEventRefPtr = pLayer->ref_ptr;
        }
    }
}

void nvEvoPostModesetUnregisterFlipOccurredEvent(NVDispEvoRec *pDispEvo,
                                                 const NvU32 head,
                                                 const NVEvoModesetUpdateState
                                                     *pModesetUpdate)
{
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    NvU32 layer;

    /* XXX NVKMS TODO: need disp-scope in event */
    if (pDispEvo->displayOwner != 0) {
        return;
    }

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer];
        const struct _NVEvoModesetUpdateStateOneLayer *pLayer =
             &pModesetUpdate->flipOccurredEvent[head].layer[layer];

        if (!pLayer->changed ||
                (pLayer->ref_ptr != NULL) ||
                (pChannel->completionNotifierEventHandle == 0)) {

            /*
             * If the flip-occurred event of this layer is updated to get
             * enabled (pLayer->ref_ptr != NULL), then that update should
             * already have been processed by
             * nvEvoPreModesetRegisterFlipOccurredEvent() and
             * pChannel->completionNotifierEventRefPtr == pLayer->ref_ptr.
             */
            nvAssert(!pLayer->changed ||
                        (pChannel->completionNotifierEventHandle == 0) ||
                        (pChannel->completionNotifierEventRefPtr ==
                            pLayer->ref_ptr));
            continue;
        }

        nvRmApiFree(nvEvoGlobal.clientHandle,
                    pChannel->pb.channel_handle,
                    pChannel->completionNotifierEventHandle);
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
                           pChannel->completionNotifierEventHandle);
        pChannel->completionNotifierEventHandle = 0;
        pChannel->completionNotifierEventRefPtr = NULL;
    }
}

static NvBool InitApiHeadState(NVDevEvoRec *pDevEvo)
{
    NVDispEvoRec *pDispEvo;
    NvU32 dispIndex;

    /*
     * For every hardware head, there should be at least one api-head
     * which supports an equal number of layers.
     */
    nvAssert(pDevEvo->numApiHeads == pDevEvo->numHeads);
    for (NvU32 head = 0; head < pDevEvo->numHeads; head++) {
        pDevEvo->apiHead[head].numLayers = pDevEvo->head[head].numLayers;
    }

    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        if (!InitApiHeadStateOneDisp(pDispEvo)) {
            goto failed;
        }
    }

    nvRmAllocCoreRGSyncpts(pDevEvo);

    return TRUE;

failed:
    ClearApiHeadState(pDevEvo);

    return FALSE;
}

/*!
 * Allocate the EVO core channel.
 *
 * This function trivially succeeds if the core channel is already allocated.
 */
NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo)
{
    NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS capsParams = { };
    NvU32 ret;
    NvBool bRet;
    NVDispEvoRec *pDispEvo;
    NvU32 dispIndex;
    NvU32 head;

    /* Do nothing if the display was already allocated */
    if (pDevEvo->displayHandle != 0) {
        return TRUE;
    }

    if (!AllocEvoSubDevs(pDevEvo)) {
        goto failed;
    }

    // Disallow GC6 in anticipation of touching GPU/displays.
    if (!nvRmSetGc6Allowed(pDevEvo, FALSE)) {
        goto failed;
    }

    /* Query console FB info, and save the result into pDevEvo->vtFbInfo.
     * This is done at device allocation time.
     * nvRmImportFbConsoleMemory will import the surface for console restore by
     * nvEvoRestoreConsole if the surface format is compatible.
     * Otherwise, console restore will cause a core channel realloc, telling
     * RM to restore the console via nvRmVTSwitch.
     */
    if (!nvRmGetVTFBInfo(pDevEvo)) {
        goto failed;
    }

    if (!nvRmVTSwitch(pDevEvo,
                      NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE)) {
        goto failed;
    }

    /* Evo object (parent of all other NV50 display stuff) */
    nvAssert(nvRmEvoClassListCheck(pDevEvo, pDevEvo->dispClass));
    pDevEvo->displayHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);

    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
                       pDevEvo->deviceHandle,
                       pDevEvo->displayHandle,
                       pDevEvo->dispClass,
                       NULL);
    if (ret != NVOS_STATUS_SUCCESS) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Failed to initialize display engine: 0x%x (%s)",
                    ret, nvstatusToString(ret));
        goto failed;
    }

    /* Get the display caps bits */

    ct_assert(sizeof(pDevEvo->capsBits) == sizeof(capsParams.capsTbl));
    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
                         pDevEvo->displayHandle,
                         NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2,
                         &capsParams, sizeof(capsParams));
    if (ret != NVOS_STATUS_SUCCESS) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Failed to determine display capabilities");
        goto failed;
    }
    nvkms_memcpy(pDevEvo->capsBits, capsParams.capsTbl,
                 sizeof(pDevEvo->capsBits));

    // Evo core channel. Allocated once, shared per GPU
    if (!nvRMSetupEvoCoreChannel(pDevEvo)) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Failed to allocate display engine core DMA push buffer");
        goto failed;
    }

    pDevEvo->coreInitMethodsPending = TRUE;

    bRet = pDevEvo->hal->GetCapabilities(pDevEvo);

    if (!bRet) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Failed to query display engine capability bits.");
        goto failed;
    }

    /*
     * XXX NVKMS TODO: if the EVO core channel is allocated (and the
     * capability notifier queried) before any nvDpyConnectEvo(), then
     * we won't need to update the pixelClock here.
     */
    UpdateMaxPixelClock(pDevEvo);

    if (pDevEvo->numWindows > 0) {
        int win;

        if (!nvRMAllocateWindowChannels(pDevEvo)) {
            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                        "Failed to allocate display engine window channels");
            goto failed;
        }

        for (win = 0; win < pDevEvo->numWindows; win++) {
            const NvU32 head = pDevEvo->headForWindow[win];

            if (head == NV_INVALID_HEAD) {
                continue;
            }

            pDevEvo->head[head].layer[pDevEvo->head[head].numLayers] =
                pDevEvo->window[win];
            pDevEvo->head[head].numLayers++;
        }
    } else {
        // Allocate the base channels
        if (!nvRMAllocateBaseChannels(pDevEvo)) {
            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                        "Failed to allocate display engine base channels");
            goto failed;
        }

        // Allocate the overlay channels
        if (!nvRMAllocateOverlayChannels(pDevEvo)) {
            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                        "Failed to allocate display engine overlay channels");
            goto failed;
        }

        /* Map base and overlay channels onto main and overlay layers. */
        for (head = 0; head < pDevEvo->numHeads; head++) {
            nvAssert(pDevEvo->base[head] != NULL && pDevEvo->overlay[head] != NULL);

            pDevEvo->head[head].layer[NVKMS_MAIN_LAYER] = pDevEvo->base[head];
            pDevEvo->head[head].layer[NVKMS_OVERLAY_LAYER] = pDevEvo->overlay[head];
            pDevEvo->head[head].numLayers = 2;
        }
    }

    // Allocate and map the cursor controls for all heads
    bRet = nvAllocCursorEvo(pDevEvo);
    if (!bRet) {
        goto failed;
    }

    if (!nvAllocLutSurfacesEvo(pDevEvo)) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
            "Failed to allocate memory for the display color lookup table.");
        goto failed;
    }

    // Resume the DisplayPort library's control of the device.
    if (!nvRmResumeDP(pDevEvo)) {
        nvEvoLogDev(
            pDevEvo,
            EVO_LOG_ERROR,
            "Failed to initialize DisplayPort sub-system.");
        goto failed;
    }

    if (!InitApiHeadState(pDevEvo)) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Failed to initialize the api heads.");
        goto failed;
    }

    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        nvRmRegisterBacklight(pDispEvo);
    }

    // Allow GC6 if no heads are active.
    if (nvAllHeadsInactive(pDevEvo)) {
        if (!nvRmSetGc6Allowed(pDevEvo, TRUE)) {
            nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
                        "No head is active, but failed to allow GC6");
        }
    }

    return TRUE;

failed:
    nvFreeCoreChannelEvo(pDevEvo);

    return FALSE;
}

/*!
 * Clear the pConnectorEvo->or.primary and pConnectorEvo->or.secondaryMask
 * tracking.
 */
static void ClearSORAssignmentsOneDisp(const NVDispEvoRec *pDispEvo)
{
    NVConnectorEvoPtr pConnectorEvo;

    nvAssert(NV0073_CTRL_SYSTEM_GET_CAP(pDispEvo->pDevEvo->commonCapsBits,
                NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED));

    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
        if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
            continue;
        }

        pConnectorEvo->or.primary = NV_INVALID_OR;
        pConnectorEvo->or.secondaryMask = 0x0;
    }
}

/*!
 * Update pConnectorEvo->or.primary and pConnectorEvo->or.secondaryMask from
 * the list given to us by RM.
 */
static void RefreshSORAssignments(const NVDispEvoRec *pDispEvo,
                                  const NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams)
{
    NVConnectorEvoPtr pConnectorEvo;

    ClearSORAssignmentsOneDisp(pDispEvo);

    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
        const NvU32 displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
        NvU32 sorIndex;

        if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
            continue;
        }

        for (sorIndex = 0;
             sorIndex < ARRAY_LEN(pParams->sorAssignList) &&
             sorIndex < ARRAY_LEN(pConnectorEvo->or.ownerHeadMask);
             sorIndex++) {
            if ((pParams->sorAssignListWithTag[sorIndex].displayMask &
                    displayId) == displayId) {
                if ((pParams->sorAssignListWithTag[sorIndex].sorType ==
                        NV0073_CTRL_DFP_SOR_TYPE_SINGLE) ||
                        (pParams->sorAssignListWithTag[sorIndex].sorType ==
                         NV0073_CTRL_DFP_SOR_TYPE_2H1OR_PRIMARY)) {
                    pConnectorEvo->or.primary = sorIndex;
                } else {
                    nvAssert(pParams->sorAssignListWithTag[sorIndex].sorType ==
                                NV0073_CTRL_DFP_SOR_TYPE_2H1OR_SECONDARY);
                    pConnectorEvo->or.secondaryMask |= NVBIT(sorIndex);
                }
            }
        }

        nvAssert((pConnectorEvo->or.secondaryMask == 0) ||
                    (pConnectorEvo->or.primary != NV_INVALID_OR));
    }
}

/*
 * Ask RM to assign an SOR to the given displayId.
 *
 * In the 2Heads1OR MST case, this function gets called with the dynamic
 * displayId.
 *
 * Note that this assignment may be temporary.  This function will always call
 * RM, and unless the connector is currently in use (i.e., being driven by a
 * head), a previously-assigned SOR may be reused.
 *
 * The RM will either:
 * a) return an SOR that's already assigned/attached
 *    to the root port of this displayId, or
 * b) pick a new "unused" SOR, assign and attach it to this connector, and
 *    return that -- where "unused" means both not being actively driven by a
 *    head and not in the "exclude mask" argument.
 *    The "exclude mask" is useful if we need to assign multiple SORs up front
 *    before activating heads to drive them.
 *
 * For example, if head 0 is currently actively scanning out to SOR 0 and we
 * are doing a modeset to activate currently-inactive heads 1 and 2:
 * 1. nvkms calls RM for nvAssignSOREvo(pConnectorForHead1, 0);
 *    RM returns any SOR other than 0 (say 3)
 * 2. nvkms calls RM for nvAssignSOREvo(pConnectorForHead2, (1 << 3));
 *    RM returns any SOR other than 0 and 3 (say 1)
 * 3. At this point nvkms can push methods and UPDATE to enable heads 1 and 2
 *    to drive SORs 3 and 1.
 * In the example above, the sorExcludeMask == (1 << 3) at step 2 is important
 * to ensure that RM doesn't reuse SOR 3 from step 1.  It won't reuse SOR 0
 * because it's in use by head 0.
 *
 * If an SOR is only needed temporarily (e.g., to do link training to "assess"
 * a DisplayPort or HDMI FRL link), then sorExcludeMask should be 0 -- any SOR
 * that's not actively used by a head can be used, and as soon as nvkms
 * finishes the "assessment", the SOR is again eligible for reuse.
 *
 * Because of the potential for SOR reuse, nvAssignSOREvo() will always call
 * RefreshSORAssignments() to update pConnectorEvo->or.primary and
 * pConnectorEvo->or.secondaryMask on *every* connector after calling
 * NV0073_CTRL_CMD_DFP_ASSIGN_SOR for *any* connector.
 */
NvBool nvAssignSOREvo(const NVDispEvoRec *pDispEvo, const NvU32 displayId,
                      const NvBool b2Heads1Or, const NvU32 sorExcludeMask)
{
    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = { 0 };
    NvU32 ret;

    if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
                NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
        return TRUE;
    }

    params.subDeviceInstance = pDispEvo->displayOwner;
    params.displayId = displayId;
    params.bIs2Head1Or = b2Heads1Or;
    params.sorExcludeMask = sorExcludeMask;

    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
                         pDevEvo->displayCommonHandle,
                         NV0073_CTRL_CMD_DFP_ASSIGN_SOR,
                         &params,
                         sizeof(params));

    if (ret != NVOS_STATUS_SUCCESS) {
        return FALSE;
    }

    RefreshSORAssignments(pDispEvo, &params);

    return TRUE;
}
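
/*
 * Illustrative (hypothetical) caller sketch, following the example in the
 * comment above: assign SORs for two connectors up front, before enabling
 * the heads that will drive them.
 *
 *     nvAssignSOREvo(pDispEvo, nvDpyIdToNvU32(pConnectorA->displayId),
 *                    FALSE, 0);
 *     nvAssignSOREvo(pDispEvo, nvDpyIdToNvU32(pConnectorB->displayId),
 *                    FALSE, NVBIT(pConnectorA->or.primary));
 *
 * pConnectorA/pConnectorB are hypothetical; the second call excludes the
 * SOR just assigned to pConnectorA so RM cannot hand it out twice.
 */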

static void CacheSorAssignList(const NVDispEvoRec *pDispEvo,
    const NVConnectorEvoRec *sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS])
{
    const NVConnectorEvoRec *pConnectorEvo;

    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
        if ((pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) ||
                (pConnectorEvo->or.primary == NV_INVALID_OR)) {
            continue;
        }

        /*
         * RM populates the same SOR index into more than one connector if
         * they are DCC partners; this check makes sure the SOR assignment is
         * cached only for a single connector.  The SOR assignment call made
         * before modeset/dp-link-training makes sure the assignment happens
         * for the correct connector.
         */
        if (sorAssignList[pConnectorEvo->or.primary] != NULL) {
            continue;
        }
        sorAssignList[pConnectorEvo->or.primary] =
            pConnectorEvo;
    }
}

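/*
 * Restore a cached SOR -> connector mapping by asking RM to reassign each
 * connector to its original SOR: setting sorExcludeMask to ~NVBIT(sorIndex)
 * leaves exactly one SOR eligible, which forces RM to hand back the SOR
 * that was cached for that connector.
 */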
RestoreSorAssignList(NVDispEvoRec * pDispEvo,const NVConnectorEvoRec * sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS])5448 static void RestoreSorAssignList(NVDispEvoRec *pDispEvo,
5449     const NVConnectorEvoRec *sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS])
5450 {
5451     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
5452     NvU32 sorIndex;
5453 
5454     for (sorIndex = 0;
5455          sorIndex < NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS; sorIndex++) {
5456 
5457         if (sorAssignList[sorIndex] == NULL) {
5458             continue;
5459         }
5460 
5461         NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = {
5462             .subDeviceInstance = pDispEvo->displayOwner,
5463             .displayId = nvDpyIdToNvU32(sorAssignList[sorIndex]->displayId),
5464             .sorExcludeMask = ~NVBIT(sorIndex),
5465         };
5466         NvU32 ret;
5467 
5468         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5469                              pDevEvo->displayCommonHandle,
5470                              NV0073_CTRL_CMD_DFP_ASSIGN_SOR,
5471                              &params,
5472                              sizeof(params));
5473 
5474         if (ret != NVOS_STATUS_SUCCESS) {
5475             nvEvoLogDispDebug(pDispEvo,
5476                               EVO_LOG_ERROR,
5477                               "Failed to restore SOR-%u -> %s assignment.",
5478                               sorIndex, sorAssignList[sorIndex]->name);
5479         } else {
5480             RefreshSORAssignments(pDispEvo, &params);
5481         }
5482     }
5483 }
5484 
nvResumeDevEvo(NVDevEvoRec * pDevEvo)5485 NvBool nvResumeDevEvo(NVDevEvoRec *pDevEvo)
5486 {
5487     struct {
5488         const NVConnectorEvoRec *
5489             sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
5490     } disp[NVKMS_MAX_SUBDEVICES] = { };
5491     NVDispEvoRec *pDispEvo;
5492     NvU32 dispIndex;
5493 
5494     if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
5495                 NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
5496         FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
5497             CacheSorAssignList(pDispEvo, disp[dispIndex].sorAssignList);
5498         }
5499     }
5500 
5501     if (!nvAllocCoreChannelEvo(pDevEvo)) {
5502         return FALSE;
5503     }
5504 
5505     /*
5506      * During the hibernate-resume cycle vbios or GOP driver programs
5507      * the display engine to lit up the boot display. In
5508      * hibernate-resume path, doing NV0073_CTRL_CMD_DFP_ASSIGN_SOR
5509      * rm-control call before the core channel allocation causes display
5510      * channel hang because at that stage RM is not aware of the boot
5511      * display actived by vbios and it ends up unrouting active SOR
5512      * assignments. Therefore restore the SOR assignment only after the
5513      * core channel allocation.
5514      */
5515 
5516     if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
5517                 NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
5518 
5519         /*
5520          * Shut down all heads before restoring the SOR assignments
5521          * because, in the hibernate-resume case, the SOR for which NVKMS
5522          * is trying to restore the assignment might be in use by the boot
5523          * display set up by the VBIOS/GOP driver.
5524          */
5525         nvShutDownApiHeads(pDevEvo, pDevEvo->pNvKmsOpenDev,
5526                            NULL /* pTestFunc, shut down all heads */,
5527                            NULL /* pData */,
5528                            TRUE /* doRasterLock */);
5529 
5530         FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
5531             RestoreSorAssignList(pDispEvo, disp[dispIndex].sorAssignList);
5532         }
5533     }
5534 
5535     return TRUE;
5536 }
5537 
5538 void nvSuspendDevEvo(NVDevEvoRec *pDevEvo)
5539 {
5540     nvFreeCoreChannelEvo(pDevEvo);
5541 }
5542 
5543 /*!
5544  * Free the EVO core channel.
5545  *
5546  * This function does nothing if the core channel was already free.
5547  */
5548 void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo)
5549 {
5550     NVDispEvoPtr pDispEvo;
5551     NvU32 dispIndex;
5552     NvU32 head;
5553 
5554     ClearApiHeadState(pDevEvo);
5555 
5556     nvEvoCancelPostFlipIMPTimer(pDevEvo);
5557     nvCancelVrrFrameReleaseTimers(pDevEvo);
5558 
5559     nvCancelLowerDispBandwidthTimer(pDevEvo);
5560 
5561     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
5562         nvRmUnregisterBacklight(pDispEvo);
5563 
5564         nvAssert(pDevEvo->skipConsoleRestore ||
5565                  nvDpyIdListIsEmpty(nvActiveDpysOnDispEvo(pDispEvo)));
5566     }
5567 
5568     // Pause the DisplayPort library's control of the device.
5569     nvRmPauseDP(pDevEvo);
5570 
5571     nvFreeLutSurfacesEvo(pDevEvo);
5572 
5573     // Unmap and free the cursor controls for all heads
5574     nvFreeCursorEvo(pDevEvo);
5575 
5576     // TODO: Unregister all surfaces registered with this device.
5577 
5578     for (head = 0; head < pDevEvo->numHeads; head++) {
5579         NvU32 layer;
5580 
5581         for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
5582             nvRmEvoFreePreSyncpt(pDevEvo, pDevEvo->head[head].layer[layer]);
5583             pDevEvo->head[head].layer[layer] = NULL;
5584         }
5585         pDevEvo->head[head].numLayers = 0;
5586     }
5587 
5588     nvRMFreeWindowChannels(pDevEvo);
5589     nvRMFreeOverlayChannels(pDevEvo);
5590     nvRMFreeBaseChannels(pDevEvo);
5591 
5592     nvRMFreeEvoCoreChannel(pDevEvo);
5593 
5594     if (pDevEvo->displayHandle != 0) {
5595         if (nvRmApiFree(nvEvoGlobal.clientHandle,
5596                         pDevEvo->deviceHandle,
5597                         pDevEvo->displayHandle) != NVOS_STATUS_SUCCESS) {
5598             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to tear down Disp");
5599         }
5600         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->displayHandle);
5601         pDevEvo->displayHandle = 0;
5602 
5603         if (!pDevEvo->skipConsoleRestore) {
5604             nvRmVTSwitch(pDevEvo,
5605                          NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_RESTORE_VT_STATE);
5606         } else {
5607             nvRmVTSwitch(pDevEvo,
5608                          NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_CONSOLE_RESTORED);
5609         }
5610     }
5611 
5612     // No longer possible that NVKMS is driving any displays, allow GC6.
5613     nvRmSetGc6Allowed(pDevEvo, TRUE);
5614 
5615     nvFree(pDevEvo->gpus);
5616     pDevEvo->gpus = NULL;
5617 }
5618 
5619 
5620 #define ASSIGN_PIN(_pPin, _pin)                         \
5621     do {                                                \
5622         ct_assert(NV_IS_UNSIGNED((_pin)));              \
5623         if ((_pPin)) {                                  \
5624             if ((_pin) >= NV_EVO_NUM_LOCK_PIN_CAPS) {   \
5625                 return FALSE;                           \
5626             }                                           \
5627             *(_pPin) = (_pin);                          \
5628         }                                               \
5629     } while (0)
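/*
 * For illustration: ASSIGN_PIN(pFrameLockPin, params.frameLockPin)
 * writes the pin index through the pointer only when the caller passed
 * a non-NULL pointer, and makes the enclosing function return FALSE if
 * RM reports a pin index beyond the NV_EVO_NUM_LOCK_PIN_CAPS
 * capability table.
 */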
5630 
5631 static NvBool QueryFrameLockHeaderPins(const NVDispEvoRec *pDispEvo,
5632                                        NVEvoSubDevPtr pEvoSubDev,
5633                                        NvU32 *pFrameLockPin,
5634                                        NvU32 *pRasterLockPin,
5635                                        NvU32 *pFlipLockPin)
5636 {
5637     NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS params = { };
5638     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5639 
5640     params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance;
5641 
5642     if (nvRmApiControl(nvEvoGlobal.clientHandle,
5643                        pDevEvo->displayHandle,
5644                        NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS,
5645                        &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
5646         nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
5647                           "Failed to query framelock header pins");
5648         return FALSE;
5649     }
5650 
5651     ASSIGN_PIN(pFrameLockPin, params.frameLockPin);
5652     ASSIGN_PIN(pRasterLockPin, params.rasterLockPin);
5653     ASSIGN_PIN(pFlipLockPin, params.flipLockPin);
5654 
5655     return TRUE;
5656 }
5657 
5658 // Gets the lock pin dedicated to a given signal; returns NV_EVO_LOCK_PIN_ERROR if no suitable pin is found.
5659 NVEvoLockPin nvEvoGetPinForSignal(const NVDispEvoRec *pDispEvo,
5660                                   NVEvoSubDevPtr pEvoSubDev,
5661                                   NVEvoLockSignal signal)
5662 {
5663     NVEvoLockPinCaps *caps = pEvoSubDev->capabilities.pin;
5664     NvU32 pin;
5665 
5666     switch (signal) {
5667 
5668         case NV_EVO_LOCK_SIGNAL_RASTER_LOCK:
5669             if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev,
5670                                           NULL, &pin, NULL)) {
5671                 break;
5672             }
5673 
5674             if (!caps[pin].scanLock) break;
5675 
5676             return NV_EVO_LOCK_PIN_0 + pin;
5677 
5678         case NV_EVO_LOCK_SIGNAL_FRAME_LOCK:
5679             if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev,
5680                                           &pin, NULL, NULL)) {
5681                 break;
5682             }
5683 
5684             if (!caps[pin].scanLock) break;
5685 
5686             return NV_EVO_LOCK_PIN_0 + pin;
5687 
5688         case NV_EVO_LOCK_SIGNAL_FLIP_LOCK:
5689             if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev,
5690                                           NULL, NULL, &pin) ||
5691                 !caps[pin].flipLock) {
5692                 // If the query from RM fails (or returns a bogus pin), fall
5693                 // back to an alternate mechanism.  This may happen on boards
5694                 // with no framelock header.  Look in the capabilities for the
5695                 // pin that has the requested capability.
5696                 for (pin = 0; pin < NV_EVO_NUM_LOCK_PIN_CAPS; pin++) {
5697                     if (caps[pin].flipLock)
5698                         break;
5699                 }
5700 
5701                 if (pin == NV_EVO_NUM_LOCK_PIN_CAPS) {
5702                     // Not found
5703                     break;
5704                 }
5705             }
5706 
5707             if (!caps[pin].flipLock) {
5708                 break;
5709             }
5710 
5711             return NV_EVO_LOCK_PIN_0 + pin;
5712 
5713         case NV_EVO_LOCK_SIGNAL_STEREO:
5714             // Look in the capabilities for the pin that has the requested capability
5715             for (pin = 0; pin < NV_EVO_NUM_LOCK_PIN_CAPS; pin++) {
5716                 if (caps[pin].stereo)
5717                     break;
5718             }
5719 
5720             if (pin == NV_EVO_NUM_LOCK_PIN_CAPS) break;
5721 
5722             return NV_EVO_LOCK_PIN_0 + pin;
5723 
5724         default:
5725             nvAssert(!"Unknown signal type");
5726             break;
5727     }
5728 
5729     // Pin not found
5730     return NV_EVO_LOCK_PIN_ERROR;
5731 }
5732 
5733 void nvSetDVCEvo(NVDispEvoPtr pDispEvo,
5734                  const NvU32 head,
5735                  NvS32 dvc,
5736                  NVEvoUpdateState *updateState)
5737 {
5738     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5739     NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
5740 
5741     nvAssert(dvc >= NV_EVO_DVC_MIN);
5742     nvAssert(dvc <= NV_EVO_DVC_MAX);
5743 
5744     // The HW range is from -2048 to +2047.
5745     // Negative values are not used: they distort the colors.
5746     // Values from 0 to 1023 grey the colors out.
5747     // We use 0 to 2047, with 1024 as the default.
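    // Worked mapping for the +1024 bias below: dvc == -1024 maps to
    // satCos 0 (fully greyed out), dvc == 0 maps to the default of
    // 1024, and dvc == 1023 maps to 2047 (assuming NV_EVO_DVC_MIN/MAX
    // are -1024/1023, which the assert after the bias implies).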
5748     dvc += 1024;
5749     nvAssert(dvc >= 0);
5750     pHeadState->procAmp.satCos = dvc;
5751 
5752     // In SW YUV420 mode, HW is programmed with default DVC. The DVC is handled
5753     // in a headSurface composite shader.
5754     if (pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) {
5755         pHeadState->procAmp.satCos = 1024;
5756     }
5757 
5758     nvPushEvoSubDevMaskDisp(pDispEvo);
5759     pDevEvo->hal->SetProcAmp(pDispEvo, head, updateState);
5760     nvPopEvoSubDevMask(pDevEvo);
5761 }
5762 
5763 void nvSetImageSharpeningEvo(NVDispEvoRec *pDispEvo, const NvU32 head,
5764                              NvU32 value, NVEvoUpdateState *updateState)
5765 {
5766     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5767 
5768     /*
5769      * EVO values range from -128 to 127, with a default of 0;
5770      * negative values sharpen.
5771      * Control panel values range from 0 (less sharp) to 255.
5772      */
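    /*
     * Worked mapping for 'value = 127 - value' below: a control panel
     * value of 0 maps to EVO 127 (least sharp), 127 maps to the EVO
     * default of 0, and 255 maps to -128 (sharpest).
     */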
5773     value = 127 - value;
5774 
5775     nvPushEvoSubDevMaskDisp(pDispEvo);
5776     pDevEvo->hal->SetOutputScaler(pDispEvo, head, value, updateState);
5777     nvPopEvoSubDevMask(pDevEvo);
5778 }
5779 
5780 static void LayerSetPositionOneApiHead(NVDispEvoRec *pDispEvo,
5781                                        const NvU32 apiHead,
5782                                        const NvU32 layer,
5783                                        const NvS16 x,
5784                                        const NvS16 y,
5785                                        NVEvoUpdateState *pUpdateState)
5786 {
5787     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5788     const NVDispApiHeadStateEvoRec *pApiHeadState =
5789         &pDispEvo->apiHeadState[apiHead];
5790     const NvU32 sd = pDispEvo->displayOwner;
5791     NvU32 head;
5792 
5793     nvPushEvoSubDevMaskDisp(pDispEvo);
5794     FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
5795         NVEvoSubDevHeadStateRec *pSdHeadState =
5796             &pDevEvo->gpus[sd].headState[head];
5797 
5798         if ((pSdHeadState->layer[layer].outputPosition.x != x) ||
5799             (pSdHeadState->layer[layer].outputPosition.y != y)) {
5800             NVEvoChannelPtr pChannel =
5801                 pDevEvo->head[head].layer[layer];
5802 
5803             pSdHeadState->layer[layer].outputPosition.x = x;
5804             pSdHeadState->layer[layer].outputPosition.y = y;
5805 
5806             pDevEvo->hal->SetImmPointOut(pDevEvo, pChannel, sd, pUpdateState,
5807                                          x, y);
5808         }
5809     }
5810     nvPopEvoSubDevMask(pDevEvo);
5811 }
5812 
5813 NvBool nvLayerSetPositionEvo(
5814     NVDevEvoPtr pDevEvo,
5815     const struct NvKmsSetLayerPositionRequest *pRequest)
5816 {
5817     NVDispEvoPtr pDispEvo;
5818     NvU32 sd;
5819 
5820     /*
5821      * We need this call to not modify any state if it will fail, so we
5822      * first verify that all relevant layers support output positioning,
5823      * then go back through the layers to actually modify the relevant
5824      * state.
5825      */
5826     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
5827         NvU32 apiHead;
5828 
5829         if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) {
5830             continue;
5831         }
5832 
5833         for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) {
5834             NvU32 layer;
5835 
5836             if ((pRequest->disp[sd].requestedHeadsBitMask &
5837                  NVBIT(apiHead)) == 0) {
5838                 continue;
5839             }
5840 
5841             if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
5842                 continue;
5843             }
5844 
5845             for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
5846                 const NvS16 x = pRequest->disp[sd].head[apiHead].layerPosition[layer].x;
5847                 const NvS16 y = pRequest->disp[sd].head[apiHead].layerPosition[layer].y;
5848 
5849                 if ((pRequest->disp[sd].head[apiHead].requestedLayerBitMask &
5850                         NVBIT(layer)) == 0x0) {
5851                     continue;
5852                 }
5853 
5854                 /*
5855                  * Error out if a requested layer does not support position
5856                  * updates and the requested position is not (0, 0).
5857                  */
5858                 if (!pDevEvo->caps.layerCaps[layer].supportsWindowMode &&
5859                     (x != 0 || y != 0)) {
5860                     nvEvoLogDebug(EVO_LOG_ERROR, "Layer %d does not support "
5861                                                  "position updates.", layer);
5862                     return FALSE;
5863                 }
5864             }
5865         }
5866     }
5867 
5868     /* Checks in above block passed, so make the requested changes. */
5869     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
5870         NvU32 apiHead;
5871 
5872         if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) {
5873             continue;
5874         }
5875 
5876         for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) {
5877             NVEvoUpdateState updateState = { };
5878             NvU32 layer;
5879 
5880             if ((pRequest->disp[sd].requestedHeadsBitMask &
5881                  NVBIT(apiHead)) == 0) {
5882                 continue;
5883             }
5884 
5885             if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
5886                 continue;
5887             }
5888 
5889             for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
5890                 const NvS16 x = pRequest->disp[sd].head[apiHead].layerPosition[layer].x;
5891                 const NvS16 y = pRequest->disp[sd].head[apiHead].layerPosition[layer].y;
5892 
5893                 if ((pRequest->disp[sd].head[apiHead].requestedLayerBitMask &
5894                         NVBIT(layer)) == 0x0) {
5895                     continue;
5896                 }
5897 
5898                 LayerSetPositionOneApiHead(pDispEvo, apiHead, layer, x, y,
5899                                            &updateState);
5900             }
5901 
5902             pDevEvo->hal->Update(pDevEvo, &updateState, TRUE /* releaseElv */);
5903         }
5904     }
5905 
5906     return TRUE;
5907 }
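/*
 * Example request (hypothetical client-side sketch, for illustration
 * only): move layer 0 on api-head 0 of disp 0 to (100, 50).
 *
 *     struct NvKmsSetLayerPositionRequest req = { };
 *
 *     req.requestedDispsBitMask = NVBIT(0);
 *     req.disp[0].requestedHeadsBitMask = NVBIT(0);
 *     req.disp[0].head[0].requestedLayerBitMask = NVBIT(0);
 *     req.disp[0].head[0].layerPosition[0].x = 100;
 *     req.disp[0].head[0].layerPosition[0].y = 50;
 *     nvLayerSetPositionEvo(pDevEvo, &req);
 *
 * The first pass in nvLayerSetPositionEvo() validates that every
 * requested layer supports window-mode positioning before the second
 * pass programs any state.
 */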
5908 
5909 /*
5910  * nvConstructHwModeTimingsImpCheckEvo() - perform an IMP check on the
5911  * given raster timings and viewport during the
5912  * nvConstructHwModeTimingsEvo path.  If IMP fails, we try multiple
5913  * times, each time scaling back the usage bounds until we find a
5914  * configuration IMP will accept, or until we can't scale back any
5915  * further.  If this fails, mark the viewport as invalid.
5916  */
5917 
5918 NvBool nvConstructHwModeTimingsImpCheckEvo(
5919     const NVConnectorEvoRec                *pConnectorEvo,
5920     const NVHwModeTimingsEvo               *pTimings,
5921     const NvBool                            enableDsc,
5922     const NvBool                            b2Heads1Or,
5923     const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
5924     const enum NvKmsDpyAttributeColorBpcValue colorBpc,
5925     const struct NvKmsModeValidationParams *pParams,
5926     NVHwModeTimingsEvo                      timings[NVKMS_MAX_HEADS_PER_DISP],
5927     NvU32                                  *pNumHeads,
5928     NVEvoInfoStringPtr                      pInfoString)
5929 {
5930     NvU32 head;
5931     NvU32 activeRmId;
5932     const NvU32 numHeads = b2Heads1Or ? 2 : 1;
5933     NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP];
5934     NvBool requireBootClocks = !!(pParams->overrides &
5935                                   NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS);
5936     NvU32 ret;
5937 
5938     activeRmId = nvRmAllocDisplayId(pConnectorEvo->pDispEvo,
5939                     nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId));
5940     if (activeRmId == 0x0) {
5941         return FALSE;
5942     }
5943 
5944     nvkms_memset(&timingsParams, 0, sizeof(timingsParams));
5945 
5946     for (head = 0; head < numHeads; head++) {
5947         timingsParams[head].pConnectorEvo = pConnectorEvo;
5948         timingsParams[head].activeRmId = activeRmId;
5949         timingsParams[head].pixelDepth =
5950             nvEvoColorSpaceBpcToPixelDepth(colorSpace, colorBpc);
5951         if (!nvEvoGetSingleTileHwModeTimings(pTimings, numHeads,
5952                                              &timings[head])) {
5953             ret = FALSE;
5954             goto done;
5955         }
5956         timingsParams[head].pTimings = &timings[head];
5957         timingsParams[head].enableDsc = enableDsc;
5958         timingsParams[head].b2Heads1Or = b2Heads1Or;
5959         timingsParams[head].pUsage = &timings[head].viewPort.guaranteedUsage;
5960     }
5961 
5962     /* Bypass this check if the user disabled IMP. */
5963     if ((pParams->overrides &
5964          NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK) != 0) {
5965         ret = TRUE;
5966     } else {
5967         ret = nvValidateImpOneDispDowngrade(pConnectorEvo->pDispEvo, timingsParams,
5968                                             requireBootClocks,
5969                                             NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE,
5970                                             /* downgradePossibleHeadsBitMask */
5971                                             (NVBIT(NVKMS_MAX_HEADS_PER_DISP) - 1UL));
5972     }
5973 
5974     if (ret) {
5975         *pNumHeads = numHeads;
5976     } else {
5977         nvEvoLogInfoString(pInfoString,
5978                            "ViewPort %dx%d exceeds hardware capabilities.",
5979                            pTimings->viewPort.out.width,
5980                            pTimings->viewPort.out.height);
5981     }
5982 
5983 done:
5984     nvRmFreeDisplayId(pConnectorEvo->pDispEvo, activeRmId);
5985 
5986     return ret;
5987 }
5988 
5989 /*
5990  * Convert from NvModeTimings values to NVHwModeTimingsEvo.
5991  */
5992 
5993 static void
5994 ConstructHwModeTimingsFromNvModeTimings(const NvModeTimings *pModeTimings,
5995                                         NVHwModeTimingsEvoPtr pTimings)
5996 {
5997     NvU32 hBlankStart;
5998     NvU32 vBlankStart;
5999     NvU32 hBlankEnd;
6000     NvU32 vBlankEnd;
6001     NvU32 hSyncWidth;
6002     NvU32 vSyncWidth;
6003     NvU32 vTotalAdjustment = 0;
6004 
6005     NvModeTimings modeTimings;
6006 
6007     modeTimings = *pModeTimings;
6008 
6009     if (modeTimings.doubleScan) {
6010         modeTimings.vVisible *= 2;
6011         modeTimings.vSyncStart *= 2;
6012         modeTimings.vSyncEnd *= 2;
6013         modeTimings.vTotal *= 2;
6014     }
6015 
6016     /*
6017      * The real pixel clock and width values for modes using YUV 420 emulation
6018      * are half of the incoming values parsed from the EDID. This conversion is
6019      * performed here, so NvModeTimings will have the user-visible (full width)
6020      * values, and NVHwModeTimingsEvo will have the real (half width) values.
6021      *
6022      * HW YUV 420 requires setting the full width mode timings, which are then
6023      * converted in HW.  RM will recognize YUV420 mode is in use and halve
6024      * these values for IMP.
6025      *
6026      * In either case, only modes with even width are allowed in YUV 420 mode.
6027      */
6028     if (modeTimings.yuv420Mode != NV_YUV420_MODE_NONE) {
6029         nvAssert(((modeTimings.pixelClockHz & 1) == 0) &&
6030                  ((modeTimings.hVisible & 1) == 0) &&
6031                  ((modeTimings.hSyncStart & 1) == 0) &&
6032                  ((modeTimings.hSyncEnd & 1) == 0) &&
6033                  ((modeTimings.hTotal & 1) == 0) &&
6034                  ((modeTimings.vVisible & 1) == 0));
6035         if (modeTimings.yuv420Mode == NV_YUV420_MODE_SW) {
6036             modeTimings.pixelClockHz /= 2;
6037             modeTimings.hVisible /= 2;
6038             modeTimings.hSyncStart /= 2;
6039             modeTimings.hSyncEnd /= 2;
6040             modeTimings.hTotal /= 2;
6041         }
6042     }
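    /*
     * For example, a 3840x2160 mode using SW YUV 420 emulation is
     * programmed here as 1920 pixels wide with half the EDID pixel
     * clock, while the user-visible NvModeTimings keep the full
     * 3840-pixel width.
     */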
6043 
6044     pTimings->hSyncPol = modeTimings.hSyncNeg;
6045     pTimings->vSyncPol = modeTimings.vSyncNeg;
6046     pTimings->interlaced = modeTimings.interlaced;
6047     pTimings->doubleScan = modeTimings.doubleScan;
6048 
6049     /* pTimings->pixelClock is in KHz, but modeTimings.pixelClockHz is in Hz */
6050 
6051     pTimings->pixelClock = HzToKHz(modeTimings.pixelClockHz);
6052 
6053     /*
6054      * Assign the total width and height; note that when the raster
6055      * timings are interlaced, we need to make sure SetRasterSize.Height
6056      * is odd, per EVO's mfs file.
6057      */
6058 
6059     if (pTimings->interlaced) vTotalAdjustment = 1;
6060 
6061     pTimings->rasterSize.x = modeTimings.hTotal;
6062     pTimings->rasterSize.y = modeTimings.vTotal | vTotalAdjustment;
6063 
6064     /*
6065      * A bit of EVO quirkiness: The hw increases the blank/sync values
6066      * by one. So we need to offset by subtracting one.
6067      *
6068      * In other words, the h/w inserts one extra sync line/pixel thus
6069      * incrementing the raster params by one. The number of blank
6070      * lines/pixels we get is true to what we ask for.  Note the hw
6071      * does not increase the TotalImageSize by one so we don't need to
6072      * adjust SetRasterSize.
6073      *
6074      * This is slightly unintuitive: per EVO's specs, blankEnd comes
6075      * before blankStart, as defined below.
6076      *   BlankStart: the last pixel/line at the end of the h/v active area.
6077      *   BlankEnd: the last pixel/line at the end of the h/v blanking.
6078      *
6079      * Also: note that in the below computations, we divide by two for
6080      * interlaced modes *before* subtracting; see bug 263622.
6081      */
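    /*
     * Worked example, using the standard CEA-861 1920x1080@60 timings
     * for illustration: hVisible=1920, hSyncStart=2008, hSyncEnd=2052,
     * hTotal=2200 yields hBlankStart = 1920 + (2200 - 2008) = 2112,
     * hBlankEnd = 192, and hSyncWidth = 44; after the subtract-by-one
     * adjustment below, rasterBlankStart.x = 2111, rasterBlankEnd.x =
     * 191, and rasterSyncEnd.x = 43.
     */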
6082 
6083     hBlankStart = modeTimings.hVisible +
6084         (modeTimings.hTotal - modeTimings.hSyncStart);
6085 
6086     vBlankStart = modeTimings.vVisible +
6087         (modeTimings.vTotal - modeTimings.vSyncStart);
6088 
6089     hBlankEnd = (modeTimings.hTotal - modeTimings.hSyncStart);
6090     vBlankEnd = (modeTimings.vTotal - modeTimings.vSyncStart);
6091 
6092     hSyncWidth = (modeTimings.hSyncEnd - modeTimings.hSyncStart);
6093     vSyncWidth = (modeTimings.vSyncEnd - modeTimings.vSyncStart);
6094 
6095     if (pTimings->interlaced) {
6096         vBlankStart /= 2;
6097         vBlankEnd /= 2;
6098         vSyncWidth /= 2;
6099     }
6100 
6101     pTimings->rasterSyncEnd.x           = hSyncWidth - 1;
6102     pTimings->rasterSyncEnd.y           = vSyncWidth - 1;
6103     pTimings->rasterBlankStart.x        = hBlankStart - 1;
6104     pTimings->rasterBlankStart.y        = vBlankStart - 1;
6105     pTimings->rasterBlankEnd.x          = hBlankEnd - 1;
6106     pTimings->rasterBlankEnd.y          = vBlankEnd - 1;
6107 
6108     /* assign rasterVertBlank2 */
6109 
6110     if (pTimings->interlaced) {
6111         const NvU32 firstFieldHeight = modeTimings.vTotal / 2;
6112 
6113         pTimings->rasterVertBlank2Start = firstFieldHeight + vBlankStart - 1;
6114         pTimings->rasterVertBlank2End = firstFieldHeight + vBlankEnd - 1;
6115     } else {
6116         pTimings->rasterVertBlank2Start = 0;
6117         pTimings->rasterVertBlank2End = 0;
6118     }
6119 
6120     pTimings->hdmi3D = modeTimings.hdmi3D;
6121     pTimings->yuv420Mode = modeTimings.yuv420Mode;
6122 }
6123 
6124 
6125 
6126 /*
6127  * Adjust the HwModeTimings as necessary to meet dual link dvi
6128  * requirements; returns TRUE if the timings were successfully
6129  * modified; returns FALSE if the timings cannot be made valid for
6130  * dual link dvi.
6131  */
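/*
 * For example, if rasterBlankEnd.x is 192 (even), both rasterBlankEnd.x
 * and rasterBlankStart.x are shifted by -1 so that the active region
 * starts on an even pixel; the +1 direction is used only when shrinking
 * would collide with rasterSyncEnd.x.
 */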
6132 static NvBool ApplyDualLinkRequirements(const NVDpyEvoRec *pDpyEvo,
6133                                         const struct
6134                                         NvKmsModeValidationParams *pParams,
6135                                         NVHwModeTimingsEvoPtr pTimings,
6136                                         NVEvoInfoStringPtr pInfoString)
6137 {
6138     int adjust;
6139 
6140     nvAssert(pDpyEvo->pConnectorEvo->legacyType ==
6141              NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP);
6142 
6143     if (pTimings->protocol != NVKMS_PROTOCOL_SOR_DUAL_TMDS) {
6144         return TRUE;
6145     }
6146 
6147     if ((pParams->overrides &
6148          NVKMS_MODE_VALIDATION_NO_DUAL_LINK_DVI_CHECK) != 0) {
6149         return TRUE;
6150     }
6151 
6152     /* extract the fields we will need below */
6153 
6154     /*
6155      * hTotal must be even for dual link dvi; we won't try to patch
6156      * the htotal size; just give up if it isn't even
6157      */
6158 
6159     if ((pTimings->rasterSize.x % 2) != 0) {
6160         nvEvoLogInfoString(pInfoString,
6161             "Horizontal Total (%d) must be even for dual link DVI mode timings.",
6162             pTimings->rasterSize.x);
6163         return FALSE;
6164     }
6165 
6166     /*
6167      * RASTER_BLANK_END_X must be odd, so that the active region
6168      * starts on the following (even) pixel; if it is odd, we are
6169      * already done
6170      */
6171 
6172     if ((pTimings->rasterBlankEnd.x % 2) == 1) return TRUE;
6173 
6174     /*
6175      * RASTER_BLANK_END_X is even, so we need to adjust both
6176      * RASTER_BLANK_END_X and RASTER_BLANK_START_X by one; we'll first
6177      * try to subtract one pixel from both
6178      */
6179 
6180     adjust = -1;
6181 
6182     /*
6183      * if RASTER_BLANK_END_X cannot be made smaller (would collide
6184      * with hSyncEnd), see if it would be safe to instead add one to
6185      * RASTER_BLANK_END_X and RASTER_BLANK_START_X
6186      */
6187 
6188     if (pTimings->rasterBlankEnd.x <= pTimings->rasterSyncEnd.x + 1) {
6189         if (pTimings->rasterBlankStart.x + 1 >= pTimings->rasterSize.x) {
6190             nvEvoLogInfoString(pInfoString,
6191                 "Cannot adjust mode timings for dual link DVI requirements.");
6192             return FALSE;
6193         }
6194         adjust = 1;
6195     }
6196 
6197     pTimings->rasterBlankEnd.x += adjust;
6198     pTimings->rasterBlankStart.x += adjust;
6199 
6200     nvEvoLogInfoString(pInfoString,
6201         "Adjusted mode timings for dual link DVI requirements.");
6202 
6203     return TRUE;
6204 }
6205 
6206 void nvInitScalingUsageBounds(const NVDevEvoRec *pDevEvo,
6207                               struct NvKmsScalingUsageBounds *pScaling)
6208 {
6209     pScaling->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_1X;
6210     pScaling->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_1X;
6211     pScaling->vTaps = pDevEvo->hal->caps.minScalerTaps;
6212     pScaling->vUpscalingAllowed = FALSE;
6213 }
6214 
6215 /*
6216  * Check if the provided number of vertical taps is possible based on the
6217  * capabilities: the lineStore (the smaller of inWidth and outWidth) must
6218  * not exceed the maximum pixels for the desired taps; see bug 241014
6219  */
6220 static NvBool IsVTapsPossible(const NVEvoScalerCaps *pScalerCaps,
6221                               NvU32 inWidth, NvU32 outWidth,
6222                               NVEvoScalerTaps nTaps)
6223 {
6224     const NvU32 lineStore = NV_MIN(inWidth, outWidth);
6225     NvU32 maxPixels = pScalerCaps->taps[nTaps].maxPixelsVTaps;
6226 
6227     return lineStore <= maxPixels;
6228 }
6229 
6230 /*!
6231  * Compute the scale factor and check against the maximum.
6232  *
6233  * \param[in]    max     Max scale factor to check against (* 1024)
6234  * \param[in]    in      Input width or height
6235  * \param[in]    out     Output width or height
6236  * \param[out]   pFactor Output scale factor (* 1024)
6237  */
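/*
 * For example, downscaling in=1920 to out=1080 computes
 * ((1920 * 1024) + 1079) / 1080 = 1821, i.e. a downscale factor of
 * roughly 1.778 in the fixed-point (* 1024) representation.
 */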
6238 static NvBool ComputeScalingFactor(NvU32 max,
6239                                    NvU16 in, NvU16 out,
6240                                    NvU16 *pFactor)
6241 {
6242     /* Use a 32-bit temporary to prevent overflow */
6243     NvU32 tmp;
6244 
6245     /* Add (out - 1) to round up */
6246     tmp = ((in * 1024) + (out - 1)) / out;
6247 
6248     /* Check against scaling limits. */
6249     if (tmp > max) {
6250         return FALSE;
6251     }
6252 
6253     *pFactor = tmp;
6254     return TRUE;
6255 }
6256 
6257 /*!
6258  * Compute scaling factors based on in/out dimensions.
6259  * Used by IMP and when programming viewport and window parameters in HW.
6260  *
6261  * The 'maxScaleFactor' values are defined by nvdClass_01.mfs as:
6262  *      SizeIn/SizeOut * 1024
6263  */
6264 NvBool nvComputeScalingUsageBounds(const NVEvoScalerCaps *pScalerCaps,
6265                                    const NvU32 inWidth, const NvU32 inHeight,
6266                                    const NvU32 outWidth, const NvU32 outHeight,
6267                                    NVEvoScalerTaps hTaps, NVEvoScalerTaps vTaps,
6268                                    struct NvKmsScalingUsageBounds *out)
6269 {
6270     const NVEvoScalerTapsCaps *pTapsCaps = NULL;
6271 
6272     out->vTaps = vTaps;
6273 
6274     /* Start with default values (1.0) */
6275     out->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_1X;
6276     out->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_1X;
6277 
6278     if (outHeight > inHeight) {
6279         out->vUpscalingAllowed = TRUE;
6280     } else if (outHeight < inHeight) {
6281         out->vUpscalingAllowed = FALSE;
6282 
6283         pTapsCaps = &pScalerCaps->taps[vTaps];
6284         if (!ComputeScalingFactor(pTapsCaps->maxVDownscaleFactor,
6285                                   inHeight, outHeight,
6286                                   &out->maxVDownscaleFactor)) {
6287             return FALSE;
6288         }
6289     }
6290 
6291     if (outWidth < inWidth) {
6292         pTapsCaps = &pScalerCaps->taps[hTaps];
6293         if (!ComputeScalingFactor(pTapsCaps->maxHDownscaleFactor,
6294                                   inWidth, outWidth,
6295                                   &out->maxHDownscaleFactor)) {
6296             return FALSE;
6297         }
6298     }
6299 
6300     return TRUE;
6301 }
6302 
6303 NvBool nvAssignScalerTaps(const NVDevEvoRec *pDevEvo,
6304                           const NVEvoScalerCaps *pScalerCaps,
6305                           const NvU32 inWidth, const NvU32 inHeight,
6306                           const NvU32 outWidth, const NvU32 outHeight,
6307                           NvBool doubleScan,
6308                           NVEvoScalerTaps *hTapsOut, NVEvoScalerTaps *vTapsOut)
6309 {
6310     NVEvoScalerTaps hTaps, vTaps;
6311     NvBool setHTaps = (outWidth != inWidth);
6312     NvBool setVTaps = (outHeight != inHeight);
6313 
6314     /*
6315      * Select the taps filtering; we select the highest taps allowed with our
6316      * scaling configuration.
6317      *
6318      * Note if requiresScalingTapsInBothDimensions is true and if we are
6319      * scaling in *either* dimension, then we need to program > 1 taps
6320      * in *both* dimensions.
6321      */
6322     if ((setHTaps || setVTaps) &&
6323         pDevEvo->hal->caps.requiresScalingTapsInBothDimensions) {
6324         setHTaps = TRUE;
6325         setVTaps = TRUE;
6326     }
6327 
6328     /*
6329      * Horizontal taps: if not scaling, then no filtering; otherwise, set the
6330      * maximum filtering, because htaps shouldn't have any constraints (unlike
6331      * vtaps... see below).
6332      */
6333     if (setHTaps) {
6334         /*
6335          * XXX dispClass_01.mfs says: "For text and desktop scaling, the 2 tap
6336          * bilinear frequently looks better than the 8 tap filter which is more
6337          * optimized for video type scaling." Once we determine how best to
6338          * expose configuration of taps, we should choose how to indicate that 8
6339          * or 5 taps is the maximum.
6340          *
6341          * For now, we'll start with 2 taps as the default, but may end up
6342          * picking a higher taps value if the required H downscaling factor
6343          * isn't possible with 2 taps.
6344          */
6345         NvBool hTapsFound = FALSE;
6346 
6347         for (hTaps = NV_EVO_SCALER_2TAPS;
6348              hTaps <= NV_EVO_SCALER_TAPS_MAX;
6349              hTaps++) {
6350             NvU16 hFactor;
6351 
6352             if (!ComputeScalingFactor(
6353                     pScalerCaps->taps[hTaps].maxHDownscaleFactor,
6354                     inWidth, outWidth,
6355                     &hFactor)) {
6356                 continue;
6357             }
6358 
6359             hTapsFound = TRUE;
6360             break;
6361         }
6362 
6363         if (!hTapsFound) {
6364             return FALSE;
6365         }
6366     } else {
6367         hTaps = pDevEvo->hal->caps.minScalerTaps;
6368     }
6369 
6370     /*
6371      * Vertical taps: if scaling, set the maximum valid filtering, otherwise, no
6372      * filtering.
6373      */
6374     if (setVTaps) {
6375         /*
6376          * Select the maximum vertical taps based on the capabilities.
6377          *
6378          * For doublescan modes, limit to 2 taps to reduce blurriness. We really
6379          * want plain old line doubling, but EVO doesn't support that.
6380          */
6381         if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_5TAPS) &&
6382             !doubleScan) {
6383             vTaps = NV_EVO_SCALER_5TAPS;
6384         } else if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_3TAPS) &&
6385                    !doubleScan) {
6386             vTaps = NV_EVO_SCALER_3TAPS;
6387         } else if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_2TAPS)) {
6388             vTaps = NV_EVO_SCALER_2TAPS;
6389         } else {
6390             return FALSE;
6391         }
6392     } else {
6393         vTaps = pDevEvo->hal->caps.minScalerTaps;
6394     }
6395 
6396     *hTapsOut = hTaps;
6397     *vTapsOut = vTaps;
6398 
6399     return TRUE;
6400 }
6401 
6402 /*
6403  * Check that ViewPortIn does not exceed hardware limits and compute vTaps and
6404  * hTaps based on configured ViewPortIn/Out scaling if possible given scaler
6405  * capabilities.
6406  */
6407 NvBool nvValidateHwModeTimingsViewPort(const NVDevEvoRec *pDevEvo,
6408                                        const NVEvoScalerCaps *pScalerCaps,
6409                                        NVHwModeTimingsEvoPtr pTimings,
6410                                        NVEvoInfoStringPtr pInfoString)
6411 {
6412     NVHwModeViewPortEvoPtr pViewPort = &pTimings->viewPort;
6413     const NvU32 inWidth   = pViewPort->in.width;
6414     const NvU32 inHeight  = pViewPort->in.height;
6415     const NvU32 outWidth  = pViewPort->out.width;
6416     const NvU32 outHeight = pViewPort->out.height;
6417     const NvBool scaling = (outWidth != inWidth) || (outHeight != inHeight);
6418     NVEvoScalerTaps hTaps, vTaps;
6419 
6420     /*
6421      * As per the MFS, there is a restriction on the width and height
6422      * of ViewPortIn and ViewPortOut.
6423      */
6424     if (inWidth > 8192 || inHeight > 8192 ||
6425         outWidth > 8192 || outHeight > 8192) {
6426         nvEvoLogInfoString(pInfoString,
6427                            "Viewport dimensions exceed hardware capabilities");
6428         return FALSE;
6429     }
6430 
6431     if (!nvAssignScalerTaps(pDevEvo, pScalerCaps, inWidth, inHeight, outWidth, outHeight,
6432                             pTimings->doubleScan, &hTaps, &vTaps)) {
6433         nvEvoLogInfoString(pInfoString,
6434                            "Unable to configure scaling from %dx%d to %dx%d (exceeds filtering capabilities)",
6435                            inWidth, inHeight,
6436                            outWidth, outHeight);
6437         return FALSE;
6438     }
6439 
6440     /*
6441      * If this is an interlaced mode but we don't have scaling
6442      * configured, check that the width will fit in the 2-tap vertical
6443      * LineStoreSize; this is an EVO requirement for interlaced
6444      * rasters
6445      */
6446     if (pTimings->interlaced && !scaling) {
6447         /* !scaling means the widths should be the same */
6448         nvAssert(outWidth == inWidth);
6449 
6450         if (outWidth > pScalerCaps->taps[NV_EVO_SCALER_2TAPS].maxPixelsVTaps) {
6451             nvEvoLogInfoString(pInfoString,
6452                                "Interlaced mode requires filtering, but line width (%d) exceeds filtering capabilities",
6453                                outWidth);
6454             return FALSE;
6455         }
6456 
6457         /* hTaps and vTaps should have been set to minScalerTaps above */
6458         nvAssert(hTaps == pDevEvo->hal->caps.minScalerTaps);
6459         nvAssert(vTaps == pDevEvo->hal->caps.minScalerTaps);
6460     }
6461 
6462     pViewPort->hTaps = hTaps;
6463     pViewPort->vTaps = vTaps;
6464     return TRUE;
6465 }
6466 
6467 static void AssignGuaranteedSOCBounds(const NVDevEvoRec *pDevEvo,
6468                                       struct NvKmsUsageBounds *pGuaranteed)
6469 {
6470     NvU32 layer;
6471 
6472     pGuaranteed->layer[NVKMS_MAIN_LAYER].usable = TRUE;
6473     pGuaranteed->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats =
6474         nvEvoGetFormatsWithEqualOrLowerUsageBound(
6475             NvKmsSurfaceMemoryFormatA8R8G8B8,
6476             pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats);
6477     nvInitScalingUsageBounds(pDevEvo, &pGuaranteed->layer[NVKMS_MAIN_LAYER].scaling);
6478 
6479     for (layer = 1; layer < ARRAY_LEN(pGuaranteed->layer); layer++) {
6480         pGuaranteed->layer[layer].usable = FALSE;
6481         nvInitScalingUsageBounds(pDevEvo, &pGuaranteed->layer[layer].scaling);
6482     }
6483 }
6484 
6485 /*
6486  * Initialize the given NvKmsUsageBounds. Ask for everything supported by the HW
6487  * by default.  Later, based on what IMP says, we will scale back as needed.
6488  */
6489 void nvAssignDefaultUsageBounds(const NVDispEvoRec *pDispEvo,
6490                                 NVHwModeViewPortEvo *pViewPort)
6491 {
6492     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
6493     struct NvKmsUsageBounds *pPossible = &pViewPort->possibleUsage;
6494     NvU32 i;
6495 
6496     for (i = 0; i < ARRAY_LEN(pPossible->layer); i++) {
6497         struct NvKmsScalingUsageBounds *pScaling = &pPossible->layer[i].scaling;
6498 
6499         pPossible->layer[i].supportedSurfaceMemoryFormats =
6500             pDevEvo->caps.layerCaps[i].supportedSurfaceMemoryFormats;
6501         pPossible->layer[i].usable =
6502             (pPossible->layer[i].supportedSurfaceMemoryFormats != 0);
6503         if (!pPossible->layer[i].usable) {
6504             continue;
6505         }
6506 
6507         nvInitScalingUsageBounds(pDevEvo, pScaling);
6508 
6509         if (pDevEvo->hal->GetWindowScalingCaps) {
6510             const NVEvoScalerCaps *pScalerCaps =
6511                 pDevEvo->hal->GetWindowScalingCaps(pDevEvo);
6512             int j;
6513 
6514             for (j = NV_EVO_SCALER_TAPS_MAX; j >= NV_EVO_SCALER_TAPS_MIN; j--) {
6515                 const NVEvoScalerTapsCaps *pTapsCaps = &pScalerCaps->taps[j];
6516 
6517                 if ((pTapsCaps->maxVDownscaleFactor == 0) &&
6518                     (pTapsCaps->maxHDownscaleFactor == 0)) {
6519                     continue;
6520                 }
6521 
6522                 pScaling->maxVDownscaleFactor = pTapsCaps->maxVDownscaleFactor;
6523                 pScaling->maxHDownscaleFactor = pTapsCaps->maxHDownscaleFactor;
6524                 pScaling->vTaps = j;
6525                 pScaling->vUpscalingAllowed = (pTapsCaps->maxPixelsVTaps > 0);
6526                 break;
6527             }
6528         }
6529     }
6530 
6531     if (pDevEvo->isSOCDisplay) {
6532         AssignGuaranteedSOCBounds(pDevEvo, &pViewPort->guaranteedUsage);
6533     } else {
6534         pViewPort->guaranteedUsage = *pPossible;
6535     }
6536 }
6537 
6538 /*
6539  * ConstructHwModeTimingsViewPort() - determine the ViewPortOut size
6540  *
6541  * ViewPortIn (specified by inWidth, inHeight) selects the pixels to
6542  * extract from the scanout surface; ViewPortOut positions those
6543  * pixels within the raster timings.
6544  *
6545  * If the configuration is not possible, pViewPort->valid will be set
6546  * to false; otherwise, pViewPort->valid will be set to true.
6547  */
6548 
6549 static NvBool
6550 ConstructHwModeTimingsViewPort(const NVDispEvoRec *pDispEvo,
6551                                NVHwModeTimingsEvoPtr pTimings,
6552                                NVEvoInfoStringPtr pInfoString,
6553                                const struct NvKmsSize *pViewPortSizeIn,
6554                                const struct NvKmsRect *pViewPortOut)
6555 {
6556     NVHwModeViewPortEvoPtr pViewPort = &pTimings->viewPort;
6557     NvU32 outWidth, outHeight;
6558     const NvU32 hVisible = nvEvoVisibleWidth(pTimings);
6559     const NvU32 vVisible = nvEvoVisibleHeight(pTimings);
6560 
6561     /* the ViewPortOut should default to the raster size */
6562 
6563     outWidth = hVisible;
6564     outHeight = vVisible;
6565 
6566     pViewPort->out.xAdjust = 0;
6567     pViewPort->out.yAdjust = 0;
6568     pViewPort->out.width = outWidth;
6569     pViewPort->out.height = outHeight;
6570 
6571     /*
6572      * If custom viewPortOut or viewPortIn were specified, do basic
6573      * validation and then assign them to pViewPort.  We'll do more
6574      * extensive checking of these values as part of IMP.  Note that
6575      * pViewPort->out.[xy]Adjust are relative to viewPortOut centered
6576      * within the raster timings, but pViewPortOut->[xy]1 are relative
6577      * to 0,0.
6578      */
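    /*
     * For example, with hVisible=1920 and a client viewPortOut of width
     * 1280 at x=0, offset = -(1920 - 1280) / 2 = -320 and xAdjust =
     * -320 + 0 = -320: the 1280-wide region moves from the centered
     * position to the left edge of the raster.
     */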
6579     if (pViewPortOut) {
6580         NvS16 offset;
6581         struct NvKmsRect viewPortOut = *pViewPortOut;
6582 
6583         /*
6584          * When converting from user viewport out to hardware raster timings,
6585          * double it in the vertical dimension
6586          */
6587         if (pTimings->doubleScan) {
6588             viewPortOut.y *= 2;
6589             viewPortOut.height *= 2;
6590         }
6591 
6592         /*
6593          * The client-specified viewPortOut is in "full" horizontal space for
6594          * SW YUV420 modes. Convert to "half" horizontal space (matching
6595          * NVHwModeTimingsEvo and viewPortIn).
6596          */
6597         if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) {
6598             viewPortOut.x /= 2;
6599             viewPortOut.width /= 2;
6600         }
6601 
6602         if (A_plus_B_greater_than_C_U16(viewPortOut.x,
6603                                         viewPortOut.width,
6604                                         hVisible)) {
6605             return FALSE;
6606         }
6607 
6608         if (A_plus_B_greater_than_C_U16(viewPortOut.y,
6609                                         viewPortOut.height,
6610                                         vVisible)) {
6611             return FALSE;
6612         }
6613 
6614         offset = (hVisible - viewPortOut.width) / 2 * -1;
6615         pViewPort->out.xAdjust = offset + viewPortOut.x;
6616 
6617         offset = (vVisible - viewPortOut.height) / 2 * -1;
6618         pViewPort->out.yAdjust = offset + viewPortOut.y;
6619 
6620         pViewPort->out.width = viewPortOut.width;
6621         pViewPort->out.height = viewPortOut.height;
6622     }
6623 
6624     if (pViewPortSizeIn) {
6625         if (pViewPortSizeIn->width <= 0) {
6626             return FALSE;
6627         }
6628         if (pViewPortSizeIn->height <= 0) {
6629             return FALSE;
6630         }
6631 
6632         pViewPort->in.width = pViewPortSizeIn->width;
6633         pViewPort->in.height = pViewPortSizeIn->height;
6634     } else {
6635         pViewPort->in.width = pViewPort->out.width;
6636         pViewPort->in.height = pViewPort->out.height;
6637 
6638         /* When deriving viewportIn from viewportOut, halve the height for
6639          * doubleScan */
6640         if (pTimings->doubleScan) {
6641             pViewPort->in.height /= 2;
6642         }
6643     }
6644 
6645     nvAssignDefaultUsageBounds(pDispEvo, &pTimings->viewPort);
6646 
6647     return TRUE;
6648 }
6649 
6650 
6651 
6652 /*
6653  * GetDfpProtocol() - determine the protocol to use on the given pDpy
6654  * with the given pTimings; assigns pTimings->protocol.
6655  */
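/*
 * In summary: SOR connectors pick HDMI FRL when the dpy is HDMI,
 * supports FRL, and the mode needs FRL (at 10 or 8 BPC); otherwise the
 * protocol follows the RM-reported OR protocol, with DUAL_TMDS
 * downgraded to SINGLE_TMDS_A when dual link is not required.  PIOR and
 * DSI connectors map directly to their fixed protocols.
 */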
6656 
6657 static NvBool GetDfpProtocol(const NVDpyEvoRec *pDpyEvo,
6658                              const struct NvKmsModeValidationParams *pParams,
6659                              NVHwModeTimingsEvoPtr pTimings)
6660 {
6661     NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
6662     const NvU32 rmProtocol = pConnectorEvo->or.protocol;
6663     const NvU32 overrides = pParams->overrides;
6664     enum nvKmsTimingsProtocol timingsProtocol;
6665 
6666     nvAssert(pConnectorEvo->legacyType ==
6667              NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP);
6668 
6669     if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
6670         /* Override protocol if this mode requires HDMI FRL. */
6671         if (nvDpyIsHdmiEvo(pDpyEvo) &&
6672             /* If we don't require boot clocks... */
6673             ((overrides & NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS) == 0) &&
6674             /* If FRL is supported... */
6675             nvHdmiDpySupportsFrl(pDpyEvo) &&
6676             /* Use FRL for 10 BPC if needed. */
6677             ((nvDpyIsHdmiDepth30Evo(pDpyEvo) &&
6678               nvHdmiTimingsNeedFrl(pDpyEvo, pTimings, HDMI_BPC10)) ||
6679             /* Use FRL for 8 BPC if needed. */
6680              nvHdmiTimingsNeedFrl(pDpyEvo, pTimings, HDMI_BPC8))) {
6681 
6682             nvAssert(nvDpyIsHdmiEvo(pDpyEvo));
6683             nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A ||
6684                      rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B);
6685             timingsProtocol = NVKMS_PROTOCOL_SOR_HDMI_FRL;
6686         } else {
6687             /* If HDMI FRL is needed for 8 BPC, but not supported, fail. */
6688             if (nvDpyIsHdmiEvo(pDpyEvo) &&
6689                 nvHdmiTimingsNeedFrl(pDpyEvo, pTimings, HDMI_BPC8) &&
6690                 ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK) == 0)) {
6691                 nvAssert(!nvHdmiDpySupportsFrl(pDpyEvo));
6692                 return FALSE;
6693             }
6694 
6695             switch (rmProtocol) {
6696             default:
6697                 nvAssert(!"unrecognized SOR RM protocol");
6698                 return FALSE;
6699             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
6700                 if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings) &&
6701                     ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK) == 0)) {
6702                     return FALSE;
6703                 }
6704                 timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
6705                 break;
6706             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
6707                 if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings) &&
6708                     ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK) == 0)) {
6709                     return FALSE;
6710                 }
6711                 timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B;
6712                 break;
6713             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
6714                 /*
6715                  * Override dual/single link TMDS protocol if necessary.
6716                  * XXX might be nice to give a way for users to override the
6717                  * SingleLink/DualLink decision.
6718                  *
6719                  * TMDS_A: "use A side of the link"
6720                  * TMDS_B: "use B side of the link"
6721                  */
6722                 if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) {
6723                     timingsProtocol = NVKMS_PROTOCOL_SOR_DUAL_TMDS;
6724                 } else {
6725                     timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
6726                 }
6727                 break;
6728             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
6729                 timingsProtocol = NVKMS_PROTOCOL_SOR_DP_A;
6730                 break;
6731             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
6732                 timingsProtocol = NVKMS_PROTOCOL_SOR_DP_B;
6733                 break;
6734             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM:
6735                 timingsProtocol = NVKMS_PROTOCOL_SOR_LVDS_CUSTOM;
6736                 break;
6737             }
6738         }
6739     } else if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR) {
6740         nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC);
6741         timingsProtocol = NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC;
6742     } else if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_DSI) {
6743         nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI);
6744         timingsProtocol = NVKMS_PROTOCOL_DSI;
6745     } else {
6746         nvAssert(!"Unknown OR type");
6747         return FALSE;
6748     }
6749 
6750     pTimings->protocol = timingsProtocol;
6751 
6752     return TRUE;
6753 
6754 }
6755 
6756 
6757 
6758 /*
6759  * ConstructHwModeTimingsEvoCrt() - construct EVO hardware timings to
6760  * drive a CRT, given the mode timings in pMt
6761  */
6762 
6763 static NvBool
6764 ConstructHwModeTimingsEvoCrt(const NVConnectorEvoRec *pConnectorEvo,
6765                              const NvModeTimings *pModeTimings,
6766                              const struct NvKmsSize *pViewPortSizeIn,
6767                              const struct NvKmsRect *pViewPortOut,
6768                              NVHwModeTimingsEvoPtr pTimings,
6769                              NVEvoInfoStringPtr pInfoString)
6770 {
6771     ConstructHwModeTimingsFromNvModeTimings(pModeTimings, pTimings);
6772 
6773     /* assign the protocol; we expect DACs to have RGB protocol */
6774 
6775     nvAssert(pConnectorEvo->or.protocol ==
6776              NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT);
6777 
6778     pTimings->protocol = NVKMS_PROTOCOL_DAC_RGB;
6779 
6780     /* assign scaling fields */
6781 
6782     return ConstructHwModeTimingsViewPort(pConnectorEvo->pDispEvo, pTimings,
6783                                           pInfoString, pViewPortSizeIn,
6784                                           pViewPortOut);
6785 }
6786 
6787 
6788 /*!
6789  * Construct EVO hardware timings to drive a digital protocol (TMDS,
6790  * DP, etc).
6791  *
6792  * \param[in]  pDpy          The display device for which to build timings.
6793  * \param[in]  pModeTimings  The hw-neutral description of the timings.
6794  * \param[out] pTimings      The EVO-specific modetimings.
6795  *
6796  * \return     TRUE if the EVO modetimings could be built; FALSE if failure.
6797  */
6798 static NvBool ConstructHwModeTimingsEvoDfp(const NVDpyEvoRec *pDpyEvo,
6799                                            const NvModeTimings *pModeTimings,
6800                                            const struct NvKmsSize *pViewPortSizeIn,
6801                                            const struct NvKmsRect *pViewPortOut,
6802                                            NVHwModeTimingsEvoPtr pTimings,
6803                                            const struct
6804                                            NvKmsModeValidationParams *pParams,
6805                                            NVEvoInfoStringPtr pInfoString)
6806 {
6807     NvBool ret;
6808 
6809     ConstructHwModeTimingsFromNvModeTimings(pModeTimings, pTimings);
6810 
6811     ret = GetDfpProtocol(pDpyEvo, pParams, pTimings);
6812 
6813     if (!ret) {
6814         return ret;
6815     }
6816 
6817     ret = ApplyDualLinkRequirements(pDpyEvo, pParams, pTimings, pInfoString);
6818 
6819     if (!ret) {
6820         return ret;
6821     }
6822 
6823     return ConstructHwModeTimingsViewPort(pDpyEvo->pDispEvo, pTimings,
6824                                           pInfoString, pViewPortSizeIn,
6825                                           pViewPortOut);
6826 }
6827 
6828 static NvBool DowngradeColorBpc(
6829     const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
6830     enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
6831     enum NvKmsDpyAttributeColorRangeValue *pColorRange)
6832 {
6833     switch (*pColorBpc) {
6834         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
6835             *pColorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
6836             break;
6837         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
6838             /* At depth 18 only RGB and full range are allowed */
6839             if (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) {
6840                 *pColorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6;
6841                 *pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
6842             } else {
6843                 return FALSE;
6844             }
6845             break;
6846         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN:
6847         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6:
6848             return FALSE;
6849     }
6850 
6851     return TRUE;
6852 }
6853 
nvDowngradeColorSpaceAndBpc(const NVColorFormatInfoRec * pSupportedColorFormats,enum NvKmsDpyAttributeCurrentColorSpaceValue * pColorSpace,enum NvKmsDpyAttributeColorBpcValue * pColorBpc,enum NvKmsDpyAttributeColorRangeValue * pColorRange)6854 NvBool nvDowngradeColorSpaceAndBpc(
6855     const NVColorFormatInfoRec *pSupportedColorFormats,
6856     enum NvKmsDpyAttributeCurrentColorSpaceValue *pColorSpace,
6857     enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
6858     enum NvKmsDpyAttributeColorRangeValue *pColorRange)
6859 {
6860     if (DowngradeColorBpc(*pColorSpace, pColorBpc, pColorRange)) {
6861         return TRUE;
6862     }
6863 
6864     switch (*pColorSpace) {
6865         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: /* fallthrough */
6866         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
6867             if (pSupportedColorFormats->yuv422.maxBpc !=
6868                     NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
6869                 *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422;
6870                 *pColorBpc = pSupportedColorFormats->yuv422.maxBpc;
6871                 *pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED;
6872                 return TRUE;
6873             }
6874             break;
6875         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: /* fallthrough */
6876         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
6877             break;
6878     }
6879 
6880     return FALSE;
6881 }
6882 
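
/*
 * Illustrative sketch (not part of the original source): callers typically
 * invoke nvDowngradeColorSpaceAndBpc() in a retry loop, walking down a
 * ladder such as RGB 10 bpc -> RGB 8 bpc -> RGB 6 bpc, or falling back from
 * RGB/YCbCr444 to YCbCr422 at the sink's maximum bpc when a YCbCr422 format
 * is available.  Assuming a hypothetical 'ConfigurationFits' predicate:
 *
 *     while (!ConfigurationFits(colorSpace, colorBpc)) {
 *         if (!nvDowngradeColorSpaceAndBpc(&supportedColorFormats,
 *                                          &colorSpace, &colorBpc,
 *                                          &colorRange)) {
 *             return FALSE; // nothing left to downgrade; reject the mode
 *         }
 *     }
 */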

/*
 * nvDPValidateModeEvo() - For DP devices handled by the DP lib, check DP
 * bandwidth and pick the best possible/supported pixel depth to use for
 * the given mode timings.
 */

NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo,
                           NVHwModeTimingsEvoPtr pTimings,
                           enum NvKmsDpyAttributeCurrentColorSpaceValue *pColorSpace,
                           enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
                           const NvBool b2Heads1Or,
                           NVDscInfoEvoRec *pDscInfo,
                           const struct NvKmsModeValidationParams *pParams)
{
    NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
    enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace = *pColorSpace;
    enum NvKmsDpyAttributeColorBpcValue colorBpc = *pColorBpc;
    enum NvKmsDpyAttributeColorRangeValue colorRange;
    const NVColorFormatInfoRec supportedColorFormats =
        nvGetColorFormatInfo(pDpyEvo);

    /* Only do this for DP devices. */
    if (!nvConnectorUsesDPLib(pConnectorEvo)) {
        return TRUE;
    }

    if ((pParams->overrides &
         NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK) != 0) {
        return TRUE;
    }

    if (colorSpace != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) {
        colorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED;
    } else {
        colorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
    }

    nvAssert(nvDpyUsesDPLib(pDpyEvo));
    nvAssert(pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR);

 tryAgain:

    if (!nvDPValidateModeForDpyEvo(pDpyEvo, colorSpace, colorBpc, pParams,
                                   pTimings, b2Heads1Or, pDscInfo)) {
        if (nvDowngradeColorSpaceAndBpc(&supportedColorFormats, &colorSpace,
                                        &colorBpc, &colorRange)) {
            goto tryAgain;
        }
        /*
         * Cannot downgrade pixelDepth further --
         *     this mode is not possible on this DP link, so fail.
         */

        return FALSE;
    }

    *pColorSpace = colorSpace;
    *pColorBpc = colorBpc;
    return TRUE;
}
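
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * mode-validation caller lets nvDPValidateModeEvo() refine the requested
 * color configuration in place; the (possibly downgraded) values are
 * written back through pColorSpace/pColorBpc only on success:
 *
 *     enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace = requestedColorSpace;
 *     enum NvKmsDpyAttributeColorBpcValue colorBpc = requestedColorBpc;
 *
 *     if (!nvDPValidateModeEvo(pDpyEvo, pTimings, &colorSpace, &colorBpc,
 *                              b2Heads1Or, pDscInfo, pParams)) {
 *         // no color configuration fits on this DP link; reject the mode
 *     }
 */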

/*
 * Construct the hardware values to program EVO for the specified
 * NVModeTimings
 */

NvBool nvConstructHwModeTimingsEvo(const NVDpyEvoRec *pDpyEvo,
                                   const struct NvKmsMode *pKmsMode,
                                   const struct NvKmsSize *pViewPortSizeIn,
                                   const struct NvKmsRect *pViewPortOut,
                                   NVHwModeTimingsEvoPtr pTimings,
                                   const struct NvKmsModeValidationParams
                                   *pParams,
                                   NVEvoInfoStringPtr pInfoString)
{
    const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo;
    NvBool ret;

    /* assign the pTimings values */

    if (pConnectorEvo->legacyType ==
               NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) {
        ret = ConstructHwModeTimingsEvoDfp(pDpyEvo,
                                           &pKmsMode->timings,
                                           pViewPortSizeIn, pViewPortOut,
                                           pTimings, pParams, pInfoString);
    } else if (pConnectorEvo->legacyType ==
               NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) {
        ret = ConstructHwModeTimingsEvoCrt(pConnectorEvo,
                                           &pKmsMode->timings,
                                           pViewPortSizeIn, pViewPortOut,
                                           pTimings, pInfoString);
    } else {
        nvAssert(!"Invalid pDpyEvo->type");
        return FALSE;
    }

    if (!ret) return FALSE;

    /* tweak the raster timings for gsync */

    if (pDpyEvo->pDispEvo->pFrameLockEvo) {
        // if this fails, the timing remains untweaked, which just means
        // that the mode may not work well with frame lock
        TweakTimingsForGsync(pDpyEvo, pTimings, pInfoString, pParams->stereoMode);
    }

    return TRUE;
}

static NvBool DowngradeViewPortTaps(const NVEvoHeadCaps *pHeadCaps,
                                    NVHwModeViewPortEvoPtr pViewPort,
                                    NVEvoScalerTaps srcTaps,
                                    NVEvoScalerTaps dstTaps,
                                    NvBool isVert,
                                    NVEvoScalerTaps *pTaps)
{
    const NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps;
    NvBool dstPossible;

    if (isVert) {
        dstPossible = IsVTapsPossible(pScalerCaps, pViewPort->in.width,
                                      pViewPort->out.width, dstTaps);
    } else {
        dstPossible = pScalerCaps->taps[dstTaps].maxHDownscaleFactor > 0;
    }

    if (*pTaps >= srcTaps && dstPossible) {
        *pTaps = dstTaps;
        return TRUE;
    }

    return FALSE;
}

/* Downgrade the htaps from 8 to 5 */
static NvBool DowngradeViewPortHTaps8(const NVDevEvoRec *pDevEvo,
                                      const NvU32 head,
                                      const NVEvoHeadCaps *pHeadCaps,
                                      NVHwModeViewPortEvoPtr pViewPort,
                                      NvU64 unused)
{
    return DowngradeViewPortTaps(pHeadCaps,
                                 pViewPort,
                                 NV_EVO_SCALER_8TAPS,
                                 NV_EVO_SCALER_5TAPS,
                                 FALSE /* isVert */,
                                 &pViewPort->hTaps);
}

/* Downgrade the htaps from 5 to 2 */
static NvBool DowngradeViewPortHTaps5(const NVDevEvoRec *pDevEvo,
                                      const NvU32 head,
                                      const NVEvoHeadCaps *pHeadCaps,
                                      NVHwModeViewPortEvoPtr pViewPort,
                                      NvU64 unused)
{
    return DowngradeViewPortTaps(pHeadCaps,
                                 pViewPort,
                                 NV_EVO_SCALER_5TAPS,
                                 NV_EVO_SCALER_2TAPS,
                                 FALSE /* isVert */,
                                 &pViewPort->hTaps);
}

/* Downgrade the vtaps from 5 to 3 */
static NvBool DowngradeViewPortVTaps5(const NVDevEvoRec *pDevEvo,
                                      const NvU32 head,
                                      const NVEvoHeadCaps *pHeadCaps,
                                      NVHwModeViewPortEvoPtr pViewPort,
                                      NvU64 unused)
{
    return DowngradeViewPortTaps(pHeadCaps,
                                 pViewPort,
                                 NV_EVO_SCALER_5TAPS,
                                 NV_EVO_SCALER_3TAPS,
                                 TRUE /* isVert */,
                                 &pViewPort->vTaps);
}

/* Downgrade the vtaps from 3 to 2 */
static NvBool DowngradeViewPortVTaps3(const NVDevEvoRec *pDevEvo,
                                      const NvU32 head,
                                      const NVEvoHeadCaps *pHeadCaps,
                                      NVHwModeViewPortEvoPtr pViewPort,
                                      NvU64 unused)
{
    return DowngradeViewPortTaps(pHeadCaps,
                                 pViewPort,
                                 NV_EVO_SCALER_3TAPS,
                                 NV_EVO_SCALER_2TAPS,
                                 TRUE /* isVert */,
                                 &pViewPort->vTaps);
}

static NvBool
DowngradeLayerDownscaleFactor(NVHwModeViewPortEvoPtr pViewPort,
                              const NvU32 layer,
                              NvU16 srcFactor,
                              NvU16 dstFactor,
                              NvU16 *pFactor)
{
    struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage;

    if (!pUsage->layer[layer].usable) {
        return FALSE;
    }

    if (*pFactor == srcFactor) {
        *pFactor = dstFactor;
        return TRUE;
    }

    return FALSE;
}

static NvBool
DowngradeLayerVDownscaleFactor4X(const NVDevEvoRec *pDevEvo,
                                 const NvU32 head,
                                 const NVEvoHeadCaps *pHeadCaps,
                                 NVHwModeViewPortEvoPtr pViewPort,
                                 NvU64 unused)
{
    NvU32 layer;

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        struct NvKmsScalingUsageBounds *pScaling =
            &pViewPort->guaranteedUsage.layer[layer].scaling;

        if (DowngradeLayerDownscaleFactor(pViewPort,
                                          layer,
                                          NV_EVO_SCALE_FACTOR_4X,
                                          NV_EVO_SCALE_FACTOR_3X,
                                          &pScaling->maxVDownscaleFactor)) {
            return TRUE;
        }
    }

    return FALSE;
}

static NvBool
DowngradeLayerVDownscaleFactor3X(const NVDevEvoRec *pDevEvo,
                                 const NvU32 head,
                                 const NVEvoHeadCaps *pHeadCaps,
                                 NVHwModeViewPortEvoPtr pViewPort,
                                 NvU64 unused)
{
    NvU32 layer;

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        struct NvKmsScalingUsageBounds *pScaling =
            &pViewPort->guaranteedUsage.layer[layer].scaling;

        if (DowngradeLayerDownscaleFactor(pViewPort,
                                          layer,
                                          NV_EVO_SCALE_FACTOR_3X,
                                          NV_EVO_SCALE_FACTOR_2X,
                                          &pScaling->maxVDownscaleFactor)) {
            return TRUE;
        }
    }

    return FALSE;
}

static NvBool
DowngradeLayerVDownscaleFactor2X(const NVDevEvoRec *pDevEvo,
                                 const NvU32 head,
                                 const NVEvoHeadCaps *pHeadCaps,
                                 NVHwModeViewPortEvoPtr pViewPort,
                                 NvU64 unused)
{
    NvU32 layer;

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        struct NvKmsScalingUsageBounds *pScaling =
            &pViewPort->guaranteedUsage.layer[layer].scaling;

        if (DowngradeLayerDownscaleFactor(pViewPort,
                                          layer,
                                          NV_EVO_SCALE_FACTOR_2X,
                                          NV_EVO_SCALE_FACTOR_1X,
                                          &pScaling->maxVDownscaleFactor)) {
            return TRUE;
        }
    }

    return FALSE;
}

static NvBool
DowngradeLayerHDownscaleFactor4X(const NVDevEvoRec *pDevEvo,
                                 const NvU32 head,
                                 const NVEvoHeadCaps *pHeadCaps,
                                 NVHwModeViewPortEvoPtr pViewPort,
                                 NvU64 unused)
{
    NvU32 layer;

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        struct NvKmsScalingUsageBounds *pScaling =
            &pViewPort->guaranteedUsage.layer[layer].scaling;

        if (DowngradeLayerDownscaleFactor(pViewPort,
                                          layer,
                                          NV_EVO_SCALE_FACTOR_4X,
                                          NV_EVO_SCALE_FACTOR_3X,
                                          &pScaling->maxHDownscaleFactor)) {
            return TRUE;
        }
    }

    return FALSE;
}

static NvBool DowngradeLayerHDownscaleFactor3X(const NVDevEvoRec *pDevEvo,
                                               const NvU32 head,
                                               const NVEvoHeadCaps *pHeadCaps,
                                               NVHwModeViewPortEvoPtr pViewPort,
                                               NvU64 unused)
{
    NvU32 layer;

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        struct NvKmsScalingUsageBounds *pScaling =
            &pViewPort->guaranteedUsage.layer[layer].scaling;

        if (DowngradeLayerDownscaleFactor(pViewPort,
                                          layer,
                                          NV_EVO_SCALE_FACTOR_3X,
                                          NV_EVO_SCALE_FACTOR_2X,
                                          &pScaling->maxHDownscaleFactor)) {
            return TRUE;
        }
    }

    return FALSE;
}

static NvBool DowngradeLayerHDownscaleFactor2X(const NVDevEvoRec *pDevEvo,
                                               const NvU32 head,
                                               const NVEvoHeadCaps *pHeadCaps,
                                               NVHwModeViewPortEvoPtr pViewPort,
                                               NvU64 unused)
{
    NvU32 layer;

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        struct NvKmsScalingUsageBounds *pScaling =
            &pViewPort->guaranteedUsage.layer[layer].scaling;

        if (DowngradeLayerDownscaleFactor(pViewPort,
                                          layer,
                                          NV_EVO_SCALE_FACTOR_2X,
                                          NV_EVO_SCALE_FACTOR_1X,
                                          &pScaling->maxHDownscaleFactor)) {
            return TRUE;
        }
    }

    return FALSE;
}

/* Downgrade the vtaps from 5 to 2 */
static NvBool DowngradeLayerVTaps5(const NVDevEvoRec *pDevEvo,
                                   const NvU32 head,
                                   const NVEvoHeadCaps *pHeadCaps,
                                   NVHwModeViewPortEvoPtr pViewPort,
                                   NvU64 unused)
{
    struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage;
    NvU32 layer;

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        struct NvKmsScalingUsageBounds *pScaling =
            &pUsage->layer[layer].scaling;

        if (!pUsage->layer[layer].usable) {
            continue;
        }

        if (pScaling->vTaps == NV_EVO_SCALER_5TAPS) {
            pScaling->vTaps = NV_EVO_SCALER_2TAPS;
            return TRUE;
        }
    }

    return FALSE;
}

static NvBool DowngradeLayerVUpscaling(const NVDevEvoRec *pDevEvo,
                                       const NvU32 head,
                                       const NVEvoHeadCaps *pHeadCaps,
                                       NVHwModeViewPortEvoPtr pViewPort,
                                       NvU64 unused)
{
    struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage;
    NvU32 layer;

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        struct NvKmsScalingUsageBounds *pScaling =
            &pUsage->layer[layer].scaling;

        if (!pUsage->layer[layer].usable) {
            continue;
        }

        if (pScaling->vUpscalingAllowed) {
            pScaling->vUpscalingAllowed = FALSE;
            return TRUE;
        }
    }

    return FALSE;
}

static NvBool DowngradeViewPortOverlayFormats(
    const NVDevEvoRec *pDevEvo,
    const NvU32 head,
    const NVEvoHeadCaps *pHeadCaps,
    NVHwModeViewPortEvoPtr pViewPort,
    NvU64 removeSurfaceMemoryFormats)
{
    struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage;
    NvU32 layer;

    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        if (layer == NVKMS_MAIN_LAYER || !pUsage->layer[layer].usable) {
            continue;
        }

        if (pUsage->layer[layer].supportedSurfaceMemoryFormats &
            removeSurfaceMemoryFormats) {
            pUsage->layer[layer].supportedSurfaceMemoryFormats &=
                ~removeSurfaceMemoryFormats;
            if (pUsage->layer[layer].supportedSurfaceMemoryFormats == 0) {
                pUsage->layer[layer].usable = FALSE;
            }

            return TRUE;
        }
    }

    return FALSE;
}

static NvBool DowngradeViewPortBaseFormats(
    const NVDevEvoRec *pDevEvo,
    const NvU32 head,
    const NVEvoHeadCaps *pHeadCaps,
    NVHwModeViewPortEvoPtr pViewPort,
    NvU64 removeSurfaceMemoryFormats)
{
    struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage;

    if (!pUsage->layer[NVKMS_MAIN_LAYER].usable) {
        return FALSE;
    }

    if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
        removeSurfaceMemoryFormats) {
        pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &=
            ~removeSurfaceMemoryFormats;
        if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats == 0) {
            pUsage->layer[NVKMS_MAIN_LAYER].usable = FALSE;
        }

        return TRUE;
    }

    return FALSE;
}

typedef NvBool (*DowngradeViewPortFuncPtr)(const NVDevEvoRec *pDevEvo,
                                           const NvU32 head,
                                           const NVEvoHeadCaps *pHeadCaps,
                                           NVHwModeViewPortEvoPtr pViewPort,
                                           NvU64 removeSurfaceMemoryFormats);

/*
 * Try to downgrade the usage bounds of the viewports, keeping the
 * viewports roughly equal in capability; we do this from
 * ValidateMetaMode50() when IMP rejects the mode.  Return TRUE if we
 * were able to downgrade something; return FALSE if there was nothing
 * left to downgrade.
 */

static NvBool DownGradeMetaModeUsageBounds(
    const NVDevEvoRec                      *pDevEvo,
    const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP],
    NvU32                                   downgradePossibleHeadsBitMask)
{
    static const struct {
        DowngradeViewPortFuncPtr downgradeFunc;
        NvU64 removeSurfaceMemoryFormats;
    } downgradeFuncs[] = {
        { DowngradeLayerVDownscaleFactor4X,
          0 },
        { DowngradeLayerHDownscaleFactor4X,
          0 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP },
        { DowngradeLayerVDownscaleFactor3X,
          0 },
        { DowngradeLayerHDownscaleFactor3X,
          0 },
        { DowngradeViewPortVTaps5,
          0 },
        { DowngradeViewPortVTaps3,
          0 },
        { DowngradeViewPortHTaps8,
          0 },
        { DowngradeViewPortHTaps5,
          0 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP },
        { DowngradeLayerVTaps5,
          0 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP },
        { DowngradeLayerVDownscaleFactor2X,
          0 },
        { DowngradeLayerHDownscaleFactor2X,
          0 },
        { DowngradeLayerVUpscaling,
          0 },
        { DowngradeViewPortOverlayFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP },
        { DowngradeViewPortBaseFormats,
          NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP },
    };
    int i;

    // XXX assume the heads have equal capabilities
    // XXX assume the gpus have equal capabilities

    const NVEvoHeadCaps *pHeadCaps =
        &pDevEvo->gpus[0].capabilities.head[0];

    for (i = 0; i < ARRAY_LEN(downgradeFuncs); i++) {
        int head;
        FOR_ALL_HEADS(head, downgradePossibleHeadsBitMask) {
            if (timingsParams[head].pTimings == NULL) {
                continue;
            }

            if (downgradeFuncs[i].downgradeFunc(
                    pDevEvo,
                    head,
                    pHeadCaps,
                    &timingsParams[head].pTimings->viewPort,
                    downgradeFuncs[i].removeSurfaceMemoryFormats)) {
                return TRUE;
            }
        }
    }

    /* Nothing else to downgrade */
    return FALSE;
}

NvBool nvAllocateDisplayBandwidth(
    NVDispEvoPtr pDispEvo,
    NvU32 newIsoBandwidthKBPS,
    NvU32 newDramFloorKBPS)
{
    NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS params = { };
    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    NvU32 ret;

    if (!pDevEvo->isSOCDisplay) {
        return TRUE;
    }

    params.subDeviceInstance = 0;
    params.averageBandwidthKBPS = newIsoBandwidthKBPS;
    params.floorBandwidthKBPS = newDramFloorKBPS;

    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
                         pDevEvo->displayCommonHandle,
                         NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH,
                         &params, sizeof(params));
    if (ret != NV_OK) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Failed to allocate %u KBPS Iso and %u KBPS Dram",
                    newIsoBandwidthKBPS, newDramFloorKBPS);
        return FALSE;
    }

    pDispEvo->isoBandwidthKBPS = newIsoBandwidthKBPS;
    pDispEvo->dramFloorKBPS = newDramFloorKBPS;

    return TRUE;
}
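
/*
 * Illustrative sketch (not part of the original source): on SOC display
 * (pDevEvo->isSOCDisplay), callers are expected to reserve bandwidth before
 * programming a more demanding configuration; 'requiredIsoBandwidthKBPS'
 * and 'requiredDramFloorKBPS' below are hypothetical values:
 *
 *     if (!nvAllocateDisplayBandwidth(pDispEvo,
 *                                     requiredIsoBandwidthKBPS,
 *                                     requiredDramFloorKBPS)) {
 *         // reject or scale back the requested configuration
 *     }
 *
 * On success, the new values are cached in pDispEvo->isoBandwidthKBPS and
 * pDispEvo->dramFloorKBPS; on non-SOC (dGPU) configurations the function is
 * a no-op that returns TRUE.
 */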

static void AssignNVEvoIsModePossibleDispInput(
    NVDispEvoPtr                             pDispEvo,
    const NVValidateImpOneDispHeadParamsRec  timingsParams[NVKMS_MAX_HEADS_PER_DISP],
    NvBool                                   requireBootClocks,
    NVEvoReallocateBandwidthMode             reallocBandwidth,
    NVEvoIsModePossibleDispInput            *pImpInput)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NvU32 head;
    NvU32 nextSorIndex = 0;

    nvkms_memset(pImpInput, 0, sizeof(*pImpInput));

    pImpInput->requireBootClocks = requireBootClocks;
    pImpInput->reallocBandwidth = reallocBandwidth;

    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
        const NVConnectorEvoRec *pConnectorEvo =
                    timingsParams[head].pConnectorEvo;
        NvU32 otherHead = 0;

        nvAssert((timingsParams[head].pTimings == NULL) ==
                 (timingsParams[head].pConnectorEvo == NULL));

        pImpInput->head[head].orIndex = NV_INVALID_OR;

        if (timingsParams[head].pTimings == NULL) {
            continue;
        }

        pImpInput->head[head].pTimings = timingsParams[head].pTimings;
        pImpInput->head[head].enableDsc = timingsParams[head].enableDsc;
        pImpInput->head[head].b2Heads1Or = timingsParams[head].b2Heads1Or;
        pImpInput->head[head].pixelDepth = timingsParams[head].pixelDepth;
        pImpInput->head[head].displayId = timingsParams[head].activeRmId;
        pImpInput->head[head].orType = pConnectorEvo->or.type;
        pImpInput->head[head].pUsage = timingsParams[head].pUsage;

        if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
                NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED) ||
             pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {

            nvAssert(pConnectorEvo->or.primary != NV_INVALID_OR);

            pImpInput->head[head].orIndex = pConnectorEvo->or.primary;
            continue;
        }

        /*
         * If more than one head is attached to the same connector, then make
         * sure that all of them use the same SOR index.
         */
        for (otherHead = 0; otherHead < head; otherHead++) {
            if (timingsParams[otherHead].pConnectorEvo == pConnectorEvo) {
                pImpInput->head[head].orIndex = pImpInput->head[otherHead].orIndex;
                break;
            }
        }

        /*
         * On GPUs with a full crossbar, the SORs are equally capable, so just
         * use the next unused SOR.
         *
         * We assume there are as many SORs as there are heads.
         */
        if (pImpInput->head[head].orIndex == NV_INVALID_OR) {
            pImpInput->head[head].orIndex = nextSorIndex;
            nextSorIndex++;
        }
    }
}
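
/*
 * Illustrative sketch (not part of the original source): on a full-crossbar
 * GPU, the SOR assignment loop above hands out indices in head order and
 * reuses an index when two heads drive the same connector.  For example,
 * with heads 0 and 1 on connector A and head 2 on connector B:
 *
 *     head 0 -> SOR 0   (first unused index)
 *     head 1 -> SOR 0   (same pConnectorEvo as head 0)
 *     head 2 -> SOR 1   (next unused index)
 */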

/*!
 * Validate the described disp configuration through IMP.
 *
 * \param[in]      pDispEvo        The disp of the dpyIdList.
 *
 * \param[in,out]  timingsParams[] The proposed configuration to use on each
 *                                 head; includes:
 *
 *                                   pConnectorEvo -
 *                                     The proposed connector to drive on each head.
 *
 *                                   activeRmId -
 *                                     The display ID that we use to talk to RM
 *                                     about the dpy(s) on each head.
 *
 *                                   pTimings -
 *                                     The proposed timings to use on each head;
 *                                     note the usage bounds within pTimings
 *                                     may be altered by this function.
 *
 *                                   pixelDepth -
 *                                     The depth of the buffer to be displayed on
 *                                     each head.
 *
 * \param[in]      requireBootClocks
 *                                 Only validate modes that will work at P8
 *                                 clocks.
 *
 * \param[in]      reallocBandwidth
 *                                 Try to allocate the required display
 *                                 bandwidth if IMP passes.
 *
 * \param[out]     pMinIsoBandwidthKBPS
 *                                 The ISO bandwidth that's required for the
 *                                 proposed disp configuration only. This value
 *                                 doesn't take the current display state into
 *                                 account.
 *
 * \param[out]     pMinDramFloorKBPS
 *                                 The DRAM floor that's required for the
 *                                 proposed disp configuration only. This value
 *                                 doesn't take the current display state into
 *                                 account.
 *
 * \return         Return TRUE if the proposed disp configuration is
 *                 considered valid for IMP purposes.
 */
NvBool nvValidateImpOneDisp(
    NVDispEvoPtr                            pDispEvo,
    const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP],
    NvBool                                  requireBootClocks,
    NVEvoReallocateBandwidthMode            reallocBandwidth,
    NvU32                                   *pMinIsoBandwidthKBPS,
    NvU32                                   *pMinDramFloorKBPS)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoIsModePossibleDispInput impInput = { };
    NVEvoIsModePossibleDispOutput impOutput = { };
    NvU32 newIsoBandwidthKBPS, newDramFloorKBPS;
    NvBool needToRealloc = FALSE;

    AssignNVEvoIsModePossibleDispInput(pDispEvo,
                                       timingsParams, requireBootClocks,
                                       reallocBandwidth,
                                       &impInput);

    pDevEvo->hal->IsModePossible(pDispEvo, &impInput, &impOutput);
    if (!impOutput.possible) {
        return FALSE;
    }

    switch (reallocBandwidth) {
        case NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE:
            needToRealloc = (impOutput.minRequiredBandwidthKBPS > pDispEvo->isoBandwidthKBPS) ||
                            (impOutput.floorBandwidthKBPS > pDispEvo->dramFloorKBPS);
            newIsoBandwidthKBPS =
                NV_MAX(pDispEvo->isoBandwidthKBPS, impOutput.minRequiredBandwidthKBPS);
            newDramFloorKBPS =
                NV_MAX(pDispEvo->dramFloorKBPS, impOutput.floorBandwidthKBPS);

            break;
        case NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST:
            needToRealloc = (impOutput.minRequiredBandwidthKBPS != pDispEvo->isoBandwidthKBPS) ||
                            (impOutput.floorBandwidthKBPS != pDispEvo->dramFloorKBPS);
            newIsoBandwidthKBPS = impOutput.minRequiredBandwidthKBPS;
            newDramFloorKBPS = impOutput.floorBandwidthKBPS;

            break;
        case NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE:
        default:
            break;
    }

    if (needToRealloc) {
        if (!nvAllocateDisplayBandwidth(pDispEvo,
                                        newIsoBandwidthKBPS,
                                        newDramFloorKBPS)) {
            return FALSE;
        }
    }

    if (pMinIsoBandwidthKBPS != NULL) {
        *pMinIsoBandwidthKBPS = impOutput.minRequiredBandwidthKBPS;
    }

    if (pMinDramFloorKBPS != NULL) {
        *pMinDramFloorKBPS = impOutput.floorBandwidthKBPS;
    }

    return TRUE;
}

NvBool nvValidateImpOneDispDowngrade(
    NVDispEvoPtr                            pDispEvo,
    const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP],
    NvBool                                  requireBootClocks,
    NVEvoReallocateBandwidthMode            reallocBandwidth,
    NvU32                                   downgradePossibleHeadsBitMask)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NvBool impPassed = FALSE;

    do {
        impPassed = nvValidateImpOneDisp(pDispEvo,
                                         timingsParams,
                                         requireBootClocks,
                                         reallocBandwidth,
                                         NULL /* pMinIsoBandwidthKBPS */,
                                         NULL /* pMinDramFloorKBPS */);
        if (impPassed) {
            break;
        }
    } while (DownGradeMetaModeUsageBounds(pDevEvo, timingsParams,
                                          downgradePossibleHeadsBitMask));

    if (impPassed && !pDevEvo->isSOCDisplay) {
        NvU32 head;

        for (head = 0; head < pDevEvo->numHeads; head++) {
            if (timingsParams[head].pTimings != NULL) {
                timingsParams[head].pTimings->viewPort.possibleUsage =
                    timingsParams[head].pTimings->viewPort.guaranteedUsage;
            }
        }
    }

    return impPassed;
}

/*
 * Return TRUE iff this display can be configured as a framelock
 * server given the current modetimings/framelock configuration; FALSE
 * otherwise.
 */

NvBool nvFrameLockServerPossibleEvo(const NVDpyEvoRec *pDpyEvo)
{
    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];

    return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
                                     NV_EVO_ADD_FRAME_LOCK_SERVER,
                                     NULL);
}

/*
 * Return TRUE iff this display can be configured as a framelock client
 * given the current modetimings/framelock configuration; FALSE otherwise.
 */

NvBool nvFrameLockClientPossibleEvo(const NVDpyEvoRec *pDpyEvo)
{
    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];

    return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
                                     NV_EVO_ADD_FRAME_LOCK_CLIENT,
                                     NULL);
}


/*
 * FrameLockSli() - Helper function for nvEvoRefFrameLockSli() and
 * nvEvoUnRefFrameLockSli(), which are hooked into the EVO locking state
 * machine via custom rules.  This function will find the GPU acting as the
 * given GPU's SLI primary and perform the NV_EVO_{ADD,REM}_FRAME_LOCK_REF
 * action to increment or decrement the refcount on that GPU.
 * If !queryOnly, it also figures out which heads to pass into the EVO state
 * machine; otherwise, it passes NULL to perform a query without affecting
 * state.
 */

static NvBool FrameLockSli(NVDevEvoPtr pDevEvo,
                           NvU32 action,
                           NvBool queryOnly)
{
    RasterLockGroup *pRasterLockGroups;
    NVEvoSubDevPtr pEvoSubDev;
    NVDispEvoPtr pDispEvo;
    unsigned int numRasterLockGroups;

    pRasterLockGroups = GetRasterLockGroups(pDevEvo, &numRasterLockGroups);
    if (!pRasterLockGroups) {
        return FALSE;
    }

    nvAssert(numRasterLockGroups == 1);
    if (numRasterLockGroups != 1) {
        nvFree(pRasterLockGroups);
        return FALSE;
    }

    /* Want to be framelock server */
    pDispEvo = pRasterLockGroups[0].pDispEvoOrder[0];

    nvFree(pRasterLockGroups);

    if (!pDispEvo) {
        return FALSE;
    }

    nvAssert(pDevEvo == pDispEvo->pDevEvo);

    pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];

    if (queryOnly) {
        return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, NULL);
    } else {
        NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
        NvU32 i = 0;
        NvU32 head;

        for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
            if (nvHeadIsActive(pDispEvo, head)) {
                pHeads[i++] = head;
            }
        }
        nvAssert(i > 0 && i <= NVKMS_MAX_HEADS_PER_DISP);
        pHeads[i] = NV_INVALID_HEAD;

        return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action,
                                         pHeads);
    }
}


/*
 * nvEvoRefFrameLockSli() - Attempt to set up framelock on the GPU's SLI
 * primary.  Hooked into EVO state machine via custom rules.
 * If pHeads is NULL, only perform a query.
 */

NvBool nvEvoRefFrameLockSli(NVDispEvoPtr pDispEvo,
                            NVEvoSubDevPtr pEvoSubDev,
                            const NvU32 *pHeads)
{
    return FrameLockSli(pDispEvo->pDevEvo, NV_EVO_ADD_FRAME_LOCK_REF,
                        pHeads == NULL);

} /* nvEvoRefFrameLockSli */


/*
 * nvEvoUnRefFrameLockSli() - Attempt to clean up framelock on the GPU's SLI
 * primary.  Hooked into EVO state machine via custom rules.
 * If pHeads is NULL, only perform a query.
 */

NvBool nvEvoUnRefFrameLockSli(NVDispEvoPtr pDispEvo,
                              NVEvoSubDevPtr pEvoSubDev,
                              const NvU32 *pHeads)
{
    return FrameLockSli(pDispEvo->pDevEvo, NV_EVO_REM_FRAME_LOCK_REF,
                        pHeads == NULL);

} /* nvEvoUnRefFrameLockSli */


/*
 * GetRasterLockPin() - Ask RM which lockpin to use in order to configure GPU0
 * to be a server or client of GPU1, where GPUn is represented by the duple
 * (pDispn, headn) (or NV_EVO_LOCK_PIN_ERROR if the two cannot be locked).
 */
static void GetRasterLockPin(NVDispEvoPtr pDispEvo0, NvU32 head0,
                             NVDispEvoPtr pDispEvo1, NvU32 head1,
                             NVEvoLockPin *serverPin, NVEvoLockPin *clientPin)
{
    NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS params = { };
    NvU32 displayHandle0 = pDispEvo0->pDevEvo->displayHandle;
    NvU32 displayHandle1 = pDispEvo1->pDevEvo->displayHandle;
    NvU32 ret;

    params.base.subdeviceIndex = pDispEvo0->displayOwner;
    params.head = head0;

    params.peer.hDisplay = displayHandle1;
    params.peer.subdeviceIndex = pDispEvo1->displayOwner;
    params.peer.head = head1;

    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
                         displayHandle0,
                         NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
                         &params, sizeof(params));

    if (ret != NVOS_STATUS_SUCCESS) {
        nvEvoLogDispDebug(pDispEvo0, EVO_LOG_ERROR,
                          "stateless lockpin query failed; ret: 0x%x", ret);
        if (serverPin) *serverPin = NV_EVO_LOCK_PIN_ERROR;
        if (clientPin) *clientPin = NV_EVO_LOCK_PIN_ERROR;
        return;
    }

    if (serverPin) {
        if (FLD_TEST_DRF(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
                               _MASTER_SCAN_LOCK_CONNECTED, _NO,
                               params.masterScanLock)) {
            *serverPin = NV_EVO_LOCK_PIN_ERROR;
        } else {
            int pin = DRF_VAL(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
                              _MASTER_SCAN_LOCK_PIN,
                              params.masterScanLock);
            *serverPin = NV_EVO_LOCK_PIN_0 + pin;
        }
    }

    if (clientPin) {
        if (FLD_TEST_DRF(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
                               _SLAVE_SCAN_LOCK_CONNECTED, _NO,
                               params.slaveScanLock)) {
            *clientPin = NV_EVO_LOCK_PIN_ERROR;
        } else {
            int pin = DRF_VAL(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
                              _SLAVE_SCAN_LOCK_PIN,
                              params.slaveScanLock);
            *clientPin = NV_EVO_LOCK_PIN_0 + pin;
        }
    }
} /* GetRasterLockPin */

static void UpdateLUTNotifierTracking(
    NVDispEvoPtr pDispEvo)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const int dispIndex = pDispEvo->displayOwner;
    NvU32 i;

    for (i = 0; i < ARRAY_LEN(pDevEvo->lut.notifierState.sd[dispIndex].notifiers); i++) {
        int notifier = pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].notifier;

        if (!pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting) {
            continue;
        }

        if (!pDevEvo->hal->IsCompNotifierComplete(pDevEvo->pDispEvo[dispIndex],
                                                  notifier)) {
            continue;
        }

        pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask &=
            ~pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].apiHeadMask;
        pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting = FALSE;
    }
}

/*
 * Check whether there are any staged API head LUT notifiers that need to be
 * committed.
 */
NvBool nvEvoLUTNotifiersNeedCommit(
    NVDispEvoPtr pDispEvo)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const int dispIndex = pDispEvo->displayOwner;
    NvU32 apiHeadMask = pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask;

    return apiHeadMask != 0;
}

/*
 * Set up tracking for a LUT notifier for the apiHeads in stagedApiHeadMask.
 *
 * The notifier returned by this function must be passed to a subsequent call
 * to EvoUpdateAndKickOffWithNotifier.
 *
 * Returns -1 if an error occurs or if no apiHeads need a new LUT notifier.
 * Passing -1 to EvoUpdateAndKickOffWithNotifier with its notify parameter
 * set may result in kernel panics.
 */
int nvEvoCommitLUTNotifiers(
    NVDispEvoPtr pDispEvo)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const int dispIndex = pDispEvo->displayOwner;
    NvU32 apiHeadMask = pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask;
    int i;

    pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask = 0;

    UpdateLUTNotifierTracking(pDispEvo);

    if (apiHeadMask == 0) {
        return -1;
    }

    if (pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask &
        apiHeadMask) {
        /*
         * an apiHead in the requested list is already waiting on a
         * notifier
         */
        nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, "A requested API head is already waiting on a notifier");
        return -1;
    }

    for (i = 0; i < ARRAY_LEN(pDevEvo->lut.notifierState.sd[dispIndex].notifiers); i++) {
        int notifier = (dispIndex * NVKMS_MAX_HEADS_PER_DISP) + i + 1;

        if (pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting) {
            continue;
        }

        /* use this notifier */
        pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].notifier = notifier;
        pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting = TRUE;
        pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].apiHeadMask = apiHeadMask;

        pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask |=
            apiHeadMask;

        return notifier;
    }

    /* slot not found */
    nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, "No remaining LUT notifier slots");
    return -1;
}

/*
 * Unstage any staged API Heads' notifiers.
 */
void nvEvoClearStagedLUTNotifiers(
    NVDispEvoPtr pDispEvo)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const int dispIndex = pDispEvo->displayOwner;

    pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask = 0;
}

/*
 * Stage the API head's notifier for tracking. In order to kick off the staged
 * notifier, nvEvoCommitLUTNotifiers must be called and its return value
 * passed to EvoUpdateAndKickOffWithNotifier.
 *
 * This function and its siblings nvEvoIsLUTNotifierComplete and
 * nvEvoWaitForLUTNotifier can be used by callers of nvEvoSetLut to ensure the
 * triple buffer for the color LUT is not overflowed even when nvEvoSetLut is
 * called with kickoff = FALSE.
 */
void nvEvoStageLUTNotifier(
    NVDispEvoPtr pDispEvo,
    NvU32 apiHead)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const int dispIndex = pDispEvo->displayOwner;

    nvAssert((pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask &
             NVBIT(apiHead)) == 0);

    pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask |=
        NVBIT(apiHead);
}
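
/*
 * Illustrative sketch (not part of the original source): the staging
 * protocol described above, for a hypothetical caller updating the LUT on
 * one API head without overflowing the LUT triple buffer:
 *
 *     nvEvoStageLUTNotifier(pDispEvo, apiHead);
 *     if (nvEvoLUTNotifiersNeedCommit(pDispEvo)) {
 *         int notifier = nvEvoCommitLUTNotifiers(pDispEvo);
 *
 *         if (notifier >= 0) {
 *             // kick off the update, passing 'notifier' to
 *             // EvoUpdateAndKickOffWithNotifier() with notify enabled
 *         }
 *     }
 *
 *     // later, before staging the next LUT update on this head:
 *     if (!nvEvoIsLUTNotifierComplete(pDispEvo, apiHead)) {
 *         nvEvoWaitForLUTNotifier(pDispEvo, apiHead);
 *     }
 */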

/*
 * Check if the api head's LUT Notifier is complete.
 */

NvBool nvEvoIsLUTNotifierComplete(
    NVDispEvoPtr pDispEvo,
    NvU32 apiHead)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const int dispIndex = pDispEvo->displayOwner;

    UpdateLUTNotifierTracking(pDispEvo);

    return (pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask &
            NVBIT(apiHead)) == 0;
}

/*
 * Wait for the api head's LUT Notifier to complete.
 *
 * This function blocks while waiting for the notifier.
 */

void nvEvoWaitForLUTNotifier(
    const NVDispEvoPtr pDispEvo,
    NvU32 apiHead)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const int dispIndex = pDispEvo->displayOwner;
    int i;

    if (nvEvoIsLUTNotifierComplete(pDispEvo, apiHead)) {
        return;
    }

    for (i = 0; i < ARRAY_LEN(pDevEvo->lut.notifierState.sd[dispIndex].notifiers); i++) {
        int notifier = pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].notifier;

        if (!pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting) {
            continue;
        }

        if ((pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].apiHeadMask &
            NVBIT(apiHead)) == 0) {
            continue;
        }

        pDevEvo->hal->WaitForCompNotifier(pDispEvo, notifier);
        return;
    }
}
8122 static void EvoIncrementCurrentLutIndex(NVDispEvoRec *pDispEvo,
8123                                         const NvU32 apiHead,
8124                                         const NvBool baseLutEnabled,
8125                                         const NvBool outputLutEnabled)
8126 {
8127     NvU32 head;
8128     const int dispIndex = pDispEvo->displayOwner;
8129     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
8130     const int numLUTs = ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT);
8131     NVDispApiHeadStateEvoRec *pApiHeadState =
8132         &pDispEvo->apiHeadState[apiHead];
8133 
8134     pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex++;
8135     pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex %= numLUTs;
8136     pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled = baseLutEnabled;
8137     pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled = outputLutEnabled;
8138 
8139     FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
8140         const NvU32 curLutIndex =
8141             pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex;
8142         NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
8143 
8144         pHeadState->lut.outputLutEnabled =
8145             pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled;
8146         pHeadState->lut.baseLutEnabled =
8147             pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled;
8148         pHeadState->lut.pCurrSurface =
8149             pDevEvo->lut.apiHead[apiHead].LUT[curLutIndex];
8150 
8151     }
8152 }
8153 
8154 static NvU32 UpdateLUTTimer(NVDispEvoPtr pDispEvo,
8155                             const NvU32 apiHead,
8156                             const NvBool baseLutEnabled,
8157                             const NvBool outputLutEnabled)
8158 {
8159     if (!nvEvoIsLUTNotifierComplete(pDispEvo, apiHead)) {
8160         // If the notifier is still pending, then the previous update is still
8161         // pending and further LUT changes should continue to go into the third
8162         // buffer.  Reschedule the timer for another 10 ms.
8163         return 10;
8164     }
8165 
8166     // Update the current LUT index and kick off an update.
8167     EvoIncrementCurrentLutIndex(pDispEvo, apiHead, baseLutEnabled,
8168                                 outputLutEnabled);
8169 
8170     EvoUpdateCurrentPalette(pDispEvo, apiHead);
8171 
8172     // Return 0 to cancel the timer.
8173     return 0;
8174 }
8175 
8176 static void UpdateLUTTimerNVKMS(void *dataPtr, NvU32 dataU32)
8177 {
8178     NVDispEvoPtr pDispEvo = dataPtr;
8179     const NvU32 apiHead = DRF_VAL(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD,
8180                                dataU32);
8181     const NvBool baseLutEnabled = FLD_TEST_DRF(UPDATE_LUT_TIMER_NVKMS, _DATAU32,
8182                                                _BASE_LUT, _ENABLE, dataU32);
8183     const NvBool outputLutEnabled = FLD_TEST_DRF(UPDATE_LUT_TIMER_NVKMS, _DATAU32,
8184                                                  _OUTPUT_LUT, _ENABLE, dataU32);
8185     NvU32 ret = UpdateLUTTimer(pDispEvo, apiHead, baseLutEnabled,
8186                                outputLutEnabled);
8187 
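    /* UpdateLUTTimer() returns its delay in milliseconds; nvkms timers are
     * programmed in microseconds, hence the ret * 1000 conversion below. */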
8188     if (ret != 0) {
8189         ScheduleLutUpdate(pDispEvo, apiHead, dataU32, ret * 1000);
8190     }
8191 }
8192 
8193 static void ScheduleLutUpdate(NVDispEvoRec *pDispEvo,
8194                               const NvU32 apiHead, const NvU32 data,
8195                               const NvU64 usec)
8196 {
8197     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
8198 
8199     /* Cancel previous update */
8200     nvCancelLutUpdateEvo(pDispEvo, apiHead);
8201 
8202     /* schedule a new timer */
8203     pDevEvo->lut.apiHead[apiHead].disp[pDispEvo->displayOwner].updateTimer =
8204         nvkms_alloc_timer(UpdateLUTTimerNVKMS,
8205                           pDispEvo, data,
8206                           usec);
8207 }
8208 
8209 /*
8210  * The gamma ramp, if specified, has a 16-bit range.  Convert it to EVO's 14-bit
8211  * shifted range and zero out the low 3 bits for bug 813188.
8212  */
8213 static inline NvU16 GammaToEvo(NvU16 gamma)
8214 {
8215     return ((gamma >> 2) & ~7) + 24576;
8216 }
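/*
 * For illustration (values derived from the expression above): an input of
 * 0xFFFF becomes ((0xFFFF >> 2) & ~7) + 24576 = 0x3FF8 + 0x6000 = 0x9FF8,
 * and an input of 0 becomes 0x6000; i.e., the output covers the shifted
 * range [0x6000, 0x9FF8] in steps of 8, with the low 3 bits always zero.
 */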
8217 
8218 static NVEvoLutDataRec *GetNewLutBuffer(
8219     const NVDispEvoRec *pDispEvo,
8220     const struct NvKmsSetLutCommonParams *pParams)
8221 {
8222     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
8223     NVEvoLutDataRec *pLUTBuffer = NULL;
8224 
8225     // XXX NVKMS TODO: If only input or output are specified and the other one
8226     // is enabled in the hardware, this will zero out the one not specified. In
8227     // practice it isn't a problem today because the X driver always specifies
8228     // both, but we should fix this once we start always using the base channel,
8229     // where we have a separate base LUT ctxdma.
8230     //
8231     // This is also a problem if a partial update of the input LUT is attempted
8232     // (i.e. start != 0 or end != numberOfLutEntries-1).
8233     //
8234     // Filed bug: 2042919 to track removing this TODO.
8235 
8236     pLUTBuffer = nvCalloc(1, sizeof(*pLUTBuffer));
8237 
8238     if (pLUTBuffer == NULL) {
8239         goto done;
8240     }
8241 
8242     if (pParams->input.specified && pParams->input.end != 0) {
8243         const struct NvKmsLutRamps *pRamps =
8244             nvKmsNvU64ToPointer(pParams->input.pRamps);
8245         const NvU16 *red = pRamps->red;
8246         const NvU16 *green = pRamps->green;
8247         const NvU16 *blue = pRamps->blue;
8248 
8249         nvAssert(pRamps != NULL);
8250 
8251         // Update our shadow copy of the LUT.
8252         pDevEvo->hal->FillLUTSurface(pLUTBuffer->base,
8253                                      red, green, blue,
8254                                      pParams->input.end + 1,
8255                                      pParams->input.depth);
8256     }
8257 
8258     if (pParams->output.specified && pParams->output.enabled) {
8259         const struct NvKmsLutRamps *pRamps =
8260             nvKmsNvU64ToPointer(pParams->output.pRamps);
8261         int i;
8262 
8263         nvAssert(pRamps != NULL);
8264 
8265         if (pDevEvo->hal->caps.hasUnorm16OLUT) {
8266             for (i = 0; i < 1024; i++) {
8267                 // Copy the client's 16-bit ramp directly to the LUT buffer.
8268                 pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Red = pRamps->red[i];
8269                 pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Green = pRamps->green[i];
8270                 pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Blue = pRamps->blue[i];
8271             }
8272 
8273             pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + 1024] =
8274                 pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + 1023];
8275         } else {
8276             for (i = 0; i < 1024; i++) {
8277                 // Convert from the client's 16-bit range to the EVO 14-bit shifted
8278                 // range.
8279                 pLUTBuffer->output[i].Red = GammaToEvo(pRamps->red[i]);
8280                 pLUTBuffer->output[i].Green = GammaToEvo(pRamps->green[i]);
8281                 pLUTBuffer->output[i].Blue = GammaToEvo(pRamps->blue[i]);
8282             }
8283 
8284             pLUTBuffer->output[1024] = pLUTBuffer->output[1023];
8285         }
8286     }
8287 
8288     /* fall through */
8289 
8290 done:
8291     return pLUTBuffer;
8292 }
8293 
8294 
8295 /*
8296  * Update the api head's LUT with the given colors.
8297  *
8298  * The color LUT is triple-buffered.
8299  *
8300  * curLUTIndex indicates the buffer currently being updated.  What the other
8301  * two buffers are used for depends on whether the previous update has
8302  * completed.  If not (case 1):
8303  *   curLUTIndex + 1 (mod 3): currently being displayed
8304  *   curLUTIndex + 2 (mod 3): will be displayed at next vblank
8305  * If so (case 2):
8306  *   curLUTIndex + 1 (mod 3): unused
8307  *   curLUTIndex + 2 (mod 3): currently being displayed
8308  *
8309  * In case 1, just update the current buffer and kick off a timer to submit the
8310  * update from i+2 to i.  If more LUT changes come in before the first update
8311  * happens, kill the timer and start a new one.
8312  *
8313  * In case 2, kill the timer if it still hasn't gone off, update buffer i, and
8314  * kick off an update.  No new timer needs to be scheduled.
8315  */
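/*
 * Worked example (illustrative, following the scheme described above): with
 * curLUTIndex == 0 and an update still in flight, buffer 1 is being
 * displayed and buffer 2 will be displayed at the next vblank, so new data
 * is written to buffer 0; once the update completes, buffer 2 is displayed,
 * buffer 1 is unused, and buffer 0 holds the most recently written data.
 */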
8316 
8317 void nvEvoSetLut(NVDispEvoPtr pDispEvo, NvU32 apiHead, NvBool kickoff,
8318                  const struct NvKmsSetLutCommonParams *pParams)
8319 {
8320     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
8321     const int dispIndex = pDispEvo->displayOwner;
8322     const int curLUT = pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex;
8323     const NvBool waitForPreviousUpdate =
8324         pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate;
8325     const int numLUTs = ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT);
8326     const int lutToFill = (curLUT + 1) % numLUTs;
8327     NVLutSurfaceEvoPtr pSurfEvo = pDevEvo->lut.apiHead[apiHead].LUT[lutToFill];
8328     NvBool baseLutEnabled =
8329         pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled;
8330     NvBool outputLutEnabled =
8331         pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled;
8332 
8333     if (!pParams->input.specified && !pParams->output.specified) {
8334         return;
8335     }
8336 
8337     if (pParams->input.specified) {
8338         baseLutEnabled = (pParams->input.end != 0);
8339     }
8340 
8341     if (pParams->output.specified) {
8342         outputLutEnabled = pParams->output.enabled;
8343     }
8344 
8345     nvAssert(pSurfEvo != NULL);
8346 
8347     if ((pParams->input.specified && pParams->input.end != 0) ||
8348         (pParams->output.specified && pParams->output.enabled)) {
8349         NVEvoLutDataRec *pLUTBuffer = GetNewLutBuffer(pDispEvo, pParams);
8350 
8351         if (pLUTBuffer == NULL) {
8352             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
8353                         "LUT Allocation failure; skipping LUT update");
8354             return;
8355         }
8356 
8357         // Fill in the new LUT buffer.
8358         nvUploadDataToLutSurfaceEvo(pSurfEvo, pLUTBuffer, pDispEvo);
8359 
8360         nvFree(pLUTBuffer);
8361     }
8362 
8363     /* Kill a pending timer */
8364     nvCancelLutUpdateEvo(pDispEvo, apiHead);
8365 
8366     if (!kickoff) {
8367         EvoIncrementCurrentLutIndex(pDispEvo, apiHead, baseLutEnabled,
8368                                     outputLutEnabled);
8369         return;
8370     }
8371 
8372     // See if we can just fill the next LUT buffer and kick off an update now.
8373     // We can do that if this is the very first update, or if the previous
8374     // update is complete, or if we need to guarantee that this update
8375     // is synchronous.
8376     NvBool previousUpdateComplete =
8377         nvEvoIsLUTNotifierComplete(pDispEvo, apiHead);
8378     if (!waitForPreviousUpdate || previousUpdateComplete ||
8379         pParams->synchronous) {
8380 
8381         if (!previousUpdateComplete) {
8382             nvEvoWaitForLUTNotifier(pDispEvo, apiHead);
8383         }
8384 
8385         // Kick off an update now.
8386         EvoIncrementCurrentLutIndex(pDispEvo, apiHead, baseLutEnabled,
8387                                     outputLutEnabled);
8388         EvoUpdateCurrentPalette(pDispEvo, apiHead);
8389 
8390         // If this LUT update is synchronous, then sync before returning.
8391         if (pParams->synchronous &&
8392             pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate) {
8393 
8394             nvEvoWaitForLUTNotifier(pDispEvo, apiHead);
8395             pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate =
8396                 FALSE;
8397         }
8398     } else {
8399         // Schedule a timer to kick off an update later.
8400         // XXX 5 ms is a guess.  We could probably look at this pDpy's refresh
8401         // rate to come up with a more reasonable estimate.
8402         NvU32 dataU32 = DRF_NUM(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, apiHead);
8403 
8404         nvAssert((apiHead & ~0xff) == 0);
8405 
8406         if (baseLutEnabled) {
8407             dataU32 |= DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _BASE_LUT,
8408                                _ENABLE);
8409         }
8410 
8411         if (outputLutEnabled) {
8412             dataU32 |= DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _OUTPUT_LUT,
8413                                _ENABLE);
8414         }
8415 
8416         ScheduleLutUpdate(pDispEvo, apiHead, dataU32, 5 * 1000);
8417     }
8418 }
8419 
8420 NvBool nvValidateSetLutCommonParams(
8421     const NVDevEvoRec *pDevEvo,
8422     const struct NvKmsSetLutCommonParams *pParams)
8423 {
8424     NvU32 maxSize = 0;
8425 
8426     if (pParams->output.specified && pParams->output.enabled) {
8427         if (pParams->output.pRamps == 0) {
8428             return FALSE;
8429         }
8430     }
8431 
8432     if (!pParams->input.specified || pParams->input.end == 0) {
8433         return TRUE;
8434     }
8435 
8436     if (pParams->input.pRamps == 0) {
8437         return FALSE;
8438     }
8439 
8440     switch (pParams->input.depth) {
8441         case 8:  maxSize = 256;  break;
8442         case 15: maxSize = 32;   break;
8443         case 16: maxSize = 64;   break;
8444         case 24: maxSize = 256;  break;
8445         case 30: maxSize = 1024; break;
8446         default: return FALSE;
8447     }
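    /*
     * The sizes above follow from the widest per-component width at each
     * depth: e.g. depth 15 (5:5:5) has 5 bits per component (2^5 == 32
     * entries), depth 16 (5:6:5) has up to 6 bits (2^6 == 64), depth 30 has
     * 10 bits per component (2^10 == 1024), and depths 8 and 24 index a
     * 256-entry ramp.
     */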
8448 
8449     nvAssert(maxSize <= NVKMS_LUT_ARRAY_SIZE);
8452 
8453     /* Currently, the implementation assumes start==0. */
8454     if (pParams->input.start != 0) {
8455         return FALSE;
8456     }
8457 
8458     if (pParams->input.end >= maxSize) {
8459         return FALSE;
8460     }
8461 
8462     return TRUE;
8463 }
8464 
8465 static NvU32 GetSwapLockoutWindowUs(NVDispEvoPtr pDispEvo)
8466 {
8467     NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS params = { 0 };
8468     NvU32 ret;
8469 
8470     nvAssert(pDispEvo->pFrameLockEvo != NULL);
8471 
8472     ret = nvRmApiControl(
8473             nvEvoGlobal.clientHandle,
8474             pDispEvo->pFrameLockEvo->device,
8475             NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW,
8476             &params, sizeof(params));
8477 
8478     if (ret != NVOS_STATUS_SUCCESS) {
8479         nvAssert(!"NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW failed");
8480     }
8481 
8482     return params.tSwapRdyHi;
8483 }
8484 
8485 static NvU32 CalculateSwapLockoutStartP2060(NVDispEvoPtr pDispEvo,
8486                                             const NvU32 head,
8487                                             const NvU32 tSwapRdyHiUs)
8488 {
8489     const NVHwModeTimingsEvo *pTimings;
8490 
8491     nvAssert(head != NV_INVALID_HEAD);
8492     nvAssert(nvHeadIsActive(pDispEvo, head));
8493 
8494     pTimings = &pDispEvo->headState[head].timings;
8495 
8496     /*
8497      *  SWAP_LOCKOUT_START = Vtotal * TswapRdyHi * Refresh_Rate
8498      *
8499      * = Vtotal * TswapRdyHi * (pclk / (Vtotal * Htotal))
8502      * = TswapRdyHi * (pclk / Htotal)
8503      * = TswapRdyHiUs * 1e-6 * pclk / Htotal
8504      * = TswapRdyHiUs * pclk / (Htotal * 1000000)
8505      * = TswapRdyHiUs * (pclkKhz * 1000) / (Htotal * 1000000)
8506      * = TswapRdyHiUs * pclkKhz / (Htotal * 1000)
8507      *
8508      * Since SWAP_LOCKOUT_START must be higher than LSR_MIN_TIME, round this
8509      * result up to the nearest integer.
8510      */
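    /*
     * Hypothetical example: for a 1920x1080@60 mode with Htotal == 2200 and
     * pclk == 148500 kHz, a tSwapRdyHiUs of 250 gives
     * NV_ROUNDUP_DIV(250 * 148500, 2200 * 1000) == 17 raster lines.
     */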
8511 
8512     return NV_ROUNDUP_DIV(tSwapRdyHiUs * pTimings->pixelClock,
8513                           pTimings->rasterSize.x * 1000);
8514 }
8515 
8516 /**
8517  * Override the swap lockout start value on heads on this pDisp, or restore the
8518  * default value.
8519  *
8520  * This is called before (with isPre == TRUE) and after (with isPre == FALSE)
8521  * swap barriers are enabled on the G-Sync board.  In order to satisfy certain
8522  * timing criteria, we need to set a special value for SWAP_LOCKOUT_START for
8523  * the duration of swap barriers being enabled.
8524  */
8525 void nvSetSwapBarrierNotifyEvo(NVDispEvoPtr pDispEvo,
8526                                NvBool enable, NvBool isPre)
8527 {
8528     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
8529     NvU32 tSwapRdyHiUs = 0;
8530     NvU32 head;
8531 
8532     if ((isPre && !enable) || (!isPre && enable)) {
8533         return;
8534     }
8535 
8536     if (enable) {
8537         tSwapRdyHiUs = GetSwapLockoutWindowUs(pDispEvo);
8538     }
8539 
8540     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
8541         NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS params = { };
8542         NvU32 ret;
8543 
8544         if (!nvHeadIsActive(pDispEvo, head)) {
8545             continue;
8546         }
8547 
8548         params.maxSwapLockoutSkew =
8549             NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW_INIT;
8550 
8551         if (enable) {
8552             params.swapLockoutStart =
8553                 CalculateSwapLockoutStartP2060(pDispEvo, head, tSwapRdyHiUs);
8554         } else {
8555             params.swapLockoutStart =
8556                 NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START_INIT;
8557         }
8558 
8559         params.head = head;
8560 
8561         params.base.subdeviceIndex = pDispEvo->displayOwner;
8562 
8563         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
8564                              pDevEvo->displayHandle,
8565                              NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP,
8566                              &params,
8567                              sizeof(params));
8568 
8569         if (ret != NVOS_STATUS_SUCCESS) {
8570             nvAssert(!"NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP failed");
8571         }
8572     }
8573 }
8574 
8575 /*!
8576  * Release a reference to a pDevEvo
8577  *
8578  * If the refcount of the device drops to 0, this frees the device.
8579  *
8580  * \return TRUE if the device was freed, FALSE otherwise.
8581  */
8582 NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo)
8583 {
8584     if (pDevEvo == NULL) {
8585         return FALSE;
8586     }
8587 
8588     pDevEvo->allocRefCnt--;
8589 
8590     if (pDevEvo->allocRefCnt > 0) {
8591         return FALSE;
8592     }
8593 
8594     if (pDevEvo->pDifrState) {
8595         nvRmUnregisterDIFREventHandler(pDevEvo);
8596         nvDIFRFree(pDevEvo->pDifrState);
8597         pDevEvo->pDifrState = NULL;
8598     }
8599 
8600     if (pDevEvo->pNvKmsOpenDev != NULL) {
8601         /*
8602          * DP-MST allows more than one head/stream to be attached to a single
8603          * DP connector, and there is no way to convey that DP-MST configuration
8604          * to the next driver load; therefore, disallow DP-MST.
8605          */
8606         nvEvoRestoreConsole(pDevEvo, FALSE /* allowMST */);
8607 
8608         nvEvoUnregisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev,
8609                                pDevEvo->fbConsoleSurfaceHandle,
8610                                TRUE /* skipUpdate */);
8611         pDevEvo->fbConsoleSurfaceHandle = 0;
8612     }
8613 
8614     nvFreeCoreChannelEvo(pDevEvo);
8615 
8616     nvTeardownHdmiLibrary(pDevEvo);
8617 
8618     nvHsFreeDevice(pDevEvo);
8619 
8620     nvFreePerOpenDev(nvEvoGlobal.nvKmsPerOpen, pDevEvo->pNvKmsOpenDev);
8621 
8622     nvFreeFrameLocksEvo(pDevEvo);
8623 
8624     if (pDevEvo->hal) {
8625         pDevEvo->hal->FreeRmCtrlObject(pDevEvo);
8626     }
8627 
8628     nvRmDestroyDisplays(pDevEvo);
8629 
8630     nvkms_free_timer(pDevEvo->consoleRestoreTimer);
8631     pDevEvo->consoleRestoreTimer = NULL;
8632 
8633     nvPreallocFree(pDevEvo);
8634 
8635     nvRmFreeDeviceEvo(pDevEvo);
8636 
8637     nvListDel(&pDevEvo->devListEntry);
8638 
8639     nvkms_free_ref_ptr(pDevEvo->ref_ptr);
8640 
8641     nvFree(pDevEvo);
8642     return TRUE;
8643 }
8644 
8645 static void AssignNumberOfApiHeads(NVDevEvoRec *pDevEvo)
8646 {
8647     pDevEvo->numApiHeads = pDevEvo->numHeads;
8648 }
8649 
8650 NVDevEvoPtr nvAllocDevEvo(const struct NvKmsAllocDeviceRequest *pRequest,
8651                           enum NvKmsAllocDeviceStatus *pStatus)
8652 {
8653     NVDevEvoPtr pDevEvo = NULL;
8654     enum NvKmsAllocDeviceStatus status =
8655         NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
8656     NvU32 i;
8657 
8658     nvAssert(nvFindDevEvoByDeviceId(pRequest->deviceId) == NULL);
8659 
8660     pDevEvo = nvCalloc(1, sizeof(*pDevEvo));
8661 
8662     if (pDevEvo == NULL) {
8663         goto done;
8664     }
8665 
8666     pDevEvo->allocRefCnt = 1;
8667 
8668     pDevEvo->gpuLogIndex = NV_INVALID_GPU_LOG_INDEX;
8669 
8670     pDevEvo->gc6Allowed = TRUE;
8671 
8672     nvListAppend(&pDevEvo->devListEntry, &nvEvoGlobal.devList);
8673 
8674     pDevEvo->ref_ptr = nvkms_alloc_ref_ptr(pDevEvo);
8675     if (!pDevEvo->ref_ptr) {
8676         goto done;
8677     }
8678 
8679     for (i = 0; i < ARRAY_LEN(pDevEvo->openedGpuIds); i++) {
8680         pDevEvo->openedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
8681     }
8682 
8683     for (i = 0; i < ARRAY_LEN(pDevEvo->headForWindow); i++) {
8684         pDevEvo->headForWindow[i] = NV_INVALID_HEAD;
8685     }
8686 
8687     if (!nvRmAllocDeviceEvo(pDevEvo, pRequest)) {
8688         goto done;
8689     }
8690 
8691     status = nvAssignEvoCaps(pDevEvo);
8692 
8693     if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) {
8694         goto done;
8695     }
8696 
8697     if (!nvPreallocAlloc(pDevEvo)) {
8698         status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
8699         goto done;
8700     }
8701 
8702     /*
8703      * Copy the registry keys from the alloc device request to the device.
8704      *
8705      * This needs to be set before nvRmAllocDisplays, because nvRmAllocDisplays
8706      * will initialize DP lib which may read registry keys that we want to
8707      * allow clients to override.
8708      */
8709     ct_assert(ARRAY_LEN(pRequest->registryKeys) ==
8710               ARRAY_LEN(pDevEvo->registryKeys));
8711     ct_assert(ARRAY_LEN(pRequest->registryKeys[0].name) ==
8712               ARRAY_LEN(pDevEvo->registryKeys[0].name));
8713 
8714     for (i = 0; i < ARRAY_LEN(pRequest->registryKeys); i++) {
8715         const size_t nameLen = sizeof(pDevEvo->registryKeys[i].name);
8716         nvkms_memcpy(pDevEvo->registryKeys[i].name,
8717                      pRequest->registryKeys[i].name,
8718                      nameLen);
8719         pDevEvo->registryKeys[i].name[nameLen - 1] = '\0';
8720         pDevEvo->registryKeys[i].value = pRequest->registryKeys[i].value;
8721     }
8722 
8723     status = nvRmAllocDisplays(pDevEvo);
8724 
8725     if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) {
8726         goto done;
8727     }
8728 
8729     nvAllocFrameLocksEvo(pDevEvo);
8730 
8731     if (!pDevEvo->hal->AllocRmCtrlObject(pDevEvo)) {
8732         status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
8733         goto done;
8734     }
8735 
8736     AssignNumberOfApiHeads(pDevEvo);
8737 
8738     if (!nvAllocCoreChannelEvo(pDevEvo)) {
8739         status = NVKMS_ALLOC_DEVICE_STATUS_CORE_CHANNEL_ALLOC_FAILED;
8740         goto done;
8741     }
8742 
8743     pDevEvo->pNvKmsOpenDev = nvAllocPerOpenDev(nvEvoGlobal.nvKmsPerOpen,
8744                                                pDevEvo, TRUE /* isPrivileged */);
8745     if (!pDevEvo->pNvKmsOpenDev) {
8746         status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
8747         goto done;
8748     }
8749 
8750     nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
8751 
8752     /*
8753      * Import the framebuffer console, if there is one,
8754      * as a surface we can flip to.
8755      */
8756     nvRmImportFbConsoleMemory(pDevEvo);
8757 
8758     /*
8759      * This check must be placed after nvAllocCoreChannelEvo() since it depends
8760      * on the HW capabilities that are read in that function.
8761      */
8762     if (!ValidateConnectorTypes(pDevEvo)) {
8763         status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
8764         goto done;
8765     }
8766 
8767     if (!nvHsAllocDevice(pDevEvo, pRequest)) {
8768         status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
8769         goto done;
8770     }
8771 
8772     if (!nvInitHdmiLibrary(pDevEvo)) {
8773         status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
8774         goto done;
8775     }
8776 
8777     nvRmMuxInit(pDevEvo);
8778 
8779     status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS;
8780 
8781     /*
8782      * We can't allocate DIFR state if the h/w doesn't support it. Register
8783      * the event handlers only if we have DIFR state.
8784      */
8785     pDevEvo->pDifrState = nvDIFRAllocate(pDevEvo);
8786     if (pDevEvo->pDifrState) {
8787         if (!nvRmRegisterDIFREventHandler(pDevEvo)) {
8788             nvDIFRFree(pDevEvo->pDifrState);
8789             pDevEvo->pDifrState = NULL;
8790         }
8791     }
8792 
8793     /* fall through */
8794 
8795 done:
8796     if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) {
8797         nvFreeDevEvo(pDevEvo);
8798         pDevEvo = NULL;
8799     }
8800 
8801     *pStatus = status;
8802 
8803     return pDevEvo;
8804 }
8805 
8806 
8807 // How long before we time out waiting for lock?
8808 // In microseconds.
8809 #define LOCK_TIMEOUT 5000000
8810 
8811 //
8812 // EvoWaitForLock()
8813 // Wait for raster or flip lock to complete
8814 // Note that we use pDev and subdevice here instead of pDisp since this is used
8815 // per-subdev in SLI (including the pDispEvo->numSubDevices > 1 case).
8816 //
8817 static NvBool EvoWaitForLock(const NVDevEvoRec *pDevEvo, const NvU32 sd,
8818                              const NvU32 head, const NvU32 type,
8819                              NvU64 *pStartTime)
8820 {
8821     NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS status = { };
8822     NvU32 ret;
8823 
8824     nvAssert(type == EVO_RASTER_LOCK || type == EVO_FLIP_LOCK);
8825 
8826     if ((type == EVO_FLIP_LOCK) &&
8827         !pDevEvo->hal->caps.supportsFlipLockRGStatus) {
8828         return TRUE;
8829     }
8830 
8831     status.head             = head;
8832     status.base.subdeviceIndex = sd;
8833     status.scanLocked       = NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_NO;
8834     status.flipLocked       = NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_NO;
8835 
8836     // Just keep looping until we get what we want.
8837     do {
8838         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
8839                              pDevEvo->displayHandle,
8840                              NV5070_CTRL_CMD_GET_RG_STATUS,
8841                              &status,
8842                              sizeof(status));
8843         if (ret != NVOS_STATUS_SUCCESS) {
8844             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
8845                         "Unable to read SLI lock status");
8846             return FALSE;
8847         }
8848 
8849         if ((type == EVO_RASTER_LOCK) &&
8850             (status.scanLocked ==
8851                 NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_YES)) {
8852             break;
8853         }
8854         if ((type == EVO_FLIP_LOCK) &&
8855             (status.flipLocked ==
8856                 NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_YES)) {
8857             break;
8858         }
8859 
8860         if (nvExceedsTimeoutUSec(pDevEvo, pStartTime, LOCK_TIMEOUT)) {
8861             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
8862                         "SLI lock timeout exceeded (type %d)", type);
8863             return FALSE;
8864         }
8865 
8866         nvkms_yield();
8867 
8868     } while (TRUE);
8869 
8870     // Once we've exited from the various loops above, we should be locked
8871     // as requested.
8872     return TRUE;
8873 }
8874 
8875 //
8876 // EvoUpdateHeadParams()
8877 // Send GPUs HeadParams updates; accounts for SLI.
8878 //
8879 static void EvoUpdateHeadParams(const NVDispEvoRec *pDispEvo, NvU32 head,
8880                                 NVEvoUpdateState *updateState)
8881 {
8882     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
8883 
8884     nvPushEvoSubDevMaskDisp(pDispEvo);
8885 
8886     pDevEvo->hal->SetHeadControl(pDevEvo, pDispEvo->displayOwner, head, updateState);
8887 
8888     nvPopEvoSubDevMask(pDevEvo);
8889 }
8890 
8891 //
8892 // nvReadCRC32Evo()
8893 // Returns the last CRC32 value
8894 NvBool nvReadCRC32Evo(NVDispEvoPtr pDispEvo, NvU32 head,
8895                       CRC32NotifierCrcOut *crcOut)
8896 {
8897     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
8898     const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
8899     const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
8900     NVEvoDmaPtr dma = NULL;
8901     NVConnectorEvoPtr pConnectorEvo = NULL;
8902     NVEvoUpdateState updateState = { };
8903     NvU32 numCRC32 = 0;
8904     NvBool res = TRUE;
8905     NvBool found = FALSE;
8906     NvU32 ret;
8907 
8908     // Look up the head connector
8909     nvListForEachEntry(pConnectorEvo,
8910                        &pDispEvo->connectorList,
8911                        connectorListEntry) {
8912         NvU32 activeHeadMask =
8913             nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
8914         if (activeHeadMask & NVBIT(head)) {
8915             found = TRUE;
8916             break;
8917         }
8918     }
8919 
8920     if (!found) {
8921         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
8922                     "Unable to find active connector for head %d", head);
8923         return FALSE;
8924     }
8925 
8926     // Allocate a temporary DMA notifier
8927     dma = nvCalloc(1, sizeof(NVEvoDma));
8928     if ((dma == NULL) ||
8929         !nvRmAllocEvoDma(pDevEvo,
8930                          dma,
8931                          NV_DMA_EVO_NOTIFIER_SIZE - 1,
8932                          DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER),
8933                          1 << pDispEvo->displayOwner)) {
8934         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
8935                     "CRC32 notifier DMA allocation failed");
8936         nvFree(dma);
8937         return FALSE;
8938     }
8939 
8940     // Bind the CRC32 notifier surface descriptor
8941     ret = pDevEvo->hal->BindSurfaceDescriptor(pDevEvo, pDevEvo->core, &dma->surfaceDesc);
8942     if (ret != NVOS_STATUS_SUCCESS) {
8943         nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
8944                          "Failed to bind display engine CRC32 notify surface descriptor "
8945                          ": 0x%x (%s)", ret, nvstatusToString(ret));
8946         res = FALSE;
8947         goto done;
8948     }
8949 
8950     // Only set up the actual output for SLI primary.
8951     nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);
8952 
8953     /* CRC notifiers are similar to completion notifiers, but work slightly
8954      * differently:
8955      *
8956      *   1. In order to start CRC generation for a head, we need to:
8957      *
8958      *      - Point an EVO head at a block of memory with
8959      *        HEAD_SET_CONTEXT_DMA_CRC(head)
8960      *
8961      *      - Program the CRC control with HEAD_SET_CRC_CONTROL(head) to select
8962      *        what output we want to capture CRC values from, and kicking off a
8963      *        core channel update (this already generates a CRC value for the
8964      *        last scanout buffer)
8965      *
8966      *      ----> hal->StartCRC32Capture()
8967      *
8968      *   2. From 1) on, a new CRC value is generated per vblank and written to
8969      *      an incrementing entry in the CRC notifier. With pre-nvdisplay chips,
8970      *      a CRC notifier can hold up to 256 entries. Once filled up, new CRC
8971      *      values are discarded. Either case, we are only interested in the
8972      *      values are discarded. In either case, we are only interested in the
8973      *
8974      *   3. In order to stop CRC generation, we need to perform the inverse
8975      *      operation of 1):
8976      *
8977      *      - Program the CRC control with HEAD_SET_CRC_CONTROL(head) to
8978      *        unselect all outputs we were capturing CRC values from.
8979      *
8980      *      - Unset the CRC context DMA with HEAD_SET_CONTEXT_DMA_CRC(head)
8981      *
8982      *      ----> hal->StopCRC32Capture()
8983      *
8984      *   4. From 3) on, it is safe to wait for the CRC notifier and query all
8985      *      entries.
8986      *
8987      *      ----> hal->QueryCRC32()
8988      */
8989     pDevEvo->hal->StartCRC32Capture(pDevEvo,
8990                                     dma,
8991                                     pConnectorEvo,
8992                                     pTimings->protocol,
8993                                     pConnectorEvo->or.primary,
8994                                     head,
8995                                     pDispEvo->displayOwner,
8996                                     &updateState);
8997 
8998     // This update should generate one CRC value.
8999     nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE /* releaseElv */);
9000 
9001     pDevEvo->hal->StopCRC32Capture(pDevEvo,
9002                                    head,
9003                                    &updateState);
9004 
9005     nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE /* releaseElv */);
9006 
9007     if (!pDevEvo->hal->QueryCRC32(pDevEvo,
9008                                   dma,
9009                                   pDispEvo->displayOwner,
9010                                   1,
9011                                   crcOut,
9012                                   &numCRC32) ||
9013         (numCRC32 == 0)) {
9014         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to query all CRC32 values");
9015     }
9016 
9017     nvPopEvoSubDevMask(pDevEvo);
9018 
9019 done:
9020     // Clean-up
9021     nvRmFreeEvoDma(pDevEvo, dma);
9022     nvFree(dma);
9023 
9024     return res;
9025 }
9026 
9027 NvU32 nvGetActiveSorMask(const NVDispEvoRec *pDispEvo)
9028 {
9029     NvU32 activeSorMask = 0;
9030     NvU32 head;
9031 
9032     for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
9033         NVConnectorEvoPtr pConnectorEvo =
9034             pDispEvo->headState[head].pConnectorEvo;
9035 
9036         if (pConnectorEvo != NULL &&
9037             pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
9038             NvU32 orIndex;
9039             nvAssert(pConnectorEvo->or.primary != NV_INVALID_OR);
9040             FOR_EACH_INDEX_IN_MASK(32, orIndex, nvConnectorGetORMaskEvo(pConnectorEvo)) {
9041                 if (pConnectorEvo->or.ownerHeadMask[orIndex] == 0x0) {
9042                     continue;
9043                 }
9044                 activeSorMask |= NVBIT(orIndex);
9045             } FOR_EACH_INDEX_IN_MASK_END;
9046         }
9047     }
9048 
9049     return activeSorMask;
9050 }
9051 
9052 NvBool nvEvoPollForNoMethodPending(NVDevEvoPtr pDevEvo,
9053                                    const NvU32 sd,
9054                                    NVEvoChannelPtr pChannel,
9055                                    NvU64 *pStartTime,
9056                                    const NvU32 timeout)
9057 {
9058     do
9059     {
9060         NvBool isMethodPending = TRUE;
9061 
9062         if (pDevEvo->hal->IsChannelMethodPending(
9063                                     pDevEvo,
9064                                     pChannel,
9065                                     sd,
9066                                     &isMethodPending) && !isMethodPending) {
9067             break;
9068         }
9069 
9070         if (nvExceedsTimeoutUSec(pDevEvo, pStartTime, timeout)) {
9071             return FALSE;
9072         }
9073 
9074         nvkms_yield();
9075     } while (TRUE);
9076 
9077     return TRUE;
9078 }
9079 
9080 static NvU32 SetSORFlushMode(NVDevEvoPtr pDevEvo,
9081                              NvU32 sorNumber,
9082                              NvU32 headMask,
9083                              NvBool enable)
9084 {
9085     NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params = { };
9086 
9087     params.base.subdeviceIndex = 0;
9088     params.sorNumber = sorNumber;
9089     params.headMask = headMask;
9090     params.bEnable = enable;
9091 
9092     return nvRmApiControl(nvEvoGlobal.clientHandle,
9093                           pDevEvo->displayHandle,
9094                           NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE,
9095                           &params, sizeof(params));
9096 }
9097 
9098 static void DPSerializerLinkTrain(NVDispEvoPtr pDispEvo,
9099                                   NVConnectorEvoPtr pConnectorEvo,
9100                                   NvBool enableLink,
9101                                   NvBool reTrain)
9102 {
9103     const NvU32 displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
9104     const NvU32 sorNumber = pConnectorEvo->or.primary;
9105     const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
9106     NvBool force = NV_FALSE;
9107     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
9108 
9109     /*
9110      * The NV0073_CTRL_DP_DATA_SET_{LANE_COUNT, LINK_BW} defines are the same
9111      * as the actual DPCD values. As such, we can directly assign the
9112      * dpSerializerCaps here.
9113      */
9114     NvBool isMST = pConnectorEvo->dpSerializerCaps.supportsMST;
9115     NvU32 linkBW = pConnectorEvo->dpSerializerCaps.maxLinkBW;
9116     NvU32 laneCount = pConnectorEvo->dpSerializerCaps.maxLaneCount;
9117 
9118     nvAssert(nvConnectorIsDPSerializer(pConnectorEvo));
9119 
9120     if (sorNumber == NV_INVALID_OR) {
9121         return;
9122     }
9123 
9124     if (reTrain) {
9125         if (!pConnectorEvo->dpSerializerEnabled) {
9126             nvEvoLogDev(pDevEvo, EVO_LOG_INFO,
9127                         "Received expected HPD_IRQ during serializer shutdown");
9128             return;
9129         }
9130     } else if (enableLink) {
9131         pConnectorEvo->dpSerializerEnabled = NV_TRUE;
9132     } else {
9133         linkBW = 0;
9134         laneCount = NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0;
9135         pConnectorEvo->dpSerializerEnabled = NV_FALSE;
9136     }
9137 
9138     if (isMST) {
9139         NvU32 dpcdData = 0;
9140 
9141         dpcdData = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, dpcdData);
9142         dpcdData =
9143             FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, dpcdData);
9144         if (!nvWriteDPCDReg(pConnectorEvo, NV_DPCD_MSTM_CTRL, dpcdData)) {
9145             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to enable MST DPCD");
9146             return;
9147         }
9148     }
9149 
9150     /*
9151      * We cannot perform link training while the OR has an attached head
9152      * since we would be changing the OR clocks and link frequency while
9153      * it's actively encoding pixels, and this could lead to FIFO overflow/
9154      * underflow issues. Instead, the recommended, safe sequence is to enter
9155      * flush mode first, re-train the link, and exit flush mode after.
9156      */
9157     if (reTrain) {
9158         if (SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_TRUE) !=
9159             NVOS_STATUS_SUCCESS) {
9160             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
9161                         "Failed to enter flush mode");
9162             return;
9163         }
9164     }
9165 
9166     do {
9167         NvU32 dpCtrlData = 0;
9168         NvU32 dpCtrlCmd = 0;
9169         NV0073_CTRL_DP_CTRL_PARAMS dpCtrlParams = { };
9170 
9171         dpCtrlCmd = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) |
9172                     DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE) |
9173                     DRF_DEF(0073_CTRL, _DP_CMD, _SET_ENHANCED_FRAMING, _TRUE);
9174 
9175         if (isMST) {
9176             dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_FORMAT_MODE, _MULTI_STREAM);
9177         }
9178 
9179         if (force) {
9180             dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAKE_LINK_TRAINING, _DONOT_TOGGLE_TRANSMISSION);
9181         }
9182 
9183         dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LINK_BW,
9184                                      linkBW, dpCtrlData);
9185         dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LANE_COUNT,
9186                                      laneCount, dpCtrlData);
9187         dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _TARGET,
9188                                      NV0073_CTRL_DP_DATA_TARGET_SINK,
9189                                      dpCtrlData);
9190 
9191         dpCtrlParams.subDeviceInstance = pDispEvo->displayOwner;
9192         dpCtrlParams.displayId = displayId;
9193         dpCtrlParams.cmd = dpCtrlCmd;
9194         dpCtrlParams.data = dpCtrlData;
9195 
9196         if (nvRmApiControl(nvEvoGlobal.clientHandle,
9197                            pDevEvo->displayCommonHandle,
9198                            NV0073_CTRL_CMD_DP_CTRL,
9199                            &dpCtrlParams, sizeof(dpCtrlParams)) == NVOS_STATUS_SUCCESS) {
9200             break;
9201         }
9202 
9203         if (force) {
9204             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Fake link training failed");
9205             break;
9206         }
9207 
9208         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Link training failed");
9209 
9210         /*
9211          * XXX Force the link config on the GPU side to avoid hanging the display
9212          * pipe during modeset. Eventually, we need to figure out how to deal
9213          * with/report these kinds of LT failures.
9214          */
9215         force = NV_TRUE;
9216 
9217     } while (NV_TRUE);
9218 
9219     if (reTrain) {
9220         if (SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_FALSE) !=
9221             NVOS_STATUS_SUCCESS) {
9222             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
9223                         "Failed to exit flush mode");
9224         }
9225     }
9226 }
9227 
9228 void nvDPSerializerHandleDPIRQ(NVDispEvoPtr pDispEvo,
9229                                NVConnectorEvoPtr pConnectorEvo)
9230 {
9231     DPSerializerLinkTrain(pDispEvo, pConnectorEvo,
9232                           NV_TRUE /* enableLink */,
9233                           NV_TRUE /* reTrain */);
9234 }
9235 
9236 void nvDPSerializerPreSetMode(NVDispEvoPtr pDispEvo,
9237                               NVConnectorEvoPtr pConnectorEvo)
9238 {
9239     const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
9240 
9241     if (!pConnectorEvo->dpSerializerEnabled && (headMask != 0)) {
9242         DPSerializerLinkTrain(pDispEvo, pConnectorEvo,
9243                               NV_TRUE /* enableLink */,
9244                               NV_FALSE /* reTrain */);
9245     }
9246 }
9247 
9248 void nvDPSerializerPostSetMode(NVDispEvoPtr pDispEvo,
9249                                NVConnectorEvoPtr pConnectorEvo)
9250 {
9251     const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
9252 
9253     if (pConnectorEvo->dpSerializerEnabled && (headMask == 0)) {
9254         DPSerializerLinkTrain(pDispEvo, pConnectorEvo,
9255                               NV_FALSE /* enableLink */,
9256                               NV_FALSE /* reTrain */);
9257     }
9258 }
9259 
9260 NvU32 nvGetHDRSrcMaxLum(const NVFlipChannelEvoHwState *pHwState)
9261 {
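    /*
     * Prefer the content's MaxCLL (maximum content light level) when it is
     * provided; otherwise fall back to the mastering display's maximum
     * luminance. Returns 0 when no HDR static metadata is enabled.
     */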
9262     if (!pHwState->hdrStaticMetadata.enabled) {
9263         return 0;
9264     }
9265 
9266     if (pHwState->hdrStaticMetadata.val.maxCLL > 0) {
9267         return pHwState->hdrStaticMetadata.val.maxCLL;
9268     }
9269 
9270     return pHwState->hdrStaticMetadata.val.maxDisplayMasteringLuminance;
9271 }
9272 
9273 NvBool nvNeedsTmoLut(NVDevEvoPtr pDevEvo,
9274                      NVEvoChannelPtr pChannel,
9275                      const NVFlipChannelEvoHwState *pHwState,
9276                      NvU32 srcMaxLum,
9277                      NvU32 targetMaxCLL)
9278 {
9279     const NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask);
9280     const NvU32 head = pDevEvo->headForWindow[win];
9281     const NvU32 sdMask = nvPeekEvoSubDevMask(pDevEvo);
9282     const NvU32 sd = (sdMask == 0) ? 0 : nv_ffs(sdMask) - 1;
9283     const NVDispHeadStateEvoRec *pHeadState =
9284         &pDevEvo->pDispEvo[sd]->headState[head];
9285     const NVEvoWindowCaps *pWinCaps =
9286         &pDevEvo->gpus[sd].capabilities.window[pChannel->instance];
9287 
9288     // Don't tone map if flipped to NULL.
9289     if (!pHwState->pSurfaceEvo[NVKMS_LEFT]) {
9290         return FALSE;
9291     }
9292 
9293     // Don't tone map if layer doesn't have static metadata.
9294     // XXX HDR TODO: Support tone mapping SDR surfaces to HDR
9295     if (!pHwState->hdrStaticMetadata.enabled) {
9296         return FALSE;
9297     }
9298 
9299     // Don't tone map if HDR infoframe isn't enabled
9300     // XXX HDR TODO: Support tone mapping HDR surfaces to SDR
9301     if (pHeadState->hdrInfoFrame.state != NVKMS_HDR_INFOFRAME_STATE_ENABLED) {
9302         return FALSE;
9303     }
9304 
9305     // Don't tone map if TMO not present
9306     if (!pWinCaps->tmoPresent) {
9307         return FALSE;
9308     }
9309 
9310     // Don't tone map if source or target max luminance is unspecified.
9311     if ((srcMaxLum == 0) || (targetMaxCLL == 0)) {
9312         return FALSE;
9313     }
9314 
9315     // Don't tone map unless source max luminance exceeds target by 10%.
9316     if (srcMaxLum <= ((targetMaxCLL * 110) / 100)) {
9317         return FALSE;
9318     }
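    /*
     * Illustrative numbers: with targetMaxCLL == 600 nits the threshold is
     * 660 nits, so a source with srcMaxLum == 1000 nits is tone mapped,
     * while a 650-nit source is not.
     */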
9319 
9320     return TRUE;
9321 }
9322 
9323 NvBool nvIsCscMatrixIdentity(const struct NvKmsCscMatrix *matrix)
9324 {
9325     const struct NvKmsCscMatrix identity = NVKMS_IDENTITY_CSC_MATRIX;
9326 
9327     int y;
9328     for (y = 0; y < 3; y++) {
9329         int x;
9330 
9331         for (x = 0; x < 4; x++) {
9332             if (matrix->m[y][x] != identity.m[y][x]) {
9333                 return FALSE;
9334             }
9335         }
9336     }
9337 
9338     return TRUE;
9339 }
9340 
9341 enum nvKmsPixelDepth nvEvoColorSpaceBpcToPixelDepth(
9342     const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
9343     const enum NvKmsDpyAttributeColorBpcValue colorBpc)
9344 {
9345     switch (colorSpace) {
9346         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
9347         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
9348         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
9349             switch (colorBpc) {
9350                 case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
9351                     return NVKMS_PIXEL_DEPTH_30_444;
9352                 case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
9353                     return NVKMS_PIXEL_DEPTH_24_444;
9354                 case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN: /* fallthrough */
9355                 case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6:
9356                     return NVKMS_PIXEL_DEPTH_18_444;
9357             }
9358             break;
9359         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
9360             nvAssert(colorBpc != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6);
9361             switch (colorBpc) {
9362                 case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
9363                     return NVKMS_PIXEL_DEPTH_20_422;
9364                 case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6: /* fallthrough */
9365                 case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN: /* fallthrough */
9366                 case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
9367                     return NVKMS_PIXEL_DEPTH_16_422;
9368             }
9369             break;
9370     }
9371 
9372     return NVKMS_PIXEL_DEPTH_18_444;
9373 }
9374 
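/*
 * Overview of the 2Heads1OR merge mode bring-up sequence implemented by the
 * functions below (a summary of this file's logic, for orientation):
 *
 *   1. Pre-modeset: put every head in the SETUP merge mode state and
 *      configure raster lock, with the primary head as the lock server and
 *      the secondary heads as clients.
 *   2. The modeset programs the heads.
 *   3. Post-modeset: wait for raster lock to assert on all heads, then
 *      enable flip lock and move the heads to the PRIMARY/SECONDARY states.
 *
 * nvEvoDisableMergeMode() undoes all of the above.
 */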
9375 void nvEvoEnableMergeModePreModeset(NVDispEvoRec *pDispEvo,
9376                                     const NvU32 headsMask,
9377                                     NVEvoUpdateState *pUpdateState)
9378 {
9379     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
9380     const NvU32 sd = pDispEvo->displayOwner;
9381     const NvU32 primaryHead = nvGetPrimaryHwHeadFromMask(headsMask);
9382     NvU32 head;
9383 
9384     nvAssert(pDevEvo->hal->caps.supportsMergeMode);
9385     nvAssert((nvPopCount32(headsMask) > 1) &&
9386                 (primaryHead != NV_INVALID_HEAD));
9387 
9388     FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
9389         NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
9390         const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
9391         NVEvoHeadControl *pHC =
9392             &pDevEvo->gpus[sd].headControl[head];
9393 
9394         nvAssert(pHeadState->mergeMode ==
9395                     NV_EVO_MERGE_MODE_DISABLED);
9396 
9397         /*
9398          * Heads are required to be raster locked before they transition to
9399          * PRIMARY/SECONDARY merge mode.
9400          *
9401          * SETUP is the intermediate state before a head transitions to
9402          * PRIMARY/SECONDARY merge mode. While in SETUP, there is no pixel
9403          * transmission from the secondary to the primary head: the RG fetches
9404          * and drops pixels, and the viewport is filled with special gray/black pixels.
9405          */
9406         pHeadState->mergeMode = NV_EVO_MERGE_MODE_SETUP;
9407         pDevEvo->hal->SetMergeMode(pDispEvo, head, pHeadState->mergeMode,
9408                                    pUpdateState);
9409 
9410         nvAssert((pHC->serverLock == NV_EVO_NO_LOCK) &&
9411                     (pHC->clientLock == NV_EVO_NO_LOCK));
9412 
9413         pHC->mergeMode = TRUE;
9414         if (head == primaryHead) {
9415             pHC->serverLock = NV_EVO_RASTER_LOCK;
9416             pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
9417             pHC->setLockOffsetX = TRUE;
9418             pHC->crashLockUnstallMode = FALSE;
9419         } else {
9420             pHC->clientLock = NV_EVO_RASTER_LOCK;
9421             pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
9422             if (pTimings->vrr.type != NVKMS_DPY_VRR_TYPE_NONE) {
9423                 pHC->clientLockoutWindow = 4;
9424                 pHC->useStallLockPin = TRUE;
9425                 pHC->stallLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
9426             } else {
9427                 pHC->clientLockoutWindow = 2;
9428             }
9429             pHC->crashLockUnstallMode =
9430                 (pTimings->vrr.type != NVKMS_DPY_VRR_TYPE_NONE);
9431         }
9432 
9433         pHC->stereoLocked = FALSE;
9434 
9435         EvoUpdateHeadParams(pDispEvo, head, pUpdateState);
9436     }
9437 }
9438 
9439 void nvEvoEnableMergeModePostModeset(NVDispEvoRec *pDispEvo,
9440                                      const NvU32 headsMask,
9441                                      NVEvoUpdateState *pUpdateState)
9442 {
9443     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
9444     const NvU32 sd = pDispEvo->displayOwner;
9445     const NvU32 primaryHead = nvGetPrimaryHwHeadFromMask(headsMask);
9446     NvU64 startTime = 0;
9447     NvU32 head;
9448 
9449     nvAssert(pDevEvo->hal->caps.supportsMergeMode);
9450     nvAssert((nvPopCount32(headsMask) > 1) &&
9451                 (primaryHead != NV_INVALID_HEAD));
9452 
9453     FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
9454         nvAssert(pDispEvo->headState[head].mergeMode ==
9455                     NV_EVO_MERGE_MODE_SETUP);
9456 
9457         if (!EvoWaitForLock(pDevEvo, sd, head, EVO_RASTER_LOCK,
9458                             &startTime)) {
9459             nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, "Raster lock timeout");
9460             return;
9461         }
9462     }
9463 
9464     FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
9465         NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
9466         NVEvoHeadControl *pHC = &pDevEvo->gpus[sd].headControl[head];
9467 
9468         pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
9469         pHC->flipLock = TRUE;
9470 
9471         EvoUpdateHeadParams(pDispEvo, head, pUpdateState);
9472 
9473         pHeadState->mergeMode = (head == primaryHead) ?
9474             NV_EVO_MERGE_MODE_PRIMARY : NV_EVO_MERGE_MODE_SECONDARY;
9475         pDevEvo->hal->SetMergeMode(pDispEvo, head, pHeadState->mergeMode,
9476                                    pUpdateState);
9477     }
9478 }
9479 
9480 void nvEvoDisableMergeMode(NVDispEvoRec *pDispEvo,
9481                            const NvU32 headsMask,
9482                            NVEvoUpdateState *pUpdateState)
9483 {
9484     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
9485     const NvU32 sd = pDispEvo->displayOwner;
9486     NvU32 head;
9487 
9488     nvAssert(pDevEvo->hal->caps.supportsMergeMode);
9489     nvAssert(nvPopCount32(headsMask) > 1);
9490 
9491     FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
9492         NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
9493         NVEvoHeadControl *pHC =
9494             &pDevEvo->gpus[sd].headControl[head];
9495 
9496         pHeadState->mergeMode = NV_EVO_MERGE_MODE_DISABLED;
9497         pDevEvo->hal->SetMergeMode(pDispEvo, head, pHeadState->mergeMode,
9498                                    pUpdateState);
9499 
9500         pHC->mergeMode = FALSE;
9501         pHC->serverLock = NV_EVO_NO_LOCK;
9502         pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
9503         pHC->clientLock = NV_EVO_NO_LOCK;
9504         pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
9505         pHC->clientLockoutWindow = 0;
9506         pHC->setLockOffsetX = FALSE;
9507         pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
9508         pHC->flipLock = FALSE;
9509         pHC->useStallLockPin = FALSE;
9510         pHC->stallLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
9511         pHC->crashLockUnstallMode = FALSE;
9512 
9513 
9514         EvoUpdateHeadParams(pDispEvo, head, pUpdateState);
9515     }
9516 }
9517 
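/*
 * Compute the per-tile mode timings for a mode that is split horizontally
 * across numTiles heads.  The horizontal raster parameters, pixel clock,
 * and viewport width must all divide evenly by numTiles; the +1 in the
 * divisibility checks accounts for the sync/blank fields being inclusive
 * end positions.  Returns FALSE if the mode cannot be split.
 *
 * As an illustrative example (not taken from a real mode): splitting a
 * raster 7680 pixels wide with a 1188000 KHz pixel clock across
 * numTiles == 2 yields tiles 3840 pixels wide at 594000 KHz each.
 */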
NvBool nvEvoGetSingleTileHwModeTimings(const NVHwModeTimingsEvo *pSrc,
                                       const NvU32 numTiles,
                                       NVHwModeTimingsEvo *pDst)
{
    if (numTiles == 1) {
        *pDst = *pSrc;
        return TRUE;
    }

    if ((numTiles == 0) ||
            (pSrc->viewPort.out.xAdjust != 0) ||
            (pSrc->viewPort.out.width != nvEvoVisibleWidth(pSrc))) {
        return FALSE;
    }

    if (((pSrc->rasterSize.x % numTiles) != 0) ||
            (((pSrc->rasterSyncEnd.x + 1) % numTiles) != 0) ||
            (((pSrc->rasterBlankEnd.x + 1) % numTiles) != 0) ||
            (((pSrc->rasterBlankStart.x + 1) % numTiles) != 0) ||
            ((pSrc->pixelClock % numTiles) != 0) ||
            ((pSrc->viewPort.in.width % numTiles) != 0)) {
        return FALSE;
    }

    *pDst = *pSrc;

    pDst->rasterSize.x /= numTiles;
    pDst->rasterSyncEnd.x /= numTiles;
    pDst->rasterBlankEnd.x /= numTiles;
    pDst->rasterBlankStart.x /= numTiles;

    pDst->pixelClock /= numTiles;

    pDst->viewPort.out.width /= numTiles;
    pDst->viewPort.in.width /= numTiles;

    return TRUE;
}

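/*
 * Decide whether the given mode should be driven as 2Heads1OR, i.e. split
 * across two hardware heads merged into a single OR.  This requires merge
 * mode support, no client override limiting modes to one hardware head,
 * either DSC support (DP or HDMI) or the hardware YUV420 packer, a default
 * viewport out, and horizontal timings that split evenly in two; even then,
 * two heads are used only when the mode's pixel clock exceeds what a single
 * head supports.
 */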
NvBool nvEvoUse2Heads1OR(const NVDpyEvoRec *pDpyEvo,
                         const NVHwModeTimingsEvo *pTimings,
                         const struct NvKmsModeValidationParams *pParams)
{
    const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
    const NvU32 sd = pDispEvo->displayOwner;
    const NVEvoHeadCaps *pHeadCaps =
        &pDispEvo->pDevEvo->gpus[sd].capabilities.head[0];

    /* The 2Heads1OR mode cannot be used if the GPU does not
     * support merge mode, or */
    if (!pDispEvo->pDevEvo->hal->caps.supportsMergeMode ||
            /* the 2Heads1OR mode is force-disabled by the client, or */
            ((pParams->overrides &
              NVKMS_MODE_VALIDATION_MAX_ONE_HARDWARE_HEAD) != 0) ||
            /* the given dpy does not support Display Stream Compression
             * and the given mode timings do not use the hardware YUV420
             * packer, or */
            (!nvDPDpyIsDscPossible(pDpyEvo) && !nvHdmiDpySupportsDsc(pDpyEvo) &&
                (pTimings->yuv420Mode != NV_YUV420_MODE_HW)) ||
            /* a non-centered viewport out does not work with 2Heads1OR mode,
             * and for simplicity all customized viewport outs are rejected,
             * or */
            (pTimings->viewPort.out.width != nvEvoVisibleWidth(pTimings)) ||
            (pTimings->viewPort.out.xAdjust != 0) ||
            /* either HVisible, HSyncWidth, HBackPorch, HFrontPorch,
             * pixelClock, or viewPortIn width is odd and cannot be split
             * equally across two heads, or */
            ((pTimings->rasterSize.x & 1) != 0) ||
            ((pTimings->rasterSyncEnd.x & 1) != 1) ||
            ((pTimings->rasterBlankEnd.x & 1) != 1) ||
            ((pTimings->rasterBlankStart.x & 1) != 1) ||
            ((pTimings->pixelClock & 1) != 0) ||
            ((pTimings->viewPort.in.width & 1) != 0)) {
        return FALSE;
    }

    /* Use 2Heads1OR mode only if the required pixel clock is greater than the
     * maximum pixel clock supported by a head. */
    return (pTimings->pixelClock > pHeadCaps->maxPClkKHz);
}

NvBool nvIsLockGroupFlipLocked(const NVLockGroup *pLockGroup)
{
    return pLockGroup->flipLockEnabled;
}

NvBool nvEvoIsConsoleActive(const NVDevEvoRec *pDevEvo)
{
    /*
     * The actual console state can be known only after the core channel has
     * been allocated; if the core channel is not allocated yet, assume that
     * the console is active.
     */
    if (pDevEvo->core == NULL) {
        return TRUE;
    }

    /*
     * If (pDevEvo->modesetOwner == NULL), either the vbios console or the
     * NVKMS console might be active.
     *
     * If (pDevEvo->modesetOwner != NULL) but pDevEvo->modesetOwnerChanged is
     * TRUE, an external client has grabbed modeset ownership but has not yet
     * performed a modeset, so the console is still active.
     */
    if ((pDevEvo->modesetOwner == NULL) || pDevEvo->modesetOwnerChanged) {
        NvU32 sd;
        const NVDispEvoRec *pDispEvo;
        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
            if (nvGetActiveHeadMask(pDispEvo) != 0x0) {
                return TRUE;
            }
        }
    }

    return FALSE;
}
