/*
 * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvkms-types.h"

#include "nvkms-evo-states.h"
#include "dp/nvdp-connector.h"
#include "dp/nvdp-device.h"
#include "nvkms-console-restore.h"
#include "nvkms-rm.h"
#include "nvkms-dpy.h"
#include "nvkms-cursor.h"
#include "nvkms-hal.h"
#include "nvkms-hdmi.h"
#include "nvkms-modepool.h"
#include "nvkms-evo.h"
#include "nvkms-flip.h"
#include "nvkms-hw-flip.h"
#include "nvkms-dma.h"
#include "nvkms-framelock.h"
#include "nvkms-utils.h"
#include "nvkms-lut.h"
#include "nvkms-modeset.h"
#include "nvkms-prealloc.h"
#include "nvkms-rmapi.h"
#include "nvkms-surface.h"
#include "nvkms-headsurface.h"
#include "nvkms-difr.h"
#include "nvkms-vrr.h"
#include "nvkms-ioctl.h"

#include "nvctassert.h"

#include <ctrl/ctrl0073/ctrl0073dfp.h> // NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS
#include <ctrl/ctrl0073/ctrl0073system.h> // NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH
#include <ctrl/ctrl0080/ctrl0080gpu.h> // NV0080_CTRL_CMD_GPU_*
#include <ctrl/ctrl0080/ctrl0080unix.h> // NV0080_CTRL_OS_UNIX_VT_SWITCH_*
#include <ctrl/ctrl30f1.h> // NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_*
#include <ctrl/ctrl5070/ctrl5070rg.h> // NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS
#include <ctrl/ctrl5070/ctrl5070system.h> // NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2
#include <ctrl/ctrl5070/ctrl5070or.h> // NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE
#include <ctrl/ctrl0073/ctrl0073dp.h> // NV0073_CTRL_DP_CTRL

#include "nvkms.h"
#include "nvkms-private.h"
#include "nvos.h"

#include "displayport/dpcd.h"

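/* Lock types passed to EvoWaitForLock(). */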
#define EVO_RASTER_LOCK     1
#define EVO_FLIP_LOCK       2

#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_HEAD                     7:0
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT                 8:8
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT_DISABLE           0
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT_ENABLE            1
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT               9:9
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT_DISABLE         0
#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT_ENABLE          1
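
/*
 * The NVUPDATE_LUT_TIMER_NVKMS_DATAU32 definitions above are DRF-style bit
 * ranges: the NvU32 'data' passed to ScheduleLutUpdate() packs the head
 * index in bits 7:0 and the base/output LUT enables in bits 8 and 9.  As an
 * illustrative (not authoritative) sketch using the standard NVIDIA DRF
 * macros, a caller might pack and unpack the word like this:
 *
 *   data = DRF_NUM(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, apiHead) |
 *          DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _BASE_LUT, _ENABLE) |
 *          DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _OUTPUT_LUT, _ENABLE);
 *
 *   apiHead = DRF_VAL(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, data);
 */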

/*
 * This struct is used to describe a single set of GPUs to lock together by
 * GetRasterLockGroups().
 */
typedef struct _NVEvoRasterLockGroup {
    NvU32 numDisps;
    NVDispEvoPtr pDispEvoOrder[NVKMS_MAX_SUBDEVICES];
} RasterLockGroup;

/*
 * These are used to hold additional state for each DispEvo during building of
 * RasterLockGroups.
 */
typedef struct
{
    NVDispEvoPtr pDispEvo;
    NvU32 gpuId;
    RasterLockGroup *pRasterLockGroup;
} DispEntry;

typedef struct
{
    /* Array of DispEvos and their assigned RasterLockGroups. */
    NvU32 numDisps;
    DispEntry disps[NVKMS_MAX_SUBDEVICES];
} DispEvoList;

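/*
 * An NVLockGroup pairs a RasterLockGroup with a record of whether fliplock
 * has been enabled across it.
 */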
struct _NVLockGroup {
    RasterLockGroup rasterLockGroup;
    NvBool flipLockEnabled;
};

static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, NvU32 head,
                                  NvU16 x, NvU16 y,
                                  NVEvoUpdateState *updateState);
static void GetRasterLockPin(NVDispEvoPtr pDispEvo0, NvU32 head0,
                             NVDispEvoPtr pDispEvo1, NvU32 head1,
                             NVEvoLockPin *serverPin, NVEvoLockPin *clientPin);
static NvBool EvoWaitForLock(const NVDevEvoRec *pDevEvo, const NvU32 sd,
                             const NvU32 head, const NvU32 type,
                             NvU64 *pStartTime);
static void EvoUpdateHeadParams(const NVDispEvoRec *pDispEvo, NvU32 head,
                                NVEvoUpdateState *updateState);

static void SetRefClk(NVDevEvoPtr pDevEvo,
                      NvU32 sd, NvU32 head, NvBool external,
                      NVEvoUpdateState *updateState);
static NvBool ApplyLockActionIfPossible(NVDispEvoPtr pDispEvo,
                                        NVEvoSubDevPtr pEvoSubDev,
                                        NVEvoLockAction action);
static void FinishModesetOneDev(NVDevEvoRec *pDevEvo);
static void FinishModesetOneGroup(RasterLockGroup *pRasterLockGroup);
static void EnableFlipLockIfRequested(NVLockGroup *pLockGroup);

static void SyncEvoLockState(void);
static void UpdateEvoLockState(void);

static void ScheduleLutUpdate(NVDispEvoRec *pDispEvo,
                              const NvU32 apiHead, const NvU32 data,
                              const NvU64 usec);

static NvBool DowngradeColorBpc(
    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
    enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
    enum NvKmsDpyAttributeColorRangeValue *pColorRange);

NVEvoGlobal nvEvoGlobal = {
    .clientHandle = 0,
    .frameLockList = NV_LIST_INIT(&nvEvoGlobal.frameLockList),
    .devList = NV_LIST_INIT(&nvEvoGlobal.devList),
#if defined(DEBUG)
    .debugMemoryAllocationList =
        NV_LIST_INIT(&nvEvoGlobal.debugMemoryAllocationList),
#endif /* DEBUG */
};

static RasterLockGroup *globalRasterLockGroups = NULL;
static NvU32 numGlobalRasterLockGroups = 0;

/*
 * Keep track of groups of HW heads which the modeset owner has requested to be
 * fliplocked together.  All of the heads specified here are guaranteed to be
 * active.  A given head can only be in one group at a time.  Fliplock is not
 * guaranteed to be enabled in the hardware for these groups.
 */
typedef struct _FlipLockRequestedGroup {
    struct {
        NVDispEvoPtr pDispEvo;
        NvU32 flipLockHeads;
    } disp[NV_MAX_SUBDEVICES];

    NVListRec listEntry;
} FlipLockRequestedGroup;

static NVListRec requestedFlipLockGroups =
    NV_LIST_INIT(&requestedFlipLockGroups);

/*
 * The dummy infoString should be used in paths that take an
 * NVEvoInfoStringPtr where we don't need to log to a
 * string.  By setting the 's' field to NULL, nothing will be printed
 * to the infoString buffer.
 */
NVEvoInfoStringRec dummyInfoString = {
    .length = 0,
    .totalLength = 0,
    .s = NULL,
};

/*!
 * Return the NVDevEvoPtr, if any, that matches deviceId.
 */
NVDevEvoPtr nvFindDevEvoByDeviceId(NvU32 deviceId)
{
    NVDevEvoPtr pDevEvo;

    FOR_ALL_EVO_DEVS(pDevEvo) {
        if (pDevEvo->usesTegraDevice &&
            (deviceId == NVKMS_DEVICE_ID_TEGRA)) {
            return pDevEvo;
        } else if (pDevEvo->deviceId == deviceId) {
            return pDevEvo;
        }
    }

    return NULL;
}

/*!
 * Find the first unused gpuLogIndex.
 */
NvU8 nvGetGpuLogIndex(void)
{
    NVDevEvoPtr pDevEvo;
    NvU8 gpuLogIndex = 0;

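    /*
     * Loop over every subdevice of every device: if the candidate
     * gpuLogIndex is already taken, increment it and rescan from the start.
     * This is quadratic in the number of GPUs, but the count is small.
     */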
 tryAgain:
    FOR_ALL_EVO_DEVS(pDevEvo) {
        NvU32 sd;
        for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
            if (pDevEvo->pSubDevices[sd] == NULL) {
                continue;
            }
            if (gpuLogIndex == pDevEvo->pSubDevices[sd]->gpuLogIndex) {
                gpuLogIndex++;
                if (gpuLogIndex == 0xFF) {
                    nvAssert(!"Too many GPUs");
                    return NV_INVALID_GPU_LOG_INDEX;
                }
                goto tryAgain;
            }
        }
    }

    return gpuLogIndex;
}

/*!
 * Return whether there are active heads on this pDispEvo.
 */
static NvBool HasActiveHeads(NVDispEvoPtr pDispEvo)
{
    return nvGetActiveHeadMask(pDispEvo) != 0;
}

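/*
 * Blank the given head: point the cursor and all layers at NULL surfaces
 * (and, where the core channel surface is not supported, disable the LUT
 * explicitly).
 */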
static void BlankHeadEvo(NVDispEvoPtr pDispEvo, const NvU32 head,
                         NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    struct NvKmsCompositionParams emptyCursorCompParams =
        nvDefaultCursorCompositionParams(pDevEvo);

    /*
     * If the core channel surface is supported, ->SetSurface() disables the
     * LUT along with the core channel surface.  Otherwise, the LUT needs to
     * be disabled explicitly.
     */
    if (!pDevEvo->hal->caps.supportsCoreChannelSurface) {
        pDevEvo->hal->SetLUTContextDma(pDispEvo,
                                       head,
                                       NULL /* pSurfEvo */,
                                       FALSE /* baseLutEnabled */,
                                       FALSE /* outputLutEnabled */,
                                       updateState,
                                       pHeadState->bypassComposition);
    }

    nvPushEvoSubDevMaskDisp(pDispEvo);

    pDevEvo->hal->SetCursorImage(pDevEvo,
                                 head,
                                 NULL /* pSurfaceEvo */,
                                 updateState,
                                 &emptyCursorCompParams);

    {
        NVFlipChannelEvoHwState hwState = { { 0 } };
        NvU32 layer;

        for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
            pDevEvo->hal->Flip(pDevEvo,
                               pDevEvo->head[head].layer[layer],
                               &hwState,
                               updateState,
                               FALSE /* bypassComposition */);
        }
    }

    nvPopEvoSubDevMask(pDevEvo);
}

void nvEvoDetachConnector(NVConnectorEvoRec *pConnectorEvo, const NvU32 head,
                          NVEvoModesetUpdateState *pModesetUpdateState)
{
    NVEvoUpdateState *updateState = &pModesetUpdateState->updateState;
    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NvU32 orIndex;

    for (orIndex = 0;
            orIndex < ARRAY_LEN(pConnectorEvo->or.ownerHeadMask); orIndex++) {
        if ((pConnectorEvo->or.ownerHeadMask[orIndex] & NVBIT(head)) != 0x0) {
            break;
        }
    }

    if (orIndex >= ARRAY_LEN(pConnectorEvo->or.ownerHeadMask)) {
        nvAssert(!"Did not find attached OR");
        return;
    }

    pConnectorEvo->or.ownerHeadMask[orIndex] &= ~NVBIT(head);

    /* Disable the palette, cursor, and ISO ctxDma on this head. */
    BlankHeadEvo(pDispEvo, head, updateState);

    // Only tear down the actual output for SLI primary.
    nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);

    pDevEvo->hal->ORSetControl(pDevEvo,
                               pConnectorEvo,
                               pTimings->protocol,
                               orIndex,
                               pConnectorEvo->or.ownerHeadMask[orIndex],
                               updateState);

    /*
     * Tell RM that no DisplayID is associated with this head anymore.
     */
    pDevEvo->hal->HeadSetDisplayId(pDevEvo, head, 0x0, updateState);

    nvPopEvoSubDevMask(pDevEvo);

    pModesetUpdateState->connectorIds =
        nvAddDpyIdToDpyIdList(pHeadState->pConnectorEvo->displayId,
                              pModesetUpdateState->connectorIds);
}

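/*
 * Choose the OR index on which to attach the given connector: the primary
 * head (and any non-SOR connector) uses the connector's primary OR; a
 * secondary head picks an unused SOR from the connector's secondary mask.
 */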
static
NvU32 GetSorIndexToAttachConnector(const NVConnectorEvoRec *pConnectorEvo,
                                   const NvBool isPrimaryHead)
{
    NvU32 orIndex = NV_INVALID_OR;

    nvAssert(isPrimaryHead ||
                (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR));

    if (isPrimaryHead ||
            (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR)) {
        orIndex = pConnectorEvo->or.primary;
    } else {
        NvU32 i;

        FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.secondaryMask) {
            if (pConnectorEvo->or.ownerHeadMask[i] == 0x0) {
                orIndex = i;
                break;
            }
        } FOR_EACH_INDEX_IN_MASK_END;
    }

    return orIndex;
}

void nvEvoAttachConnector(NVConnectorEvoRec *pConnectorEvo,
                          const NvU32 head,
                          const NvU32 isPrimaryHead,
                          NVDPLibModesetStatePtr pDpLibModesetState,
                          NVEvoModesetUpdateState *pModesetUpdateState)
{
    NVEvoUpdateState *updateState = &pModesetUpdateState->updateState;
    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    NvU32 orIndex =
        GetSorIndexToAttachConnector(pConnectorEvo, isPrimaryHead);
    NvU32 i;

    nvAssert(orIndex != NV_INVALID_OR);
    nvAssert(!(pConnectorEvo->or.ownerHeadMask[orIndex] & NVBIT(head)));
    nvAssert(pHeadState->activeRmId != 0);

    FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.ownerHeadMask[orIndex]) {
        nvAssert(pTimings->protocol ==
                 pDispEvo->headState[i].timings.protocol);
    } FOR_EACH_INDEX_IN_MASK_END;

    pConnectorEvo->or.ownerHeadMask[orIndex] |= NVBIT(head);

    // Only set up the actual output for SLI primary.
    nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);

    pDevEvo->hal->ORSetControl(pDevEvo,
                               pConnectorEvo,
                               pTimings->protocol,
                               orIndex,
                               pConnectorEvo->or.ownerHeadMask[orIndex],
                               updateState);

    /* Tell RM which DisplayID is associated with the head. */
    pDevEvo->hal->HeadSetDisplayId(pDevEvo,
                                   head, pHeadState->activeRmId,
                                   updateState);

    nvPopEvoSubDevMask(pDevEvo);

    pModesetUpdateState->connectorIds =
        nvAddDpyIdToDpyIdList(pConnectorEvo->displayId,
                              pModesetUpdateState->connectorIds);
    pModesetUpdateState->pDpLibModesetState[head] = pDpLibModesetState;
}

void nvSetViewPortPointInEvo(NVDispEvoPtr pDispEvo,
                             const NvU32 head,
                             const NvU16 x,
                             const NvU16 y,
                             NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    NVEvoSubDevHeadStateRec *pSdHeadState =
        &pDevEvo->gpus[pDispEvo->displayOwner].headState[head];

    pSdHeadState->viewPortPointIn.x = x;
    pSdHeadState->viewPortPointIn.y = y;

    EvoSetViewportPointIn(pDispEvo, head, x, y, updateState);
}

//
// Send the Update method, which causes all of the other methods in the push
// buffer to take effect.
//
static void EvoUpdateAndKickOffWithNotifier(
    const NVDispEvoRec *pDispEvo,
    NvBool notify,
    NvBool sync, int notifier,
    NVEvoUpdateState *updateState,
    NvBool releaseElv)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    // Calling code should reject operations that send updates while the
    // console is active.
    nvAssert(!pDevEvo->coreInitMethodsPending);

    // It doesn't make sense to request sync without requesting a notifier.
    nvAssert(!sync || notify);

    if (notify) {
        // Clear the completion notifier.
        pDevEvo->hal->InitCompNotifier(pDispEvo, notifier);
    }

    nvPushEvoSubDevMaskDisp(pDispEvo);
    pDevEvo->hal->SetNotifier(pDevEvo, notify, sync, notifier,
                              updateState);
    pDevEvo->hal->Update(pDevEvo, updateState, releaseElv);
    nvPopEvoSubDevMask(pDevEvo);

    // Wait for completion.
    if (sync) {
        pDevEvo->hal->WaitForCompNotifier(pDispEvo, notifier);
    }

    if (notify) {
        const NVDispEvoRec *pDispEvoTmp;
        NVEvoUpdateState coreUpdateState = { };
        NvU32 sd;

        // To work around HW bug 1945716 and to prevent subsequent core updates
        // from triggering unwanted notifier writes, set the core channel
        // completion notifier control and context DMA to disabled when
        // notification is not requested.

        nvPushEvoSubDevMaskDisp(pDispEvo);
        pDevEvo->hal->SetNotifier(pDevEvo,
                                  FALSE /* notify */,
                                  FALSE /* awaken */,
                                  0     /* notifier */,
                                  &coreUpdateState);
        nvPopEvoSubDevMask(pDevEvo);

        // SetNotifier is only expected to push core channel methods.
        FOR_ALL_EVO_DISPLAYS(pDispEvoTmp, sd, pDevEvo) {
            if (pDispEvoTmp == pDispEvo) {
                nvAssert(coreUpdateState.subdev[sd].channelMask ==
                         DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE));
            } else {
                nvAssert(coreUpdateState.subdev[sd].channelMask == 0x0);
            }
        }

        // We don't really need to kick off here, but might as well to keep the
        // state cache up to date.  Note that we intentionally don't use
        // pDevEvo->hal->Update since we don't want another Update.
        nvDmaKickoffEvo(pDevEvo->core);
    }
}

void nvEvoUpdateAndKickOff(const NVDispEvoRec *pDispEvo, NvBool sync,
                           NVEvoUpdateState *updateState, NvBool releaseElv)
{
    EvoUpdateAndKickOffWithNotifier(pDispEvo, sync, sync, 0, updateState,
                                    releaseElv);
}

void nvDoIMPUpdateEvo(NVDispEvoPtr pDispEvo,
                      NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    // IMP pre-modeset
    pDevEvo->hal->PrePostIMP(pDispEvo, TRUE /* isPre */);

    // Do the update
    nvEvoUpdateAndKickOff(pDispEvo, TRUE, updateState, TRUE /* releaseElv */);

    // IMP post-modeset
    pDevEvo->hal->PrePostIMP(pDispEvo, FALSE /* isPre */);
}

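/*
 * Kick off the flip contained in 'updateState'.  If any LUT notifiers need
 * to be committed for this update, request a (non-syncing) completion
 * notifier so the LUT update can be tracked; otherwise, just kick off the
 * accumulated methods.
 */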
void nvEvoFlipUpdate(NVDispEvoPtr pDispEvo,
                     NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    int notifier = -1;

    if (nvEvoLUTNotifiersNeedCommit(pDispEvo)) {
        notifier = nvEvoCommitLUTNotifiers(pDispEvo);
    }

    if (notifier >= 0) {
        EvoUpdateAndKickOffWithNotifier(pDispEvo,
                                        TRUE /* notify */,
                                        FALSE /* sync */,
                                        notifier,
                                        updateState,
                                        TRUE /* releaseElv */);
    } else {
        pDevEvo->hal->Update(pDevEvo, updateState, TRUE /* releaseElv */);
    }
}

/*!
 * Tell RM not to expect anything other than a stall lock change during the
 * next update.
 */
void nvEvoArmLightweightSupervisor(NVDispEvoPtr pDispEvo,
                                   const NvU32 head,
                                   NvBool isVrr,
                                   NvBool enable)
{
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS params = { };
    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;

    if (!nvHeadIsActive(pDispEvo, head)) {
        return;
    }

    nvAssert(!pTimings->interlaced && !pTimings->doubleScan);

    params.subDeviceInstance = pDispEvo->displayOwner;
    params.displayId = pHeadState->activeRmId;
    params.bArmLWSV = enable;
    params.bVrrState = isVrr;
    params.vActive = nvEvoVisibleHeight(pTimings);
    params.vfp = pTimings->rasterSize.y -
                 pTimings->rasterBlankStart.y;

    if (nvRmApiControl(nvEvoGlobal.clientHandle,
                       pDispEvo->pDevEvo->displayCommonHandle,
                       NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR,
                       &params, sizeof(params))
            != NVOS_STATUS_SUCCESS) {
        nvAssert(!"ARM_LIGHTWEIGHT_SUPERVISOR failed");
    }
}

/*
 * Convert from NVHwModeTimingsEvoPtr to NvModeTimingsPtr.
 *
 * Note that converting from NvModeTimingsPtr to
 * NVHwModeTimingsEvoPtr (via
 * ConstructHwModeTimingsFromNvModeTimings()) and converting back from
 * NVHwModeTimingsEvoPtr to NvModeTimingsPtr (via
 * nvConstructNvModeTimingsFromHwModeTimings()) can lose precision in
 * the case of interlaced modes due to the division by 2.  This
 * function should only be used for reporting purposes.
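 *
 * For example, an interlaced mode halves the vertical values on the way in,
 * so any odd value loses its low bit and reads back one line smaller after
 * the round trip.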
 */

void
nvConstructNvModeTimingsFromHwModeTimings(const NVHwModeTimingsEvo *pTimings,
                                          NvModeTimingsPtr pModeTimings)
{
    NvU32 rasterBlankEndY, rasterSyncEndY;

    if (!pTimings || !pModeTimings) {
        nvAssert(!"Null params");
        return;
    }

    pModeTimings->pixelClockHz  = KHzToHz(pTimings->pixelClock);
    pModeTimings->hVisible      = nvEvoVisibleWidth(pTimings);
    pModeTimings->hSyncStart    = pTimings->rasterSize.x -
                                  pTimings->rasterBlankEnd.x - 1;
    pModeTimings->hSyncEnd      = pTimings->rasterSize.x -
                                  pTimings->rasterBlankEnd.x +
                                  pTimings->rasterSyncEnd.x;
    pModeTimings->hTotal        = pTimings->rasterSize.x;
    pModeTimings->vVisible      = nvEvoVisibleHeight(pTimings);
    rasterBlankEndY             = pTimings->rasterBlankEnd.y + 1;
    rasterSyncEndY              = pTimings->rasterSyncEnd.y + 1;

    if (pTimings->interlaced) {
        rasterBlankEndY *= 2;
        rasterSyncEndY *= 2;
    }

    /*
     * The real pixel clock and width values for modes using YUV 420 emulation
     * are half of the incoming values parsed from the EDID. This conversion is
     * performed here, so NvModeTimings will have the user-visible (full width)
     * values, and NVHwModeTimingsEvo will have the real (half width) values.
     */
    if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) {
        pModeTimings->pixelClockHz *= 2;
        pModeTimings->hVisible *= 2;
        pModeTimings->hSyncStart *= 2;
        pModeTimings->hSyncEnd *= 2;
        pModeTimings->hTotal *= 2;
    }

    pModeTimings->vSyncStart    = pTimings->rasterSize.y - rasterBlankEndY;
    pModeTimings->vSyncEnd      = pTimings->rasterSize.y - rasterBlankEndY +
                                  rasterSyncEndY;
    pModeTimings->vTotal        = pTimings->rasterSize.y;
    pModeTimings->interlaced    = pTimings->interlaced;
    pModeTimings->doubleScan    = pTimings->doubleScan;
    pModeTimings->hSyncNeg      = pTimings->hSyncPol;
    pModeTimings->hSyncPos      = !pTimings->hSyncPol;
    pModeTimings->vSyncNeg      = pTimings->vSyncPol;
    pModeTimings->vSyncPos      = !pTimings->vSyncPol;
    pModeTimings->RRx1k         = (pModeTimings->pixelClockHz /
                                   (pModeTimings->hTotal *
                                    pModeTimings->vTotal));

    if (pModeTimings->doubleScan) {
        pModeTimings->vVisible /= 2;
        pModeTimings->vSyncStart /= 2;
        pModeTimings->vSyncEnd /= 2;
        pModeTimings->vTotal /= 2;
    }

    pModeTimings->hdmi3D = pTimings->hdmi3D;
    pModeTimings->yuv420Mode = pTimings->yuv420Mode;
}



/*
 * Tweak pTimings to be compatible with gsync.
 */

static void TweakTimingsForGsync(const NVDpyEvoRec *pDpyEvo,
                                 NVHwModeTimingsEvoPtr pTimings,
                                 NVEvoInfoStringPtr pInfoString,
                                 const enum NvKmsStereoMode stereo)
{
    NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS gsyncOptTimingParams = { 0 };
    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
    NvModeTimings modeTimings;
    NvU32 ret;

    /*
     * If 3D Vision Stereo is enabled, do not actually tweak the mode
     * timings; WAR for bug 692266.
     */

    if (nvIs3DVisionStereoEvo(stereo)) {

        nvEvoLogInfoString(pInfoString,
                           "Not adjusting mode timings of %s for Quadro Sync "
                           "compatibility since 3D Vision Stereo is enabled.",
                           pDpyEvo->name);
        return;
    }

    gsyncOptTimingParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);

    if (pDpyEvo->pConnectorEvo->legacyType ==
        NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) {

        gsyncOptTimingParams.output =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_SOR;
        gsyncOptTimingParams.adjust =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_DFP;

    } else if (pDpyEvo->pConnectorEvo->legacyType ==
               NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) {

        gsyncOptTimingParams.output =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_DAC;
        gsyncOptTimingParams.adjust =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_CRT;
    }

    gsyncOptTimingParams.pixelClockHz = KHzToHz(pTimings->pixelClock);

    if (pTimings->interlaced) {
        gsyncOptTimingParams.structure =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED;
    } else {
        gsyncOptTimingParams.structure =
            NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_PROGRESSIVE;
    }

    gsyncOptTimingParams.hDeltaStep =
        NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_STEP_USE_DEFAULTS;
    gsyncOptTimingParams.vDeltaStep =
        NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_STEP_USE_DEFAULTS;
    gsyncOptTimingParams.hDeltaMax =
        NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_MAX_USE_DEFAULTS;
    gsyncOptTimingParams.vDeltaMax =
        NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_MAX_USE_DEFAULTS;

    gsyncOptTimingParams.hSyncEnd       = pTimings->rasterSyncEnd.x + 1;
    gsyncOptTimingParams.hBlankEnd      = pTimings->rasterBlankEnd.x + 1;
    gsyncOptTimingParams.hBlankStart    = pTimings->rasterBlankStart.x + 1;
    gsyncOptTimingParams.hTotal         = pTimings->rasterSize.x;

    gsyncOptTimingParams.vSyncEnd       = pTimings->rasterSyncEnd.y + 1;
    gsyncOptTimingParams.vBlankEnd      = pTimings->rasterBlankEnd.y + 1;
    gsyncOptTimingParams.vBlankStart    = pTimings->rasterBlankStart.y + 1;
    gsyncOptTimingParams.vTotal         = pTimings->rasterSize.y;

    gsyncOptTimingParams.vInterlacedBlankEnd = pTimings->rasterVertBlank2End;
    gsyncOptTimingParams.vInterlacedBlankStart =
        pTimings->rasterVertBlank2Start;

    switch (pTimings->protocol) {
        case NVKMS_PROTOCOL_DAC_RGB:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_DAC_RGB_CRT;
            break;
        case NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC:
            nvAssert(!"GSYNC_GET_OPTIMIZED_TIMING doesn't handle external TMDS.");
            // fallthrough
        case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_A;
            break;
        case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_B;
            break;
        case NVKMS_PROTOCOL_SOR_DUAL_TMDS:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DUAL_TMDS;
            break;
        case NVKMS_PROTOCOL_SOR_DP_A:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_A;
            break;
        case NVKMS_PROTOCOL_SOR_DP_B:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_B;
            break;
        case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_LVDS_CUSTOM;
            break;
        case NVKMS_PROTOCOL_SOR_HDMI_FRL:
            gsyncOptTimingParams.protocol =
                NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_HDMI_FRL;
            break;
        case NVKMS_PROTOCOL_DSI:
            nvAssert(!"GSYNC_GET_OPTIMIZED_TIMING doesn't handle DSI.");
            return;
    }

    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
                         pDispEvo->pFrameLockEvo->device,
                         NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING,
                         &gsyncOptTimingParams,
                         sizeof(gsyncOptTimingParams));

    if (ret != NVOS_STATUS_SUCCESS) {
        nvAssert(!"Failed to convert to Quadro Sync safe timing");
        /* do not apply the timings returned by RM if the call failed */
        return;
    }

    nvConstructNvModeTimingsFromHwModeTimings(pTimings, &modeTimings);

    nvEvoLogInfoString(pInfoString,
            "Adjusting Mode Timings for Quadro Sync Compatibility");
    nvEvoLogInfoString(pInfoString, " Old Timings:");
    nvEvoLogModeValidationModeTimings(pInfoString, &modeTimings);

    pTimings->rasterSyncEnd.x           = gsyncOptTimingParams.hSyncEnd - 1;
    pTimings->rasterSyncEnd.y           = gsyncOptTimingParams.vSyncEnd - 1;
    pTimings->rasterBlankEnd.x          = gsyncOptTimingParams.hBlankEnd - 1;
    pTimings->rasterBlankEnd.y          = gsyncOptTimingParams.vBlankEnd - 1;
    pTimings->rasterBlankStart.x        = gsyncOptTimingParams.hBlankStart - 1;
    pTimings->rasterBlankStart.y        = gsyncOptTimingParams.vBlankStart - 1;
    pTimings->rasterSize.x              = gsyncOptTimingParams.hTotal;
    pTimings->rasterSize.y              = gsyncOptTimingParams.vTotal;

    if (gsyncOptTimingParams.structure ==
        NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED) {
        pTimings->rasterVertBlank2Start =
            gsyncOptTimingParams.vInterlacedBlankStart;
        pTimings->rasterVertBlank2End =
            gsyncOptTimingParams.vInterlacedBlankEnd;
    }

    pTimings->pixelClock = HzToKHz(gsyncOptTimingParams.pixelClockHz);

    nvConstructNvModeTimingsFromHwModeTimings(pTimings, &modeTimings);

    nvEvoLogInfoString(pInfoString, " New Timings:");
    nvEvoLogModeValidationModeTimings(pInfoString, &modeTimings);
}

static NvBool HeadStateIsHdmiTmdsDeepColor(const NVDispHeadStateEvoRec *pHeadState)
{
    nvAssert(pHeadState->pConnectorEvo != NULL);

    // Check for HDMI TMDS.
    if (pHeadState->pConnectorEvo->isHdmiEnabled &&
        (pHeadState->timings.protocol != NVKMS_PROTOCOL_SOR_HDMI_FRL)) {
        // Check for pixelDepth >= 30.
        switch (pHeadState->pixelDepth) {
            case NVKMS_PIXEL_DEPTH_18_444:
            case NVKMS_PIXEL_DEPTH_24_444:
            case NVKMS_PIXEL_DEPTH_20_422:
            case NVKMS_PIXEL_DEPTH_16_422:
                return FALSE;
            case NVKMS_PIXEL_DEPTH_30_444:
                return TRUE;
        }
    }

    return FALSE;
}

/*!
 * Check whether rasterlock is possible between the two head states.
 * Note that we don't compare viewports, but I don't believe the viewport size
 * affects whether it is possible to rasterlock.
 */

static NvBool RasterLockPossible(const NVDispHeadStateEvoRec *pHeadState1,
                                 const NVDispHeadStateEvoRec *pHeadState2)
{
    const NVHwModeTimingsEvo *pTimings1 = &pHeadState1->timings;
    const NVHwModeTimingsEvo *pTimings2 = &pHeadState2->timings;

    /*
     * XXX Bug 4235728: With HDMI TMDS signaling >= 10 BPC, display requires a
     * higher VPLL clock multiplier varying by pixel depth, which can cause
     * rasterlock to fail between heads with differing multipliers. So, if a
     * head is using HDMI TMDS >= 10 BPC, it can only rasterlock with heads
     * that are using HDMI TMDS with the same pixel depth.
     */

    // If either head is HDMI TMDS DeepColor (10+ BPC)...
    if (HeadStateIsHdmiTmdsDeepColor(pHeadState1) ||
        HeadStateIsHdmiTmdsDeepColor(pHeadState2)) {
        // The other head must also be HDMI TMDS DeepColor.
        if (!HeadStateIsHdmiTmdsDeepColor(pHeadState1) ||
            !HeadStateIsHdmiTmdsDeepColor(pHeadState2)) {
            return FALSE;
        }

        // Both heads must have identical pixel depth.
        if (pHeadState1->pixelDepth != pHeadState2->pixelDepth) {
            return FALSE;
        }
    }

    return ((pTimings1->rasterSize.x       == pTimings2->rasterSize.x) &&
            (pTimings1->rasterSize.y       == pTimings2->rasterSize.y) &&
            (pTimings1->rasterSyncEnd.x    == pTimings2->rasterSyncEnd.x) &&
            (pTimings1->rasterSyncEnd.y    == pTimings2->rasterSyncEnd.y) &&
            (pTimings1->rasterBlankEnd.x   == pTimings2->rasterBlankEnd.x) &&
            (pTimings1->rasterBlankEnd.y   == pTimings2->rasterBlankEnd.y) &&
            (pTimings1->rasterBlankStart.x == pTimings2->rasterBlankStart.x) &&
            (pTimings1->rasterBlankStart.y == pTimings2->rasterBlankStart.y) &&
            (pTimings1->rasterVertBlank2Start ==
             pTimings2->rasterVertBlank2Start) &&
            (pTimings1->rasterVertBlank2End ==
             pTimings2->rasterVertBlank2End) &&
            (pTimings1->pixelClock         == pTimings2->pixelClock) &&
            (pTimings1->hSyncPol           == pTimings2->hSyncPol) &&
            (pTimings1->vSyncPol           == pTimings2->vSyncPol) &&
            (pTimings1->interlaced         == pTimings2->interlaced) &&
            (pTimings1->doubleScan         == pTimings2->doubleScan));
}

/*!
 * Fill the overscan color struct to be passed to SetRasterParams based on
 * whether or not SW yuv420 is enabled.
 *
 * \param[out] pOverscanColor     The overscan color struct to be filled
 * \param[in] yuv420              Whether or not SW yuv420 is enabled
 */
static void SetOverscanColor(NVEvoColorPtr pOverscanColor, NvBool yuv420)
{
    // Black in RGB format.
    // If we're using an emulated YUV 4:2:0 mode, set the equivalent in
    // YUV ITU-R BT.709 (64/64/512).
    if (yuv420) {
        pOverscanColor->red = 64;
        pOverscanColor->green = 64;
        pOverscanColor->blue = 512;
    } else {
        pOverscanColor->red = 0;
        pOverscanColor->green = 0;
        pOverscanColor->blue = 0;
    }

#if defined(DEBUG)
    // Override the overscan color to red in debug builds.
    // XXX This will look different for YUV 4:2:0
    pOverscanColor->red = 1023;
    pOverscanColor->green = 0;
    pOverscanColor->blue = 0;
#endif
}

void nvEvoDisableHwYUV420Packer(const NVDispEvoRec *pDispEvo,
                                const NvU32 head,
                                NVEvoUpdateState *pUpdateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hwYuv420 = FALSE;
    EvoUpdateHeadParams(pDispEvo, head, pUpdateState);
}

/*
 * Send the raster timings for the pDpyEvo to EVO.
 */
void nvEvoSetTimings(NVDispEvoPtr pDispEvo,
                     const NvU32 head,
                     NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
    const NVDscInfoEvoRec *pDscInfo = &pHeadState->dscInfo;
    const enum nvKmsPixelDepth pixelDepth = pHeadState->pixelDepth;
    NVEvoColorRec overscanColor;

    nvPushEvoSubDevMaskDisp(pDispEvo);
    SetOverscanColor(&overscanColor, (pTimings->yuv420Mode ==
                                      NV_YUV420_MODE_SW));

    pDevEvo->hal->SetRasterParams(pDevEvo, head,
                                  pTimings, pHeadState->tilePosition,
                                  pDscInfo, &overscanColor, updateState);

    // Set the head parameters
    pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].interlaced =
        pTimings->interlaced;
    pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hdmi3D =
        pTimings->hdmi3D;

    /*
     * Current HW does not support the combination of HW YUV420 and DSC.
     * HW YUV420 is currently only supported with HDMI, so we should never see
     * the combination of DP DSC and HW YUV420.
     * The combination of HDMI FRL DSC and HW YUV420 should be disallowed by
     * the HDMI library.
     */
    nvAssert(!((pTimings->yuv420Mode == NV_YUV420_MODE_HW) &&
               (pDscInfo->type != NV_DSC_INFO_EVO_TYPE_DISABLED)));

    pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hwYuv420 =
        (pTimings->yuv420Mode == NV_YUV420_MODE_HW);

    EvoUpdateHeadParams(pDispEvo, head, updateState);

    pDevEvo->hal->SetDscParams(pDispEvo, head, pDscInfo, pixelDepth);

    nvPopEvoSubDevMask(pDevEvo);
}

/*
 * Increase the size of the provided RasterLockGroup array by 1.
 *
 * This involves incrementing *pNumRasterLockGroups, reallocating the
 * pRasterLockGroups array, and initializing the new entry.
 */
static RasterLockGroup *GrowRasterLockGroup(RasterLockGroup *pRasterLockGroups,
                                            unsigned int *pNumRasterLockGroups)
{
    RasterLockGroup *pNewRasterLockGroups, *pRasterLockGroup;
    unsigned int numRasterLockGroups;

    numRasterLockGroups = *pNumRasterLockGroups;

    numRasterLockGroups++;
    pNewRasterLockGroups =
        nvRealloc(pRasterLockGroups,
                  numRasterLockGroups * sizeof(RasterLockGroup));
    if (!pNewRasterLockGroups) {
        nvFree(pRasterLockGroups);
        *pNumRasterLockGroups = 0;
        return NULL;
    }

    pRasterLockGroup = &pNewRasterLockGroups[numRasterLockGroups - 1];
    pRasterLockGroup->numDisps = 0;

    *pNumRasterLockGroups = numRasterLockGroups;

    return pNewRasterLockGroups;
}

static RasterLockGroup *CopyAndAppendRasterLockGroup(
    RasterLockGroup *pRasterLockGroups,
    unsigned int *pNumRasterLockGroups,
    const RasterLockGroup *source)
{
    RasterLockGroup *dest;

    pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups,
                                            pNumRasterLockGroups);
    if (pRasterLockGroups) {
        dest = &pRasterLockGroups[*pNumRasterLockGroups - 1];
        nvkms_memcpy(dest, source, sizeof(RasterLockGroup));
    }

    return pRasterLockGroups;
}

static void AddDispEvoIntoRasterLockGroup(RasterLockGroup *pRasterLockGroup,
                                          NVDispEvoPtr pDispEvo)
{
    NvU32 i;

    /*
     * The extent of a RasterLockGroup is the largest number of GPUs that can
     * be linked together.
     */
    nvAssert(pRasterLockGroup->numDisps < NVKMS_MAX_SUBDEVICES);

    /* Caller should keep track of not adding duplicate entries. */
    for (i = 0; i < pRasterLockGroup->numDisps; i++) {
        nvAssert(pRasterLockGroup->pDispEvoOrder[i] != pDispEvo);
    }

    /* Add to the end of the array. */
    pRasterLockGroup->pDispEvoOrder[pRasterLockGroup->numDisps] = pDispEvo;
    pRasterLockGroup->numDisps++;
}

static const RasterLockGroup *FindRasterLockGroupForDispEvo(
    const RasterLockGroup *pRasterLockGroups,
    unsigned int numRasterLockGroups,
    const NVDispEvoPtr pDispEvo)
{
    const RasterLockGroup *pRasterLockGroup;
    NvU32 i;

    for (pRasterLockGroup = pRasterLockGroups;
         pRasterLockGroup < pRasterLockGroups + numRasterLockGroups;
         pRasterLockGroup++) {
        for (i = 0; i < pRasterLockGroup->numDisps; i++) {
            if (pRasterLockGroup->pDispEvoOrder[i] == pDispEvo) {
                return pRasterLockGroup;
            }
        }
    }

    return NULL;
}

static DispEntry *DispEvoListFindDispByGpuId(DispEvoList *list, NvU32 gpuId)
{
    NvU32 i;

    for (i = 0; i < list->numDisps; i++) {
        if (list->disps[i].gpuId == gpuId) {
            return &list->disps[i];
        }
    }

    return NULL;
}

static void DispEvoListInit(DispEvoList *list)
{
    list->numDisps = 0;
}

static void DispEvoListAppend(DispEvoList *list, NVDispEvoPtr pDispEvo)
{
    nvAssert(DispEvoListFindDispByGpuId(
                 list, nvGpuIdOfDispEvo(pDispEvo)) == NULL);

    nvAssert(list->numDisps < ARRAY_LEN(list->disps));
    list->disps[list->numDisps].pDispEvo = pDispEvo;
    list->disps[list->numDisps].gpuId = nvGpuIdOfDispEvo(pDispEvo);
    list->disps[list->numDisps].pRasterLockGroup = NULL;
    list->numDisps++;
}

/*
 * Helper function to look up, for a gpuId, the list of connected GPUs in
 * NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS.
 */
static NV0000_CTRL_GPU_VIDEO_LINKS *FindLinksForGpuId(
    NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS *vidLinksParams,
    NvU32 gpuId)
{
    NvU32 i;

    for (i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++) {
        if (vidLinksParams->links[i].gpuId == NV0000_CTRL_GPU_INVALID_ID) {
            break;
        }

        if (vidLinksParams->links[i].gpuId == gpuId) {
            return &vidLinksParams->links[i];
        }
    }

    return NULL;
}

static void BuildRasterLockGroupFromVideoLinks(
    DispEvoList *list,
    RasterLockGroup *pRasterLockGroup,
    NvU32 gpuId,
    NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS *vidLinksParams)
{
    DispEntry *dispEntry;
    NV0000_CTRL_GPU_VIDEO_LINKS *links;
    NvU32 i;

    /* Find the correct DispEntry for the gpuId.  If we can't find one, the
     * gpuId must refer to a DevEvo that was not listed in our DispEvoList:
     * ignore these links at this point. */
    dispEntry = DispEvoListFindDispByGpuId(list, gpuId);
    if (!dispEntry) {
        return;
    }

    /*
     * Unless we've seen this gpuId already, add it into the current
     * RasterLockGroup and try to discover bridged GPUs.
     */
    if (!dispEntry->pRasterLockGroup) {
        /* Assign in the current RasterLockGroup. */
        AddDispEvoIntoRasterLockGroup(pRasterLockGroup, dispEntry->pDispEvo);
        dispEntry->pRasterLockGroup = pRasterLockGroup;

        /* First, get the links for this gpuId. */
        links = FindLinksForGpuId(vidLinksParams, gpuId);

        /* Recurse into connected GPUs. */
        if (links) {
            for (i = 0; i < NV0000_CTRL_GPU_MAX_VIDEO_LINKS; i++) {
                if (links->connectedGpuIds[i] == NV0000_CTRL_GPU_INVALID_ID) {
                    break;
                }

                BuildRasterLockGroupFromVideoLinks(list,
                                                   pRasterLockGroup,
                                                   links->connectedGpuIds[i],
                                                   vidLinksParams);
            }
        }
    }
}

/*
 * Stateless (RM SLI/client SLI agnostic) discovery of bridged GPUs: build
 * RasterLockGroups for all non-RM SLI devices based on the found GPU links.
 *
 * This function and BuildRasterLockGroupFromVideoLinks() implement a simple
 * algorithm that puts clusters of bridged GPUs into distinct RasterLockGroups.
 * Here's an outline of how we basically generate the final RasterLockGroups:
 *
 * 1. Create a DispEvoList array to hold RasterLockGroup state for all the
 *    DispEvo objects in the system.
 *
 * 2. Query RM for an array of video links for each GPU.
 *
 * 3. As long as the DispEvoList contains DispEvos of the given pDevEvo
 *    without a group, find the first occurrence of such, create a new
 *    group, and populate it by recursively adding the DispEvo and all
 *    its connected DispEvos into the new group.
 *
 * 4. Once all known DispEvos are assigned the result will be a list of
 *    global RasterLockGroups, each of which hosts <N> DispEvos that are
 *    connected together.
 *
 * The result of this function should be cached once and later used to
 * cheaply look up the appropriate, immutable RasterLockGroup for a DispEvo.
 */
static RasterLockGroup *GetRasterLockGroupsStateless(
    unsigned int *pNumRasterLockGroups)
{
    RasterLockGroup *pRasterLockGroups = NULL;
    RasterLockGroup *pRasterLockGroup;
    DispEvoList evoList;
    NVDevEvoPtr pCurDev;
    NVDispEvoPtr pCurDisp;
    NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS *vidLinksParams;
    NvU32 sd;
    NvU32 i;

    DispEvoListInit(&evoList);

    /*
     * First create an array of DispEntries to hold some state for all the
     * DispEvos in the system.
     */
    FOR_ALL_EVO_DEVS(pCurDev) {
        /*
         * Only include non RM SLI devices so as to not clash with multi-GPU
         * RM SLI devices.
         */
        if (pCurDev->numSubDevices == 1) {
            FOR_ALL_EVO_DISPLAYS(pCurDisp, sd, pCurDev) {
                DispEvoListAppend(&evoList, pCurDisp);
            }
        }
    }

    /*
     * Ask RM about the currently known video links.
     */
    vidLinksParams = nvCalloc(1, sizeof(*vidLinksParams));
    if (!vidLinksParams) {
        return NULL;
    }

    if (nvRmApiControl(nvEvoGlobal.clientHandle,
                       nvEvoGlobal.clientHandle,
                       NV0000_CTRL_CMD_GPU_GET_VIDEO_LINKS,
                       vidLinksParams,
                       sizeof(*vidLinksParams)) == NVOS_STATUS_SUCCESS) {

        for (i = 0; i < evoList.numDisps; i++) {
            /*
             * Create a new group starting from the first DispEvo not yet
             * assigned into a RasterLockGroup, and all GPUs possibly reachable
             * from it through bridges.
             *
             * TODO: Consider if we should only ever start a new
             * RasterLockGroup with a GPU that has only one connection and not
             * two. Then the group's pDispEvoOrder would always start from a
             * "leaf" GPU of a linkage graph. But will the GPU links always be
             * linear and non-branching? NV0000_CTRL_GPU_GET_VIDEO_LINKS_PARAMS
             * makes it possible to represent GPUs with any number of links.
             * Either FinishModesetOneGroup() must be able to handle that
             * (in which case this is not a concern) or we must be able to
             * trust that only 0-2 links will be reported per GPU.
             */
            if (evoList.disps[i].pRasterLockGroup) {
                continue;
            }

            pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups,
                                                    pNumRasterLockGroups);
            if (!pRasterLockGroups) {
                nvFree(vidLinksParams);
                return NULL;
            }

            pRasterLockGroup = &pRasterLockGroups[*pNumRasterLockGroups - 1];

            BuildRasterLockGroupFromVideoLinks(&evoList,
                                               pRasterLockGroup,
                                               evoList.disps[i].gpuId,
                                               vidLinksParams);
        }

        nvFree(vidLinksParams);
        nvAssert(*pNumRasterLockGroups > 0);
        return pRasterLockGroups;
    }

    nvFree(vidLinksParams);
    nvFree(pRasterLockGroups);
    return NULL;
}
1302 
1303 /*
1304  * GetRasterLockGroups() - Determine which GPUs to consider for locking (or
1305  * unlocking) displays.  This is one of the following:
1306  * 1. SLI video bridge order, if SLI is enabled;
1307  * 2. GPUs linked through rasterlock pins, no SLI (like in clientSLI);
1308  * 3. A single GPU,
1309  * in that order.
1310  *
1311  * Note that we still go through the same codepaths for the last degenerate
1312  * case, in order to potentially lock heads on the same GPU together.
1313  */
1314 static RasterLockGroup *GetRasterLockGroups(
1315     NVDevEvoPtr pDevEvo,
1316     unsigned int *pNumRasterLockGroups)
1317 {
1318     unsigned int i;
1319     RasterLockGroup *pRasterLockGroups = NULL;
1320 
1321     *pNumRasterLockGroups = 0;
1322 
1323     if (pDevEvo->numSubDevices > 1 && pDevEvo->sli.bridge.present) {
1324         NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS params = { 0 };
1325         NvU32 ret;
1326 
1327         /* In SLI, with a video bridge.  Get the video bridge order from RM. */
1328 
1329         if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle,
1330                                   pDevEvo->deviceHandle,
1331                                   NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER,
1332                                   &params, sizeof(params)))
1333                 != NVOS_STATUS_SUCCESS) {
1334             nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
1335                              "NvRmControl(GET_VIDLINK_ORDER) failed; "
1336                              "ret: %d\n", ret);
1337             return NULL;
1338         }
1339 
1340         if (params.ConnectionCount > 0) {
1341             RasterLockGroup *pRasterLockGroup;
1342             pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups,
1343                                                     pNumRasterLockGroups);
1344 
1345             if (!pRasterLockGroups) {
1346                 return NULL;
1347             }
1348 
1349             pRasterLockGroup = &pRasterLockGroups[*pNumRasterLockGroups - 1];
1350 
1351             /*
1352              * For some reason this interface returns a mask instead of an
1353              * index, so we have to convert
1354              */
1355             for (i = 0; i < pDevEvo->numSubDevices; i++) {
1356                 NvU32 subDeviceMask = params.Order[i];
1357                 NvU32 sd = 0;
1358 
1359                 nvAssert(nvPopCount32(subDeviceMask) == 1);
1360 
1361                 if (!subDeviceMask) continue;
1362 
1363                 while (!(subDeviceMask & (1 << sd))) sd++;
1364 
1365                 nvAssert(sd < NVKMS_MAX_SUBDEVICES);
1366                 nvAssert(pDevEvo->pDispEvo[sd] != NULL);
1367 
1368                 /* SLI Mosaic. */
1369                 AddDispEvoIntoRasterLockGroup(pRasterLockGroup,
1370                                               pDevEvo->pDispEvo[sd]);
1371             }
1372         }
1373 
1374         if (*pNumRasterLockGroups > 0) {
1375             return pRasterLockGroups;
1376         }
1377     }
1378 
1379     /*
1380      * Client SLI: Create a RasterLockGroup from pDevEvo's only DispEvo
1381      * and other DispEvos potentially bridged to that.
1382      */
1383 
1384     if (pDevEvo->numSubDevices == 1) {
1385         /* Get-or-create cached RasterLockGroup for this device. */
1386         if (!globalRasterLockGroups) {
1387             globalRasterLockGroups =
1388                 GetRasterLockGroupsStateless(&numGlobalRasterLockGroups);
1389         }
1390 
1391         /* Look for a cached group containing this device's DispEvo. */
1392         if (globalRasterLockGroups && numGlobalRasterLockGroups > 0) {
1393             const RasterLockGroup *pRasterLockGroup =
1394                 FindRasterLockGroupForDispEvo(globalRasterLockGroups,
1395                                               numGlobalRasterLockGroups,
1396                                               pDevEvo->pDispEvo[0]);
1397 
1398             /* Make a copy of it and add to 'pRasterLockGroups'. */
1399             if (pRasterLockGroup) {
1400                 pRasterLockGroups =
1401                     CopyAndAppendRasterLockGroup(pRasterLockGroups,
1402                                                  pNumRasterLockGroups,
1403                                                  pRasterLockGroup);
1404             }
1405         }
1406 
1407         if (*pNumRasterLockGroups > 0) {
1408             return pRasterLockGroups;
1409         }
1410     }
1411 
1412     /*
1413      * Single GPU or bridgeless SLI. We create a group for each
1414      * individual DispEvo.
1415      */
1416 
1417     NVDispEvoPtr pDispEvo;
1418     unsigned int sd;
1419 
1420     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1421         RasterLockGroup *pRasterLockGroup;
1422         pRasterLockGroups = GrowRasterLockGroup(pRasterLockGroups,
1423                                                 pNumRasterLockGroups);
1424 
1425         if (!pRasterLockGroups) {
1426             return NULL;
1427         }
1428 
1429         pRasterLockGroup = &pRasterLockGroups[*pNumRasterLockGroups - 1];
1430 
1431         AddDispEvoIntoRasterLockGroup(pRasterLockGroup, pDispEvo);
1432     }
1433 
1434     return pRasterLockGroups;
1435 }
1436 
1437 /*
1438  * ApplyLockActionIfPossible() - Check if the given action is a valid
1439  * transition for this pEvoSubDev's state, and apply it if so.
1440  * Return TRUE if any hardware state needs to be updated, FALSE o.w.
1441  */
1442 static NvBool ApplyLockActionIfPossible(NVDispEvoPtr pDispEvo,
1443                                         NVEvoSubDevPtr pEvoSubDev,
1444                                         NVEvoLockAction action)
1445 {
1446     if (!pEvoSubDev) {
1447         return FALSE;
1448     }
1449 
1450     if (pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
1451                                   action, NULL)) {
1452         unsigned int i = 0;
1453         NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
1454         NvU32 head;
1455 
1456         for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
1457             if (nvHeadIsActive(pDispEvo, head)) {
1458                 pHeads[i++] = head;
1459             }
1460         }
1461         nvAssert(i <= NVKMS_MAX_HEADS_PER_DISP);
1462         pHeads[i] = NV_INVALID_HEAD;
1463 
1464         pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, pHeads);
1465 
1466         return TRUE;
1467     }
1468 
1469     return FALSE;
1470 
1471 } // ApplyLockActionIfPossible()
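
/*
 * Usage sketch (illustrative, mirroring the callers below): lock-state
 * transitions are bracketed by SyncEvoLockState()/UpdateEvoLockState():
 *
 *     SyncEvoLockState();
 *     if (ApplyLockActionIfPossible(pDispEvo, pEvoSubDev,
 *                                   NV_EVO_UNLOCK_HEADS)) {
 *         UpdateEvoLockState();
 *     }
 */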
1472 
1473 /*
1474  * Disable any intra-GPU lock state set up in FinishModesetOneDisp().
1475  * This assumes that any cross-GPU locking which may have been set up on this
1476  * GPU was already torn down.
1477  */
1478 static void UnlockRasterLockOneDisp(NVDispEvoPtr pDispEvo)
1479 {
1480     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1481     NvU32 sd = pDispEvo->displayOwner;
1482     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
1483     NvBool changed = FALSE;
1484 
1485     /* Initialize the assembly state */
1486     SyncEvoLockState();
1487 
1488     /* We want to evaluate all of these, so don't use || */
1489     changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev,
1490                                          NV_EVO_PROHIBIT_LOCK_DISABLE);
1491     changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev,
1492                                          NV_EVO_UNLOCK_HEADS);
1493 
1494     /* Update the hardware if anything has changed */
1495     if (changed) {
1496         UpdateEvoLockState();
1497     }
1498 
1499     pDispEvo->rasterLockPossible = FALSE;
1500 }
1501 
1502 /*
1503  * Call UnlockRasterLockOneDisp() for each disp on this device to tear down
1504  * intra-GPU locking on each.
1505  */
1506 static void UnlockRasterLockOneDev(NVDevEvoPtr pDevEvo)
1507 {
1508     NVDispEvoPtr pDispEvo;
1509     NvU32 sd;
1510 
1511     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1512         UnlockRasterLockOneDisp(pDispEvo);
1513     }
1514 }
1515 
1516 static void DisableLockGroupFlipLock(NVLockGroup *pLockGroup)
1517 {
1518 
1519     const RasterLockGroup *pRasterLockGroup = &pLockGroup->rasterLockGroup;
1520     NvU32 i;
1521 
1522     if (!pLockGroup->flipLockEnabled) {
1523         return;
1524     }
1525 
1526     for (i = 0; i < pRasterLockGroup->numDisps; i++) {
1527         NVEvoUpdateState updateState = { };
1528         NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
1529         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1530         NvU32 sd = pDispEvo->displayOwner;
1531         NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
1532         NvU32 head;
1533         NvBool changed = FALSE;
1534 
1535         for (head = 0; head < pDevEvo->numHeads; head++) {
1536             NvBool headChanged = FALSE;
1537             if (!nvHeadIsActive(pDispEvo, head)) {
1538                 continue;
1539             }
1540 
1541             /*
1542              * scanLockState transitions (such as nvEvoLockHWStateLockHeads)
1543              * will update headControlAssy values for all heads, so we should
1544              * update flipLock and flipLockPin for all heads as well.
1545              */
1546             NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head];
1547             /*
1548              * Reset the fliplock pin, if it's not in use for framelock,
1549              * and unregister our use of the fliplock pin
1550              */
1551             if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockPinSetForFrameLockHeadMask,
1552                                  head)) {
1553                 if (pHC->flipLockPin != NV_EVO_LOCK_PIN_INTERNAL(0)) {
1554                     pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
1555                     headChanged = TRUE;
1556                 }
1557             }
1558             pEvoSubDev->flipLockPinSetForSliHeadMask =
1559                 HEAD_MASK_UNSET(pEvoSubDev->flipLockPinSetForSliHeadMask,
1560                                 head);
1561 
1562             /*
1563              * Disable fliplock, if it's not in use for framelock, and
1564              * unregister our need for fliplock to be enabled
1565              */
1566             if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForFrameLockHeadMask,
1567                                  head)) {
1568                 if (pHC->flipLock) {
1569                     pHC->flipLock = FALSE;
1570                     headChanged = TRUE;
1571                 }
1572             }
1573             pEvoSubDev->flipLockEnabledForSliHeadMask =
1574                 HEAD_MASK_UNSET(pEvoSubDev->flipLockEnabledForSliHeadMask,
1575                                 head);
            if (headChanged) {
                EvoUpdateHeadParams(pDispEvo, head, &updateState);
                changed = TRUE;
            }
1579         }
1580         if (changed) {
1581             nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
1582                                   TRUE /* releaseElv */);
1583         }
1584     }
1585 
1586     pLockGroup->flipLockEnabled = FALSE;
1587 }
1588 
1589 /*
1590  * Unlock cross-GPU locking in the given lock group.
1591  */
1592 static void UnlockLockGroup(NVLockGroup *pLockGroup)
1593 {
1594     RasterLockGroup *pRasterLockGroup;
1595     int i;
1596 
1597     if (pLockGroup == NULL) {
1598         return;
1599     }
1600 
1601     pRasterLockGroup = &pLockGroup->rasterLockGroup;
1602 
1603     DisableLockGroupFlipLock(pLockGroup);
1604 
1605     for (i = (int)pRasterLockGroup->numDisps - 1; i >= 0; i--) {
1606         NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
1607         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1608         NvU32 sd = pDispEvo->displayOwner;
1609         NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
1610 
1611         /* Initialize the assembly state */
1612         SyncEvoLockState();
1613 
1614         if (ApplyLockActionIfPossible(pDispEvo, pEvoSubDev,
1615                                       NV_EVO_REM_SLI)) {
1616             /* Update the hardware if anything has changed */
1617             UpdateEvoLockState();
1618         }
1619 
1620         pEvoSubDev->flipLockProhibitedHeadMask = 0x0;
1621 
1622         nvAssert(pDispEvo->pLockGroup == pLockGroup);
1623         pDispEvo->pLockGroup = NULL;
1624     }
1625 
1626     /*
1627      * Disable any SLI video bridge features we may have enabled for locking.
1628      * This is a separate loop from the above in order to handle both cases:
1629      *
1630      * a) Multiple pDispEvos on the same pDevEvo (linked RM-SLI): all disps in
1631      *    the lock group will share the same pDevEvo.  In that case we should
1632      *    not call RM to disable the video bridge power across the entire
 *    device until we've disabled locking on all GPUs.  This loop will
1634      *    call nvEvoUpdateSliVideoBridge() redundantly for the same pDevEvo,
1635      *    but those calls will be filtered out.  (If we did this in the loop
1636      *    above, RM would broadcast the video bridge disable call to all pDisps
1637      *    on the first call, even before we've disabled locking on them.)
1638      *
1639      * b) Each pDispEvo on a separate pDevEvo (client-side SLI or no SLI, when
1640      *    a video bridge is present): in that case each pDispEvo has a separate
1641      *    pDevEvo, and we need to call nvEvoUpdateSliVideoBridge() on each.
1642      *    (It would be okay in this case to call nvEvoUpdateSliVideoBridge() in
1643      *    the loop above since it will only disable the video bridge power for
1644      *    one GPU at a time.)
1645      */
1646     for (i = (int)pRasterLockGroup->numDisps - 1; i >= 0; i--) {
1647         NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
1648         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1649 
1650         pDevEvo->sli.bridge.powerNeededForRasterLock = FALSE;
1651         nvEvoUpdateSliVideoBridge(pDevEvo);
1652     }
1653 
1654     nvFree(pLockGroup);
1655 }
1656 
1657 /*
1658  * Unlock all any cross-GPU locking in the rasterlock group(s) associated with
1659  * the given device.
1660  */
1661 static void UnlockLockGroupsForDevice(NVDevEvoPtr pDevEvo)
1662 {
1663     NVDispEvoPtr pDispEvo;
1664     NvU32 sd;
1665 
1666     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1667         UnlockLockGroup(pDispEvo->pLockGroup);
1668         nvAssert(pDispEvo->pLockGroup == NULL);
1669     }
1670 }
1671 
1672 void nvAssertAllDpysAreInactive(NVDevEvoPtr pDevEvo)
1673 {
1674     NVDispEvoPtr pDispEvo;
1675     int i;
1676 
1677     FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
1678         NvU32 head;
1679         for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
1680             nvAssert(!nvHeadIsActive(pDispEvo, head));
1681         }
1682     }
1683 }
1684 
1685 /*!
1686  * Disable locking-related state.
1687  */
1688 static void DisableLockState(NVDevEvoPtr pDevEvo)
1689 {
1690     NvU32 dispIndex;
1691     NVDispEvoPtr pDispEvo;
1692 
1693     /* Disable flip lock as requested by swap groups/framelock. */
1694 
1695     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1696         nvToggleFlipLockPerDisp(pDispEvo,
1697                                 nvGetActiveHeadMask(pDispEvo),
1698                                 FALSE /* enable */);
1699     }
1700 
1701     /* Disable any locking across GPUs. */
1702 
1703     UnlockLockGroupsForDevice(pDevEvo);
1704 
1705     /* Disable intra-GPU rasterlock on this pDevEvo. */
1706     UnlockRasterLockOneDev(pDevEvo);
1707 
1708     /* Reset the EVO locking state machine. */
1709 
1710     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1711         nvEvoStateAssertNoLock(&pDevEvo->gpus[pDispEvo->displayOwner]);
1712         nvEvoStateStartNoLock(&pDevEvo->gpus[pDispEvo->displayOwner]);
1713     }
1714 }
1715 
1716 void nvEvoLockStatePreModeset(NVDevEvoPtr pDevEvo)
1717 {
1718     DisableLockState(pDevEvo);
1719 }
1720 
1721 /*!
1722  * Set up raster lock between GPUs, if applicable.
1723  */
1724 void nvEvoLockStatePostModeset(NVDevEvoPtr pDevEvo, const NvBool doRasterLock)
1725 {
1726     RasterLockGroup *pRasterLockGroups, *pRasterLockGroup;
1727     unsigned int numRasterLockGroups;
1728 
1729     if (!doRasterLock) {
1730         return;
1731     }
1732 
1733     FinishModesetOneDev(pDevEvo);
1734 
1735     pRasterLockGroups = GetRasterLockGroups(pDevEvo, &numRasterLockGroups);
1736     if (!pRasterLockGroups) {
1737         return;
1738     }
1739 
1740     for (pRasterLockGroup = pRasterLockGroups;
1741          pRasterLockGroup < pRasterLockGroups + numRasterLockGroups;
1742          pRasterLockGroup++) {
1743         FinishModesetOneGroup(pRasterLockGroup);
1744     }
1745 
1746     nvFree(pRasterLockGroups);
1747 }
1748 
1749 /*!
1750  * Updates the hardware based on software needs tracked in pDevEvo->sli.bridge.
1751  * Call this function after changing any of those needs variables.
1752  */
1753 void nvEvoUpdateSliVideoBridge(NVDevEvoPtr pDevEvo)
1754 {
1755     NV0080_CTRL_GPU_SET_VIDLINK_PARAMS params = { 0 };
1756     const NvBool enable = pDevEvo->sli.bridge.powerNeededForRasterLock;
1757     NvU32 status;
1758 
1759     if (pDevEvo->sli.bridge.powered == enable) {
1760         return;
1761     }
1762 
1763     if (enable) {
1764         /* SLI should be prohibited earlier if no bridge is present. */
1765         nvAssert(pDevEvo->sli.bridge.present);
1766     }
1767 
1768     params.enable = enable ?
1769         NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_TRUE :
1770         NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_FALSE;
1771 
1772     status = nvRmApiControl(nvEvoGlobal.clientHandle,
1773                             pDevEvo->deviceHandle,
1774                             NV0080_CTRL_CMD_GPU_SET_VIDLINK,
1775                             &params, sizeof(params));
    if (status != NVOS_STATUS_SUCCESS) {
1777         nvAssert(!"NV0080_CTRL_CMD_GPU_SET_VIDLINK failed");
1778     }
1779 
1780     pDevEvo->sli.bridge.powered = enable;
1781 }
1782 
1783 /*
1784  * Check if VRR or MergeMode are enabled; if so, go into the special "prohibit
1785  * lock" mode which prevents other scanlock states from being reached.
1786  *
1787  * Return TRUE iff VRR or MergeMode is in use on this GPU.
1788  */
1789 static NvBool ProhibitLockIfNecessary(NVDispEvoRec *pDispEvo)
1790 {
1791     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1792     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
1793     NvU32 activeHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
1794     NvBool prohibitLock = FALSE;
1795     NvU32 numActiveHeads = 0;
1796     NvU32 head;
1797 
1798     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
1799         if (nvHeadIsActive(pDispEvo, head)) {
1800             activeHeads[numActiveHeads++] = head;
1801             if ((pDispEvo->headState[head].timings.vrr.type !=
1802                  NVKMS_DPY_VRR_TYPE_NONE)) {
1803                 prohibitLock = TRUE;
1804             }
1805 
1806             if (pDispEvo->headState[head].mergeMode !=
1807                     NV_EVO_MERGE_MODE_DISABLED) {
1808                 prohibitLock = TRUE;
1809             }
1810         }
1811     }
1812 
1813 
1814     if (prohibitLock) {
1815         activeHeads[numActiveHeads] = NV_INVALID_HEAD;
1816 
1817         SyncEvoLockState();
1818 
1819         if (!pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
1820                                        NV_EVO_PROHIBIT_LOCK,
1821                                        activeHeads)) {
1822             nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
1823                               "Failed to prohibit lock");
1824             return FALSE;
1825         }
1826 
1827         UpdateEvoLockState();
1828 
1829         return TRUE;
1830     }
1831     return FALSE;
1832 }
1833 
1834 
1835 /*
1836  * Set up rasterlock between heads on a single GPU, if certain conditions are met:
1837  * - Locking is not prohibited due to the active configuration
1838  * - All active heads have identical mode timings
1839  *
 * Set pDispEvo->rasterLockPossible to indicate whether rasterlock is possible
1841  * on this GPU, which will be used to determine if rasterlock is possible
1842  * between this GPU and other GPUs.
1843  * Note that this isn't the same as whether heads were locked: if fewer than
1844  * two heads were active, heads will not be locked but rasterlock with other
1845  * GPUs may still be possible.
1846  */
1847 static void FinishModesetOneDisp(
1848     NVDispEvoRec *pDispEvo)
1849 {
1850     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1851     NVEvoSubDevPtr pEvoSubDev;
1852     const NVDispHeadStateEvoRec *pPrevHeadState = NULL;
1853     NvU32 head, usedHeads = 0;
1854     NvU32 headsToLock[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
1855 
1856     if (pDevEvo->gpus == NULL) {
1857         return;
1858     }
1859 
1860     pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
1861 
1862     pDispEvo->rasterLockPossible = FALSE;
1863 
1864     if (ProhibitLockIfNecessary(pDispEvo)) {
1865         /* If locking is prohibited, do not attempt to lock heads. */
1866         return;
1867     }
1868 
1869     /*
1870      * Determine if rasterlock is possible: check each active display for
1871      * rasterlock compatibility with the previous one we looked at.  If any of
1872      * them aren't compatible, rasterlock is not possible.
1873      */
1874     pDispEvo->rasterLockPossible = TRUE;
1875     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
1876         const NVDispHeadStateEvoRec *pHeadState =
1877             &pDispEvo->headState[head];
1878 
1879         if (!nvHeadIsActive(pDispEvo, head)) {
1880             continue;
1881         }
1882 
1883         if (pPrevHeadState &&
1884             !RasterLockPossible(pHeadState, pPrevHeadState)) {
1885             pDispEvo->rasterLockPossible = FALSE;
1886             break;
1887         }
1888 
1889         pPrevHeadState = pHeadState;
1890 
1891         headsToLock[usedHeads] = head;
1892         usedHeads++;
1893     }
1894 
1895     if (!pDispEvo->rasterLockPossible) {
1896         return;
1897     }
1898 
1899     if (usedHeads > 1) {
1900         /* Terminate array */
1901         headsToLock[usedHeads] = NV_INVALID_HEAD;
1902 
1903         /* Initialize the assembly state */
1904         SyncEvoLockState();
1905 
1906         /* Set up rasterlock between heads on this disp. */
1907         nvAssert(headsToLock[0] != NV_INVALID_HEAD);
1908         if (!pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
1909                                        NV_EVO_LOCK_HEADS,
1910                                        headsToLock)) {
1911             nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
1912                               "Unable to lock heads");
1913             pDispEvo->rasterLockPossible = FALSE;
1914         }
1915 
1916         /* Update the hardware with the new state */
1917         UpdateEvoLockState();
1918     }
1919 }
1920 
1921 /* Call FinishModesetOneDisp() for each disp on this device to set up intra-GPU
1922  * locking on each. */
1923 static void FinishModesetOneDev(
1924     NVDevEvoRec *pDevEvo)
1925 {
1926     NVDispEvoPtr pDispEvo;
1927     NvU32 sd;
1928 
1929     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
1930         FinishModesetOneDisp(pDispEvo);
1931     }
1932 }
1933 
1934 /*
1935  * Enable fliplock for the specified pLockGroup.
1936  * This assumes that rasterlock was already enabled.
1937  */
1938 static void EnableLockGroupFlipLock(NVLockGroup *pLockGroup)
1939 {
1940     const RasterLockGroup *pRasterLockGroup = &pLockGroup->rasterLockGroup;
1941     NvU32 i;
1942 
1943     if (pRasterLockGroup->numDisps < 2) {
1944         /* TODO: enable fliplock for single GPUs */
1945         return;
1946     }
1947 
1948     pLockGroup->flipLockEnabled = TRUE;
1949 
1950     for (i = 0; i < pRasterLockGroup->numDisps; i++) {
1951         NVEvoUpdateState updateState = { };
1952         NVDispEvoPtr pDispEvo = pRasterLockGroup->pDispEvoOrder[i];
1953         NvU32 sd = pDispEvo->displayOwner;
1954         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
1955         NvU32 head;
1956 
1957         for (head = 0; head < pDevEvo->numHeads; head++) {
1958             NvU64 startTime = 0;
1959 
1960             if (!nvHeadIsActive(pDispEvo, head)) {
1961                 continue;
1962             }
1963 
1964             NVEvoLockPin pin =
1965                 nvEvoGetPinForSignal(pDispEvo, &pDevEvo->gpus[sd],
1966                                      NV_EVO_LOCK_SIGNAL_FLIP_LOCK);
1967 
            /* Wait for the raster lock to sync in. */
1969             if (pin == NV_EVO_LOCK_PIN_ERROR ||
1970                 !EvoWaitForLock(pDevEvo, sd, head, EVO_RASTER_LOCK,
1971                                 &startTime)) {
1972                 nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
1973                     "Timed out waiting for rasterlock; not enabling fliplock.");
1974                 goto fail;
1975             }
1976 
1977             /*
1978              * Enable fliplock, and register that we've enabled
1979              * fliplock for SLI to ensure it doesn't get disabled
1980              * later.
1981              */
1982             pDevEvo->gpus[sd].headControl[head].flipLockPin = pin;
1983             pDevEvo->gpus[sd].flipLockPinSetForSliHeadMask =
1984                 HEAD_MASK_SET(pDevEvo->gpus[sd].flipLockPinSetForSliHeadMask, head);
1985 
1986             pDevEvo->gpus[sd].headControl[head].flipLock = TRUE;
1987             pDevEvo->gpus[sd].flipLockEnabledForSliHeadMask =
1988                 HEAD_MASK_SET(pDevEvo->gpus[sd].flipLockEnabledForSliHeadMask, head);
1989 
1990             EvoUpdateHeadParams(pDispEvo, head, &updateState);
1991         }
1992 
1993          /*
1994          * This must be synchronous as EVO reports lock success if
1995          * locking isn't enabled, so we could race through the
1996          * WaitForLock check below otherwise.
1997          */
1998         nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
1999                               TRUE /* releaseElv */);
2000 
2001         /*
2002          * Wait for flip lock sync.  I'm not sure this is really
2003          * necessary, but the docs say to do this before attempting any
2004          * flips in the base channel.
2005          */
2006         for (head = 0; head < pDevEvo->numHeads; head++) {
2007             NvU64 startTime = 0;
2008 
2009             if (!nvHeadIsActive(pDispEvo, head)) {
2010                 continue;
2011             }
2012 
2013             if (!EvoWaitForLock(pDevEvo, sd, head, EVO_FLIP_LOCK,
2014                                 &startTime)) {
2015                 nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
2016                     "Timed out waiting for fliplock.");
2017                 goto fail;
2018             }
2019         }
2020     }
2021 
2022     return;
2023 fail:
2024     DisableLockGroupFlipLock(pLockGroup);
2025 }
2026 
2027 /*
2028  * FinishModesetOneGroup() - Set up raster lock between GPUs, if applicable,
2029  * for one RasterLockGroup.  Called in a loop from nvFinishModesetEvo().
2030  */
2031 
2032 static void FinishModesetOneGroup(RasterLockGroup *pRasterLockGroup)
2033 {
2034     NVDispEvoPtr *pDispEvoOrder = pRasterLockGroup->pDispEvoOrder;
2035     NvU32 numUsedGpus = 0;
2036     const NVDispHeadStateEvoRec *pPrevHeadState = NULL;
2037     NvBool headInUse[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP];
2038     NvBool rasterLockPossible = TRUE, foundUnused = FALSE;
2039     unsigned int i, j;
2040     NVLockGroup *pLockGroup = NULL;
2041 
2042     /* Don't attempt locking across GPUs if, on any individual GPU, rasterlock
2043      * isn't possible. */
2044     for (i = 0; i < pRasterLockGroup->numDisps; i++) {
2045         NVDispEvoPtr pDispEvo = pDispEvoOrder[i];
2046 
2047         if (!pDispEvo->rasterLockPossible) {
2048             return;
2049         }
2050     }
2051 
2052     nvkms_memset(headInUse, 0, sizeof(headInUse));
2053 
2054     /*
2055      * Next, figure out if we can perform cross-GPU locking and which
2056      * GPUs/heads we can use.  Only attempt locking if all heads across GPUs
2057      * have compatible timings and are consecutive in the video bridge order.
2058      */
2059     for (i = 0; i < pRasterLockGroup->numDisps; i++) {
2060         NVDispEvoPtr pDispEvo = pDispEvoOrder[i];
2061         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2062         NvU32 head;
2063 
2064         /*
2065          * We can't lock if there is an unused GPU between two used GPUs on the
2066          * video bridge chain.
2067          * We much check if pDevEvo->gpus is NULL in case we haven't been
2068          * through AllocDeviceObject for this pDev (yet?).
2069          */
2070         if (!HasActiveHeads(pDispEvo) ||
2071             !pDevEvo->gpus) {
2072             foundUnused = TRUE;
2073             continue;
2074         } else {
2075             if (foundUnused) {
2076                 rasterLockPossible = FALSE;
2077                 break;
2078             }
2079 
2080             numUsedGpus++;
2081         }
2082 
2083         /*
2084          * Compare modetimings for each active display with the previous one we
2085          * looked at.  If any of them don't match, punt on locking.
2086          */
2087         for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
2088             const NVDispHeadStateEvoRec *pHeadState =
2089                 &pDispEvo->headState[head];
2090 
2091             if (!nvHeadIsActive(pDispEvo, head)) {
2092                 continue;
2093             }
2094 
2095             if (pPrevHeadState &&
2096                 !RasterLockPossible(pHeadState, pPrevHeadState)) {
2097                 rasterLockPossible = FALSE;
2098                 goto exitHeadLoop;
2099             }
2100 
2101             headInUse[i][head] = TRUE;
2102 
2103             pPrevHeadState = pHeadState;
2104         }
2105 
2106 exitHeadLoop:
2107         if (!rasterLockPossible) {
2108             break;
2109         }
2110     }
2111 
2112     if (!rasterLockPossible || numUsedGpus == 0) {
2113         return;
2114     }
2115 
2116     /* Create a new lock group to store the current configuration */
2117     pLockGroup = nvCalloc(1, sizeof(*pLockGroup));
2118 
2119     if (pLockGroup == NULL) {
2120         return;
2121     }
2122 
2123     pLockGroup->rasterLockGroup = *pRasterLockGroup;
2124 
2125     /*
2126      * Finally, actually set up locking: go through the video bridge order
2127      * setting it up.
2128      */
2129     for (i = 0; i < pRasterLockGroup->numDisps; i++) {
2130         NVDispEvoPtr pDispEvo = pDispEvoOrder[i];
2131         NvU32 sd = pDispEvo->displayOwner;
2132         NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2133         NvU32 head[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
2134         unsigned int usedHeads = 0;
2135         NvBool gpusLocked = FALSE;
2136 
2137         /* Remember that we've enabled this lock group on this GPU. */
2138         nvAssert(pDispEvo->pLockGroup == NULL);
2139         pDispEvo->pLockGroup = pLockGroup;
2140 
2141         /* If we're past the end of the chain, stop applying locking below, but
2142          * continue this loop to assign pDispEvo->pLockGroup above. */
2143         if (i >= numUsedGpus) {
2144             continue;
2145         }
2146 
2147         /* Initialize the assembly state */
2148         SyncEvoLockState();
2149 
2150         for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) {
2151             if (headInUse[i][j]) {
2152 
2153                 head[usedHeads] = j;
2154 
2155                 usedHeads++;
2156             }
2157         }
2158         head[usedHeads] = NV_INVALID_HEAD;
2159 
2160         /* Then set up cross-GPU locking, if we have enough active GPUs */
2161         if (numUsedGpus > 1) {
2162             NVEvoLockAction action;
2163             NVEvoLockPin *pServerPin = &pDevEvo->gpus[sd].sliServerLockPin;
2164             NVEvoLockPin *pClientPin = &pDevEvo->gpus[sd].sliClientLockPin;
2165 
2166             *pServerPin = NV_EVO_LOCK_PIN_ERROR;
2167             *pClientPin = NV_EVO_LOCK_PIN_ERROR;
2168 
2169             if (i == 0) {
2170                 action = NV_EVO_ADD_SLI_PRIMARY;
2171             } else {
2172                 if (i == (numUsedGpus - 1)) {
2173                     action = NV_EVO_ADD_SLI_LAST_SECONDARY;
2174                 } else {
2175                     action = NV_EVO_ADD_SLI_SECONDARY;
2176                 }
2177             }
2178 
2179             if (action == NV_EVO_ADD_SLI_PRIMARY ||
2180                 action == NV_EVO_ADD_SLI_SECONDARY) {
2181                 /* Find pin for server to next */
2182                 NVDispEvoPtr pDispEvoNext = pDispEvoOrder[i + 1];
2183                 NvU32 headNext = 0;
2184 
2185                 for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) {
2186                     if (headInUse[i + 1][j]) {
2187                         headNext = j;
2188                         break;
2189                     }
2190                 }
2191 
2192                 GetRasterLockPin(pDispEvo, head[0],
2193                                  pDispEvoNext, headNext,
2194                                  pServerPin, NULL);
2195             }
2196 
2197             if (action == NV_EVO_ADD_SLI_SECONDARY ||
2198                 action == NV_EVO_ADD_SLI_LAST_SECONDARY) {
2199 
2200                 /* Find pin for client to prev */
2201                 NVDispEvoPtr pDispEvoPrev = pDispEvoOrder[i - 1];
2202                 NvU32 headPrev = 0;
2203 
2204                 for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) {
2205                     if (headInUse[i - 1][j]) {
2206                         headPrev = j;
2207                         break;
2208                     }
2209                 }
2210 
2211                 GetRasterLockPin(pDispEvo, head[0],
2212                                  pDispEvoPrev, headPrev,
2213                                  NULL, pClientPin);
2214             }
2215 
2216             if (!pDevEvo->gpus[sd].scanLockState(pDispEvo, &pDevEvo->gpus[sd],
2217                                                  action, head)) {
2218                 nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
2219                                   "Unable to set up SLI locking");
2220             } else {
2221                 gpusLocked = TRUE;
2222             }
2223         }
2224 
2225         /*
2226          * On certain GPUs, we need to enable the video bridge (MIO pads) when
2227          * enabling rasterlock.  Note that we don't disable in this function,
2228          * so if gpusLocked is true for any iteration of these loops, this bit
2229          * will be on.
2230          */
2231         if (gpusLocked && NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
2232                 NV0073_CTRL_SYSTEM_CAPS_RASTER_LOCK_NEEDS_MIO_POWER)) {
2233             pDevEvo->sli.bridge.powerNeededForRasterLock = TRUE;
2234             nvEvoUpdateSliVideoBridge(pDevEvo);
2235         }
2236 
2237         /* If anything changed, update the hardware */
2238         if (gpusLocked) {
2239             UpdateEvoLockState();
2240         }
2241     }
2242 
2243     /* Enable fliplock, if we can */
2244     EnableFlipLockIfRequested(pLockGroup);
2245 }
2246 
2247 /*
2248  * Check if the given LockGroup matches the given FlipLockRequestedGroup.
2249  * This is true if the flip lock heads match the currently-active
2250  * heads on all pDispEvos.
2251  */
2252 static NvBool CheckLockGroupMatchFlipLockRequestedGroup(
2253     const NVLockGroup *pLockGroup,
2254     const FlipLockRequestedGroup *pFLRG)
2255 {
2256     const RasterLockGroup *pRasterLockGroup = &pLockGroup->rasterLockGroup;
2257     NvU32 disp, requestedDisp;
2258 
2259     /* Verify the number of disps is the same. */
2260     NvU32 numRequestedDisps = 0;
2261     for (requestedDisp = 0;
2262          requestedDisp < ARRAY_LEN(pFLRG->disp);
2263          requestedDisp++) {
2264         const NVDispEvoRec *pRequestedDispEvo =
2265             pFLRG->disp[requestedDisp].pDispEvo;
2266         if (pRequestedDispEvo == NULL) {
2267             break;
2268         }
2269         numRequestedDisps++;
2270     }
2271     if (numRequestedDisps != pRasterLockGroup->numDisps) {
2272         return FALSE;
2273     }
2274 
2275     /*
2276      * For each disp in the rasterlock group:
2277      * - If there is no matching disp in the pFLRG, no match
2278      * - If the disp's active head mask doesn't match the pFLRG's requested
2279      *   head mask for that disp, no match
2280      * If none of the conditions above failed, then we have a match.
2281      */
2282     for (disp = 0; disp < pRasterLockGroup->numDisps; disp++) {
2283         const NVDispEvoRec *pDispEvo = pRasterLockGroup->pDispEvoOrder[disp];
2284         NvBool found = FALSE;
2285         for (requestedDisp = 0;
2286              requestedDisp < ARRAY_LEN(pFLRG->disp);
2287              requestedDisp++) {
2288             const NVDispEvoRec *pRequestedDispEvo =
2289                 pFLRG->disp[requestedDisp].pDispEvo;
2290             if (pRequestedDispEvo == NULL) {
2291                 break;
2292             }
2293             if (pRequestedDispEvo == pDispEvo) {
2294                 if (pFLRG->disp[requestedDisp].flipLockHeads !=
2295                     nvGetActiveHeadMask(pDispEvo)) {
2296                     return FALSE;
2297                 }
2298                 found = TRUE;
2299                 break;
2300             }
2301         }
2302         if (!found) {
2303             return FALSE;
2304         }
2305     }
2306 
2307     return TRUE;
2308 }
2309 
2310 /*
2311  * Check if any requested fliplock groups match this lockgroup; if so, enable
2312  * fliplock on the lockgroup.
2313  */
2314 static void EnableFlipLockIfRequested(NVLockGroup *pLockGroup)
2315 {
2316     FlipLockRequestedGroup *pFLRG;
2317     nvListForEachEntry(pFLRG, &requestedFlipLockGroups, listEntry) {
2318         if (CheckLockGroupMatchFlipLockRequestedGroup(pLockGroup, pFLRG)) {
2319             EnableLockGroupFlipLock(pLockGroup);
2320             break;
2321         }
2322     }
2323 }
2324 
2325 /*
2326  * Check if there is an active NVLockGroup that matches the given
2327  * FlipLockRequestedGroup.
2328  * "Matches" means that the NVLockGroup extends to the exact same GPUs as the
2329  * FlipLockRequestedGroup, and that the *active* heads on those GPUs exactly
2330  * match the heads requested in the FlipLockRequestedGroup.
2331  */
2332 static NVLockGroup *FindMatchingLockGroup(const FlipLockRequestedGroup *pFLRG)
2333 {
2334     /* If there is an active lock group that matches this pFLRG, it must also
2335      * be active on the first disp, so we don't need to bother looping over
2336      * all disps. */
2337     NVLockGroup *pLockGroup = pFLRG->disp[0].pDispEvo->pLockGroup;
2338 
2339     if (pLockGroup != NULL &&
2340         CheckLockGroupMatchFlipLockRequestedGroup(pLockGroup, pFLRG)) {
2341         return pLockGroup;
2342     }
2343     return NULL;
2344 }
2345 
2346 /* Disable any currently-active lock groups that match the given pFLRG */
2347 static void
2348 DisableRequestedFlipLockGroup(const FlipLockRequestedGroup *pFLRG)
2349 {
2350     NVLockGroup *pLockGroup = FindMatchingLockGroup(pFLRG);
2351     if (pLockGroup != NULL) {
2352         DisableLockGroupFlipLock(pLockGroup);
2353 
2354         nvAssert(!pLockGroup->flipLockEnabled);
2355     }
2356 }
2357 
2358 /*
2359  * Check if there is a currently-active rasterlock group that matches the
2360  * disps/heads of this FlipLockRequestedGroup.  If so, enable flip lock between
2361  * those heads.
2362  */
2363 static void
2364 EnableRequestedFlipLockGroup(const FlipLockRequestedGroup *pFLRG)
2365 {
2366     NVLockGroup *pLockGroup = FindMatchingLockGroup(pFLRG);
2367     if (pLockGroup != NULL) {
2368         EnableLockGroupFlipLock(pLockGroup);
2369     }
2370 }
2371 
2372 /*
2373  * Convert the given API head mask to a HW head mask, using the
2374  * currently-active API head->HW head mapping.
2375  */
2376 static NvU32 ApiHeadMaskToHwHeadMask(
2377     const NVDispEvoRec *pDispEvo,
2378     const NvU32 apiHeadMask)
2379 {
2380     const NvU32 numHeads = pDispEvo->pDevEvo->numHeads;
2381     NvU32 apiHead;
2382     NvU32 hwHeadMask = 0;
2383 
2384     for (apiHead = 0; apiHead < numHeads; apiHead++) {
2385         if ((apiHeadMask & (1 << apiHead)) != 0) {
2386             const NVDispApiHeadStateEvoRec *pApiHeadState =
2387                 &pDispEvo->apiHeadState[apiHead];
2388             if (nvApiHeadIsActive(pDispEvo, apiHead)) {
2389                 hwHeadMask |= pApiHeadState->hwHeadsMask;
2390             }
2391         }
2392     }
2393 
2394     return hwHeadMask;
2395 }
2396 
2397 /*
2398  * Return true if all main channels are idle on the heads specified in the
2399  * FlipLockRequestedGroup.
2400  */
2401 static NvBool CheckFlipLockGroupIdle(
2402     const FlipLockRequestedGroup *pFLRG)
2403 {
2404     NvU32 i;
2405 
2406     for (i = 0; i < ARRAY_LEN(pFLRG->disp); i++) {
2407         NVDispEvoPtr pDispEvo = pFLRG->disp[i].pDispEvo;
2408         if (pDispEvo != NULL) {
2409             NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2410             const NvU32 sd = pDispEvo->displayOwner;
2411             const NvU32 numHeads = pDevEvo->numHeads;
2412             NvU32 head;
2413 
2414             for (head = 0; head < numHeads; head++) {
2415                 NvBool isMethodPending;
2416                 if (!nvHeadIsActive(pDispEvo, head)) {
2417                     continue;
2418                 }
2419                 if (!pDevEvo->hal->IsChannelMethodPending(
2420                         pDevEvo,
2421                         pDevEvo->head[head].layer[NVKMS_MAIN_LAYER],
2422                         sd,
2423                         &isMethodPending) || isMethodPending) {
2424                     return FALSE;
2425                 }
2426             }
2427         }
2428     }
2429 
2430     return TRUE;
2431 }
2432 
2433 /*
2434  * Return true if all main channels are idle on each head in overlapping flip
2435  * lock groups.
2436  */
2437 static NvBool CheckOverlappingFlipLockRequestGroupsIdle(
2438     NVDevEvoRec *pDevEvo[NV_MAX_SUBDEVICES],
2439     const struct NvKmsSetFlipLockGroupRequest *pRequest)
2440 {
2441     NvU32 dev;
2442 
2443     /* Loop over the GPUs specified in this FlipLockGroupRequest */
2444     for (dev = 0; dev < NV_MAX_SUBDEVICES && pDevEvo[dev] != NULL; dev++) {
2445         NVDispEvoPtr pDispEvo;
2446         NvU32 sd;
2447 
2448         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo[dev]) {
2449             FlipLockRequestedGroup *pFLRG;
2450 
2451             if ((pRequest->dev[dev].requestedDispsBitMask & (1 << sd)) == 0) {
2452                 continue;
2453             }
2454 
2455             /*
2456              * For each specified GPU, search through existing requested
2457              * fliplock groups and find any that overlap with heads in this
2458              * request.
2459              *
2460              * Return FALSE if any overlapping fliplock groups are not idle.
2461              */
2462             nvListForEachEntry(pFLRG, &requestedFlipLockGroups, listEntry) {
2463                 NvU32 i;
2464                 for (i = 0; i < ARRAY_LEN(pFLRG->disp); i++) {
2465                     if (pFLRG->disp[i].pDispEvo == NULL) {
2466                         break;
2467                     }
2468                     if (pFLRG->disp[i].pDispEvo == pDispEvo) {
2469                         /* API heads requested for this disp by the client */
2470                         const NvU32 requestedApiHeadMask =
2471                             pRequest->dev[dev].disp[sd].requestedHeadsBitMask;
2472                         const NvU32 requestedHwHeadMask =
2473                             ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeadMask);
2474 
2475                         if ((requestedHwHeadMask &
2476                              pFLRG->disp[i].flipLockHeads) != 0) {
2477                             /* Match */
2478                             if (!CheckFlipLockGroupIdle(pFLRG)) {
2479                                 return FALSE;
2480                             }
2481                         }
2482                         break;
2483                     }
2484                 }
2485             }
2486         }
2487     }
2488 
2489     return TRUE;
2490 }
2491 
2492 /*
2493  * Disable and remove any FlipLockRequestGroups that contain any of the heads
2494  * in 'hwHeadsMask' on the given pDispEvo.
2495  */
2496 static void
2497 RemoveOverlappingFlipLockRequestGroupsOneDisp(
2498     NVDispEvoRec *pDispEvo,
2499     NvU32 hwHeadMask)
2500 {
2501     FlipLockRequestedGroup *pFLRG, *tmp;
2502 
2503     /*
2504      * For each specified GPU, search through existing requested
2505      * fliplock groups and find any that overlap with heads in this
2506      * request.
2507      *
2508      * For any that are found, disable fliplock and remove the
2509      * requested flip lock group.
2510      */
2511     nvListForEachEntry_safe(pFLRG, tmp, &requestedFlipLockGroups, listEntry) {
2512         NvU32 i;
2513 
2514         for (i = 0; i < ARRAY_LEN(pFLRG->disp); i++) {
2515             if (pFLRG->disp[i].pDispEvo == NULL) {
2516                 break;
2517             }
2518             if (pFLRG->disp[i].pDispEvo == pDispEvo) {
2519 
2520                 if ((hwHeadMask &
2521                      pFLRG->disp[i].flipLockHeads) != 0) {
2522                     /* Match */
2523                     DisableRequestedFlipLockGroup(pFLRG);
2524 
2525                     /* Remove from global list */
2526                     nvListDel(&pFLRG->listEntry);
2527                     nvFree(pFLRG);
2528                 }
2529                 break;
2530             }
2531         }
2532     }
2533 }
2534 
2535 /*
2536  * Disable and remove any FlipLockRequestGroups that contain any of the heads
2537  * specified in 'pRequest'.
2538  */
2539 static void
2540 RemoveOverlappingFlipLockRequestGroups(
2541     NVDevEvoRec *pDevEvo[NV_MAX_SUBDEVICES],
2542     const struct NvKmsSetFlipLockGroupRequest *pRequest)
2543 {
2544     NvU32 dev;
2545 
2546     /* Loop over the GPUs specified in this FlipLockGroupRequest */
2547     for (dev = 0; dev < NV_MAX_SUBDEVICES && pDevEvo[dev] != NULL; dev++) {
2548         NVDispEvoPtr pDispEvo;
2549         NvU32 sd;
2550 
2551         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo[dev]) {
2552             NvU32 requestedApiHeadMask, requestedHwHeadMask;
2553 
2554             if ((pRequest->dev[dev].requestedDispsBitMask & (1 << sd)) == 0) {
2555                 continue;
2556             }
2557 
2558             /* API heads requested for this disp by the client */
2559             requestedApiHeadMask =
2560                 pRequest->dev[dev].disp[sd].requestedHeadsBitMask;
2561             requestedHwHeadMask =
2562                 ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeadMask);
2563 
2564             RemoveOverlappingFlipLockRequestGroupsOneDisp(pDispEvo,
2565                                                           requestedHwHeadMask);
2566         }
2567     }
2568 }
2569 
2570 /*
2571  * Disable and remove any FlipLockRequestGroups that contain any of the heads
2572  * specified in 'pRequest'.
2573  */
2574 void nvEvoRemoveOverlappingFlipLockRequestGroupsForModeset(
2575     NVDevEvoPtr pDevEvo,
2576     const struct NvKmsSetModeRequest *pRequest)
2577 {
2578     NVDispEvoPtr pDispEvo;
2579     NvU32 sd;
2580 
2581     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
2582         NvU32 requestedApiHeadMask, requestedHwHeadMask;
2583 
2584         if ((pRequest->requestedDispsBitMask & (1 << sd)) == 0) {
2585             continue;
2586         }
2587 
2588         /* API heads requested for this disp by the client */
2589         requestedApiHeadMask =
2590             pRequest->disp[sd].requestedHeadsBitMask;
2591         requestedHwHeadMask =
2592             ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeadMask);
2593 
2594         RemoveOverlappingFlipLockRequestGroupsOneDisp(pDispEvo,
2595                                                       requestedHwHeadMask);
2596     }
2597 }
2598 
2599 /*!
2600  * Handle a NVKMS_IOCTL_SET_FLIPLOCK_GROUP request.  This assumes that the
2601  * request was already validated by nvkms.c:SetFlipLockGroup().
2602  *
2603  * param[in]  pDevEvo  Array of NVDevEvoPtr pointers, in the same order as
2604  *                     the deviceHandle were specified in the request.
2605  * param[in]  pRequest The ioctl request.
2606  */
2607 NvBool
2608 nvSetFlipLockGroup(NVDevEvoRec *pDevEvo[NV_MAX_SUBDEVICES],
2609                    const struct NvKmsSetFlipLockGroupRequest *pRequest)
2610 {
2611     FlipLockRequestedGroup *pFLRG = NULL;
2612 
    /* Construct the new FlipLockRequestedGroup first, so that if it fails we
     * can return before removing overlapping groups. */
2615     if (pRequest->enable) {
2616         NvU32 dev, disp;
2617 
2618         pFLRG = nvCalloc(1, sizeof(*pFLRG));
2619         if (pFLRG == NULL) {
2620             goto fail;
2621         }
2622 
2623         disp = 0;
2624         for (dev = 0; dev < NV_MAX_SUBDEVICES && pDevEvo[dev] != NULL; dev++) {
2625             NVDispEvoPtr pDispEvo;
2626             NvU32 sd;
2627 
2628             FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo[dev]) {
2629                 const NvU32 requestedApiHeads =
2630                     pRequest->dev[dev].disp[sd].requestedHeadsBitMask;
2631 
2632                 if ((pRequest->dev[dev].requestedDispsBitMask & (1 << sd)) == 0) {
2633                     continue;
2634                 }
2635 
2636                 if (disp >= ARRAY_LEN(pFLRG->disp)) {
2637                     nvAssert(!"FlipLockRequestedGroup::disp too short?");
2638                     goto fail;
2639                 }
2640 
2641                 pFLRG->disp[disp].pDispEvo = pDispEvo;
2642                 pFLRG->disp[disp].flipLockHeads =
2643                     ApiHeadMaskToHwHeadMask(pDispEvo, requestedApiHeads);
2644                 disp++;
2645             }
2646         }
2647 
2648         if (!CheckFlipLockGroupIdle(pFLRG)) {
2649             nvEvoLogDebug(EVO_LOG_ERROR,
2650                           "Failed to request flip lock: group not idle");
2651             goto fail;
2652         }
2653     }
2654 
2655     if (!CheckOverlappingFlipLockRequestGroupsIdle(pDevEvo, pRequest)) {
2656         nvEvoLogDebug(EVO_LOG_ERROR,
2657                       "Failed to request flip lock: overlapping group(s) not idle");
2658         goto fail;
2659     }
2660 
2661     RemoveOverlappingFlipLockRequestGroups(pDevEvo, pRequest);
2662 
2663     if (pFLRG) {
2664         nvListAdd(&pFLRG->listEntry, &requestedFlipLockGroups);
2665 
2666         EnableRequestedFlipLockGroup(pFLRG);
2667     }
2668 
2669     return TRUE;
2670 
2671 fail:
2672     nvFree(pFLRG);
2673     return FALSE;
2674 }
2675 
2676 NvBool nvSetUsageBoundsEvo(
2677     NVDevEvoPtr pDevEvo,
2678     const NvU32 sd,
2679     const NvU32 head,
2680     const struct NvKmsUsageBounds *pUsage,
2681     NVEvoUpdateState *updateState)
2682 {
2683     NvBool needCoreUpdate;
2684 
2685     nvPushEvoSubDevMask(pDevEvo, NVBIT(sd));
2686 
2687     needCoreUpdate = pDevEvo->hal->SetUsageBounds(pDevEvo, sd, head, pUsage,
2688                                                   updateState);
2689 
2690     nvPopEvoSubDevMask(pDevEvo);
2691 
2692     pDevEvo->gpus[sd].headState[head].usage = *pUsage;
2693 
2694     return needCoreUpdate;
2695 }
2696 
2697 void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo,
2698                                       NvU32 sd,
2699                                       NvU32 head,
2700                                       NvBool enable,
2701                                       NVEvoUpdateState *pUpdateState)
2702 {
2703     pDevEvo->gpus[sd].headState[head].
2704         disableMidFrameAndDWCFWatermark = !enable;
2705 
2706     if (pDevEvo->hal->EnableMidFrameAndDWCFWatermark == NULL) {
2707         nvEvoLogDev(pDevEvo,
2708                     EVO_LOG_ERROR,
2709                     "EnableMidFrameAndDWCFWatermark() is not defined");
2710         return;
2711     }
2712 
2713     pDevEvo->hal->EnableMidFrameAndDWCFWatermark(pDevEvo,
2714                                                  sd,
2715                                                  head,
2716                                                  enable,
2717                                                  pUpdateState);
2718 }
2719 
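/*!
 * Choose a default color space and bpc from the given format capabilities,
 * in fixed preference order: RGB 4:4:4, then YCbCr 4:4:4, then YCbCr 4:2:2,
 * each at its maximum supported bpc.  Return FALSE if none of the formats
 * is usable.
 */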
2720 NvBool nvGetDefaultColorSpace(
2721     const NVColorFormatInfoRec *pColorFormatsInfo,
2722     enum NvKmsDpyAttributeCurrentColorSpaceValue *pColorSpace,
2723     enum NvKmsDpyAttributeColorBpcValue *pColorBpc)
2724 {
2725     if (pColorFormatsInfo->rgb444.maxBpc !=
2726             NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
2727         *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
2728         *pColorBpc = pColorFormatsInfo->rgb444.maxBpc;
2729         return TRUE;
2730     }
2731 
2732     if (pColorFormatsInfo->yuv444.maxBpc !=
2733             NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
2734         *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444;
2735         *pColorBpc = pColorFormatsInfo->yuv444.maxBpc;
2736         return TRUE;
2737     }
2738 
2739     if (pColorFormatsInfo->yuv422.maxBpc !=
2740             NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
2741         *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422;
2742         *pColorBpc = pColorFormatsInfo->yuv422.maxBpc;
2743         return TRUE;
2744     }
2745 
2746     return FALSE;
2747 }
2748 
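/*!
 * Choose the color range for the given colorimetry/space/bpc combination:
 * 6 bpc RGB supports only full range (so BT2100, which requires limited
 * range, fails there); YUV color spaces and BT2100 colorimetry force
 * limited range; otherwise honor the requested range.  Return FALSE for
 * unsupported combinations.
 */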
2749 NvBool nvChooseColorRangeEvo(
2750     enum NvKmsOutputColorimetry colorimetry,
2751     const enum NvKmsDpyAttributeColorRangeValue requestedColorRange,
2752     const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
2753     const enum NvKmsDpyAttributeColorBpcValue colorBpc,
2754     enum NvKmsDpyAttributeColorRangeValue *pColorRange)
2755 {
2756     /* Hardware supports BPC_6 only for RGB */
2757     nvAssert((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) ||
2758                 (colorBpc != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6));
2759 
2760     if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) &&
2761             (colorBpc == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6)) {
2762         /* At depth 18 only RGB and full range are allowed */
2763         if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
2764             /* BT2100 requires limited color range */
2765             return FALSE;
2766         }
2767         *pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
2768     } else if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) ||
2769                (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) ||
2770                (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420) ||
2771                (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100)) {
2772         /* Both YUV and BT2100 colorimetry require limited color range. */
2773         *pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED;
2774     } else {
2775         *pColorRange = requestedColorRange;
2776     }
2777 
2778     return TRUE;
2779 }
2780 
2781 /*!
2782  * Choose current colorSpace and colorRange for the given dpy based on
2783  * the dpy's color format capailities, the given modeset parameters (YUV420
2784  * mode and output transfer function) and the requested color space and range.
2785  *
2786  * This needs to be called during a modeset as well as when the requested color
2787  * space or range have changed.
2788  *
2789  * If SW YUV420 mode is enabled, EVO HW is programmed with default (RGB color
2790  * space, FULL color range) values, and the real values are used in a
2791  * headSurface composite shader.
2792  */
2793 NvBool nvChooseCurrentColorSpaceAndRangeEvo(
2794     const NVDpyEvoRec *pDpyEvo,
2795     const NVHwModeTimingsEvo *pHwTimings,
2796     NvU8 hdmiFrlBpc,
2797     enum NvKmsOutputColorimetry colorimetry,
2798     const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace,
2799     const enum NvKmsDpyAttributeColorRangeValue requestedColorRange,
2800     enum NvKmsDpyAttributeCurrentColorSpaceValue *pCurrentColorSpace,
2801     enum NvKmsDpyAttributeColorBpcValue *pCurrentColorBpc,
2802     enum NvKmsDpyAttributeColorRangeValue *pCurrentColorRange)
2803 {
2804     enum NvKmsDpyAttributeCurrentColorSpaceValue newColorSpace =
2805         NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
2806     enum NvKmsDpyAttributeColorBpcValue newColorBpc =
2807         NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10;
2808     enum NvKmsDpyAttributeColorRangeValue newColorRange =
2809         NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
2810     const NVColorFormatInfoRec colorFormatsInfo =
2811         nvGetColorFormatInfo(pDpyEvo);
2812 
2813     // XXX HDR TODO: Handle other colorimetries
2814     // XXX HDR TODO: Handle YUV
2815     if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
2816         /*
2817          * If the head currently has BT2100 colorimetry, we override the
2818          * requested color space with RGB.  We cannot support yuv420Mode in
2819          * that configuration, so fail in that case.
2820          */
2821         if (pHwTimings->yuv420Mode != NV_YUV420_MODE_NONE) {
2822             return FALSE;
2823         }
2824 
2825         newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
2826         newColorBpc = colorFormatsInfo.rgb444.maxBpc;
2827     } else if (pHwTimings->yuv420Mode != NV_YUV420_MODE_NONE) {
2828         /*
2829          * If the current mode timing requires YUV420 compression, we override the
2830          * requested color space with YUV420.
2831          */
2832         newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420;
2833         newColorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
2834 
2835         nvAssert(colorFormatsInfo.rgb444.maxBpc >=
2836                     NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8);
2837     } else {
2838         /*
2839          * Note this is an assignment between different enum types. Checking the
2840          * value of requested colorSpace and then assigning the value to current
2841          * colorSpace, to avoid warnings about cross-enum assignment.
2842          */
2843         switch (requestedColorSpace) {
2844         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB:
2845             newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB;
2846             newColorBpc = colorFormatsInfo.rgb444.maxBpc;
2847             break;
2848         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422:
2849             newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422;
2850             newColorBpc = colorFormatsInfo.yuv422.maxBpc;
2851             break;
2852         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444:
2853             newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444;
2854             newColorBpc = colorFormatsInfo.yuv444.maxBpc;
2855             break;
2856         default:
2857             nvAssert(!"Invalid Requested ColorSpace");
2858         }
2859 
2860         if ((newColorBpc ==
2861                 NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) &&
2862             !nvGetDefaultColorSpace(&colorFormatsInfo, &newColorSpace,
2863                                     &newColorBpc)) {
2864             return FALSE;
2865         }
2866     }
2867 
2868     /*
2869      * Downgrade BPC if HDMI configuration does not support current selection
2870      * with TMDS or FRL.
2871      */
2872     if (nvDpyIsHdmiEvo(pDpyEvo) &&
2873         nvHdmiTimingsNeedFrl(pDpyEvo, pHwTimings, newColorBpc) &&
2874         (newColorBpc > hdmiFrlBpc))  {
2875 
2876         newColorBpc =
2877             hdmiFrlBpc ? hdmiFrlBpc : NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
2878         nvAssert(newColorBpc >= NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8);
2879     }
2880 
2881     // 10 BPC required for HDR
2882     // XXX HDR TODO: Handle other colorimetries
2883     // XXX HDR TODO: Handle YUV
2884     if ((colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) &&
2885         (newColorBpc < NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10)) {
2886         return FALSE;
2887     }
2888 
    if (!nvChooseColorRangeEvo(colorimetry, requestedColorRange, newColorSpace,
                               newColorBpc, &newColorRange)) {
        return FALSE;
    }
2892 
2893     *pCurrentColorSpace = newColorSpace;
2894     *pCurrentColorRange = newColorRange;
2895     *pCurrentColorBpc = newColorBpc;
2896 
2897     return TRUE;
2898 }
2899 
2900 void nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
2901     NVDispEvoPtr pDispEvo,
2902     const NvU32 head,
2903     enum NvKmsOutputColorimetry colorimetry,
2904     const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
2905     const enum NvKmsDpyAttributeColorRangeValue colorRange,
2906     NVEvoUpdateState *pUpdateState)
2907 {
2908     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
2909     NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
2910     const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo;
2911 
2912     nvAssert(pConnectorEvo != NULL);
2913 
2914     // XXX HDR TODO: Support more output colorimetries
2915     if (colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100) {
2916         nvAssert(pHeadState->timings.yuv420Mode == NV_YUV420_MODE_NONE);
2917         nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB);
2918         nvAssert(colorRange == NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED);
2919 
        pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_BT2020RGB;
2921         pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_LIMITED;
2922         pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB;
2923     } else if ((pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) &&
2924         (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)) {
2925         /*
2926          * In SW YUV420 mode, HW is programmed with RGB color space and full
2927          * color range.  The color space conversion and color range compression
2928          * happen in a headSurface composite shader.
2929          */
2930         pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
2931         pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL;
2932         pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB;
2933     } else {
2934 
2935         // Set default colorimetry to RGB and default color range to full
2936         pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
2937         pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL;
2938 
2939         // Set color format
2940         switch (colorSpace) {
2941         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
2942             pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB;
2943             break;
2944         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
2945             pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr444;
2946             break;
2947         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
2948             pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr422;
2949             break;
2950         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
2951             pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr420;
2952             break;
2953         default:
2954             nvAssert(!"unrecognized colorSpace");
2955         }
2956 
2957         switch (pConnectorEvo->legacyType) {
2958         case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP:
2959             // program HW with RGB/YCbCr
2960             switch (colorSpace) {
2961             case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
2962                 pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
2963                 break;
2964             case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
2965             case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
2966             case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
2967                 if (nvEvoIsHDQualityVideoTimings(&pHeadState->timings)) {
2968                     pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_709;
2969                 } else {
2970                     pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_601;
2971                 }
2972                 break;
2973             default:
2974                 nvAssert(!"unrecognized colorSpace");
2975             }
2976             break;
2977         case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT:
2978             // colorSpace isn't used for DEVICE_TYPE_CRT and
2979             // hence should be set to the "unchanged" value
2980             // (i.e. the default - RGB)
2981             nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB);
2982 
2983             // program HW with RGB only
2984             pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB;
2985             break;
2986         default:
2987             nvAssert(!"ERROR: invalid pDpyEvo->type");
2988         }
2989 
        /* YCbCr444 should be advertised only for DisplayPort and HDMI */
2991         nvAssert((colorSpace != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) ||
2992                     nvConnectorUsesDPLib(pConnectorEvo) ||
2993                     pConnectorEvo->isHdmiEnabled);
2994 
        /* YCbCr422 should be advertised only for HDMI and DP on supported GPUs */
        nvAssert((colorSpace != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) ||
                 ((pDevEvo->caps.hdmiYCbCr422MaxBpc != 0) &&
                  pConnectorEvo->isHdmiEnabled) ||
                 ((pDevEvo->caps.dpYCbCr422MaxBpc != 0) &&
                  nvConnectorUsesDPLib(pConnectorEvo)));
3001 
3002         switch (colorRange) {
3003         case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL:
3004             pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL;
3005             break;
3006         case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED:
3007             pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_LIMITED;
3008             break;
3009         default:
3010             nvAssert(!"Invalid colorRange");
3011             break;
3012         }
3013     }
3014 
3015     // In YUV colorimetry, only limited color range is allowed.
3016     nvAssert(!((pHeadState->procAmp.colorimetry != NVT_COLORIMETRY_RGB) &&
3017                (pHeadState->procAmp.colorRange != NVT_COLOR_RANGE_LIMITED)));
3018 
3019     // Limited color range is not allowed with 18bpp mode
3020     nvAssert(!((pHeadState->pixelDepth == NVKMS_PIXEL_DEPTH_18_444) &&
3021                (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED)));
3022 
3023     nvPushEvoSubDevMaskDisp(pDispEvo);
3024 
3025     // Set the procamp head method
3026     pDevEvo->hal->SetProcAmp(pDispEvo, head, pUpdateState);
3027 
3028     // Clean up
3029     nvPopEvoSubDevMask(pDevEvo);
3030 }
3031 
3032 void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo,
3033                            const NvU32 head, NVEvoUpdateState *pUpdateState)
3034 {
3035     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
3036     const NVDispHeadStateEvoPtr pHeadState = &pDispEvo->headState[head];
3037     const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
3038     const enum nvKmsPixelDepth pixelDepth = pHeadState->pixelDepth;
3039     NvBool colorSpaceOverride = FALSE;
3040 
3041     /*
3042      * Determine whether or not this dpy will need its color space
3043      * overridden.
3044      *
3045      * This is currently only used for DP 1.3 YUV420 mode, where the
3046      * HW's normal support for carrying color space information
3047      * together with the frame is insufficient.
3048      */
3049     if ((pTimings->yuv420Mode == NV_YUV420_MODE_SW) &&
3050         nvConnectorUsesDPLib(pHeadState->pConnectorEvo)) {
3051 
3052         nvAssert(pDispEvo->pDevEvo->caps.supportsDP13);
3053         colorSpaceOverride = TRUE;
3054     }
3055 
3056     // Only set up the actual output for SLI primary.
3057     nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);
3058 
3059     pDevEvo->hal->HeadSetControlOR(pDevEvo, head, pTimings, pixelDepth,
3060                                    colorSpaceOverride,
3061                                    pUpdateState);
3062 
3063     nvPopEvoSubDevMask(pDevEvo);
3064 }
3065 
3066 static const struct {
3067     NvU32 algo;
3068     enum NvKmsDpyAttributeCurrentDitheringModeValue nvKmsDitherMode;
3069 } ditherModeTable[] = {
3070     { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2,
3071       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2 },
3072     { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2,
3073       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2 },
3074     { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL,
3075       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL },
3076     { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN,
3077       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE }
3078 };
3079 
3080 static const struct {
3081     NvU32 type;
3082     enum NvKmsDpyAttributeCurrentDitheringDepthValue nvKmsDitherDepth;
3083 } ditherDepthTable[] = {
3084     { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS,
3085       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS },
3086     { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS,
3087       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS },
3088     { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF,
3089       NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE }
3090 };
3091 
3092 /*!
3093  * Choose dithering based on the requested dithering config
3094  * NVConnectorEvo::or::dither.
3095  */
3096 void nvChooseDitheringEvo(
3097     const NVConnectorEvoRec *pConnectorEvo,
3098     enum NvKmsDpyAttributeColorBpcValue bpc,
3099     const NVDpyAttributeRequestedDitheringConfig *pReqDithering,
3100     NVDpyAttributeCurrentDitheringConfig *pCurrDithering)
3101 {
3102     NvU32 i;
3103     NVDpyAttributeCurrentDitheringConfig currDithering = {
3104         .enabled = FALSE,
3105         .mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE,
3106         .depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE,
3107     };
3108 
3109     currDithering.enabled = (pConnectorEvo->or.ditherType !=
3110                                 NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF);
3111 
3112     for (i = 0; i < ARRAY_LEN(ditherDepthTable); i++) {
3113         if (ditherDepthTable[i].type == pConnectorEvo->or.ditherType) {
3114             currDithering.depth = ditherDepthTable[i].nvKmsDitherDepth;
3115             break;
3116         }
3117     }
3118 
3119     for (i = 0; i < ARRAY_LEN(ditherModeTable); i++) {
3120         if (ditherModeTable[i].algo == pConnectorEvo->or.ditherAlgo) {
3121             currDithering.mode = ditherModeTable[i].nvKmsDitherMode;
3122             break;
3123         }
3124     }
3125 
3126     switch (pReqDithering->state) {
3127     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED:
3128         currDithering.enabled = TRUE;
3129         break;
3130     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED:
3131         currDithering.enabled = FALSE;
3132         break;
3133     default:
3134         nvAssert(!"Unknown Dithering configuration");
3135         // Fall through
3136     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO:
3137         /*
3138          * Left it initialized
3139          * based on value NVDpyEvoRec::or::dither::init::enabled.
3140          */
3141         break;
3142     }
3143 
3144     switch (pReqDithering->depth) {
3145     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS:
3146         currDithering.depth =
3147             NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS;
3148         break;
3149     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS:
3150         currDithering.depth =
3151             NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS;
3152         break;
3153     default:
3154         nvAssert(!"Unknown Dithering Depth");
3155         // Fall through
3156     case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO:
3157         /*
3158          * Left it initialized
3159          * based on value NVDpyEvoRec::or::dither::init::type.
3160          */
3161         break;
3162     }
3163 
3164 
3165     if (nvConnectorUsesDPLib(pConnectorEvo) &&
3166         (pReqDithering->state !=
3167             NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED)) {
3168         NvU32 lutBits = 11;
3169 
        /* If we are using a DisplayPort panel whose bandwidth constraints
         * lower the color depth, take that into account when applying
         * dithering.
         */
3174         if (bpc == 0) {
3175             nvAssert(!"Unknown dpBits");
3176             bpc = 8;
3177         }
3178 
3179         /*
3180          * If fewer than 8 DP bits are available, dither.  Ideally we'd
3181          * dither from lutBits > 10 to 10 bpc, but EVO doesn't have an
3182          * option for that.
3183          *
3184          * XXX TODO: nvdisplay can dither to 10 bpc.
3185          */
3186         if ((bpc <= 8) && (lutBits > bpc)) {
3187             if (pReqDithering->state ==
3188                     NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO) {
3189                 currDithering.enabled = TRUE;
3190             }
3191         }
3192 
3193         if (pReqDithering->depth ==
3194                 NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO) {
3195             if (bpc <= 6) {
3196                 currDithering.depth =
3197                     NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS;
3198             } else if (bpc <= 8) {
3199                 currDithering.depth =
3200                     NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS;
3201             }
3202         }
3203     }
3204 
3205     if (currDithering.enabled) {
3206         switch (pReqDithering->mode) {
3207         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL:
3208             currDithering.mode =
3209                 NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL;
3210             break;
3211         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2:
3212             currDithering.mode =
3213                 NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2;
3214             break;
3215         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2:
3216             currDithering.mode =
3217                 NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2;
3218             break;
3219         default:
3220             nvAssert(!"Unknown Dithering Mode");
3221             // Fall through
3222         case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO:
3223             /*
3224              * Left it initialized
3225              * based on value NVDpyEvoRec::or::dither::init::algo.
3226              */
3227             break;
3228         }
3229     } else {
3230         currDithering.depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE;
3231         currDithering.mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE;
3232     }
3233 
3234     *pCurrDithering = currDithering;
3235 }
3236 
3237 void nvSetDitheringEvo(
3238     NVDispEvoPtr pDispEvo,
3239     const NvU32 head,
3240     const NVDpyAttributeCurrentDitheringConfig *pCurrDithering,
3241     NVEvoUpdateState *pUpdateState)
3242 {
3243     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
3244     NvU32 i;
3245     NvU32 algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN;
3246     NvU32 type = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF;
3247     NvU32 enabled = pCurrDithering->enabled;
3248 
3249     for (i = 0; i < ARRAY_LEN(ditherModeTable); i++) {
3250         if (ditherModeTable[i].nvKmsDitherMode == pCurrDithering->mode) {
3251             algo = ditherModeTable[i].algo;
3252             break;
3253         }
3254     }
3255     nvAssert(i < ARRAY_LEN(ditherModeTable));
3256 
3257     for (i = 0; i < ARRAY_LEN(ditherDepthTable); i++) {
3258         if (ditherDepthTable[i].nvKmsDitherDepth == pCurrDithering->depth) {
3259             type = ditherDepthTable[i].type;
3260             break;
3261         }
3262     }
3263     nvAssert(i < ARRAY_LEN(ditherDepthTable));
3264 
3265     /*
3266      * Make sure algo is a recognizable value that we will be able to program
3267      * in hardware.
3268      */
3269     if (algo == NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN) {
3270         algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2;
3271     }
3272 
3273     nvPushEvoSubDevMaskDisp(pDispEvo);
3274     pDevEvo->hal->SetDither(pDispEvo, head, enabled, type, algo,
3275                             pUpdateState);
3276     nvPopEvoSubDevMask(pDevEvo);
3277 }
3278 
3279 /*
3280  * HeadCanStereoLock() - Return whether or not this head can use stereo lock
3281  * mode.  This can only be called from UpdateEvoLockState, when the pending
3282  * interlaced/locked values are still in the head control assembly structure.
3283  */
3284 static NvBool HeadCanStereoLock(NVDevEvoPtr pDevEvo, int sd, int head)
3285 {
3286     NVEvoHeadControlPtr pHC = &pDevEvo->gpus[sd].headControlAssy[head];
3287 
3288     return (!pHC->interlaced && !pHC->mergeMode &&
3289             ((pHC->serverLock != NV_EVO_NO_LOCK) ||
3290              (pHC->clientLock != NV_EVO_NO_LOCK)));
3291 }
3292 
3293 /*
3294  * SetStereoLockMode() - For stereo lock mode, we need to notify
3295  * the gsync board that this GPU requires stereo lock mode.
3296  */
3297 static NvBool SetStereoLockMode(NVDispEvoPtr pDispEvo, NvBool stereoLocked)
3298 {
3299     NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS
3300         statusParams = { 0 };
3301     NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
3302 
3303     if (!pFrameLockEvo ||
3304         ((pFrameLockEvo->boardId != NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2060) &&
3305         (pFrameLockEvo->boardId != NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061))) {
3306         return TRUE;
3307     }
3308 
3309     statusParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
3310     statusParams.enable = stereoLocked ? 1 : 0;
3311 
3312     if (nvRmApiControl(nvEvoGlobal.clientHandle,
3313                        pFrameLockEvo->device,
3314                        NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE,
3315                        &statusParams,
3316                        sizeof(statusParams)) != NVOS_STATUS_SUCCESS) {
3317         nvAssert(!"Failed to set stereo lock mode");
3318         return FALSE;
3319     }
3320 
3321     return TRUE;
3322 }
3323 
3324 /*
3325  * SyncEvoLockState()
3326  *
3327  * Set the Assembly state based on the current Armed state.  This should be
3328  * called before transitioning between states in the EVO state machine.
3329  */
3330 static void SyncEvoLockState(void)
3331 {
3332     NVDispEvoPtr pDispEvo;
3333     unsigned int sd;
3334     NVDevEvoPtr pDevEvo;
3335 
3336     FOR_ALL_EVO_DEVS(pDevEvo) {
3337 
3338         if (!pDevEvo->gpus) {
3339             continue;
3340         }
3341 
3342         if (pDevEvo->displayHandle == 0) {
3343             continue;
3344         }
3345 
3346         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
3347             NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
3348             NvU32 updateHeadMask = nvGetActiveHeadMask(pDispEvo);
3349             unsigned int head;
3350 
3351             /* Update the cached HEAD_SET_CONTROL EVO method state */
3352             FOR_ALL_HEADS(head, updateHeadMask) {
3353                 pEvoSubDev->headControlAssy[head] =
3354                     pEvoSubDev->headControl[head];
3355 
3356                 /*
3357                  * The following are probably not necessary, since no other
3358                  * code touches them (as opposed to headControl above which
3359                  * is updated beyond the scope of the state machine).  But
3360                  * update them here anyway to be consistent.
3361                  */
3362                 pEvoSubDev->frameLockClientMaskAssy =
3363                     pEvoSubDev->frameLockClientMaskArmed;
3364                 pEvoSubDev->frameLockServerMaskAssy =
3365                     pEvoSubDev->frameLockServerMaskArmed;
3366                 pEvoSubDev->frameLockExtRefClkMaskAssy =
3367                     pEvoSubDev->frameLockExtRefClkMaskArmed;
3368             }
3369         }
3370     }
3371 }
3372 
3373 /*
3374  * Determine a unique index for the given (pDevEvo, sd) tuple.
3375  * This is used to index into an array of size NV_MAX_DEVICES.
3376  *
3377  * It would be more straightforward to use a two-dimensional array of
3378  * NV_MAX_DEVICES x NV_MAX_SUBDEVICES and index by (devIndex, sd), but
3379  * that makes the array too large to fit on the stack.  This is safe because
3380  * we should only ever have at most NV_MAX_DEVICES GPUs in the system
3381  * total, although at any given time they may be split into many single-GPU
 * devices or a small number of many-GPU SLI devices.
3383  */
3384 static NvU32 GpuIndex(const NVDevEvoRec *pDevEvo, NvU32 sd)
3385 {
3386     const NVDevEvoRec *pDevEvoIter;
3387     NvU32 index = 0;
3388 
3389     nvAssert(sd < pDevEvo->numSubDevices);
3390 
3391     FOR_ALL_EVO_DEVS(pDevEvoIter) {
3392         if (pDevEvoIter == pDevEvo) {
3393             index += sd;
3394             nvAssert(index < NV_MAX_DEVICES);
3395             return index;
3396         }
        index += pDevEvoIter->numSubDevices;
3398     }
3399 
3400     nvAssert(!"Failed to look up GPU index");
3401     return 0;
3402 }
3403 
3404 NvU32 nvGetRefreshRate10kHz(const NVHwModeTimingsEvo *pTimings)
3405 {
3406     const NvU32 totalPixels = pTimings->rasterSize.x * pTimings->rasterSize.y;
3407 
3408     /*
3409      * pTimings->pixelClock is in 1000/s
3410      * we want 0.0001/s
3411      * factor = 1000/0.0001 = 10000000.
3412      */
3413     NvU32 factor = 10000000;
3414 
3415     if (pTimings->doubleScan) factor /= 2;
3416     if (pTimings->interlaced) factor *= 2;
3417 
3418     if (totalPixels == 0) {
3419         return 0;
3420     }
3421 
3422     return axb_div_c(pTimings->pixelClock, factor, totalPixels);
3423 }
3424 
3425 /*!
3426  * Get the current refresh rate for the heads in headMask, in 0.0001 Hz units.
3427  * All heads in headMask are expected to have the same refresh rate.
3428  */
3429 static NvU32 GetRefreshRateHeadMask10kHz(const NVDispEvoRec *pDispEvo,
3430                                          NvU32 headMask)
3431 {
3432     const NVHwModeTimingsEvo *pTimings = NULL;
3433     NvU32 head;
3434 
3435     FOR_ALL_HEADS(head, headMask) {
3436         const NVDispHeadStateEvoRec *pHeadState =
3437             &pDispEvo->headState[head];
3438 
        if (head >= pDispEvo->pDevEvo->numHeads ||
                pHeadState->activeRmId == 0x0) {
3441             continue;
3442         }
3443 
3444         if (pTimings == NULL) {
3445             pTimings = &pHeadState->timings;
3446         } else {
3447             nvAssert(pTimings->rasterSize.x ==
3448                         pHeadState->timings.rasterSize.x);
3449             nvAssert(pTimings->rasterSize.y ==
3450                         pHeadState->timings.rasterSize.y);
3451             nvAssert(pTimings->doubleScan == pHeadState->timings.doubleScan);
3452             nvAssert(pTimings->interlaced == pHeadState->timings.interlaced);
3453             nvAssert(pTimings->pixelClock == pHeadState->timings.pixelClock);
3454         }
3455     }
3456 
3457     if (pTimings == NULL) {
3458         return 0;
3459     }
3460 
3461     return nvGetRefreshRate10kHz(pTimings);
3462 }
3463 
3464 /*!
3465  * Return a the mask of RmIds from the heads mask.
3466  */
3467 static NvU32 HeadMaskToActiveRmIdMask(const NVDispEvoRec *pDispEvo,
3468                                       const NvU32 headMask)
3469 {
3470     NvU32 head;
3471     NvU32 rmDisplayMask = 0;
3472 
3473     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
3474         if ((NVBIT(head) & headMask) != 0x0) {
3475             rmDisplayMask |=
3476                 pDispEvo->headState[head].activeRmId;
3477         }
3478     }
3479 
3480     return rmDisplayMask;
3481 }
3482 
static NvBool FramelockSetControlSync(NVDispEvoPtr pDispEvo, const NvU32 headMask,
                                      NvBool server)
3485 {
3486     NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
3487     NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS gsyncSetControlSyncParams = { 0 };
3488     NvU32 ret;
3489 
3490     /* There can only be one server. */
3491 
3492     nvAssert(!server || (nvPopCount32(headMask) == 1));
3493 
3494     gsyncSetControlSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
3495     gsyncSetControlSyncParams.master = server;
3496     gsyncSetControlSyncParams.displays =
3497         HeadMaskToActiveRmIdMask(pDispEvo, headMask);
3498 
3499     if (gsyncSetControlSyncParams.displays == 0x0) {
3500         return FALSE;
3501     }
3502 
3503     gsyncSetControlSyncParams.refresh =
3504         GetRefreshRateHeadMask10kHz(pDispEvo, headMask);
3505 
3506     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3507                          pFrameLockEvo->device,
3508                          NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC,
3509                          &gsyncSetControlSyncParams,
3510                          sizeof(gsyncSetControlSyncParams));
3511 
3512     if (ret != NVOS_STATUS_SUCCESS) {
3513         return FALSE;
3514     }
3515 
3516     return TRUE;
3517 }
3518 
3519 NvBool nvFramelockSetControlUnsyncEvo(NVDispEvoPtr pDispEvo, const NvU32 headMask,
3520                                       NvBool server)
3521 {
3522     NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
3523     NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS
3524         gsyncSetControlUnsyncParams = { 0 };
3525     NvU32 ret;
3526 
3527     gsyncSetControlUnsyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
3528     gsyncSetControlUnsyncParams.master = server;
3529     gsyncSetControlUnsyncParams.displays =
3530         HeadMaskToActiveRmIdMask(pDispEvo, headMask);
3531 
3532     if (gsyncSetControlUnsyncParams.displays == 0x0) {
3533         return FALSE;
3534     }
3535 
3536     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
3537                          pFrameLockEvo->device,
3538                          NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC,
3539                          &gsyncSetControlUnsyncParams,
3540                          sizeof(gsyncSetControlUnsyncParams));
3541 
3542     if (ret != NVOS_STATUS_SUCCESS) {
3543         return FALSE;
3544     }
3545 
3546     return TRUE;
3547 }
3548 
3549 /*
3550  * UpdateEvoLockState()
3551  *
3552  * Update the hardware based on the Assembly state, if it is different from the
3553  * current Armed state.  This should be called after transitioning through
3554  * states in the EVO state machine to propagate all of the necessary values to
3555  * HW.
3556  */
3557 static void UpdateEvoLockState(void)
3558 {
3559     NVDispEvoPtr pDispEvo;
3560     NVFrameLockEvoPtr pFrameLockEvo;
3561     unsigned int sd;
3562     NVDevEvoPtr pDevEvo;
3563     NvBool ret;
3564     enum {
3565         FIRST_ITERATION,
3566         DISABLE_UNNEEDED_CLIENTS = FIRST_ITERATION,
3567         DISABLE_UNNEEDED_SERVER,
3568         COMPUTE_HOUSE_SYNC,
3569         UPDATE_HOUSE_SYNC,
3570         ENABLE_SERVER,
3571         ENABLE_CLIENTS,
3572         LAST_ITERATION = ENABLE_CLIENTS,
3573     } iteration;
3574     struct {
3575         unsigned char disableServer:1;
3576         unsigned char disableClient:1;
3577         unsigned char enableServer:1;
3578         unsigned char enableClient:1;
3579     } cache[NV_MAX_DEVICES][NVKMS_MAX_HEADS_PER_DISP];
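
    /*
     * cache[][] is indexed by the flattened GPU index from GpuIndex() and by
     * head; it records which framelock server/client transitions actually
     * took effect, so that GLS can be notified after the hardware state has
     * been updated (see the final set of loops below).
     */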
3580 
3581     nvkms_memset(cache, 0, sizeof(cache));
3582 
    /* XXX NVKMS TODO: idle base channel first? */
3584 
3585     /*
     * Stereo lock mode is enabled if all heads are either raster locked or
     * frame locked, and no head is driving an interlaced mode.
3588      */
3589     FOR_ALL_EVO_DEVS(pDevEvo) {
3590         if (!pDevEvo->gpus) {
3591             continue;
3592         }
3593         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
3594             NvBool gpuCanStereoLock = TRUE;
3595             NvBool testedOneHead = FALSE;
3596 
3597             /*
3598              * If at least one head is not locked or driving an interlaced
3599              * mode, then no heads on this GPU will use stereo lock mode.
3600              */
3601             NvU32 head;
3602             for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
3603                 NVEvoHeadControlPtr pHC = &pDevEvo->gpus[sd].headControlAssy[head];
3604 
3605                 if (!nvHeadIsActive(pDispEvo, head) ||
                    ((pHC->serverLock == NV_EVO_NO_LOCK) &&
                     (pHC->clientLock == NV_EVO_NO_LOCK))) {
3608                     /*
3609                      * If the heads aren't scan locked then we should skip
3610                      * them as if they aren't connected. NOTE this
3611                      * conservative approach means that we won't disable
3612                      * StereoLockMode when frameLock is turned off. This
3613                      * should be harmless.
3614                      */
3615                     continue;
3616                 }
3617                 testedOneHead = TRUE;
3618                 if (!HeadCanStereoLock(pDevEvo, sd, head)) {
3619                     gpuCanStereoLock = FALSE;
3620                 }
3621             }
3622             /*
3623              * Don't set StereoLockMode for screenless GPUs. As above we'll also
3624              * count heads that can't stereoLock as unconnected.
3625              */
3626             if (!testedOneHead) {
3627                 continue;
3628             }
3629 
3630             /*
             * Notify the framelock board whether or not we will use stereo
             * lock mode.  If that fails, then don't enable stereo lock mode on
3633              * the GPU.
3634              */
3635             if (!SetStereoLockMode(pDispEvo, gpuCanStereoLock)) {
3636                 gpuCanStereoLock = FALSE;
3637             }
3638 
3639             /*
3640              * Cache whether or not we can use stereo lock mode, so we know
3641              * whether or not to enable stereo lock mode on the GPU during
3642              * SetHeadControl
3643              */
3644             for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
3645                 if (nvHeadIsActive(pDispEvo, head)) {
3646                     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
3647                     pEvoSubDev->headControlAssy[head].stereoLocked =
3648                         gpuCanStereoLock;
3649                 }
3650             }
3651         }
3652     }
3653 
3654     /*
3655      * Go through every GPU on the system, making its framelock state match the
3656      * assembly state that we've saved.
3657      *
3658      * We do this in six steps, in order to keep the overall system state sane
3659      * throughout:
3660      * 1. Disable any clients we no longer need
     * 2. Disable any server we no longer need
3662      * 3. Compute which framelock devices need house sync
3663      * 4. Update framelock devices with new house sync info
3664      * 5. Enable new server
3665      * 6. Enable new clients
3666      */
3667     for (iteration = FIRST_ITERATION;
3668          iteration <= LAST_ITERATION;
3669          iteration++) {
3670 
3671         if (iteration == COMPUTE_HOUSE_SYNC) {
3672             /* First, clear assy state */
3673             FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) {
3674                 pFrameLockEvo->houseSyncAssy = FALSE;
3675             }
3676         }
3677 
3678         if (iteration == UPDATE_HOUSE_SYNC) {
3679             FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) {
3680                 /*
3681                  * Since nvFrameLockSetUseHouseSyncEvo sets house sync
3682                  * output mode in addition to house sync input mode and
3683                  * input polarity, this needs to be done unconditionally,
3684                  * even if a house sync state transition hasn't occurred.
3685                  */
3686                 if (!nvFrameLockSetUseHouseSyncEvo(
3687                         pFrameLockEvo, pFrameLockEvo->houseSyncAssy)) {
3688                     nvAssert(!"Setting house sync failed");
3689                 } else {
3690                     pFrameLockEvo->houseSyncArmed =
3691                         pFrameLockEvo->houseSyncAssy;
3692                 }
3693             }
3694 
3695             continue;
3696         }
3697 
3698         FOR_ALL_EVO_DEVS(pDevEvo) {
3699 
3700             if (!pDevEvo->gpus) {
3701                 continue;
3702             }
3703 
3704             if (pDevEvo->displayHandle == 0) {
3705                 /*
3706                  * This may happen during init, when setting initial modes on
3707                  * one device while other devices have not yet been allocated.
3708                  * Skip these devices for now; we'll come back later when
3709                  * they've been brought up.
3710                  */
3711                 continue;
3712             }
3713 
3714             FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
3715                 NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
3716                 NvBool server = FALSE;
3717                 NvU32 needsEnableMask = 0, needsDisableMask = 0;
3718                 unsigned int head;
3719 
3720                 switch (iteration) {
3721                 case COMPUTE_HOUSE_SYNC:
3722                     /* Accumulate house sync across pDisps */
3723                     if (pEvoSubDev->frameLockHouseSync) {
3724                         pDispEvo->pFrameLockEvo->houseSyncAssy = TRUE;
3725                     }
3726                     break;
3727                 case DISABLE_UNNEEDED_CLIENTS:
3728                     needsDisableMask = pEvoSubDev->frameLockClientMaskArmed &
3729                                        ~pEvoSubDev->frameLockClientMaskAssy;
3730                     server = FALSE;
3731                     break;
3732                 case DISABLE_UNNEEDED_SERVER:
3733                     needsDisableMask = pEvoSubDev->frameLockServerMaskArmed &
3734                                        ~pEvoSubDev->frameLockServerMaskAssy;
3735                     server = TRUE;
3736                     break;
3737                 case ENABLE_SERVER:
3738                     needsEnableMask = pEvoSubDev->frameLockServerMaskAssy &
3739                                       ~pEvoSubDev->frameLockServerMaskArmed;
3740                     server = TRUE;
3741                     break;
3742                 case ENABLE_CLIENTS:
3743                     needsEnableMask = pEvoSubDev->frameLockClientMaskAssy &
3744                                       ~pEvoSubDev->frameLockClientMaskArmed;
3745                     server = FALSE;
3746                     break;
3747                 case UPDATE_HOUSE_SYNC:
3748                     nvAssert(!"Shouldn't reach here");
3749                     break;
3750                 }
3751 
3752                 if (needsDisableMask) {
3753                     ret = nvFramelockSetControlUnsyncEvo(pDispEvo,
3754                                                          needsDisableMask,
3755                                                          server);
3756                     nvAssert(ret);
3757 
3758                     if (ret) {
3759                         if (server) {
3760                             pEvoSubDev->frameLockServerMaskArmed &=
3761                                 ~needsDisableMask;
3762 
3763                             FOR_ALL_HEADS(head, needsDisableMask) {
3764                                 cache[GpuIndex(pDevEvo, sd)][head].disableServer = TRUE;
3765                             }
3766                         } else {
3767                             pEvoSubDev->frameLockClientMaskArmed &=
3768                                 ~needsDisableMask;
3769 
3770                             FOR_ALL_HEADS(head, needsDisableMask) {
3771                                 cache[GpuIndex(pDevEvo, sd)][head].disableClient = TRUE;
3772                             }
3773                         }
3774                     }
3775                 }
3776                 if (needsEnableMask) {
3777                     ret = FramelockSetControlSync(pDispEvo,
3778                                                   needsEnableMask,
3779                                                   server);
3780 
3781                     nvAssert(ret);
3782 
3783                     if (ret) {
3784                         if (server) {
3785                             pEvoSubDev->frameLockServerMaskArmed |=
3786                                 needsEnableMask;
3787 
3788                             FOR_ALL_HEADS(head, needsEnableMask) {
3789                                 cache[GpuIndex(pDevEvo, sd)][head].enableServer = TRUE;
3790                             }
3791                         } else {
3792                             pEvoSubDev->frameLockClientMaskArmed |=
3793                                 needsEnableMask;
3794 
3795                             FOR_ALL_HEADS(head, needsEnableMask) {
3796                                 cache[GpuIndex(pDevEvo, sd)][head].enableClient = TRUE;
3797                             }
3798                         }
3799                     }
3800                 }
3801 
3802                 /* After the above process, we should have "promoted" assy
3803                  * to armed */
3804                 if (iteration == LAST_ITERATION) {
3805                     nvAssert(pEvoSubDev->frameLockServerMaskArmed ==
3806                              pEvoSubDev->frameLockServerMaskAssy);
3807                     nvAssert(pEvoSubDev->frameLockClientMaskArmed ==
3808                              pEvoSubDev->frameLockClientMaskAssy);
3809                 }
3810             }
3811         }
3812     }
3813 
3814     /*
3815      * Update the EVO HW state.  Make this a separate set of loops to not
3816      * confuse the one above
3817      */
3818     FOR_ALL_EVO_DEVS(pDevEvo) {
3819 
3820         if (!pDevEvo->gpus) {
3821             continue;
3822         }
3823 
3824         if (pDevEvo->displayHandle == 0) {
3825             continue;
3826         }
3827 
3828         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
3829             NvBool needUpdate = FALSE;
3830             NVEvoUpdateState updateState = { };
3831             NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
3832             NvU32 extRefClkMaskAssy, extRefClkUpdateMask;
3833             NvU32 possibleHeadMask;
3834             NvBool refClkChanged[NVKMS_MAX_HEADS_PER_DISP] = { FALSE };
3835             unsigned int head;
3836 
3837             extRefClkMaskAssy = pEvoSubDev->frameLockExtRefClkMaskAssy;
3838 
3839             /* Set the external reference clock, if different */
3840             extRefClkUpdateMask = extRefClkMaskAssy ^
3841                 pEvoSubDev->frameLockExtRefClkMaskArmed;
3842 
3843             FOR_ALL_HEADS(head, extRefClkUpdateMask) {
3844                 NvBool extRefClkNeeded =
3845                     !!(extRefClkMaskAssy & (1 << head));
3846 
3847                 SetRefClk(pDevEvo, sd, head, extRefClkNeeded, &updateState);
3848                 refClkChanged[head] = TRUE;
3849 
3850                 /* Update armed state for this head */
3851                 pEvoSubDev->frameLockExtRefClkMaskArmed =
3852                     (pEvoSubDev->frameLockExtRefClkMaskArmed &
3853                      (~(1 << head))) |
3854                     (extRefClkMaskAssy & (1 << head));
3855             }
            /* After the above process, the armed state should match the
             * assembly state */
3858             nvAssert(extRefClkMaskAssy ==
3859                      pEvoSubDev->frameLockExtRefClkMaskArmed);
3860 
3861             /* Update the HEAD_SET_CONTROL EVO method state */
3862 
3863             possibleHeadMask = nvGetActiveHeadMask(pDispEvo);
3864 
3865             FOR_ALL_HEADS(head, possibleHeadMask) {
3866                 if (nvkms_memcmp(&pEvoSubDev->headControl[head],
3867                                  &pEvoSubDev->headControlAssy[head],
3868                                  sizeof(NVEvoHeadControl))) {
3869 
3870                     nvPushEvoSubDevMask(pDevEvo, 1 << sd);
3871 
3872                     pEvoSubDev->headControl[head] =
3873                         pEvoSubDev->headControlAssy[head];
3874                     pDevEvo->hal->SetHeadControl(pDevEvo, sd, head,
3875                                                  &updateState);
3876                     needUpdate = TRUE;
3877 
3878                     nvPopEvoSubDevMask(pDevEvo);
3879                 } else if (refClkChanged[head]) {
3880                     needUpdate = TRUE;
3881                 }
3882             }
3883 
3884             if (needUpdate) {
3885                 nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
3886                                       TRUE /* releaseElv */);
3887             }
3888         }
3889     }
3890 
3891     /*
3892      * Inform GLS of framelock changes.  It uses this information to do things
3893      * like enable fake stereo to get stereo sync when stereo apps start
3894      * without flickering the displays.
3895      */
3896     for (iteration = FIRST_ITERATION;
3897          iteration <= LAST_ITERATION;
3898          iteration++) {
3899 
3900         FOR_ALL_EVO_DEVS(pDevEvo) {
3901 
3902             if (!pDevEvo->gpus) {
3903                 continue;
3904             }
3905 
3906             if (pDevEvo->displayHandle == 0) {
3907                 continue;
3908             }
3909 
3910             FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
3911                 NvU32 head;
3912                 for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
3913                     NvBool sendEvent = FALSE;
3914                     NvBool enable = FALSE, server = FALSE;
3915 
3916                     if (!nvHeadIsActive(pDispEvo, head)) {
3917                         continue;
3918                     }
3919 
3920                     switch (iteration) {
3921                     case DISABLE_UNNEEDED_CLIENTS:
3922                         if (cache[GpuIndex(pDevEvo, sd)][head].disableClient) {
3923                             enable = FALSE;
3924                             server = FALSE;
3925                             sendEvent = TRUE;
3926                         }
3927                         break;
3928                     case DISABLE_UNNEEDED_SERVER:
3929                         if (cache[GpuIndex(pDevEvo, sd)][head].disableServer) {
3930                             enable = FALSE;
3931                             server = TRUE;
3932                             sendEvent = TRUE;
3933                         }
3934                         break;
3935                     case ENABLE_SERVER:
3936                         if (cache[GpuIndex(pDevEvo, sd)][head].enableServer) {
3937                             enable = TRUE;
3938                             server = TRUE;
3939                             sendEvent = TRUE;
3940                         }
3941                         break;
3942                     case ENABLE_CLIENTS:
3943                         if (cache[GpuIndex(pDevEvo, sd)][head].enableClient) {
3944                             enable = TRUE;
3945                             server = FALSE;
3946                             sendEvent = TRUE;
3947                         }
3948                         break;
3949                     case UPDATE_HOUSE_SYNC:
3950                     case COMPUTE_HOUSE_SYNC:
3951                         sendEvent = FALSE;
3952                         break;
3953                     }
3954 
3955                     if (sendEvent) {
3956                         nvUpdateGLSFramelock(pDispEvo, head, enable, server);
3957                     }
3958                 }
3959             }
3960         }
3961     }
3962 }
3963 
3964 /*
3965  * For every head in the headMask on pDispEvo, construct a prioritized
3966  * list of heads and call into the EVO locking state machine to
3967  * perform the given transition.
3968  *
3969  * Return the list of heads that actually succeeded.
3970  */
3971 static NvU32 applyActionForHeads(NVDispEvoPtr pDispEvo,
3972                                  const NvU32 headMask,
3973                                  NVEvoLockAction action)
3974 {
3975     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
3976     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
3977     NvU32 appliedHeadMask = 0;
3978     NvU32 head;
3979 
3980     FOR_ALL_HEADS(head, headMask) {
3981         NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
3982         unsigned int i = 0;
3983         NvU32 tmpHead, usedHeadMask = 0;
3984 
3985         /* Fill in the array starting with this head, then with the others in
3986          * the list, and finally any other active heads */
3987         pHeads[i++] = head;
3988         usedHeadMask |= (1 << head);
3989 
3990         FOR_ALL_HEADS(tmpHead, headMask) {
3991             if (usedHeadMask & (1 << tmpHead)) {
3992                 continue;
3993             }
3994             pHeads[i++] = tmpHead;
3995             usedHeadMask |= (1 << tmpHead);
3996         }
3997 
3998         for (tmpHead = 0; tmpHead < NVKMS_MAX_HEADS_PER_DISP; tmpHead++) {
3999             if (!nvHeadIsActive(pDispEvo, tmpHead)) {
4000                 continue;
4001             }
4002             if (usedHeadMask & (1 << tmpHead)) {
4003                 continue;
4004             }
4005             pHeads[i++] = tmpHead;
4006             usedHeadMask |= (1 << tmpHead);
4007         }
4008 
4009         nvAssert(i <= NVKMS_MAX_HEADS_PER_DISP);
4010         pHeads[i] = NV_INVALID_HEAD;
4011 
4012         if (pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, pHeads)) {
4013             appliedHeadMask |= (1 << head);
4014         }
4015     }
4016 
4017     return appliedHeadMask;
4018 }
4019 
4020 //
4021 // Set up raster lock and frame lock for external frame lock
4022 //
4023 
4024 NvBool nvEnableFrameLockEvo(NVDispEvoPtr pDispEvo)
4025 {
4026     NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
4027     NvU32 serverHead = nvGetFramelockServerHead(pDispEvo);
4028     NvU32 clientHeadsMask = nvGetFramelockClientHeadsMask(pDispEvo);
4029     NvU32 appliedHeadMask;
4030     NvU32 activeClientHeadsMask;
4031     NvBool useHouseSync = FALSE;
4032     NvU32 head;
4033 
4034     nvAssert(pDispEvo->framelock.currentServerHead == NV_INVALID_HEAD);
4035     nvAssert(pDispEvo->framelock.currentClientHeadsMask == 0x0);
4036 
    if (serverHead != NV_INVALID_HEAD &&
4038         (pFrameLockEvo->houseSyncMode ==
4039          NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT)) {
4040 
4041         NvS64 houseSync;
4042 
4043         /*
4044          * Only use house sync if present.
4045          * XXX what happens when house sync is unplugged?  why not enable it
4046          * now and let the FPGA decide?
4047          */
4048         if (!nvFrameLockGetStatusEvo(pFrameLockEvo,
4049                                      NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS,
4050                                      &houseSync)) {
4051             return FALSE;
4052         }
4053 
4054         useHouseSync = (houseSync != 0);
4055     }
4056 
4057     /* Initialize the assembly state */
4058     SyncEvoLockState();
4059 
4060     /* Enable the server */
4061     if ((serverHead != NV_INVALID_HEAD) &&
4062             nvHeadIsActive(pDispEvo, serverHead)) {
4063         NvU32 serverHeadMask;
4064 
4065         serverHeadMask = (1 << serverHead);
4066         appliedHeadMask = applyActionForHeads(pDispEvo, serverHeadMask,
4067                                               NV_EVO_ADD_FRAME_LOCK_SERVER);
4068 
4069         nvAssert(appliedHeadMask == serverHeadMask);
4070         pDispEvo->framelock.currentServerHead = serverHead;
4071 
4072         /* Enable house sync, if requested */
4073         if (useHouseSync) {
4074             appliedHeadMask =
4075                 applyActionForHeads(pDispEvo, serverHeadMask,
4076                                     NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC);
4077 
4078             if (appliedHeadMask == serverHeadMask) {
4079                 pDispEvo->framelock.currentHouseSync = TRUE;
4080             }
4081         }
4082     }
4083 
4084     /* Enable the clients */
4085     activeClientHeadsMask = 0;
4086     FOR_ALL_HEADS(head, clientHeadsMask) {
4087         if (nvHeadIsActive(pDispEvo, head)) {
4088             activeClientHeadsMask |= (1 << head);
4089         }
4090     }
4091     appliedHeadMask = applyActionForHeads(pDispEvo, activeClientHeadsMask,
4092                                           NV_EVO_ADD_FRAME_LOCK_CLIENT);
4093 
4094     nvAssert(appliedHeadMask == activeClientHeadsMask);
4095     pDispEvo->framelock.currentClientHeadsMask = activeClientHeadsMask;
4096 
4097     /* Finally, update the hardware */
4098     UpdateEvoLockState();
4099 
4100     return TRUE;
4101 }
4102 
4103 //
4104 // Disable raster lock and frame lock
4105 //
4106 
4107 NvBool nvDisableFrameLockEvo(NVDispEvoPtr pDispEvo)
4108 {
4109     NvU32 serverHead = nvGetFramelockServerHead(pDispEvo);
4110     NvU32 clientHeadsMask = nvGetFramelockClientHeadsMask(pDispEvo);
4111     NvU32 activeClientHeadsMask;
4112     NvU32 appliedHeadMask;
4113     NvU32 head;
4114 
4115     /* Initialize the assembly state */
4116     SyncEvoLockState();
4117 
4118     /* Disable the clients */
4119     activeClientHeadsMask = 0;
4120     FOR_ALL_HEADS(head, clientHeadsMask) {
4121         if (nvHeadIsActive(pDispEvo, head)) {
4122             activeClientHeadsMask |= (1 << head);
4123         }
4124     }
4125     appliedHeadMask = applyActionForHeads(pDispEvo,
4126                                           activeClientHeadsMask,
4127                                           NV_EVO_REM_FRAME_LOCK_CLIENT);
4128 
4129     nvAssert(appliedHeadMask == activeClientHeadsMask);
4130     pDispEvo->framelock.currentClientHeadsMask &= ~activeClientHeadsMask;
4131 
4132     /* Disable house sync */
4133     if (serverHead != NV_INVALID_HEAD &&
4134             nvHeadIsActive(pDispEvo, serverHead)) {
4135         NvU32 serverHeadMask = (1 << serverHead);
4136 
4137         if (pDispEvo->framelock.currentHouseSync) {
4138             appliedHeadMask =
4139                 applyActionForHeads(pDispEvo, serverHeadMask,
4140                                     NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC);
4141 
4142             nvAssert(appliedHeadMask == serverHeadMask);
4143             pDispEvo->framelock.currentHouseSync = FALSE;
4144         }
4145 
4146         /* Disable the server */
4147         appliedHeadMask = applyActionForHeads(pDispEvo, serverHeadMask,
4148                                               NV_EVO_REM_FRAME_LOCK_SERVER);
4149         nvAssert(appliedHeadMask == serverHeadMask);
4150         if (appliedHeadMask == serverHeadMask) {
4151             pDispEvo->framelock.currentServerHead = NV_INVALID_HEAD;
4152         }
4153     }
4154 
4155     /* Finally, update the hardware */
4156     UpdateEvoLockState();
4157 
4158     return TRUE;
4159 }
4160 
4161 //
4162 // Enable/Disable External Reference Clock Sync
4163 //
4164 // This function is used by frame lock to make the GPU sync to
4165 // the external device's reference clock.
4166 //
4167 static void SetRefClk(NVDevEvoPtr pDevEvo,
4168                       NvU32 sd, NvU32 head, NvBool external,
4169                       NVEvoUpdateState *updateState)
4170 {
4171     nvPushEvoSubDevMask(pDevEvo, 1 << sd);
4172 
4173     pDevEvo->hal->SetHeadRefClk(pDevEvo, head, external, updateState);
4174 
4175     nvPopEvoSubDevMask(pDevEvo);
4176 }
4177 
4178 
4179 //
4180 // Query raster lock state
4181 //
4182 
4183 NvBool nvQueryRasterLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val)
4184 {
4185     NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
4186     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4187     NVEvoSubDevPtr pEvoSubDev;
4188     const NvU32 apiHead = pDpyEvo->apiHead;
4189     const NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead);
4190     NVEvoHeadControlPtr pHC;
4191 
4192     /*
4193      * XXX[2Heads1OR] The EVO lock state machine is not currently supported with
4194      * 2Heads1OR, the api head is expected to be mapped onto a single
4195      * hardware head (which is the primary hardware head) if 2Heads1OR is not
4196      * active and the EVO lock state machine is in use.
4197      */
4198     if ((apiHead == NV_INVALID_HEAD) ||
4199             (nvPopCount32(pDispEvo->apiHeadState[apiHead].hwHeadsMask) != 1)) {
4200         return FALSE;
4201     }
4202 
4203     if ((head == NV_INVALID_HEAD) || (pDevEvo->gpus == NULL)) {
4204         return FALSE;
4205     }
4206 
4207     pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
4208     pHC = &pEvoSubDev->headControl[head];
4209 
4210     *val = pHC->serverLock == NV_EVO_RASTER_LOCK ||
4211            pHC->clientLock == NV_EVO_RASTER_LOCK;
4212 
4213     return TRUE;
4214 }
4215 
4216 void nvInvalidateRasterLockGroupsEvo(void)
4217 {
4218     if (globalRasterLockGroups) {
4219         nvFree(globalRasterLockGroups);
4220 
4221         globalRasterLockGroups = NULL;
4222         numGlobalRasterLockGroups = 0;
4223     }
4224 }
4225 
4226 /*
4227  * Return the surface format usage bounds that NVKMS will program for the
4228  * requested format.
4229  *
4230  * For an RGB XBPP format, this function will return a bitmask of all RGB YBPP
4231  * formats, where Y <= X.
4232  *
4233  * For a YUV format, this function will return a bitmask of all YUV formats
4234  * that:
4235  * - Have the same number of planes as the requested format
4236  * - Have the same chroma decimation factors as the requested format
4237  * - Have the same or lower effective fetch bpp as the requested format
4238  *
4239  * For example, if the requested format is YUV420 12-bit SP, this function will
4240  * include all YUV420 8/10/12-bit SP formats.
4241  */
4242 NvU64 nvEvoGetFormatsWithEqualOrLowerUsageBound(
4243     const enum NvKmsSurfaceMemoryFormat format,
4244     NvU64 supportedFormatsCapMask)
4245 {
4246     const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
4247         nvKmsGetSurfaceMemoryFormatInfo(format);
4248     NvU64 supportedFormatsUsageBound = 0;
4249     NvU8 formatIdx;
4250 
4251     FOR_EACH_INDEX_IN_MASK(64, formatIdx, supportedFormatsCapMask) {
4252 
4253         const NvKmsSurfaceMemoryFormatInfo *pOtherFormatInfo =
4254             nvKmsGetSurfaceMemoryFormatInfo(formatIdx);
4255 
4256         if ((pFormatInfo->isYUV != pOtherFormatInfo->isYUV) ||
4257             (pFormatInfo->numPlanes != pOtherFormatInfo->numPlanes)) {
4258             continue;
4259         }
4260 
4261         if (pFormatInfo->isYUV) {
4262             if ((pFormatInfo->yuv.horizChromaDecimationFactor !=
4263                  pOtherFormatInfo->yuv.horizChromaDecimationFactor) ||
4264                 (pFormatInfo->yuv.vertChromaDecimationFactor !=
4265                  pOtherFormatInfo->yuv.vertChromaDecimationFactor) ||
4266                 (pFormatInfo->yuv.depthPerComponent <
4267                  pOtherFormatInfo->yuv.depthPerComponent)) {
4268                 continue;
4269             }
4270         } else {
4271             if (pFormatInfo->rgb.bitsPerPixel <
4272                 pOtherFormatInfo->rgb.bitsPerPixel) {
4273                 continue;
4274             }
4275         }
4276 
4277         supportedFormatsUsageBound |= NVBIT64(formatIdx);
4278 
4279     } FOR_EACH_INDEX_IN_MASK_END;
4280 
4281     return supportedFormatsUsageBound;
4282 }
4283 
4284 //
4285 // Enable or disable flip lock (or query state)
4286 //
4287 
4288 NvBool nvUpdateFlipLockEvoOneHead(NVDispEvoPtr pDispEvo, const NvU32 head,
4289                                   NvU32 *val, NvBool set,
4290                                   NVEvoUpdateState *updateState)
4291 {
4292     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4293     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
4294     NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head];
4295 
4296     if (set) {
4297         // make sure we're dealing with a bool
4298         NvBool setVal = !!*val;
4299 
4300         if (setVal ^ pHC->flipLock) {
4301             NvBool isMethodPending;
4302             NvBool changed = FALSE;
4303 
4304             if (!pDevEvo->hal->
4305                     IsChannelMethodPending(pDevEvo,
4306                                            pDevEvo->head[head].layer[NVKMS_MAIN_LAYER],
4307                                            pDispEvo->displayOwner,
4308                                            &isMethodPending) ||
4309                 isMethodPending) {
4310                 nvAssert(!"Base channel not idle");
4311                 return FALSE;
4312             }
4313 
4314             if (setVal) {
                /* Make sure flip lock is not prohibited and raster lock is
                 * enabled.
                 *
                 * XXX: [2Heads1OR] If the head is locked in merge mode then
                 * its flip-lock state cannot be changed.
4319                  */
4320                 if ((pHC->serverLock == NV_EVO_NO_LOCK &&
4321                      pHC->clientLock == NV_EVO_NO_LOCK) ||
4322                     HEAD_MASK_QUERY(pEvoSubDev->flipLockProhibitedHeadMask,
4323                                     head) ||
4324                     pHC->mergeMode) {
4325                     return FALSE;
4326                 }
4327                 pHC->flipLock = TRUE;
4328                 changed = TRUE;
4329             } else {
                /* Only actually disable fliplock if it's not needed for SLI.
                 *
                 * XXX: [2Heads1OR] If the head is locked in merge mode then
                 * its flip-lock state cannot be changed.
                 */
4335                 if (!pHC->mergeMode &&
4336                     !HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForSliHeadMask,
4337                                      head)) {
4338                     pHC->flipLock = FALSE;
4339                     changed = TRUE;
4340                 }
4341             }
4342 
4343             if (changed) {
4344                 EvoUpdateHeadParams(pDispEvo, head, updateState);
4345             }
4346         }
4347 
4348         /* Remember if we currently need fliplock enabled for framelock */
4349         pEvoSubDev->flipLockEnabledForFrameLockHeadMask =
4350             setVal ?
4351                 HEAD_MASK_SET(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, head) :
4352                 HEAD_MASK_UNSET(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, head);
4353     }
4354 
4355     /*
4356      * XXX should the query return the cached "enabled for framelock" state
4357      * instead?
4358      */
4359     *val = pHC->flipLock;
4360 
4361 
4362     return TRUE;
4363 }
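
/*
 * Illustrative sketch (hypothetical caller): query, rather than set, the
 * flip-lock state of head 0. With set == FALSE no hardware state is
 * touched; the function only writes the current state through 'val'.
 *
 *     NVEvoUpdateState updateState = { };
 *     NvU32 enabled = 0;
 *     if (nvUpdateFlipLockEvoOneHead(pDispEvo, 0, &enabled, FALSE,
 *                                    &updateState)) {
 *         // 'enabled' now holds the current pHC->flipLock value.
 *     }
 */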
4364 
4365 
4366 static NvBool UpdateFlipLock50(const NVDpyEvoRec *pDpyEvo,
4367                                NvU32 *val, NvBool set)
4368 {
4369     NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
4370     const NvU32 apiHead = pDpyEvo->apiHead;
4371     const NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead);
4372     NVEvoUpdateState updateState = { };
4373     NvBool ret;
4374 
4375     if (head == NV_INVALID_HEAD) {
4376         return FALSE;
4377     }
4378 
4379     ret = nvUpdateFlipLockEvoOneHead(pDispEvo, head, val, set,
4380                                      &updateState);
4381 
4382     if (set && ret) {
4383         nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
4384                               TRUE /* releaseElv */);
4385     }
4386 
4387     return ret;
4388 }
4389 
4390 NvBool nvSetFlipLockEvo(NVDpyEvoPtr pDpyEvo, NvS64 value)
4391 {
4392     NvU32 val32 = !!value;
4393     return UpdateFlipLock50(pDpyEvo, &val32, TRUE /* set */);
4394 }
4395 
4396 NvBool nvGetFlipLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue)
4397 {
4398     NvBool ret;
4399     NvU32 val32 = 0;
4400     ret = UpdateFlipLock50(pDpyEvo, &val32, FALSE /* set */);
4401 
4402     if (ret) {
4403         *pValue = !!val32;
4404     }
4405 
4406     return ret;
4407 }
4408 
4409 static void ProhibitFlipLock50(NVDispEvoPtr pDispEvo)
4410 {
4411     NvU32 head;
4412     NvBool needUpdate = FALSE;
4413     NVEvoUpdateState updateState = { };
4414 
4415     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4416     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
4417 
4418     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
4419         NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head];
        /*
         * XXX: [2Heads1OR] If the head is locked in merge mode then its
         * flip-lock state cannot be changed.
         */
4424         if (!nvHeadIsActive(pDispEvo, head) || pHC->mergeMode) {
4425             continue;
4426         }
4427 
4428         if (HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForFrameLockHeadMask,
4429                             head)) {
            nvAssert(!"Cannot prohibit flip lock "
                      "because it is already enabled for frame lock");
4432             continue;
4433         }
4434 
4435         if (pHC->flipLock) {
4436             needUpdate = TRUE;
4437 
4438             pHC->flipLock = FALSE;
4439             EvoUpdateHeadParams(pDispEvo, head, &updateState);
4440         }
4441 
4442         pEvoSubDev->flipLockProhibitedHeadMask =
4443             HEAD_MASK_SET(pEvoSubDev->flipLockProhibitedHeadMask, head);
4444     }
4445 
4446     if (needUpdate) {
4447         nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
4448                               TRUE /* releaseElv */);
4449     }
4450 }
4451 
4452 static void AllowFlipLock50(NVDispEvoPtr pDispEvo)
4453 {
4454     NvU32 head;
4455     NvBool needUpdate = FALSE;
4456     NVEvoUpdateState updateState = { };
4457     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4458     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
4459 
4460     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
4461         NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head];
4462 
        /*
         * XXX: [2Heads1OR] If the head is locked in merge mode then its
         * flip-lock state cannot be changed.
         */
4467         if (!nvHeadIsActive(pDispEvo, head) || pHC->mergeMode) {
4468             continue;
4469         }
4470 
4471         if (!pHC->flipLock &&
4472             HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForSliHeadMask,
4473                             head)) {
4474             needUpdate = TRUE;
4475 
4476             nvAssert(pHC->serverLock != NV_EVO_NO_LOCK ||
4477                      pHC->clientLock != NV_EVO_NO_LOCK);
4478 
4479             pHC->flipLock = TRUE;
4480             EvoUpdateHeadParams(pDispEvo, head, &updateState);
4481         }
4482 
4483         pEvoSubDev->flipLockProhibitedHeadMask =
4484             HEAD_MASK_UNSET(pEvoSubDev->flipLockProhibitedHeadMask, head);
4485     }
4486 
4487     if (needUpdate) {
4488         nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
4489                               TRUE /* releaseElv */);
4490     }
4491 }
4492 
4493 NvBool nvAllowFlipLockEvo(NVDispEvoPtr pDispEvo, NvS64 value)
4494 {
4495     if (value == 0) {
4496         ProhibitFlipLock50(pDispEvo);
4497     } else {
4498         AllowFlipLock50(pDispEvo);
4499     }
4500     return TRUE;
4501 }
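
/*
 * Illustrative usage (hedged sketch): nvAllowFlipLockEvo() treats 'value'
 * as a boolean toggle over all active, non-merge-mode heads of the disp.
 *
 *     nvAllowFlipLockEvo(pDispEvo, 0); // prohibit flip lock
 *     nvAllowFlipLockEvo(pDispEvo, 1); // allow flip lock again
 */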
4502 
4503 /*!
4504  * Enable or disable stereo.
4505  *
4506  * XXX SLI+Stereo For now, just set stereo on the display owner.
4507  */
4508 NvBool nvSetStereoEvo(
4509     const NVDispEvoRec *pDispEvo,
4510     const NvU32 head,
4511     NvBool enable)
4512 {
4513     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4514     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
4515     NVEvoHeadControlPtr pHC;
4516     NVEvoLockPin pin;
4517 
4518     nvAssert(head != NV_INVALID_HEAD);
4519 
4520     pHC = &pEvoSubDev->headControl[head];
4521     pin = NV_EVO_LOCK_PIN_INTERNAL(head);
4522 
    // Stereo is enabled iff the stereo pin is not the head's internal pin.
4524     NvBool stereo = !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin);
4525 
4526     if (enable ^ stereo) {
4527         NVEvoUpdateState updateState = { };
4528 
4529         if (enable) {
4530             NvU32 otherHead;
4531             NvU32 signalPin;
4532 
4533             // If any other head is already driving stereo, fail
4534             for (otherHead = 0; otherHead < NVKMS_MAX_HEADS_PER_DISP;
4535                  otherHead++) {
4536                 if (!nvHeadIsActive(pDispEvo, otherHead)) {
4537                     continue;
4538                 }
4539                 if (head == otherHead) {
4540                     continue;
4541                 }
4542 
4543                 const NVEvoHeadControl *pOtherHC =
4544                     &pEvoSubDev->headControl[otherHead];
4545 
4546                 if (!NV_EVO_LOCK_PIN_IS_INTERNAL(pOtherHC->stereoPin)) {
4547                     return FALSE;
4548                 }
4549             }
4550 
4551             signalPin = nvEvoGetPinForSignal(pDispEvo,
4552                                              pEvoSubDev,
4553                                              NV_EVO_LOCK_SIGNAL_STEREO);
4554             if (signalPin != NV_EVO_LOCK_PIN_ERROR) {
4555                 pin = signalPin;
4556             }
4557         }
4558 
4559         pHC->stereoPin = pin;
4560 
4561         EvoUpdateHeadParams(pDispEvo, head, &updateState);
4562 
4563         // Make method take effect.
4564         nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState,
4565                               TRUE /* releaseElv */);
4566     }
4567 
4568     return TRUE;
4569 }
4570 
4571 /*!
4572  * Query stereo state.
4573  *
4574  * XXX SLI+Stereo For now, just get stereo on the display owner.
4575  */
4576 NvBool nvGetStereoEvo(const NVDispEvoRec *pDispEvo, const NvU32 head)
4577 {
4578     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4579     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
4580     NVEvoHeadControlPtr pHC;
4581 
4582     nvAssert(head != NV_INVALID_HEAD);
4583 
4584     pHC = &pEvoSubDev->headControl[head];
4585 
4586     return !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin);
4587 }
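
/*
 * Illustrative sketch pairing the two entry points above (hypothetical
 * caller):
 *
 *     if (!nvGetStereoEvo(pDispEvo, head)) {
 *         // Stereo is currently disabled on this head; try to enable it.
 *         nvSetStereoEvo(pDispEvo, head, TRUE);
 *     }
 */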
4588 
4589 void nvSetViewPortsEvo(NVDispEvoPtr pDispEvo,
4590                        const NvU32 head, NVEvoUpdateState *updateState)
4591 {
4592     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4593     NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
4594     const NVHwModeViewPortEvo *pViewPort = &pHeadState->timings.viewPort;
4595 
4596     nvPushEvoSubDevMaskDisp(pDispEvo);
4597     pDevEvo->hal->SetViewportInOut(pDevEvo, head,
4598                                    pViewPort, pViewPort, pViewPort,
4599                                    updateState);
4600     nvPopEvoSubDevMask(pDevEvo);
4601 
    /*
     * Specify safe default values of 0 for viewPortPointIn x and y; these
     * may be changed later, when panning out-of-band of a modeset.
     */
4606     EvoSetViewportPointIn(pDispEvo, head, 0 /* x */, 0 /* y */, updateState);
4607 }
4608 
4609 
4610 
4611 static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, const NvU32 head,
4612                                   NvU16 x, NvU16 y,
4613                                   NVEvoUpdateState *updateState)
4614 {
4615     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4616 
4617     nvPushEvoSubDevMaskDisp(pDispEvo);
4618     pDevEvo->hal->SetViewportPointIn(pDevEvo, head, x, y, updateState);
4619     nvPopEvoSubDevMask(pDevEvo);
4620 }
4621 
4622 void nvEvoSetLUTContextDma(NVDispEvoPtr pDispEvo,
4623                            const NvU32 head, NVEvoUpdateState *pUpdateState)
4624 {
4625     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4626     const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
4627 
4628     pDevEvo->hal->SetLUTContextDma(pDispEvo,
4629                                    head,
4630                                    pHeadState->lut.pCurrSurface,
4631                                    pHeadState->lut.baseLutEnabled,
4632                                    pHeadState->lut.outputLutEnabled,
4633                                    pUpdateState,
4634                                    pHeadState->bypassComposition);
4635 }
4636 
4637 static void EvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo, const NvU32 apiHead)
4638 {
4639     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
4640     NVDispApiHeadStateEvoRec *pApiHeadState =
4641                               &pDispEvo->apiHeadState[apiHead];
4642     const int dispIndex = pDispEvo->displayOwner;
4643     NvU32 head;
4644     NVEvoUpdateState updateState = { };
4645 
4646     FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
4647         nvEvoSetLUTContextDma(pDispEvo, head, &updateState);
4648     }
4649 
    /*
     * EVO2 does not set the LUT context DMA if the core channel doesn't
     * have a scanout surface set; in that case, there is no update state
     * to kick off.
     */
4655     if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) {
4656         int notifier;
4657         NvBool notify;
4658 
4659         nvEvoStageLUTNotifier(pDispEvo, apiHead);
4660         notifier = nvEvoCommitLUTNotifiers(pDispEvo);
4661 
4662         nvAssert(notifier >= 0);
4663 
        /*
         * XXX: The notifier index returned by nvEvoCommitLUTNotifiers here
         * shouldn't be < 0, because this function shouldn't have been called
         * while a previous LUT update is outstanding. If
         * nvEvoCommitLUTNotifiers ever returns -1 for one reason or another,
         * fall back to notify = FALSE with notifier index 0, so that the
         * following Update call is not given an invalid notifier index;
         * this avoids potential kernel panics and Xids.
         */
4673         notify = notifier >= 0;
4674         if (!notify) {
4675             notifier = 0;
4676         }
4677 
4678         // Clear the completion notifier and kick off an update.  Wait for it
4679         // here if NV_CTRL_SYNCHRONOUS_PALETTE_UPDATES is enabled.  Otherwise,
4680         // don't wait for the notifier -- it'll be checked the next time a LUT
4681         // change request comes in.
4682         EvoUpdateAndKickOffWithNotifier(pDispEvo,
4683                                         notify, /* notify */
4684                                         FALSE, /* sync */
4685                                         notifier,
4686                                         &updateState,
4687                                         TRUE /* releaseElv */);
4688         pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate |= notify;
4689     }
4690 }
4691 
4692 static void UpdateMaxPixelClock(NVDevEvoPtr pDevEvo)
4693 {
4694     NVDispEvoPtr pDispEvo;
4695     NVDpyEvoPtr pDpyEvo;
4696     int i;
4697 
4698     FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
4699         FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) {
4700             nvDpyProbeMaxPixelClock(pDpyEvo);
4701         }
4702     }
4703 }
4704 
4705 static NvBool AllocEvoSubDevs(NVDevEvoPtr pDevEvo)
4706 {
4707     NVDispEvoPtr pDispEvo;
4708     NvU32 sd;
4709 
4710     pDevEvo->gpus = nvCalloc(pDevEvo->numSubDevices, sizeof(NVEvoSubDevRec));
4711 
4712     if (pDevEvo->gpus == NULL) {
4713         return FALSE;
4714     }
4715 
4716     /* Assign the pDispEvo for each evoSubDevice */
4717 
4718     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
4719         pDevEvo->gpus[sd].pDispEvo = pDispEvo;
4720     }
4721     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
4722         nvAssert(pDevEvo->gpus[sd].pDispEvo != NULL);
4723     }
4724 
4725     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
4726         NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
4727         NvU32 head;
4728 
4729         pDevEvo->gpus[sd].subDeviceInstance = sd;
4730         // Initialize the lock state.
4731         nvEvoStateStartNoLock(pEvoSubDev);
4732 
4733         for (head = 0; head < pDevEvo->numHeads; head++) {
4734             NVEvoSubDevHeadStateRec *pSdHeadState =
4735                 &pDevEvo->gpus[sd].headState[head];
4736             NvU32 i;
4737 
4738             for (i = 0; i < ARRAY_LEN(pSdHeadState->layer); i++) {
4739                 pSdHeadState->layer[i].cscMatrix = NVKMS_IDENTITY_CSC_MATRIX;
4740             }
4741 
4742             pSdHeadState->cursor.cursorCompParams =
4743                 nvDefaultCursorCompositionParams(pDevEvo);
4744         }
4745     }
4746 
4747     return TRUE;
4748 }
4749 
4750 
// Choose the default cursor composition params, replacing zeroed-out values when they are unsupported.
4752 struct NvKmsCompositionParams nvDefaultCursorCompositionParams(const NVDevEvoRec *pDevEvo)
4753 {
4754     const struct NvKmsCompositionCapabilities *pCaps =
4755         &pDevEvo->caps.cursorCompositionCaps;
4756     const NvU32 supportedBlendMode =
4757         pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE].supportedBlendModes[1];
4758 
4759     struct NvKmsCompositionParams params = { };
4760 
4761     if ((supportedBlendMode & NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)) != 0x0) {
4762         params.blendingMode[1] = NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE;
4763     } else {
4764         params.blendingMode[1] = NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA;
4765     }
4766 
4767     return params;
4768 }
4769 
4770 static NvBool ValidateConnectorTypes(const NVDevEvoRec *pDevEvo)
4771 {
4772     const NVDispEvoRec *pDispEvo;
4773     const NVConnectorEvoRec *pConnectorEvo;
4774     NvU32 dispIndex;
4775 
4776     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
4777         const NVEvoSubDevRec *pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
4778         const NVEvoCapabilities *pEvoCaps = &pEvoSubDev->capabilities;
4779         const NVEvoMiscCaps *pMiscCaps = &pEvoCaps->misc;
4780 
4781         FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
4782             if (!pMiscCaps->supportsDSI &&
4783                 pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) {
4784                 nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
4785                             "DSI connectors are unsupported!");
4786                 return FALSE;
4787             }
4788         }
4789     }
4790     return TRUE;
4791 }
4792 
4793 static void UnregisterFlipOccurredEventOneHead(NVDispEvoRec *pDispEvo,
4794                                                const NvU32 head)
4795 {
4796     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
4797     NvU32 layer;
4798 
4799     /* XXX NVKMS TODO: need disp-scope in event */
4800     if (pDispEvo->displayOwner != 0) {
4801         return;
4802     }
4803 
4804     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
4805         NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer];
4806 
4807         nvAssert((pChannel->completionNotifierEventHandle == 0) ||
4808                     (pChannel->completionNotifierEventRefPtr != NULL));
4809 
4810         if (pChannel->completionNotifierEventHandle != 0) {
4811             nvRmApiFree(nvEvoGlobal.clientHandle,
4812                         pChannel->pb.channel_handle,
4813                         pChannel->completionNotifierEventHandle);
4814             nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4815                                pChannel->completionNotifierEventHandle);
4816             pChannel->completionNotifierEventHandle = 0;
4817             pChannel->completionNotifierEventRefPtr = NULL;
4818         }
4819     }
4820 }
4821 
4822 static void ClearApiHeadStateOneDisp(NVDispEvoRec *pDispEvo)
4823 {
4824     NvU32 apiHead;
4825 
    /*
     * Before destroying the api-head states, unregister all the
     * flip-occurred event callbacks that are registered with the
     * (api-head, layer) pair event data.
     */
4831     for (NvU32 head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
4832         UnregisterFlipOccurredEventOneHead(pDispEvo, head);
4833     }
4834 
4835     for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->apiHeadState); apiHead++) {
4836         NvU32 layer;
4837         NVDispApiHeadStateEvoRec *pApiHeadState =
4838             &pDispEvo->apiHeadState[apiHead];
4839         nvAssert(nvListIsEmpty(&pApiHeadState->vblankCallbackList));
4840         for (layer = 0; layer < ARRAY_LEN(pApiHeadState->flipOccurredEvent); layer++) {
4841             if (pApiHeadState->flipOccurredEvent[layer].ref_ptr != NULL) {
4842                 nvkms_free_ref_ptr(pApiHeadState->flipOccurredEvent[layer].ref_ptr);
4843                 pApiHeadState->flipOccurredEvent[layer].ref_ptr = NULL;
4844             }
4845         }
4846     }
4847 
4848     nvkms_memset(pDispEvo->apiHeadState, 0, sizeof(pDispEvo->apiHeadState));
4849 }
4850 
4851 static void ClearApiHeadState(NVDevEvoRec *pDevEvo)
4852 {
4853     NvU32 dispIndex;
4854     NVDispEvoRec *pDispEvo;
4855 
4856     nvRmFreeCoreRGSyncpts(pDevEvo);
4857 
4858     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
4859         ClearApiHeadStateOneDisp(pDispEvo);
4860     }
4861 
4862     nvkms_memset(pDevEvo->apiHead, 0, sizeof(pDevEvo->apiHead));
4863 }
4864 
4865 static NvBool InitApiHeadStateOneDisp(NVDispEvoRec *pDispEvo)
4866 {
4867     NvU32 usedApiHeadsMask = 0x0;
4868     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
4869 
4870     for (NvU32 apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->apiHeadState); apiHead++) {
4871         NvU32 layer;
4872         NVDispApiHeadStateEvoRec *pApiHeadState =
4873             &pDispEvo->apiHeadState[apiHead];
4874 
4875         pApiHeadState->activeDpys = nvEmptyDpyIdList();
4876         pApiHeadState->attributes = NV_EVO_DEFAULT_ATTRIBUTES_SET;
4877 
4878         nvListInit(&pApiHeadState->vblankCallbackList);
4879 
4880         for (layer = 0; layer < ARRAY_LEN(pApiHeadState->flipOccurredEvent); layer++) {
4881             pApiHeadState->flipOccurredEvent[layer].ref_ptr =
4882                 nvkms_alloc_ref_ptr(&pApiHeadState->flipOccurredEvent[layer].data);
4883             if (pApiHeadState->flipOccurredEvent[layer].ref_ptr == NULL) {
4884                 goto failed;
4885             }
4886 
4887             pApiHeadState->flipOccurredEvent[layer].data =
4888                 (NVDispFlipOccurredEventDataEvoRec) {
4889                 .pDispEvo = pDispEvo,
4890                 .apiHead = apiHead,
4891                 .layer = layer,
4892             };
4893         }
4894     }
4895 
4896     for (NvU32 head = 0; head < pDevEvo->numHeads; head++) {
4897         if (pDispEvo->headState[head].pConnectorEvo != NULL) {
4898             NvU32 apiHead;
4899             const NVConnectorEvoRec *pConnectorEvo =
4900                 pDispEvo->headState[head].pConnectorEvo;
4901 
            /* Find an unused api-head that supports the same number of layers */
4903             for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
4904                 if ((NVBIT(apiHead) & usedApiHeadsMask) != 0x0) {
4905                     continue;
4906                 }
4907 
4908                 if (pDevEvo->apiHead[apiHead].numLayers ==
4909                         pDevEvo->head[head].numLayers) {
4910                     usedApiHeadsMask |= NVBIT(apiHead);
4911                     break;
4912                 }
4913             }
4914             nvAssert(apiHead < pDevEvo->numApiHeads);
4915 
            /*
             * Use the connector's pDpyEvo, since we may not have one for
             * the display id if it is dynamic.
             */
4920             NVDpyEvoRec *pDpyEvo = nvGetDpyEvoFromDispEvo(pDispEvo,
4921                 pConnectorEvo->displayId);
4922 
4923             nvAssert(pDpyEvo->apiHead == NV_INVALID_HEAD);
4924 
4925             pDpyEvo->apiHead = apiHead;
4926             nvAssignHwHeadsMaskApiHeadState(
4927                 &pDispEvo->apiHeadState[apiHead],
4928                 NVBIT(head));
4929             pDispEvo->apiHeadState[apiHead].activeDpys =
4930                 nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId);
4931         }
4932     }
4933 
4934     return TRUE;
4935 
4936 failed:
4937     ClearApiHeadStateOneDisp(pDispEvo);
4938 
4939     return FALSE;
4940 }
4941 
4942 static void
4943 CompletionNotifierEventDeferredWork(void *dataPtr, NvU32 dataU32)
4944 {
4945     NVDispFlipOccurredEventDataEvoRec *pEventData = dataPtr;
4946 
4947     nvSendFlipOccurredEventEvo(pEventData->pDispEvo, pEventData->apiHead,
4948                                pEventData->layer);
4949 }
4950 
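/*
 * RM invokes this callback when a completion notifier event fires for a
 * layer's channel. The real work is deferred to a zero-timeout timer that
 * runs CompletionNotifierEventDeferredWork() with the event's ref_ptr;
 * presumably (an assumption, not stated here) because the RM callback
 * context is not suitable for sending NVKMS events directly.
 */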
4951 static void CompletionNotifierEvent(void *arg, void *pEventDataVoid,
4952                                     NvU32 hEvent, NvU32 Data, NV_STATUS Status)
4953 {
4954   (void) nvkms_alloc_timer_with_ref_ptr(
4955         CompletionNotifierEventDeferredWork, /* callback */
4956         arg, /* argument (this is a ref_ptr to NVDispFlipOccurredEventDataEvoRec) */
4957         0,   /* dataU32 */
4958         0);  /* timeout: schedule the work immediately */
4959 }
4960 
4961 void nvEvoPreModesetRegisterFlipOccurredEvent(NVDispEvoRec *pDispEvo,
4962                                               const NvU32 head,
4963                                               const NVEvoModesetUpdateState
4964                                                   *pModesetUpdate)
4965 {
4966     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
4967     NvU32 layer;
4968 
4969     /* XXX NVKMS TODO: need disp-scope in event */
4970     if (pDispEvo->displayOwner != 0) {
4971         return;
4972     }
4973 
4974     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
4975         NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer];
4976         const struct _NVEvoModesetUpdateStateOneLayer *pLayer =
4977              &pModesetUpdate->flipOccurredEvent[head].layer[layer];
4978 
4979         if (!pLayer->changed ||
4980                 (pLayer->ref_ptr == NULL) ||
4981                 (pLayer->ref_ptr == pChannel->completionNotifierEventRefPtr)) {
4982             continue;
4983         }
4984 
4985         nvAssert((pChannel->completionNotifierEventHandle == 0) &&
4986                     (pChannel->completionNotifierEventRefPtr == NULL));
4987 
4988         pChannel->completionNotifierEventHandle =
4989             nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
4990 
4991         if (!nvRmRegisterCallback(pDevEvo,
4992                                   &pChannel->completionNotifierEventCallback,
4993                                   pLayer->ref_ptr,
4994                                   pChannel->pb.channel_handle,
4995                                   pChannel->completionNotifierEventHandle,
4996                                   CompletionNotifierEvent,
4997                                   0)) {
4998             nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
4999                                pChannel->completionNotifierEventHandle);
5000             pChannel->completionNotifierEventHandle = 0;
5001         } else {
5002             pChannel->completionNotifierEventRefPtr = pLayer->ref_ptr;
5003         }
5004     }
5005 }
5006 
5007 void nvEvoPostModesetUnregisterFlipOccurredEvent(NVDispEvoRec *pDispEvo,
5008                                                    const NvU32 head,
5009                                                    const NVEvoModesetUpdateState
5010                                                        *pModesetUpdate)
5011 {
5012     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
5013     NvU32 layer;
5014 
5015     /* XXX NVKMS TODO: need disp-scope in event */
5016     if (pDispEvo->displayOwner != 0) {
5017         return;
5018     }
5019 
5020     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
5021         NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer];
5022         const struct _NVEvoModesetUpdateStateOneLayer *pLayer =
5023              &pModesetUpdate->flipOccurredEvent[head].layer[layer];
5024 
5025         if (!pLayer->changed ||
5026                 (pLayer->ref_ptr != NULL) ||
5027                 (pChannel->completionNotifierEventHandle == 0)) {
5028 
            /*
             * If this layer's flip-occurred event was updated to be enabled
             * (pLayer->ref_ptr != NULL), then that update should already
             * have been processed by
             * nvEvoPreModesetRegisterFlipOccurredEvent(), and
             * pChannel->completionNotifierEventRefPtr == pLayer->ref_ptr.
             */
5036             nvAssert(!pLayer->changed ||
5037                         (pChannel->completionNotifierEventHandle == 0) ||
5038                         (pChannel->completionNotifierEventRefPtr ==
5039                             pLayer->ref_ptr));
5040             continue;
5041         }
5042 
5043         nvRmApiFree(nvEvoGlobal.clientHandle,
5044                     pChannel->pb.channel_handle,
5045                     pChannel->completionNotifierEventHandle);
5046         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
5047                            pChannel->completionNotifierEventHandle);
5048         pChannel->completionNotifierEventHandle = 0;
5049         pChannel->completionNotifierEventRefPtr = NULL;
5050     }
5051 }
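
/*
 * Sketch of the intended pairing across a modeset (an assumption based on
 * the Pre/Post naming; hypothetical flow):
 *
 *     nvEvoPreModesetRegisterFlipOccurredEvent(pDispEvo, head, pModesetUpdate);
 *     ... push and kick off the modeset methods ...
 *     nvEvoPostModesetUnregisterFlipOccurredEvent(pDispEvo, head, pModesetUpdate);
 */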
5052 
5053 static NvBool InitApiHeadState(NVDevEvoRec *pDevEvo)
5054 {
5055     NVDispEvoRec *pDispEvo;
5056     NvU32 dispIndex;
5057 
    /*
     * For every hardware head, there should be at least one api-head
     * that supports the same number of layers.
     */
5062     nvAssert(pDevEvo->numApiHeads == pDevEvo->numHeads);
5063     for (NvU32 head = 0; head < pDevEvo->numHeads; head++) {
5064         pDevEvo->apiHead[head].numLayers = pDevEvo->head[head].numLayers;
5065     }
5066 
5067     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
5068         if (!InitApiHeadStateOneDisp(pDispEvo)) {
5069             goto failed;
5070         }
5071     }
5072 
5073     nvRmAllocCoreRGSyncpts(pDevEvo);
5074 
5075     return TRUE;
5076 
5077 failed:
5078     ClearApiHeadState(pDevEvo);
5079 
5080     return FALSE;
5081 }
5082 
5083 /*!
5084  * Allocate the EVO core channel.
5085  *
5086  * This function trivially succeeds if the core channel is already allocated.
5087  */
5088 NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo)
5089 {
5090     NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS capsParams = { };
5091     NvU32 ret;
5092     NvBool bRet;
5093     NVDispEvoRec *pDispEvo;
5094     NvU32 dispIndex;
5095     NvU32 head;
5096 
5097     /* Do nothing if the display was already allocated */
5098     if (pDevEvo->displayHandle != 0) {
5099         return TRUE;
5100     }
5101 
5102     if (!AllocEvoSubDevs(pDevEvo)) {
5103         goto failed;
5104     }
5105 
5106     // Disallow GC6 in anticipation of touching GPU/displays.
5107     if (!nvRmSetGc6Allowed(pDevEvo, FALSE)) {
5108         goto failed;
5109     }
5110 
    /* Query console FB info, and save the result into pDevEvo->vtFbInfo.
     * This is done at device allocation time.
     * If the surface format is compatible, nvRmImportFbConsoleMemory will
     * import the surface for console restore by nvEvoRestoreConsole.
     * Otherwise, console restore will cause a core channel realloc, telling
     * RM to restore the console via nvRmVTSwitch.
     */
5118     if (!nvRmGetVTFBInfo(pDevEvo)) {
5119         goto failed;
5120     }
5121 
5122     if (!nvRmVTSwitch(pDevEvo,
5123                       NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE)) {
5124         goto failed;
5125     }
5126 
5127     /* Evo object (parent of all other NV50 display stuff) */
5128     nvAssert(nvRmEvoClassListCheck(pDevEvo, pDevEvo->dispClass));
5129     pDevEvo->displayHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
5130 
5131     ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
5132                        pDevEvo->deviceHandle,
5133                        pDevEvo->displayHandle,
5134                        pDevEvo->dispClass,
5135                        NULL);
5136     if (ret != NVOS_STATUS_SUCCESS) {
5137         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
5138                     "Failed to initialize display engine: 0x%x (%s)",
5139                     ret, nvstatusToString(ret));
5140         goto failed;
5141     }
5142 
5143     /* Get the display caps bits */
5144 
5145     ct_assert(sizeof(pDevEvo->capsBits) == sizeof(capsParams.capsTbl));
5146     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5147                          pDevEvo->displayHandle,
5148                          NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2,
5149                          &capsParams, sizeof(capsParams));
5150     if (ret != NVOS_STATUS_SUCCESS) {
5151         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
5152                     "Failed to determine display capabilities");
5153         goto failed;
5154     }
5155     nvkms_memcpy(pDevEvo->capsBits, capsParams.capsTbl,
5156                  sizeof(pDevEvo->capsBits));
5157 
5158     // Evo core channel. Allocated once, shared per GPU
5159     if (!nvRMSetupEvoCoreChannel(pDevEvo)) {
5160         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
5161                     "Failed to allocate display engine core DMA push buffer");
5162         goto failed;
5163     }
5164 
5165     pDevEvo->coreInitMethodsPending = TRUE;
5166 
5167     bRet = pDevEvo->hal->GetCapabilities(pDevEvo);
5168 
5169     if (!bRet) {
5170         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
5171                     "Failed to query display engine capability bits.");
5172         goto failed;
5173     }
5174 
5175     /*
5176      * XXX NVKMS TODO: if the EVO core channel is allocated (and
5177      * capability notifier queried) before any nvDpyConnectEvo(), then
5178      * we won't need to update the pixelClock here.
5179      */
5180     UpdateMaxPixelClock(pDevEvo);
5181 
5182     if (pDevEvo->numWindows > 0) {
5183         int win;
5184 
5185         if (!nvRMAllocateWindowChannels(pDevEvo)) {
5186             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
5187                         "Failed to allocate display engine window channels");
5188             goto failed;
5189         }
5190 
5191         for (win = 0; win < pDevEvo->numWindows; win++) {
5192             const NvU32 head = pDevEvo->headForWindow[win];
5193 
5194             if (head == NV_INVALID_HEAD) {
5195                 continue;
5196             }
5197 
5198             pDevEvo->head[head].layer[pDevEvo->head[head].numLayers]  =
5199                 pDevEvo->window[win];
5200             pDevEvo->head[head].numLayers++;
5201         }
5202     } else {
5203         // Allocate the base channels
5204         if (!nvRMAllocateBaseChannels(pDevEvo)) {
5205             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
5206                         "Failed to allocate display engine base channels");
5207             goto failed;
5208         }
5209 
5210         // Allocate the overlay channels
5211         if (!nvRMAllocateOverlayChannels(pDevEvo)) {
5212             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
5213                         "Failed to allocate display engine overlay channels");
5214             goto failed;
5215         }
5216 
5217         /* Map base and overlay channels onto main and overlay layers. */
5218         for (head = 0; head < pDevEvo->numHeads; head++) {
5219             nvAssert(pDevEvo->base[head] != NULL && pDevEvo->overlay[head] != NULL);
5220 
5221             pDevEvo->head[head].layer[NVKMS_MAIN_LAYER] = pDevEvo->base[head];
5222             pDevEvo->head[head].layer[NVKMS_OVERLAY_LAYER] = pDevEvo->overlay[head];
5223             pDevEvo->head[head].numLayers = 2;
5224         }
5225     }
5226 
5227     // Allocate and map the cursor controls for all heads
5228     bRet = nvAllocCursorEvo(pDevEvo);
5229     if (!bRet) {
5230         goto failed;
5231     }
5232 
5233     if (!nvAllocLutSurfacesEvo(pDevEvo)) {
5234         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
5235             "Failed to allocate memory for the display color lookup table.");
5236         goto failed;
5237     }
5238 
5239     // Resume the DisplayPort library's control of the device.
5240     if (!nvRmResumeDP(pDevEvo)) {
5241         nvEvoLogDev(
5242             pDevEvo,
5243             EVO_LOG_ERROR,
5244             "Failed to initialize DisplayPort sub-system.");
5245         goto failed;
5246     }
5247 
5248     if (!InitApiHeadState(pDevEvo)) {
5249         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
5250                     "Failed to initialize the api heads.");
5251         goto failed;
5252     }
5253 
5254     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
5255         nvRmRegisterBacklight(pDispEvo);
5256     }
5257 
5258     // Allow GC6 if no heads are active.
5259     if (nvAllHeadsInactive(pDevEvo)) {
5260         if (!nvRmSetGc6Allowed(pDevEvo, TRUE)) {
5261             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
5262                         "No head is active, but failed to allow GC6");
5263         }
5264     }
5265 
5266     return TRUE;
5267 
5268 failed:
5269     nvFreeCoreChannelEvo(pDevEvo);
5270 
5271     return FALSE;
5272 }
5273 
5274 /*!
5275  * Clear the pConnectorEvo->or.primary and pConnectorEvo->or.secondaryMask
5276  * tracking.
5277  */
5278 static void ClearSORAssignmentsOneDisp(const NVDispEvoRec *pDispEvo)
5279 {
5280     NVConnectorEvoPtr pConnectorEvo;
5281 
5282     nvAssert(NV0073_CTRL_SYSTEM_GET_CAP(pDispEvo->pDevEvo->commonCapsBits,
5283                 NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED));
5284 
5285     FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
5286         if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
5287             continue;
5288         }
5289 
5290         pConnectorEvo->or.primary = NV_INVALID_OR;
5291         pConnectorEvo->or.secondaryMask = 0x0;
5292     }
5293 }
5294 
5295 /*!
5296  * Update pConnectorEvo->or.primary and pConnectorEvo->or.secondaryMask from
5297  * the list given to us by RM.
5298  */
5299 static void RefreshSORAssignments(const NVDispEvoRec *pDispEvo,
5300                                   const NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams)
5301 {
5302     NVConnectorEvoPtr pConnectorEvo;
5303 
5304     ClearSORAssignmentsOneDisp(pDispEvo);
5305 
5306     FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
5307         const NvU32 displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
5308         NvU32 sorIndex;
5309 
5310         if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
5311             continue;
5312         }
5313 
5314         for (sorIndex = 0;
5315              sorIndex < ARRAY_LEN(pParams->sorAssignList) &&
5316              sorIndex < ARRAY_LEN(pConnectorEvo->or.ownerHeadMask);
5317              sorIndex++) {
5318             if ((pParams->sorAssignListWithTag[sorIndex].displayMask &
5319                     displayId) == displayId) {
5320                 if ((pParams->sorAssignListWithTag[sorIndex].sorType ==
5321                         NV0073_CTRL_DFP_SOR_TYPE_SINGLE) ||
5322                         (pParams->sorAssignListWithTag[sorIndex].sorType ==
5323                          NV0073_CTRL_DFP_SOR_TYPE_2H1OR_PRIMARY)) {
5324                     pConnectorEvo->or.primary = sorIndex;
5325                 } else {
5326                     nvAssert(pParams->sorAssignListWithTag[sorIndex].sorType ==
5327                                 NV0073_CTRL_DFP_SOR_TYPE_2H1OR_SECONDARY);
5328                     pConnectorEvo->or.secondaryMask |= NVBIT(sorIndex);
5329                 }
5330             }
5331         }
5332 
5333         nvAssert((pConnectorEvo->or.secondaryMask == 0) ||
5334                     (pConnectorEvo->or.primary != NV_INVALID_OR));
5335     }
5336 }
5337 
5338 /*
5339  * Ask RM to assign an SOR to given displayId.
5340  *
5341  * In 2Heads1OR MST case, this function gets called with the dynamic displayId.
5342  *
5343  * Note that this assignment may be temporary.  This function will always call
5344  * RM, and unless the connector is currently in use (i.e., being driven by a
5345  * head), a previously-assigned SOR may be reused.
5346  *
5347  * The RM will either:
5348  * a) return an SOR that's already assigned/attached
5349  *    to root port of this displayId, or
5350  * b) pick a new "unused" SOR, assign and attach it to this connector, and
5351  *    return that -- where "unused" means both not being actively driven by a
5352  *    head and not in the "exclude mask" argument.
5353  *    The "exclude mask" is useful if we need to assign multiple SORs up front
5354  *    before activating heads to drive them.
5355  *
5356  * For example, if head 0 is currently actively scanning out to SOR 0 and we
5357  * are doing a modeset to activate currently-inactive heads 1 and 2:
 * 1. nvkms calls nvAssignSOREvo() for head 1's connector with
 *    sorExcludeMask = 0; RM returns any SOR other than 0 (say 3).
 * 2. nvkms calls nvAssignSOREvo() for head 2's connector with
 *    sorExcludeMask = (1 << 3); RM returns any SOR other than 0 and 3 (say 1).
5362  * 3. At this point nvkms can push methods and UPDATE to enable heads 1 and 2
5363  *    to drive SORs 3 and 1.
5364  * In the example above, the sorExcludeMask == (1 << 3) at step 2 is important
5365  * to ensure that RM doesn't reuse the SOR 3 from step 1.  It won't reuse SOR 0
5366  * because it's in use by head 0.
5367  *
5368  * If an SOR is only needed temporarily (e.g., to do link training to "assess"
5369  * a DisplayPort or HDMI FRL link), then sorExcludeMask should be 0 -- any SOR
5370  * that's not actively used by a head can be used, and as soon as nvkms
5371  * finishes the "assessment", the SOR is again eligible for reuse.
5372  *
5373  * Because of the potential for SOR reuse, nvAssignSOREvo() will always call
5374  * RefreshSORAssignments() to update pConnectorEvo->or.primary and
5375  * pConnectorEvo->or.secondaryMask on *every* connector after calling
5376  * NV0073_CTRL_CMD_DFP_ASSIGN_SOR for *any* connector.
5377  */
5378 NvBool nvAssignSOREvo(const NVDispEvoRec *pDispEvo, const NvU32 displayId,
5379                       const NvBool b2Heads1Or, const NvU32 sorExcludeMask)
5380 {
5381     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
5382     NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = { 0 };
5383     NvU32 ret;
5384 
5385     if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
5386                 NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
5387         return TRUE;
5388     }
5389 
5390     params.subDeviceInstance = pDispEvo->displayOwner;
5391     params.displayId = displayId;
5392     params.bIs2Head1Or = b2Heads1Or;
5393     params.sorExcludeMask = sorExcludeMask;
5394 
5395     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5396                          pDevEvo->displayCommonHandle,
5397                          NV0073_CTRL_CMD_DFP_ASSIGN_SOR,
5398                          &params,
5399                          sizeof(params));
5400 
5401     if (ret != NVOS_STATUS_SUCCESS) {
5402         return FALSE;
5403     }
5404 
5405     RefreshSORAssignments(pDispEvo, &params);
5406 
5407     return TRUE;
5408 }
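
/*
 * Illustrative sketch of the two-head example described above (hypothetical
 * displayId variables; b2Heads1Or assumed FALSE for simplicity):
 *
 *     // Head 0 is already actively driving SOR 0.
 *     nvAssignSOREvo(pDispEvo, displayIdForHead1, FALSE, 0);
 *     // Suppose RM picked SOR 3; exclude it for the next assignment:
 *     nvAssignSOREvo(pDispEvo, displayIdForHead2, FALSE, NVBIT(3));
 *     // Now push methods and UPDATE to drive heads 1 and 2 on the
 *     // returned SORs.
 */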
5409 
5410 static void CacheSorAssignList(const NVDispEvoRec *pDispEvo,
5411     const NVConnectorEvoRec *sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS])
5412 {
5413     const NVConnectorEvoRec *pConnectorEvo;
5414 
5415     FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
5416         if ((pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) ||
5417                 (pConnectorEvo->or.primary == NV_INVALID_OR)) {
5418             continue;
5419         }
5420 
        /*
         * RM populates the same SOR index into more than one connector if
         * they are DCC partners; this check makes sure the SOR assignment
         * happens only for a single connector. The SOR assignment call
         * before modeset/dp-link-training makes sure the assignment
         * happens for the correct connector.
         */
5428         if (sorAssignList[pConnectorEvo->or.primary] != NULL) {
5429             continue;
5430         }
5431         sorAssignList[pConnectorEvo->or.primary] =
5432             pConnectorEvo;
5433     }
5434 }
5435 
5436 static void RestoreSorAssignList(NVDispEvoRec *pDispEvo,
5437     const NVConnectorEvoRec *sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS])
5438 {
5439     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
5440     NvU32 sorIndex;
5441 
5442     for (sorIndex = 0;
5443          sorIndex < NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS; sorIndex++) {
5444 
5445         if (sorAssignList[sorIndex] == NULL) {
5446             continue;
5447         }
5448 
5449         NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = {
5450             .subDeviceInstance = pDispEvo->displayOwner,
5451             .displayId = nvDpyIdToNvU32(sorAssignList[sorIndex]->displayId),
5452             .sorExcludeMask = ~NVBIT(sorIndex),
5453         };
5454         NvU32 ret;
5455 
5456         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
5457                              pDevEvo->displayCommonHandle,
5458                              NV0073_CTRL_CMD_DFP_ASSIGN_SOR,
5459                              &params,
5460                              sizeof(params));
5461 
5462         if (ret != NVOS_STATUS_SUCCESS) {
5463             nvEvoLogDispDebug(pDispEvo,
5464                               EVO_LOG_ERROR,
5465                               "Failed to restore SOR-%u -> %s assignment.",
5466                               sorIndex, sorAssignList[sorIndex]->name);
5467         } else {
5468             RefreshSORAssignments(pDispEvo, &params);
5469         }
5470     }
5471 }
5472 
5473 NvBool nvResumeDevEvo(NVDevEvoRec *pDevEvo)
5474 {
5475     struct {
5476         const NVConnectorEvoRec *
5477             sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
5478     } disp[NVKMS_MAX_SUBDEVICES] = { };
5479     NVDispEvoRec *pDispEvo;
5480     NvU32 dispIndex;
5481 
5482     if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
5483                 NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
5484         FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
5485             CacheSorAssignList(pDispEvo, disp[dispIndex].sorAssignList);
5486         }
5487     }
5488 
5489     if (!nvAllocCoreChannelEvo(pDevEvo)) {
5490         return FALSE;
5491     }
5492 
    /*
     * During the hibernate-resume cycle, the vbios or GOP driver programs
     * the display engine to light up the boot display. In the
     * hibernate-resume path, doing the NV0073_CTRL_CMD_DFP_ASSIGN_SOR
     * rm-control call before the core channel allocation causes a display
     * channel hang, because at that stage RM is not aware of the boot
     * display activated by the vbios, and it ends up unrouting active SOR
     * assignments. Therefore, restore the SOR assignments only after the
     * core channel allocation.
     */
5503 
5504     if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
5505                 NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
5506 
5507         /*
5508          * Shutdown all heads before restoring the SOR assignments because in
5509          * case of hibernate-resume the SOR, for which NVKMS is trying to
5510          * restore the assignment, might be in use by the boot display setup
5511          * by vbios/gop driver.
5512          */
5513         nvShutDownApiHeads(pDevEvo, pDevEvo->pNvKmsOpenDev,
5514                            NULL /* pTestFunc, shut down all heads */,
5515                            NULL /* pData */,
5516                            TRUE /* doRasterLock */);
5517 
5518         FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
5519             RestoreSorAssignList(pDispEvo, disp[dispIndex].sorAssignList);
5520         }
5521     }
5522 
5523     return TRUE;
5524 }
5525 
5526 void nvSuspendDevEvo(NVDevEvoRec *pDevEvo)
5527 {
5528     nvFreeCoreChannelEvo(pDevEvo);
5529 }
5530 
5531 /*!
5532  * Free the EVO core channel.
5533  *
5534  * This function does nothing if the core channel was already free.
5535  */
5536 void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo)
5537 {
5538     NVDispEvoPtr pDispEvo;
5539     NvU32 dispIndex;
5540     NvU32 head;
5541 
5542     ClearApiHeadState(pDevEvo);
5543 
5544     nvEvoCancelPostFlipIMPTimer(pDevEvo);
5545     nvCancelVrrFrameReleaseTimers(pDevEvo);
5546 
5547     nvCancelLowerDispBandwidthTimer(pDevEvo);
5548 
5549     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
5550         nvRmUnregisterBacklight(pDispEvo);
5551 
5552         nvAssert(pDevEvo->skipConsoleRestore ||
5553                  nvDpyIdListIsEmpty(nvActiveDpysOnDispEvo(pDispEvo)));
5554     }
5555 
5556     // Pause the DisplayPort library's control of the device.
5557     nvRmPauseDP(pDevEvo);
5558 
5559     nvFreeLutSurfacesEvo(pDevEvo);
5560 
5561     // Unmap and free the cursor controls for all heads
5562     nvFreeCursorEvo(pDevEvo);
5563 
5564     // TODO: Unregister all surfaces registered with this device.
5565 
5566     for (head = 0; head < pDevEvo->numHeads; head++) {
5567         NvU32 layer;
5568 
5569         for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
5570             nvRmEvoFreePreSyncpt(pDevEvo, pDevEvo->head[head].layer[layer]);
5571             pDevEvo->head[head].layer[layer] = NULL;
5572         }
5573         pDevEvo->head[head].numLayers = 0;
5574     }
5575 
5576     nvRMFreeWindowChannels(pDevEvo);
5577     nvRMFreeOverlayChannels(pDevEvo);
5578     nvRMFreeBaseChannels(pDevEvo);
5579 
5580     nvRMFreeEvoCoreChannel(pDevEvo);
5581 
5582     if (pDevEvo->displayHandle != 0) {
5583         if (nvRmApiFree(nvEvoGlobal.clientHandle,
5584                         pDevEvo->deviceHandle,
5585                         pDevEvo->displayHandle) != NVOS_STATUS_SUCCESS) {
5586             nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to tear down Disp");
5587         }
5588         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->displayHandle);
5589         pDevEvo->displayHandle = 0;
5590 
5591         if (!pDevEvo->skipConsoleRestore) {
5592             nvRmVTSwitch(pDevEvo,
5593                          NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_RESTORE_VT_STATE);
5594         } else {
5595             nvRmVTSwitch(pDevEvo,
5596                          NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_CONSOLE_RESTORED);
5597         }
5598     }
5599 
5600     // No longer possible that NVKMS is driving any displays, allow GC6.
5601     nvRmSetGc6Allowed(pDevEvo, TRUE);
5602 
5603     nvFree(pDevEvo->gpus);
5604     pDevEvo->gpus = NULL;
5605 }
5606 
5607 
5608 #define ASSIGN_PIN(_pPin, _pin)                         \
5609     do {                                                \
5610         ct_assert(NV_IS_UNSIGNED((_pin)));              \
5611         if ((_pPin)) {                                  \
5612             if ((_pin) >= NV_EVO_NUM_LOCK_PIN_CAPS) {   \
5613                 return FALSE;                           \
5614             }                                           \
5615             *(_pPin) = (_pin);                          \
5616         }                                               \
5617     } while (0)
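
/*
 * Example: ASSIGN_PIN(pFrameLockPin, params.frameLockPin) stores the pin
 * index through pFrameLockPin only when the caller passed a non-NULL
 * pointer; in that case, it also makes the enclosing function return FALSE
 * if the index would overflow the NV_EVO_NUM_LOCK_PIN_CAPS-sized caps
 * array.
 */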
5618 
5619 static NvBool QueryFrameLockHeaderPins(const NVDispEvoRec *pDispEvo,
5620                                        NVEvoSubDevPtr pEvoSubDev,
5621                                        NvU32 *pFrameLockPin,
5622                                        NvU32 *pRasterLockPin,
5623                                        NvU32 *pFlipLockPin)
5624 {
5625     NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS params = { };
5626     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5627 
5628     params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance;
5629 
5630     if (nvRmApiControl(nvEvoGlobal.clientHandle,
5631                        pDevEvo->displayHandle,
5632                        NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS,
5633                        &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
5634         nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
5635                           "Failed to query framelock header pins");
5636         return FALSE;
5637     }
5638 
5639     ASSIGN_PIN(pFrameLockPin, params.frameLockPin);
5640     ASSIGN_PIN(pRasterLockPin, params.rasterLockPin);
5641     ASSIGN_PIN(pFlipLockPin, params.flipLockPin);
5642 
5643     return TRUE;
5644 }
5645 
// Get the lock pin dedicated to a given signal and return the corresponding
// NVEvoLockPin value.
5647 NVEvoLockPin nvEvoGetPinForSignal(const NVDispEvoRec *pDispEvo,
5648                                   NVEvoSubDevPtr pEvoSubDev,
5649                                   NVEvoLockSignal signal)
5650 {
5651     NVEvoLockPinCaps *caps = pEvoSubDev->capabilities.pin;
5652     NvU32 pin;
5653 
5654     switch (signal) {
5655 
5656         case NV_EVO_LOCK_SIGNAL_RASTER_LOCK:
5657             if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev,
5658                                           NULL, &pin, NULL)) {
5659                 break;
5660             }
5661 
5662             if (!caps[pin].scanLock) break;
5663 
5664             return NV_EVO_LOCK_PIN_0 + pin;
5665 
5666         case NV_EVO_LOCK_SIGNAL_FRAME_LOCK:
5667             if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev,
5668                                           &pin, NULL, NULL)) {
5669                 break;
5670             }
5671 
5672             if (!caps[pin].scanLock) break;
5673 
5674             return NV_EVO_LOCK_PIN_0 + pin;
5675 
5676         case NV_EVO_LOCK_SIGNAL_FLIP_LOCK:
5677             if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev,
5678                                           NULL, NULL, &pin) ||
5679                 !caps[pin].flipLock) {
5680                 // If the query from RM fails (or returns a bogus pin), fall
5681                 // back to an alternate mechanism.  This may happen on boards
5682                 // with no framelock header.  Look in the capabilities for the
5683                 // pin that has the requested capability.
5684                 for (pin = 0; pin < NV_EVO_NUM_LOCK_PIN_CAPS; pin++) {
5685                     if (caps[pin].flipLock)
5686                         break;
5687                 }
5688 
5689                 if (pin == NV_EVO_NUM_LOCK_PIN_CAPS) {
5690                     // Not found
5691                     break;
5692                 }
5693             }
5694 
5695             if (!caps[pin].flipLock) {
5696                 break;
5697             }
5698 
5699             return NV_EVO_LOCK_PIN_0 + pin;
5700 
5701         case NV_EVO_LOCK_SIGNAL_STEREO:
5702             // Look in the capabilities for the pin that has the requested capability
5703             for (pin = 0; pin < NV_EVO_NUM_LOCK_PIN_CAPS; pin++) {
5704                 if (caps[pin].stereo)
5705                     break;
5706             }
5707 
5708             if (pin == NV_EVO_NUM_LOCK_PIN_CAPS) break;
5709 
5710             return NV_EVO_LOCK_PIN_0 + pin;
5711 
5712         default:
5713             nvAssert(!"Unknown signal type");
5714             break;
5715     }
5716 
5717     // Pin not found
5718     return NV_EVO_LOCK_PIN_ERROR;
5719 }
5720 
5721 void nvSetDVCEvo(NVDispEvoPtr pDispEvo,
5722                  const NvU32 head,
5723                  NvS32 dvc,
5724                  NVEvoUpdateState *updateState)
5725 {
5726     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5727     NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
5728 
5729     nvAssert(dvc >= NV_EVO_DVC_MIN);
5730     nvAssert(dvc <= NV_EVO_DVC_MAX);
5731 
    // The HW range is from -2048 to +2047.
    // Negative values are not used; they distort the colors.
    // Values from 0 to 1023 grey the colors out.
    // We use 0 to 2047, with 1024 as the default.
5736     dvc += 1024;
5737     nvAssert(dvc >= 0);
5738     pHeadState->procAmp.satCos = dvc;
5739 
5740     // In SW YUV420 mode, HW is programmed with default DVC. The DVC is handled
5741     // in a headSurface composite shader.
5742     if (pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) {
5743         pHeadState->procAmp.satCos = 1024;
5744     }
5745 
5746     nvPushEvoSubDevMaskDisp(pDispEvo);
5747     pDevEvo->hal->SetProcAmp(pDispEvo, head, updateState);
5748     nvPopEvoSubDevMask(pDevEvo);
5749 }
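
/*
 * Worked example of the offset above (assuming NV_EVO_DVC_MIN == -1024 and
 * NV_EVO_DVC_MAX == 1023, consistent with the 0..2047 hardware range):
 *
 *     dvc == -1024  ->  satCos ==    0  (fully greyed out)
 *     dvc ==     0  ->  satCos == 1024  (default)
 *     dvc ==  1023  ->  satCos == 2047  (maximum saturation)
 */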
5750 
5751 void nvSetImageSharpeningEvo(NVDispEvoRec *pDispEvo, const NvU32 head,
5752                              NvU32 value, NVEvoUpdateState *updateState)
5753 {
5754     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5755 
    /*
     * EVO values range from -128 to 127, with a default of 0.
     * Negative values sharpen.
     * Control panel values range from 0 (less sharp) to 255 (more sharp).
     */
5761     value = 127 - value;
5762 
5763     nvPushEvoSubDevMaskDisp(pDispEvo);
5764     pDevEvo->hal->SetOutputScaler(pDispEvo, head, value, updateState);
5765     nvPopEvoSubDevMask(pDevEvo);
5766 }
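
/*
 * Worked example of the mapping above:
 *
 *     control panel value   0  ->  EVO value  127  (least sharp)
 *     control panel value 127  ->  EVO value    0  (hardware default)
 *     control panel value 255  ->  EVO value -128  (sharpest)
 */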
5767 
5768 static void LayerSetPositionOneApiHead(NVDispEvoRec *pDispEvo,
5769                                        const NvU32 apiHead,
5770                                        const NvU32 layer,
5771                                        const NvS16 x,
5772                                        const NvS16 y,
5773                                        NVEvoUpdateState *pUpdateState)
5774 {
5775     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
5776     const NVDispApiHeadStateEvoRec *pApiHeadState =
5777         &pDispEvo->apiHeadState[apiHead];
5778     const NvU32 sd = pDispEvo->displayOwner;
5779     NvU32 head;
5780 
5781     nvPushEvoSubDevMaskDisp(pDispEvo);
5782     FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
5783         NVEvoSubDevHeadStateRec *pSdHeadState =
5784             &pDevEvo->gpus[sd].headState[head];
5785 
5786         if ((pSdHeadState->layer[layer].outputPosition.x != x) ||
5787             (pSdHeadState->layer[layer].outputPosition.y != y)) {
5788             NVEvoChannelPtr pChannel =
5789                 pDevEvo->head[head].layer[layer];
5790 
5791             pSdHeadState->layer[layer].outputPosition.x = x;
5792             pSdHeadState->layer[layer].outputPosition.y = y;
5793 
5794             pDevEvo->hal->SetImmPointOut(pDevEvo, pChannel, sd, pUpdateState,
5795                                          x, y);
5796         }
5797     }
5798     nvPopEvoSubDevMask(pDevEvo);
5799 }
5800 
5801 NvBool nvLayerSetPositionEvo(
5802     NVDevEvoPtr pDevEvo,
5803     const struct NvKmsSetLayerPositionRequest *pRequest)
5804 {
5805     NVDispEvoPtr pDispEvo;
5806     NvU32 sd;
5807 
5808     /*
5809      * We need this call to not modify any state if it will fail, so we
5810      * first verify that all relevant layers support output positioning,
5811      * then go back through the layers to actually modify the relevant
5812      * state.
5813      */
5814     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
5815         NvU32 apiHead;
5816 
5817         if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) {
5818             continue;
5819         }
5820 
5821         for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) {
5822             NvU32 layer;
5823 
5824             if ((pRequest->disp[sd].requestedHeadsBitMask &
5825                  NVBIT(apiHead)) == 0) {
5826                 continue;
5827             }
5828 
5829             if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
5830                 continue;
5831             }
5832 
5833             for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
5834                 const NvS16 x = pRequest->disp[sd].head[apiHead].layerPosition[layer].x;
5835                 const NvS16 y = pRequest->disp[sd].head[apiHead].layerPosition[layer].y;
5836 
5837                 if ((pRequest->disp[sd].head[apiHead].requestedLayerBitMask &
5838                         NVBIT(layer)) == 0x0) {
5839                     continue;
5840                 }
5841 
5842                 /*
5843                  * Error out if a requested layer does not support position
5844                  * updates and the requested position is not (0, 0).
5845                  */
5846                 if (!pDevEvo->caps.layerCaps[layer].supportsWindowMode &&
5847                     (x != 0 || y != 0)) {
5848                     nvEvoLogDebug(EVO_LOG_ERROR, "Layer %d does not support "
5849                                                  "position updates.", layer);
5850                     return FALSE;
5851                 }
5852             }
5853         }
5854     }
5855 
5856     /* Checks in above block passed, so make the requested changes. */
5857     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
5858         NvU32 apiHead;
5859 
5860         if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) {
5861             continue;
5862         }
5863 
5864         for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) {
5865             NVEvoUpdateState updateState = { };
5866             NvU32 layer;
5867 
5868             if ((pRequest->disp[sd].requestedHeadsBitMask &
5869                  NVBIT(apiHead)) == 0) {
5870                 continue;
5871             }
5872 
5873             if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
5874                 continue;
5875             }
5876 
5877             for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
5878                 const NvS16 x = pRequest->disp[sd].head[apiHead].layerPosition[layer].x;
5879                 const NvS16 y = pRequest->disp[sd].head[apiHead].layerPosition[layer].y;
5880 
5881                 if ((pRequest->disp[sd].head[apiHead].requestedLayerBitMask &
5882                         NVBIT(layer)) == 0x0) {
5883                     continue;
5884                 }
5885 
5886                 LayerSetPositionOneApiHead(pDispEvo, apiHead, layer, x, y,
5887                                            &updateState);
5888             }
5889 
5890             pDevEvo->hal->Update(pDevEvo, &updateState, TRUE /* releaseElv */);
5891         }
5892     }
5893 
5894     return TRUE;
5895 }
5896 
/*
 * nvConstructHwModeTimingsImpCheckEvo() - perform an IMP check on the
 * given raster timings and viewport during the
 * nvConstructHwModeTimingsEvo path.  If IMP fails, we try multiple
 * times, each time scaling back the usage bounds until we find a
 * configuration IMP will accept, or until we can't scale back any
 * further, in which case validation fails.
 */
5905 
5906 NvBool nvConstructHwModeTimingsImpCheckEvo(
5907     const NVConnectorEvoRec                *pConnectorEvo,
5908     const NVHwModeTimingsEvo               *pTimings,
5909     const NvBool                            enableDsc,
5910     const NvBool                            b2Heads1Or,
5911     const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
5912     const enum NvKmsDpyAttributeColorBpcValue colorBpc,
5913     const struct NvKmsModeValidationParams *pParams,
5914     NVHwModeTimingsEvo                      timings[NVKMS_MAX_HEADS_PER_DISP],
5915     NvU32                                  *pNumHeads,
5916     NVEvoInfoStringPtr                      pInfoString)
5917 {
5918     NvU32 head;
5919     NvU32 activeRmId;
5920     const NvU32 numHeads = b2Heads1Or ? 2 : 1;
5921     NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP];
5922     NvBool requireBootClocks = !!(pParams->overrides &
5923                                   NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS);
5924     NvU32 ret;
5925 
5926     /* bypass this checking if the user disabled IMP */
5927 
5928     if ((pParams->overrides &
5929          NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK) != 0) {
5930         return TRUE;
5931     }
5932 
5933     activeRmId = nvRmAllocDisplayId(pConnectorEvo->pDispEvo,
5934                     nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId));
5935     if (activeRmId == 0x0) {
5936         return FALSE;
5937     }
5938 
5939     nvkms_memset(&timingsParams, 0, sizeof(timingsParams));
5940 
5941     for (head = 0; head < numHeads; head++) {
5942         timingsParams[head].pConnectorEvo = pConnectorEvo;
5943         timingsParams[head].activeRmId = activeRmId;
5944         timingsParams[head].pixelDepth =
5945             nvEvoColorSpaceBpcToPixelDepth(colorSpace, colorBpc);
5946         if (!nvEvoGetSingleTileHwModeTimings(pTimings, numHeads,
5947                                              &timings[head])) {
5948             ret = FALSE;
5949             goto done;
5950         }
5951         timingsParams[head].pTimings = &timings[head];
5952         timingsParams[head].enableDsc = enableDsc;
5953         timingsParams[head].b2Heads1Or = b2Heads1Or;
5954         timingsParams[head].pUsage = &timings[head].viewPort.guaranteedUsage;
5955     }
5956 
5957     ret = nvValidateImpOneDispDowngrade(pConnectorEvo->pDispEvo, timingsParams,
5958                                         requireBootClocks,
5959                                         NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE,
5960                                         /* downgradePossibleHeadsBitMask */
5961                                         (NVBIT(NVKMS_MAX_HEADS_PER_DISP) - 1UL));
5962     if (ret) {
5963         *pNumHeads = numHeads;
5964     } else {
5965         nvEvoLogInfoString(pInfoString,
5966                            "ViewPort %dx%d exceeds hardware capabilities.",
5967                            pTimings->viewPort.out.width,
5968                            pTimings->viewPort.out.height);
5969     }
5970 
5971 done:
5972     nvRmFreeDisplayId(pConnectorEvo->pDispEvo, activeRmId);
5973 
5974     return ret;
5975 }
5976 
5977 /*
5978  * Convert from NvModeTimings values to NVHwModeTimingsEvo.
5979  */
5980 
5981 static void
5982 ConstructHwModeTimingsFromNvModeTimings(const NvModeTimings *pModeTimings,
5983                                         NVHwModeTimingsEvoPtr pTimings)
5984 {
5985     NvU32 hBlankStart;
5986     NvU32 vBlankStart;
5987     NvU32 hBlankEnd;
5988     NvU32 vBlankEnd;
5989     NvU32 hSyncWidth;
5990     NvU32 vSyncWidth;
5991     NvU32 vTotalAdjustment = 0;
5992 
5993     NvModeTimings modeTimings;
5994 
5995     modeTimings = *pModeTimings;
5996 
5997     if (modeTimings.doubleScan) {
5998         modeTimings.vVisible *= 2;
5999         modeTimings.vSyncStart *= 2;
6000         modeTimings.vSyncEnd *= 2;
6001         modeTimings.vTotal *= 2;
6002     }
6003 
6004     /*
6005      * The real pixel clock and width values for modes using YUV 420 emulation
6006      * are half of the incoming values parsed from the EDID. This conversion is
6007      * performed here, so NvModeTimings will have the user-visible (full width)
6008      * values, and NVHwModeTimingsEvo will have the real (half width) values.
6009      *
6010      * HW YUV 420 requires setting the full width mode timings, which are then
6011      * converted in HW.  RM will recognize YUV420 mode is in use and halve
6012      * these values for IMP.
6013      *
6014      * In either case, only modes with even width are allowed in YUV 420 mode.
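     *
     * Worked example (illustrative numbers): a 3840x2160@60 SW YUV420
     * mode parsed from the EDID with a 594 MHz pixel clock is programmed
     * into hardware as 1920 pixels wide at 297 MHz; the vertical values
     * are unchanged.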
6015      */
6016     if (modeTimings.yuv420Mode != NV_YUV420_MODE_NONE) {
6017         nvAssert(((modeTimings.pixelClockHz & 1) == 0) &&
6018                  ((modeTimings.hVisible & 1) == 0) &&
6019                  ((modeTimings.hSyncStart & 1) == 0) &&
6020                  ((modeTimings.hSyncEnd & 1) == 0) &&
6021                  ((modeTimings.hTotal & 1) == 0) &&
6022                  ((modeTimings.vVisible & 1) == 0));
6023         if (modeTimings.yuv420Mode == NV_YUV420_MODE_SW) {
6024             modeTimings.pixelClockHz /= 2;
6025             modeTimings.hVisible /= 2;
6026             modeTimings.hSyncStart /= 2;
6027             modeTimings.hSyncEnd /= 2;
6028             modeTimings.hTotal /= 2;
6029         }
6030     }
6031 
6032     pTimings->hSyncPol = modeTimings.hSyncNeg;
6033     pTimings->vSyncPol = modeTimings.vSyncNeg;
6034     pTimings->interlaced = modeTimings.interlaced;
6035     pTimings->doubleScan = modeTimings.doubleScan;
6036 
    /* pTimings->pixelClock is in kHz, but modeTimings.pixelClockHz is in Hz */
6038 
6039     pTimings->pixelClock = HzToKHz(modeTimings.pixelClockHz);
6040 
    /*
     * Assign total width and height; note that when the raster timings
     * are interlaced, we need to make sure SetRasterSize.Height is odd,
     * per EVO's mfs file.
     */
6046 
6047     if (pTimings->interlaced) vTotalAdjustment = 1;
6048 
6049     pTimings->rasterSize.x = modeTimings.hTotal;
6050     pTimings->rasterSize.y = modeTimings.vTotal | vTotalAdjustment;
6051 
    /*
     * A bit of EVO quirkiness: the hw increases the blank/sync values
     * by one, so we need to offset by subtracting one.
     *
     * In other words, the h/w inserts one extra sync line/pixel, thus
     * incrementing the raster params by one.  The number of blank
     * lines/pixels we get is true to what we ask for.  Note the hw
     * does not increase the TotalImageSize by one, so we don't need to
     * adjust SetRasterSize.
     *
     * This is slightly unintuitive: per EVO's specs, blankEnd comes
     * before blankStart, defined as follows:
     *
     *   BlankStart: the last pixel/line at the end of the h/v active area.
     *   BlankEnd:   the last pixel/line at the end of the h/v blanking.
     *
     * Also note that in the computations below, we divide by two for
     * interlaced modes *before* subtracting; see bug 263622.
     */
6070 
6071     hBlankStart = modeTimings.hVisible +
6072         (modeTimings.hTotal - modeTimings.hSyncStart);
6073 
6074     vBlankStart = modeTimings.vVisible +
6075         (modeTimings.vTotal - modeTimings.vSyncStart);
6076 
6077     hBlankEnd = (modeTimings.hTotal - modeTimings.hSyncStart);
6078     vBlankEnd = (modeTimings.vTotal - modeTimings.vSyncStart);
6079 
6080     hSyncWidth = (modeTimings.hSyncEnd - modeTimings.hSyncStart);
6081     vSyncWidth = (modeTimings.vSyncEnd - modeTimings.vSyncStart);
6082 
6083     if (pTimings->interlaced) {
6084         vBlankStart /= 2;
6085         vBlankEnd /= 2;
6086         vSyncWidth /= 2;
6087     }
6088 
6089     pTimings->rasterSyncEnd.x           = hSyncWidth - 1;
6090     pTimings->rasterSyncEnd.y           = vSyncWidth - 1;
6091     pTimings->rasterBlankStart.x        = hBlankStart - 1;
6092     pTimings->rasterBlankStart.y        = vBlankStart - 1;
6093     pTimings->rasterBlankEnd.x          = hBlankEnd - 1;
6094     pTimings->rasterBlankEnd.y          = vBlankEnd - 1;
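
    /*
     * Worked example (illustrative, standard CEA-861 1080p60 timings:
     * hVisible 1920, hSyncStart 2008, hSyncEnd 2052, hTotal 2200):
     * hBlankStart = 1920 + (2200 - 2008) = 2112, hBlankEnd = 192, and
     * hSyncWidth = 2052 - 2008 = 44, so after the subtract-one
     * adjustment rasterBlankStart.x = 2111, rasterBlankEnd.x = 191, and
     * rasterSyncEnd.x = 43.
     */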
6095 
6096     /* assign rasterVertBlank2 */
6097 
6098     if (pTimings->interlaced) {
6099         const NvU32 firstFieldHeight = modeTimings.vTotal / 2;
6100 
6101         pTimings->rasterVertBlank2Start = firstFieldHeight + vBlankStart - 1;
6102         pTimings->rasterVertBlank2End = firstFieldHeight + vBlankEnd - 1;
6103     } else {
6104         pTimings->rasterVertBlank2Start = 0;
6105         pTimings->rasterVertBlank2End = 0;
6106     }
6107 
6108     pTimings->hdmi3D = modeTimings.hdmi3D;
6109     pTimings->yuv420Mode = modeTimings.yuv420Mode;
6110 }
6111 
6112 
6113 
6114 /*
6115  * Adjust the HwModeTimings as necessary to meet dual link dvi
6116  * requirements; returns TRUE if the timings were successfully
6117  * modified; returns FALSE if the timings cannot be made valid for
6118  * dual link dvi.
6119  */
6120 static NvBool ApplyDualLinkRequirements(const NVDpyEvoRec *pDpyEvo,
6121                                         const struct
6122                                         NvKmsModeValidationParams *pParams,
6123                                         NVHwModeTimingsEvoPtr pTimings,
6124                                         NVEvoInfoStringPtr pInfoString)
6125 {
6126     int adjust;
6127 
6128     nvAssert(pDpyEvo->pConnectorEvo->legacyType ==
6129              NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP);
6130 
6131     if (pTimings->protocol != NVKMS_PROTOCOL_SOR_DUAL_TMDS) {
6132         return TRUE;
6133     }
6134 
6135     if ((pParams->overrides &
6136          NVKMS_MODE_VALIDATION_NO_DUAL_LINK_DVI_CHECK) != 0) {
6137         return TRUE;
6138     }
6139 
6140     /* extract the fields we will need below */
6141 
    /*
     * hTotal must be even for dual link DVI; we won't try to patch the
     * hTotal size; just give up if it isn't even.
     */
6146 
6147     if ((pTimings->rasterSize.x % 2) != 0) {
6148         nvEvoLogInfoString(pInfoString,
6149             "Horizontal Total (%d) must be even for dual link DVI mode timings.",
6150             pTimings->rasterSize.x);
6151         return FALSE;
6152     }
6153 
6154     /*
6155      * RASTER_BLANK_END_X must be odd, so that the active region
6156      * starts on the following (even) pixel; if it is odd, we are
6157      * already done
6158      */
6159 
6160     if ((pTimings->rasterBlankEnd.x % 2) == 1) return TRUE;
6161 
6162     /*
6163      * RASTER_BLANK_END_X is even, so we need to adjust both
6164      * RASTER_BLANK_END_X and RASTER_BLANK_START_X by one; we'll first
6165      * try to subtract one pixel from both
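     *
     * Worked example (illustrative numbers): if RASTER_BLANK_END_X is
     * 192, subtracting one yields 191 (odd), so the active region
     * begins on the even pixel 192, as dual link TMDS requires.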
6166      */
6167 
6168     adjust = -1;
6169 
6170     /*
6171      * if RASTER_BLANK_END_X cannot be made smaller (would collide
6172      * with hSyncEnd), see if it would be safe to instead add one to
6173      * RASTER_BLANK_END_X and RASTER_BLANK_START_X
6174      */
6175 
6176     if (pTimings->rasterBlankEnd.x <= pTimings->rasterSyncEnd.x + 1) {
6177         if (pTimings->rasterBlankStart.x + 1 >= pTimings->rasterSize.x) {
6178             nvEvoLogInfoString(pInfoString,
6179                 "Cannot adjust mode timings for dual link DVI requirements.");
6180             return FALSE;
6181         }
6182         adjust = 1;
6183     }
6184 
6185     pTimings->rasterBlankEnd.x += adjust;
6186     pTimings->rasterBlankStart.x += adjust;
6187 
6188     nvEvoLogInfoString(pInfoString,
6189         "Adjusted mode timings for dual link DVI requirements.");
6190 
6191     return TRUE;
6192 }
6193 
6194 void nvInitScalingUsageBounds(const NVDevEvoRec *pDevEvo,
6195                               struct NvKmsScalingUsageBounds *pScaling)
6196 {
6197     pScaling->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_1X;
6198     pScaling->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_1X;
6199     pScaling->vTaps = pDevEvo->hal->caps.minScalerTaps;
6200     pScaling->vUpscalingAllowed = FALSE;
6201 }
6202 
6203 /*
6204  * Check if the provided number of vertical taps is possible based on the
6205  * capabilities: the lineStore (the smaller of inWidth and outWidth) must
6206  * not exceed the maximum pixels for the desired taps; see bug 241014
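 *
 * For example (illustrative numbers): scaling a 3840-pixel-wide input
 * down to 1920 needs a 1920-pixel line store, so a given taps value is
 * only possible if taps[nTaps].maxPixelsVTaps >= 1920.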
6207  */
6208 static NvBool IsVTapsPossible(const NVEvoScalerCaps *pScalerCaps,
6209                               NvU32 inWidth, NvU32 outWidth,
6210                               NVEvoScalerTaps nTaps)
6211 {
6212     const NvU32 lineStore = NV_MIN(inWidth, outWidth);
6213     NvU32 maxPixels = pScalerCaps->taps[nTaps].maxPixelsVTaps;
6214 
6215     return lineStore <= maxPixels;
6216 }
6217 
/*!
 * Compute the scale factor and check it against the maximum.
 *
 * \param[in]    max     Max scale factor to check against (* 1024)
 * \param[in]    in      Input width or height
 * \param[in]    out     Output width or height
 * \param[out]   pFactor Output scale factor (* 1024)
 */
6226 static NvBool ComputeScalingFactor(NvU32 max,
6227                                    NvU16 in, NvU16 out,
6228                                    NvU16 *pFactor)
6229 {
6230     /* Use a 32-bit temporary to prevent overflow */
6231     NvU32 tmp;
6232 
6233     /* Add (out - 1) to round up */
6234     tmp = ((in * 1024) + (out - 1)) / out;
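
    /*
     * Worked example (illustrative): in = 1920, out = 1080 gives
     * tmp = (1966080 + 1079) / 1080 = 1821, i.e. roughly 1.778 * 1024,
     * rounded up from 1820.44.
     */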
6235 
6236     /* Check against scaling limits. */
6237     if (tmp > max) {
6238         return FALSE;
6239     }
6240 
6241     *pFactor = tmp;
6242     return TRUE;
6243 }
6244 
6245 /*!
6246  * Compute scaling factors based on in/out dimensions.
6247  * Used by IMP and when programming viewport and window parameters in HW.
6248  *
6249  * The 'maxScaleFactor' values are defined by nvdClass_01.mfs as:
6250  *      SizeIn/SizeOut * 1024
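 *
 * For example (illustrative): downscaling 3840 -> 1920 in one dimension
 * yields a max scale factor of 2048, i.e. 2.0 * 1024.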
6251  */
6252 NvBool nvComputeScalingUsageBounds(const NVEvoScalerCaps *pScalerCaps,
6253                                    const NvU32 inWidth, const NvU32 inHeight,
6254                                    const NvU32 outWidth, const NvU32 outHeight,
6255                                    NVEvoScalerTaps hTaps, NVEvoScalerTaps vTaps,
6256                                    struct NvKmsScalingUsageBounds *out)
6257 {
6258     const NVEvoScalerTapsCaps *pTapsCaps = NULL;
6259 
6260     out->vTaps = vTaps;
6261 
6262     /* Start with default values (1.0) */
6263     out->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_1X;
6264     out->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_1X;
6265 
6266     if (outHeight > inHeight) {
6267         out->vUpscalingAllowed = TRUE;
6268     } else if (outHeight < inHeight) {
6269         out->vUpscalingAllowed = FALSE;
6270 
6271         pTapsCaps = &pScalerCaps->taps[vTaps];
6272         if (!ComputeScalingFactor(pTapsCaps->maxVDownscaleFactor,
6273                                   inHeight, outHeight,
6274                                   &out->maxVDownscaleFactor)) {
6275             return FALSE;
6276         }
6277     }
6278 
6279     if (outWidth < inWidth) {
6280         pTapsCaps = &pScalerCaps->taps[hTaps];
6281         if (!ComputeScalingFactor(pTapsCaps->maxHDownscaleFactor,
6282                                   inWidth, outWidth,
6283                                   &out->maxHDownscaleFactor)) {
6284             return FALSE;
6285         }
6286     }
6287 
6288     return TRUE;
6289 }
6290 
6291 NvBool nvAssignScalerTaps(const NVDevEvoRec *pDevEvo,
6292                           const NVEvoScalerCaps *pScalerCaps,
6293                           const NvU32 inWidth, const NvU32 inHeight,
6294                           const NvU32 outWidth, const NvU32 outHeight,
6295                           NvBool doubleScan,
6296                           NVEvoScalerTaps *hTapsOut, NVEvoScalerTaps *vTapsOut)
6297 {
6298     NVEvoScalerTaps hTaps, vTaps;
6299     NvBool setHTaps = (outWidth != inWidth);
6300     NvBool setVTaps = (outHeight != inHeight);
6301 
6302     /*
6303      * Select the taps filtering; we select the highest taps allowed with our
6304      * scaling configuration.
6305      *
6306      * Note if requiresScalingTapsInBothDimensions is true and if we are
6307      * scaling in *either* dimension, then we need to program > 1 taps
6308      * in *both* dimensions.
6309      */
6310     if ((setHTaps || setVTaps) &&
6311         pDevEvo->hal->caps.requiresScalingTapsInBothDimensions) {
6312         setHTaps = TRUE;
6313         setVTaps = TRUE;
6314     }
6315 
6316     /*
6317      * Horizontal taps: if not scaling, then no filtering; otherwise, set the
6318      * maximum filtering, because htaps shouldn't have any constraints (unlike
6319      * vtaps... see below).
6320      */
6321     if (setHTaps) {
6322         /*
6323          * XXX dispClass_01.mfs says: "For text and desktop scaling, the 2 tap
6324          * bilinear frequently looks better than the 8 tap filter which is more
6325          * optimized for video type scaling." Once we determine how best to
6326          * expose configuration of taps, we should choose how to indicate that 8
6327          * or 5 taps is the maximum.
6328          *
6329          * For now, we'll start with 2 taps as the default, but may end up
6330          * picking a higher taps value if the required H downscaling factor
6331          * isn't possible with 2 taps.
6332          */
6333         NvBool hTapsFound = FALSE;
6334 
6335         for (hTaps = NV_EVO_SCALER_2TAPS;
6336              hTaps <= NV_EVO_SCALER_TAPS_MAX;
6337              hTaps++) {
6338             NvU16 hFactor;
6339 
6340             if (!ComputeScalingFactor(
6341                     pScalerCaps->taps[hTaps].maxHDownscaleFactor,
6342                     inWidth, outWidth,
6343                     &hFactor)) {
6344                 continue;
6345             }
6346 
6347             hTapsFound = TRUE;
6348             break;
6349         }
6350 
6351         if (!hTapsFound) {
6352             return FALSE;
6353         }
6354     } else {
6355         hTaps = pDevEvo->hal->caps.minScalerTaps;
6356     }
6357 
6358     /*
6359      * Vertical taps: if scaling, set the maximum valid filtering, otherwise, no
6360      * filtering.
6361      */
6362     if (setVTaps) {
6363         /*
6364          * Select the maximum vertical taps based on the capabilities.
6365          *
6366          * For doublescan modes, limit to 2 taps to reduce blurriness. We really
6367          * want plain old line doubling, but EVO doesn't support that.
6368          */
6369         if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_5TAPS) &&
6370             !doubleScan) {
6371             vTaps = NV_EVO_SCALER_5TAPS;
6372         } else if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_3TAPS) &&
6373                    !doubleScan) {
6374             vTaps = NV_EVO_SCALER_3TAPS;
6375         } else if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_2TAPS)) {
6376             vTaps = NV_EVO_SCALER_2TAPS;
6377         } else {
6378             return FALSE;
6379         }
6380     } else {
6381         vTaps = pDevEvo->hal->caps.minScalerTaps;
6382     }
6383 
6384     *hTapsOut = hTaps;
6385     *vTapsOut = vTaps;
6386 
6387     return TRUE;
6388 }
6389 
6390 /*
6391  * Check that ViewPortIn does not exceed hardware limits and compute vTaps and
6392  * hTaps based on configured ViewPortIn/Out scaling if possible given scaler
6393  * capabilities.
6394  */
6395 NvBool nvValidateHwModeTimingsViewPort(const NVDevEvoRec *pDevEvo,
6396                                        const NVEvoScalerCaps *pScalerCaps,
6397                                        NVHwModeTimingsEvoPtr pTimings,
6398                                        NVEvoInfoStringPtr pInfoString)
6399 {
6400     NVHwModeViewPortEvoPtr pViewPort = &pTimings->viewPort;
6401     const NvU32 inWidth   = pViewPort->in.width;
6402     const NvU32 inHeight  = pViewPort->in.height;
6403     const NvU32 outWidth  = pViewPort->out.width;
6404     const NvU32 outHeight = pViewPort->out.height;
6405     const NvBool scaling = (outWidth != inWidth) || (outHeight != inHeight);
6406     NVEvoScalerTaps hTaps, vTaps;
6407 
    /*
     * Per the MFS, there is a restriction on the width and height of
     * ViewPortIn and ViewPortOut.
     */
6412     if (inWidth > 8192 || inHeight > 8192 ||
6413         outWidth > 8192 || outHeight > 8192) {
6414         nvEvoLogInfoString(pInfoString,
6415                            "Viewport dimensions exceed hardware capabilities");
6416         return FALSE;
6417     }
6418 
6419     if (!nvAssignScalerTaps(pDevEvo, pScalerCaps, inWidth, inHeight, outWidth, outHeight,
6420                             pTimings->doubleScan, &hTaps, &vTaps)) {
6421         nvEvoLogInfoString(pInfoString,
6422                            "Unable to configure scaling from %dx%d to %dx%d (exceeds filtering capabilities)",
6423                            inWidth, inHeight,
6424                            outWidth, outHeight);
6425         return FALSE;
6426     }
6427 
6428     /*
6429      * If this is an interlaced mode but we don't have scaling
6430      * configured, check that the width will fit in the 2-tap vertical
6431      * LineStoreSize; this is an EVO requirement for interlaced
6432      * rasters
6433      */
6434     if (pTimings->interlaced && !scaling) {
        /* !scaling means the widths should be the same */
6436         nvAssert(outWidth == inWidth);
6437 
6438         if (outWidth > pScalerCaps->taps[NV_EVO_SCALER_2TAPS].maxPixelsVTaps) {
6439             nvEvoLogInfoString(pInfoString,
6440                                "Interlaced mode requires filtering, but line width (%d) exceeds filtering capabilities",
6441                                outWidth);
6442             return FALSE;
6443         }
6444 
6445         /* hTaps and vTaps should have been set to minScalerTaps above */
6446         nvAssert(hTaps == pDevEvo->hal->caps.minScalerTaps);
6447         nvAssert(vTaps == pDevEvo->hal->caps.minScalerTaps);
6448     }
6449 
6450     pViewPort->hTaps = hTaps;
6451     pViewPort->vTaps = vTaps;
6452     return TRUE;
6453 }
6454 
6455 static void AssignGuaranteedSOCBounds(const NVDevEvoRec *pDevEvo,
6456                                       struct NvKmsUsageBounds *pGuaranteed)
6457 {
6458     NvU32 layer;
6459 
6460     pGuaranteed->layer[NVKMS_MAIN_LAYER].usable = TRUE;
6461     pGuaranteed->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats =
6462         nvEvoGetFormatsWithEqualOrLowerUsageBound(
6463             NvKmsSurfaceMemoryFormatA8R8G8B8,
6464             pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats);
6465     nvInitScalingUsageBounds(pDevEvo, &pGuaranteed->layer[NVKMS_MAIN_LAYER].scaling);
6466 
6467     for (layer = 1; layer < ARRAY_LEN(pGuaranteed->layer); layer++) {
6468         pGuaranteed->layer[layer].usable = FALSE;
6469         nvInitScalingUsageBounds(pDevEvo, &pGuaranteed->layer[layer].scaling);
6470     }
6471 }
6472 
6473 /*
6474  * Initialize the given NvKmsUsageBounds. Ask for everything supported by the HW
6475  * by default.  Later, based on what IMP says, we will scale back as needed.
6476  */
6477 void nvAssignDefaultUsageBounds(const NVDispEvoRec *pDispEvo,
6478                                 NVHwModeViewPortEvo *pViewPort)
6479 {
6480     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
6481     struct NvKmsUsageBounds *pPossible = &pViewPort->possibleUsage;
6482     NvU32 i;
6483 
6484     for (i = 0; i < ARRAY_LEN(pPossible->layer); i++) {
6485         struct NvKmsScalingUsageBounds *pScaling = &pPossible->layer[i].scaling;
6486 
6487         pPossible->layer[i].supportedSurfaceMemoryFormats =
6488             pDevEvo->caps.layerCaps[i].supportedSurfaceMemoryFormats;
6489         pPossible->layer[i].usable =
6490             (pPossible->layer[i].supportedSurfaceMemoryFormats != 0);
6491         if (!pPossible->layer[i].usable) {
6492             continue;
6493         }
6494 
6495         nvInitScalingUsageBounds(pDevEvo, pScaling);
6496 
6497         if (pDevEvo->hal->GetWindowScalingCaps) {
6498             const NVEvoScalerCaps *pScalerCaps =
6499                 pDevEvo->hal->GetWindowScalingCaps(pDevEvo);
6500             int j;
6501 
6502             for (j = NV_EVO_SCALER_TAPS_MAX; j >= NV_EVO_SCALER_TAPS_MIN; j--) {
6503                 const NVEvoScalerTapsCaps *pTapsCaps = &pScalerCaps->taps[j];
6504 
6505                 if ((pTapsCaps->maxVDownscaleFactor == 0) &&
6506                     (pTapsCaps->maxHDownscaleFactor == 0)) {
6507                     continue;
6508                 }
6509 
6510                 pScaling->maxVDownscaleFactor = pTapsCaps->maxVDownscaleFactor;
6511                 pScaling->maxHDownscaleFactor = pTapsCaps->maxHDownscaleFactor;
6512                 pScaling->vTaps = j;
6513                 pScaling->vUpscalingAllowed = (pTapsCaps->maxPixelsVTaps > 0);
6514                 break;
6515             }
6516         }
6517     }
6518 
6519     if (pDevEvo->isSOCDisplay) {
6520         AssignGuaranteedSOCBounds(pDevEvo, &pViewPort->guaranteedUsage);
6521     } else {
6522         pViewPort->guaranteedUsage = *pPossible;
6523     }
6524 }
6525 
6526 /*
6527  * ConstructHwModeTimingsViewPort() - determine the ViewPortOut size
6528  *
6529  * ViewPortIn (specified by inWidth, inHeight) selects the pixels to
6530  * extract from the scanout surface; ViewPortOut positions those
6531  * pixels within the raster timings.
6532  *
6533  * If the configuration is not possible, pViewPort->valid will be set
6534  * to false; otherwise, pViewPort->valid will be set to true.
6535  */
6536 
6537 static NvBool
6538 ConstructHwModeTimingsViewPort(const NVDispEvoRec *pDispEvo,
6539                                NVHwModeTimingsEvoPtr pTimings,
6540                                NVEvoInfoStringPtr pInfoString,
6541                                const struct NvKmsSize *pViewPortSizeIn,
6542                                const struct NvKmsRect *pViewPortOut)
6543 {
6544     NVHwModeViewPortEvoPtr pViewPort = &pTimings->viewPort;
6545     NvU32 outWidth, outHeight;
6546     const NvU32 hVisible = nvEvoVisibleWidth(pTimings);
6547     const NvU32 vVisible = nvEvoVisibleHeight(pTimings);
6548 
6549     /* the ViewPortOut should default to the raster size */
6550 
6551     outWidth = hVisible;
6552     outHeight = vVisible;
6553 
6554     pViewPort->out.xAdjust = 0;
6555     pViewPort->out.yAdjust = 0;
6556     pViewPort->out.width = outWidth;
6557     pViewPort->out.height = outHeight;
6558 
    /*
     * If a custom viewPortOut or viewPortIn was specified, do basic
     * validation and then assign them to pViewPort.  We'll do more
     * extensive checking of these values as part of IMP.  Note that
     * pViewPort->out.[xy]Adjust are relative to viewPortOut centered
     * within the raster timings, but pViewPortOut->[xy] are relative
     * to 0,0.
     */
6567     if (pViewPortOut) {
6568         NvS16 offset;
6569         struct NvKmsRect viewPortOut = *pViewPortOut;
6570 
        /*
         * When converting from the user viewPortOut to hardware raster
         * timings, double the vertical dimension.
         */
6575         if (pTimings->doubleScan) {
6576             viewPortOut.y *= 2;
6577             viewPortOut.height *= 2;
6578         }
6579 
6580         /*
6581          * The client-specified viewPortOut is in "full" horizontal space for
6582          * SW YUV420 modes. Convert to "half" horizontal space (matching
6583          * NVHwModeTimingsEvo and viewPortIn).
6584          */
6585         if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) {
6586             viewPortOut.x /= 2;
6587             viewPortOut.width /= 2;
6588         }
6589 
6590         if (A_plus_B_greater_than_C_U16(viewPortOut.x,
6591                                         viewPortOut.width,
6592                                         hVisible)) {
6593             return FALSE;
6594         }
6595 
6596         if (A_plus_B_greater_than_C_U16(viewPortOut.y,
6597                                         viewPortOut.height,
6598                                         vVisible)) {
6599             return FALSE;
6600         }
6601 
6602         offset = (hVisible - viewPortOut.width) / 2 * -1;
6603         pViewPort->out.xAdjust = offset + viewPortOut.x;
6604 
6605         offset = (vVisible - viewPortOut.height) / 2 * -1;
6606         pViewPort->out.yAdjust = offset + viewPortOut.y;
6607 
6608         pViewPort->out.width = viewPortOut.width;
6609         pViewPort->out.height = viewPortOut.height;
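
        /*
         * Worked example (illustrative): with hVisible = 1920 and a
         * client viewPortOut of x = 0, width = 1280, the centered
         * default would start at (1920 - 1280) / 2 = 320, so offset =
         * -320 and xAdjust = -320 + 0 = -320, placing the viewPortOut
         * at the left edge of the raster.
         */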
6610     }
6611 
6612     if (pViewPortSizeIn) {
6613         if (pViewPortSizeIn->width <= 0) {
6614             return FALSE;
6615         }
6616         if (pViewPortSizeIn->height <= 0) {
6617             return FALSE;
6618         }
6619 
6620         pViewPort->in.width = pViewPortSizeIn->width;
6621         pViewPort->in.height = pViewPortSizeIn->height;
6622     } else {
6623         pViewPort->in.width = pViewPort->out.width;
6624         pViewPort->in.height = pViewPort->out.height;
6625 
6626         /* When deriving viewportIn from viewportOut, halve the height for
6627          * doubleScan */
6628         if (pTimings->doubleScan) {
6629             pViewPort->in.height /= 2;
6630         }
6631     }
6632 
6633     nvAssignDefaultUsageBounds(pDispEvo, &pTimings->viewPort);
6634 
6635     return TRUE;
6636 }
6637 
6638 
6639 
/*
 * GetDfpProtocol() - determine the protocol to use on the given pDpy
 * with the given pTimings; assigns pTimings->protocol.
 */
6644 
6645 static NvBool GetDfpProtocol(const NVDpyEvoRec *pDpyEvo,
6646                              const struct NvKmsModeValidationParams *pParams,
6647                              NVHwModeTimingsEvoPtr pTimings)
6648 {
6649     NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
6650     const NvU32 rmProtocol = pConnectorEvo->or.protocol;
6651     const NvU32 overrides = pParams->overrides;
6652     enum nvKmsTimingsProtocol timingsProtocol;
6653 
6654     nvAssert(pConnectorEvo->legacyType ==
6655              NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP);
6656 
6657     if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
6658         /* Override protocol if this mode requires HDMI FRL. */
6659         if (nvDpyIsHdmiEvo(pDpyEvo) &&
6660             /* If we don't require boot clocks... */
6661             ((overrides & NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS) == 0) &&
6662             /* If FRL is supported, use it for 10 BPC if needed. */
6663             ((nvHdmiDpySupportsFrl(pDpyEvo) &&
6664               nvDpyIsHdmiDepth30Evo(pDpyEvo) &&
6665               nvHdmiTimingsNeedFrl(pDpyEvo, pTimings, HDMI_BPC10)) ||
6666             /* Fall back to 8 BPC, use FRL if needed. */
6667              nvHdmiTimingsNeedFrl(pDpyEvo, pTimings, HDMI_BPC8))) {
6668 
6669             /* If FRL is needed for 8 BPC, but not supported, fail. */
6670             if (!nvHdmiDpySupportsFrl(pDpyEvo)) {
6671                 return FALSE;
6672             }
6673 
6674             nvAssert(nvDpyIsHdmiEvo(pDpyEvo));
6675             nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A ||
6676                      rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B);
6677             timingsProtocol = NVKMS_PROTOCOL_SOR_HDMI_FRL;
6678         } else {
6679             switch (rmProtocol) {
6680             default:
6681                 nvAssert(!"unrecognized SOR RM protocol");
6682                 return FALSE;
6683             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
6684                 if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) {
6685                     return FALSE;
6686                 }
6687                 timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
6688                 break;
6689             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
6690                 if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) {
6691                     return FALSE;
6692                 }
6693                 timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B;
6694                 break;
6695             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
6696                 /*
6697                  * Override dual/single link TMDS protocol if necessary.
6698                  * XXX might be nice to give a way for users to override the
6699                  * SingleLink/DualLink decision.
6700                  *
6701                  * TMDS_A: "use A side of the link"
6702                  * TMDS_B: "use B side of the link"
6703                  */
6704                 if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) {
6705                     timingsProtocol = NVKMS_PROTOCOL_SOR_DUAL_TMDS;
6706                 } else {
6707                     timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A;
6708                 }
6709                 break;
6710             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
6711                 timingsProtocol = NVKMS_PROTOCOL_SOR_DP_A;
6712                 break;
6713             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
6714                 timingsProtocol = NVKMS_PROTOCOL_SOR_DP_B;
6715                 break;
6716             case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM:
6717                 timingsProtocol = NVKMS_PROTOCOL_SOR_LVDS_CUSTOM;
6718                 break;
6719             }
6720         }
6721     } else if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR) {
6722         nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC);
6723         timingsProtocol = NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC;
6724     } else if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_DSI) {
6725         nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI);
6726         timingsProtocol = NVKMS_PROTOCOL_DSI;
6727     } else {
6728         nvAssert(!"Unknown OR type");
6729         return FALSE;
6730     }
6731 
6732     pTimings->protocol = timingsProtocol;
6733 
    return TRUE;
}
6737 
6738 
6739 
/*
 * ConstructHwModeTimingsEvoCrt() - construct EVO hardware timings to
 * drive a CRT, given the mode timings in pModeTimings.
 */
6744 
6745 static NvBool
6746 ConstructHwModeTimingsEvoCrt(const NVConnectorEvoRec *pConnectorEvo,
6747                              const NvModeTimings *pModeTimings,
6748                              const struct NvKmsSize *pViewPortSizeIn,
6749                              const struct NvKmsRect *pViewPortOut,
6750                              NVHwModeTimingsEvoPtr pTimings,
6751                              NVEvoInfoStringPtr pInfoString)
6752 {
6753     ConstructHwModeTimingsFromNvModeTimings(pModeTimings, pTimings);
6754 
6755     /* assign the protocol; we expect DACs to have RGB protocol */
6756 
6757     nvAssert(pConnectorEvo->or.protocol ==
6758              NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT);
6759 
6760     pTimings->protocol = NVKMS_PROTOCOL_DAC_RGB;
6761 
6762     /* assign scaling fields */
6763 
6764     return ConstructHwModeTimingsViewPort(pConnectorEvo->pDispEvo, pTimings,
6765                                           pInfoString, pViewPortSizeIn,
6766                                           pViewPortOut);
6767 }
6768 
6769 
6770 /*!
6771  * Construct EVO hardware timings to drive a digital protocol (TMDS,
6772  * DP, etc).
6773  *
 * \param[in]  pDpyEvo       The display device for which to build timings.
6775  * \param[in]  pModeTimings  The hw-neutral description of the timings.
6776  * \param[out] pTimings      The EVO-specific modetimings.
6777  *
6778  * \return     TRUE if the EVO modetimings could be built; FALSE if failure.
6779  */
6780 static NvBool ConstructHwModeTimingsEvoDfp(const NVDpyEvoRec *pDpyEvo,
6781                                            const NvModeTimings *pModeTimings,
6782                                            const struct NvKmsSize *pViewPortSizeIn,
6783                                            const struct NvKmsRect *pViewPortOut,
6784                                            NVHwModeTimingsEvoPtr pTimings,
6785                                            const struct
6786                                            NvKmsModeValidationParams *pParams,
6787                                            NVEvoInfoStringPtr pInfoString)
6788 {
6789     NvBool ret;
6790 
6791     ConstructHwModeTimingsFromNvModeTimings(pModeTimings, pTimings);
6792 
6793     ret = GetDfpProtocol(pDpyEvo, pParams, pTimings);
6794 
6795     if (!ret) {
6796         return ret;
6797     }
6798 
6799     ret = ApplyDualLinkRequirements(pDpyEvo, pParams, pTimings, pInfoString);
6800 
6801     if (!ret) {
6802         return ret;
6803     }
6804 
6805     return ConstructHwModeTimingsViewPort(pDpyEvo->pDispEvo, pTimings,
6806                                           pInfoString, pViewPortSizeIn,
6807                                           pViewPortOut);
6808 }
6809 
6810 static NvBool DowngradeColorBpc(
6811     const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
6812     enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
6813     enum NvKmsDpyAttributeColorRangeValue *pColorRange)
6814 {
6815     switch (*pColorBpc) {
6816         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
6817             *pColorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
6818             break;
6819         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
6820             /* At depth 18 only RGB and full range are allowed */
6821             if (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) {
6822                 *pColorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6;
6823                 *pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
6824             } else {
6825                 return FALSE;
6826             }
6827             break;
6828         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN:
6829         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6:
6830             return FALSE;
6831     }
6832 
6833     return TRUE;
6834 }
6835 
6836 NvBool nvDowngradeColorSpaceAndBpc(
6837     const NVColorFormatInfoRec *pSupportedColorFormats,
6838     enum NvKmsDpyAttributeCurrentColorSpaceValue *pColorSpace,
6839     enum NvKmsDpyAttributeColorBpcValue *pColorBpc,
6840     enum NvKmsDpyAttributeColorRangeValue *pColorRange)
6841 {
6842     if (DowngradeColorBpc(*pColorSpace, pColorBpc, pColorRange)) {
6843         return TRUE;
6844     }
6845 
6846     switch (*pColorSpace) {
6847         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: /* fallthrough */
6848         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
6849             if (pSupportedColorFormats->yuv422.maxBpc !=
6850                     NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN) {
6851                 *pColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422;
6852                 *pColorBpc = pSupportedColorFormats->yuv422.maxBpc;
6853                 *pColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED;
6854                 return TRUE;
6855             }
6856             break;
6857         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: /* fallthrough */
6858         case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
6859             break;
6860     }
6861 
6862     return FALSE;
6863 }
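
/*
 * Example downgrade sequence (illustrative, assuming the sink supports
 * YCbCr422): RGB 10 bpc -> RGB 8 bpc -> RGB 6 bpc (full range forced)
 * -> YCbCr422 at the sink's maximum bpc (limited range).  Once YCbCr422
 * or YCbCr420 cannot be downgraded further, the search fails.
 */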
6864 
6865 /*
6866  * nvDPValidateModeEvo() - For DP devices handled by the DP lib, check DP
6867  * bandwidth and pick the best possible/supported pixel depth to use for
6868  * the given mode timings.
6869  */
6870 
6871 NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo,
6872                            NVHwModeTimingsEvoPtr pTimings,
6873                            const NvBool b2Heads1Or,
6874                            NVDscInfoEvoRec *pDscInfo,
6875                            const struct NvKmsModeValidationParams *pParams)
6876 {
6877     NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
6878     enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace;
6879     enum NvKmsDpyAttributeColorBpcValue colorBpc;
6880     enum NvKmsDpyAttributeColorRangeValue colorRange;
6881     const NVColorFormatInfoRec supportedColorFormats =
6882         nvGetColorFormatInfo(pDpyEvo);
6883 
6884     /* Only do this for DP devices. */
6885     if (!nvConnectorUsesDPLib(pConnectorEvo)) {
6886         return TRUE;
6887     }
6888 
6889     if ((pParams->overrides &
6890          NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK) != 0) {
6891         return TRUE;
6892     }
6893 
6894     if (pTimings->yuv420Mode != NV_YUV420_MODE_NONE) {
6895         colorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420;
6896         colorBpc = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8;
6897     } else if (!nvGetDefaultColorSpace(&supportedColorFormats, &colorSpace,
6898                                        &colorBpc)) {
6899         return FALSE;
6900     }
6901 
6902     if (colorSpace != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) {
6903         colorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED;
6904     } else {
6905         colorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL;
6906     }
6907 
6908     nvAssert(nvDpyUsesDPLib(pDpyEvo));
6909     nvAssert(pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR);
6910 
6911  tryAgain:
6912 
6913     if (!nvDPValidateModeForDpyEvo(pDpyEvo, colorSpace, colorBpc, pParams,
6914                                    pTimings, b2Heads1Or, pDscInfo)) {
6915         if (nvDowngradeColorSpaceAndBpc(&supportedColorFormats, &colorSpace,
6916                                         &colorBpc, &colorRange)) {
6917              goto tryAgain;
6918         }
        /*
         * Cannot downgrade the color space or bpc any further: this
         * mode is not possible on this DP link, so fail.
         */
6923 
6924         return FALSE;
6925     }
6926 
6927     return TRUE;
6928 }
6929 
/*
 * Construct the hardware values to program EVO for the specified
 * NvModeTimings.
 */
6934 
6935 NvBool nvConstructHwModeTimingsEvo(const NVDpyEvoRec *pDpyEvo,
6936                                    const struct NvKmsMode *pKmsMode,
6937                                    const struct NvKmsSize *pViewPortSizeIn,
6938                                    const struct NvKmsRect *pViewPortOut,
6939                                    NVHwModeTimingsEvoPtr pTimings,
6940                                    const struct NvKmsModeValidationParams
6941                                    *pParams,
6942                                    NVEvoInfoStringPtr pInfoString)
6943 {
6944     const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo;
6945     NvBool ret;
6946 
6947     /* assign the pTimings values */
6948 
6949     if (pConnectorEvo->legacyType ==
6950                NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) {
6951         ret = ConstructHwModeTimingsEvoDfp(pDpyEvo,
6952                                            &pKmsMode->timings,
6953                                            pViewPortSizeIn, pViewPortOut,
6954                                            pTimings, pParams, pInfoString);
6955     } else if (pConnectorEvo->legacyType ==
6956                NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) {
6957         ret = ConstructHwModeTimingsEvoCrt(pConnectorEvo,
6958                                            &pKmsMode->timings,
6959                                            pViewPortSizeIn, pViewPortOut,
6960                                            pTimings, pInfoString);
6961     } else {
6962         nvAssert(!"Invalid pDpyEvo->type");
6963         return FALSE;
6964     }
6965 
6966     if (!ret) return FALSE;
6967 
6968     /* tweak the raster timings for gsync */
6969 
6970     if (pDpyEvo->pDispEvo->pFrameLockEvo) {
6971         // if this fails, the timing remains untweaked, which just means
6972         // that the mode may not work well with frame lock
6973         TweakTimingsForGsync(pDpyEvo, pTimings, pInfoString, pParams->stereoMode);
6974     }
6975 
6976     return TRUE;
6977 }
6978 
6979 static NvBool DowngradeViewPortTaps(const NVEvoHeadCaps *pHeadCaps,
6980                                     NVHwModeViewPortEvoPtr pViewPort,
6981                                     NVEvoScalerTaps srcTaps,
6982                                     NVEvoScalerTaps dstTaps,
6983                                     NvBool isVert,
6984                                     NVEvoScalerTaps *pTaps)
6985 {
6986     const NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps;
6987     NvBool dstPossible;
6988 
6989     if (isVert) {
6990         dstPossible = IsVTapsPossible(pScalerCaps, pViewPort->in.width,
6991                                       pViewPort->out.width, dstTaps);
6992     } else {
6993         dstPossible = pScalerCaps->taps[dstTaps].maxHDownscaleFactor > 0;
6994     }
6995 
6996     if (*pTaps >= srcTaps && dstPossible) {
6997         *pTaps = dstTaps;
6998         return TRUE;
6999     }
7000 
7001     return FALSE;
7002 }
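
/*
 * The DowngradeViewPort[HV]Taps* helpers below each step one rung down
 * the taps ladder (8 -> 5 -> 2 horizontal, 5 -> 3 -> 2 vertical); they
 * are used when scaling back usage bounds until IMP accepts a
 * configuration.
 */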
7003 
7004 /* Downgrade the htaps from 8 to 5 */
7005 static NvBool DowngradeViewPortHTaps8(const NVDevEvoRec *pDevEvo,
7006                                       const NvU32 head,
7007                                       const NVEvoHeadCaps *pHeadCaps,
7008                                       NVHwModeViewPortEvoPtr pViewPort,
7009                                       NvU64 unused)
7010 {
7011     return DowngradeViewPortTaps(pHeadCaps,
7012                                  pViewPort,
7013                                  NV_EVO_SCALER_8TAPS,
7014                                  NV_EVO_SCALER_5TAPS,
7015                                  FALSE /* isVert */,
7016                                  &pViewPort->hTaps);
7017 }
7018 
7019 /* Downgrade the htaps from 5 to 2 */
7020 static NvBool DowngradeViewPortHTaps5(const NVDevEvoRec *pDevEvo,
7021                                       const NvU32 head,
7022                                       const NVEvoHeadCaps *pHeadCaps,
7023                                       NVHwModeViewPortEvoPtr pViewPort,
7024                                       NvU64 unused)
7025 {
7026     return DowngradeViewPortTaps(pHeadCaps,
7027                                  pViewPort,
7028                                  NV_EVO_SCALER_5TAPS,
7029                                  NV_EVO_SCALER_2TAPS,
7030                                  FALSE /* isVert */,
7031                                  &pViewPort->hTaps);
7032 }
7033 
7034 /* Downgrade the vtaps from 5 to 3 */
7035 static NvBool DowngradeViewPortVTaps5(const NVDevEvoRec *pDevEvo,
7036                                       const NvU32 head,
7037                                       const NVEvoHeadCaps *pHeadCaps,
7038                                       NVHwModeViewPortEvoPtr pViewPort,
7039                                       NvU64 unused)
7040 {
7041     return DowngradeViewPortTaps(pHeadCaps,
7042                                  pViewPort,
7043                                  NV_EVO_SCALER_5TAPS,
7044                                  NV_EVO_SCALER_3TAPS,
7045                                  TRUE /* isVert */,
7046                                  &pViewPort->vTaps);
7047 }
7048 
7049 /* Downgrade the vtaps from 3 to 2 */
7050 static NvBool DowngradeViewPortVTaps3(const NVDevEvoRec *pDevEvo,
7051                                       const NvU32 head,
7052                                       const NVEvoHeadCaps *pHeadCaps,
7053                                       NVHwModeViewPortEvoPtr pViewPort,
7054                                       NvU64 unused)
7055 {
7056     return DowngradeViewPortTaps(pHeadCaps,
7057                                  pViewPort,
7058                                  NV_EVO_SCALER_3TAPS,
7059                                  NV_EVO_SCALER_2TAPS,
7060                                  TRUE /* isVert */,
7061                                  &pViewPort->vTaps);
7062 }
7063 
7064 static NvBool
7065 DowngradeLayerDownscaleFactor(NVHwModeViewPortEvoPtr pViewPort,
7066                               const NvU32 layer,
7067                               NvU16 srcFactor,
7068                               NvU16 dstFactor,
7069                               NvU16 *pFactor)
7070 {
7071     struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage;
7072 
7073     if (!pUsage->layer[layer].usable) {
7074         return FALSE;
7075     }
7076 
7077     if (*pFactor == srcFactor) {
7078         *pFactor = dstFactor;
7079         return TRUE;
7080     }
7081 
7082     return FALSE;
7083 }
7084 
7085 static NvBool
7086 DowngradeLayerVDownscaleFactor4X(const NVDevEvoRec *pDevEvo,
7087                                  const NvU32 head,
7088                                  const NVEvoHeadCaps *pHeadCaps,
7089                                  NVHwModeViewPortEvoPtr pViewPort,
7090                                  NvU64 unused)
7091 {
7092     NvU32 layer;
7093 
7094     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
7095         struct NvKmsScalingUsageBounds *pScaling =
7096             &pViewPort->guaranteedUsage.layer[layer].scaling;
7097 
7098         if (DowngradeLayerDownscaleFactor(pViewPort,
7099                                           layer,
7100                                           NV_EVO_SCALE_FACTOR_4X,
7101                                           NV_EVO_SCALE_FACTOR_3X,
7102                                           &pScaling->maxVDownscaleFactor)) {
7103             return TRUE;
7104         }
7105     }
7106 
7107     return FALSE;
7108 }
7109 
7110 static NvBool
7111 DowngradeLayerVDownscaleFactor3X(const NVDevEvoRec *pDevEvo,
7112                                  const NvU32 head,
7113                                  const NVEvoHeadCaps *pHeadCaps,
7114                                  NVHwModeViewPortEvoPtr pViewPort,
7115                                  NvU64 unused)
7116 {
7117     NvU32 layer;
7118 
7119     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
7120         struct NvKmsScalingUsageBounds *pScaling =
7121             &pViewPort->guaranteedUsage.layer[layer].scaling;
7122 
7123         if (DowngradeLayerDownscaleFactor(pViewPort,
7124                                           layer,
7125                                           NV_EVO_SCALE_FACTOR_3X,
7126                                           NV_EVO_SCALE_FACTOR_2X,
7127                                           &pScaling->maxVDownscaleFactor)) {
7128             return TRUE;
7129         }
7130     }
7131 
7132     return FALSE;
7133 }
7134 
7135 static NvBool
7136 DowngradeLayerVDownscaleFactor2X(const NVDevEvoRec *pDevEvo,
7137                                  const NvU32 head,
7138                                  const NVEvoHeadCaps *pHeadCaps,
7139                                  NVHwModeViewPortEvoPtr pViewPort,
7140                                  NvU64 unused)
7141 {
7142     NvU32 layer;
7143 
7144     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
7145         struct NvKmsScalingUsageBounds *pScaling =
7146             &pViewPort->guaranteedUsage.layer[layer].scaling;
7147 
7148         if (DowngradeLayerDownscaleFactor(pViewPort,
7149                                           layer,
7150                                           NV_EVO_SCALE_FACTOR_2X,
7151                                           NV_EVO_SCALE_FACTOR_1X,
7152                                           &pScaling->maxVDownscaleFactor)) {
7153             return TRUE;
7154         }
7155     }
7156 
7157     return FALSE;
7158 }
7159 
7160 static NvBool
7161 DowngradeLayerHDownscaleFactor4X(const NVDevEvoRec *pDevEvo,
7162                                  const NvU32 head,
7163                                  const NVEvoHeadCaps *pHeadCaps,
7164                                  NVHwModeViewPortEvoPtr pViewPort,
7165                                  NvU64 unused)
7166 {
7167     NvU32 layer;
7168 
7169     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
7170         struct NvKmsScalingUsageBounds *pScaling =
7171             &pViewPort->guaranteedUsage.layer[layer].scaling;
7172 
7173         if (DowngradeLayerDownscaleFactor(pViewPort,
7174                                           layer,
7175                                           NV_EVO_SCALE_FACTOR_4X,
7176                                           NV_EVO_SCALE_FACTOR_3X,
7177                                           &pScaling->maxHDownscaleFactor)) {
7178             return TRUE;
7179         }
7180     }
7181 
7182     return FALSE;
7183 }
7184 
7185 static NvBool DowngradeLayerHDownscaleFactor3X(const NVDevEvoRec *pDevEvo,
7186                                                const NvU32 head,
7187                                                const NVEvoHeadCaps *pHeadCaps,
7188                                                NVHwModeViewPortEvoPtr pViewPort,
7189                                                NvU64 unused)
7190 {
7191     NvU32 layer;
7192 
7193     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
7194         struct NvKmsScalingUsageBounds *pScaling =
7195             &pViewPort->guaranteedUsage.layer[layer].scaling;
7196 
        if (DowngradeLayerDownscaleFactor(pViewPort,
                                          layer,
                                          NV_EVO_SCALE_FACTOR_3X,
                                          NV_EVO_SCALE_FACTOR_2X,
                                          &pScaling->maxHDownscaleFactor)) {
7202             return TRUE;
7203         }
7204     }
7205 
7206     return FALSE;
7207 }
7208 
7209 static NvBool DowngradeLayerHDownscaleFactor2X(const NVDevEvoRec *pDevEvo,
7210                                                const NvU32 head,
7211                                                const NVEvoHeadCaps *pHeadCaps,
7212                                                NVHwModeViewPortEvoPtr pViewPort,
7213                                                NvU64 unused)
7214 {
7215     NvU32 layer;
7216 
7217     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
7218         struct NvKmsScalingUsageBounds *pScaling =
7219             &pViewPort->guaranteedUsage.layer[layer].scaling;
7220 
7221         if (DowngradeLayerDownscaleFactor(pViewPort,
7222                                           layer,
7223                                           NV_EVO_SCALE_FACTOR_2X,
7224                                           NV_EVO_SCALE_FACTOR_1X,
7225                                           &pScaling->maxHDownscaleFactor)) {
7226             return TRUE;
7227         }
7228     }
7229 
7230     return FALSE;
7231 }
7232 
7233 /* Downgrade the vtaps from 5 to 2 */
7234 static NvBool DowngradeLayerVTaps5(const NVDevEvoRec *pDevEvo,
7235                                    const NvU32 head,
7236                                    const NVEvoHeadCaps *pHeadCaps,
7237                                    NVHwModeViewPortEvoPtr pViewPort,
7238                                    NvU64 unused)
7239 {
7240     struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage;
7241     NvU32 layer;
7242 
7243     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
7244         struct NvKmsScalingUsageBounds *pScaling =
7245             &pUsage->layer[layer].scaling;
7246 
7247         if (!pUsage->layer[layer].usable) {
7248             continue;
7249         }
7250 
7251         if (pScaling->vTaps == NV_EVO_SCALER_5TAPS) {
7252             pScaling->vTaps = NV_EVO_SCALER_2TAPS;
7253             return TRUE;
7254         }
7255     }
7256 
7257     return FALSE;
7258 }
7259 
7260 static NvBool DowngradeLayerVUpscaling(const NVDevEvoRec *pDevEvo,
7261                                        const NvU32 head,
7262                                        const NVEvoHeadCaps *pHeadCaps,
7263                                        NVHwModeViewPortEvoPtr pViewPort,
7264                                        NvU64 unused)
7265 {
7266     struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage;
7267     NvU32 layer;
7268 
7269     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
7270         struct NvKmsScalingUsageBounds *pScaling =
7271             &pUsage->layer[layer].scaling;
7272 
7273         if (!pUsage->layer[layer].usable) {
7274             continue;
7275         }
7276 
7277         if (pScaling->vUpscalingAllowed) {
7278             pScaling->vUpscalingAllowed = FALSE;
7279             return TRUE;
7280         }
7281     }
7282 
7283     return FALSE;
7284 }
7285 
7286 static NvBool DowngradeViewPortOverlayFormats(
7287     const NVDevEvoRec *pDevEvo,
7288     const NvU32 head,
7289     const NVEvoHeadCaps *pHeadCaps,
7290     NVHwModeViewPortEvoPtr pViewPort,
7291     NvU64 removeSurfaceMemoryFormats)
7292 {
7293     struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage;
7294     NvU32 layer;
7295 
7296     for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
7297         if (layer == NVKMS_MAIN_LAYER || !pUsage->layer[layer].usable) {
7298             continue;
7299         }
7300 
7301         if (pUsage->layer[layer].supportedSurfaceMemoryFormats &
7302             removeSurfaceMemoryFormats) {
7303             pUsage->layer[layer].supportedSurfaceMemoryFormats &=
7304                 ~removeSurfaceMemoryFormats;
7305             if (pUsage->layer[layer].supportedSurfaceMemoryFormats == 0) {
7306                 pUsage->layer[layer].usable = FALSE;
7307             }
7308 
7309             return TRUE;
7310         }
7311     }
7312 
7313     return FALSE;
7314 }
7315 
7316 static NvBool DowngradeViewPortBaseFormats(
7317     const NVDevEvoRec *pDevEvo,
7318     const NvU32 head,
7319     const NVEvoHeadCaps *pHeadCaps,
7320     NVHwModeViewPortEvoPtr pViewPort,
7321     NvU64 removeSurfaceMemoryFormats)
7322 {
7323     struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage;
7324 
7325     if (!pUsage->layer[NVKMS_MAIN_LAYER].usable) {
7326         return FALSE;
7327     }
7328 
7329     if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &
7330         removeSurfaceMemoryFormats) {
7331         pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &=
7332             ~removeSurfaceMemoryFormats;
7333         if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats == 0) {
7334             pUsage->layer[NVKMS_MAIN_LAYER].usable = FALSE;
7335         }
7336 
7337         return TRUE;
7338     }
7339 
7340     return FALSE;
7341 }
7342 
7343 typedef NvBool (*DowngradeViewPortFuncPtr)(const NVDevEvoRec *pDevEvo,
7344                                            const NvU32 head,
7345                                            const NVEvoHeadCaps *pHeadCaps,
7346                                            NVHwModeViewPortEvoPtr pViewPort,
7347                                            NvU64 removeSurfaceMemoryFormats);
7348 
7349 /*
7350  * Try to downgrade the usage bounds of the viewports, keeping the
7351  * viewports roughly equal in capability; we do this from
7352  * ValidateMetaMode50() when IMP rejects the mode.  Return TRUE if we
7353  * were able to downgrade something; return FALSE if there was nothing
7354  * left to downgrade.
7355  */
7356 
7357 static NvBool DownGradeMetaModeUsageBounds(
7358     const NVDevEvoRec                      *pDevEvo,
7359     const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP],
7360     NvU32                                   downgradePossibleHeadsBitMask)
7361 {
7362     static const struct {
7363         DowngradeViewPortFuncPtr downgradeFunc;
7364         NvU64 removeSurfaceMemoryFormats;
7365     } downgradeFuncs[] = {
7366         { DowngradeLayerVDownscaleFactor4X,
7367           0 },
7368         { DowngradeLayerHDownscaleFactor4X,
7369           0 },
7370         { DowngradeViewPortOverlayFormats,
7371           NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 },
7372         { DowngradeViewPortBaseFormats,
7373           NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 },
7374         { DowngradeViewPortOverlayFormats,
7375           NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 },
7376         { DowngradeViewPortBaseFormats,
7377           NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 },
7378         { DowngradeViewPortOverlayFormats,
7379           NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 },
7380         { DowngradeViewPortBaseFormats,
7381           NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 },
7382         { DowngradeViewPortOverlayFormats,
7383           NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 },
7384         { DowngradeViewPortBaseFormats,
7385           NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 },
7386         { DowngradeViewPortOverlayFormats,
7387           NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 },
7388         { DowngradeViewPortBaseFormats,
7389           NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 },
7390         { DowngradeViewPortOverlayFormats,
7391           NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 },
7392         { DowngradeViewPortBaseFormats,
7393           NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 },
7394         { DowngradeViewPortOverlayFormats,
7395           NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 },
7396         { DowngradeViewPortBaseFormats,
7397           NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 },
7398         { DowngradeViewPortOverlayFormats,
7399           NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 },
7400         { DowngradeViewPortBaseFormats,
7401           NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 },
7402         { DowngradeViewPortOverlayFormats,
7403           NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 },
7404         { DowngradeViewPortBaseFormats,
7405           NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 },
7406         { DowngradeViewPortOverlayFormats,
7407           NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP },
7408         { DowngradeViewPortBaseFormats,
7409           NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP },
7410         { DowngradeLayerVDownscaleFactor3X,
7411           0 },
7412         { DowngradeLayerHDownscaleFactor3X,
7413           0 },
7414         { DowngradeViewPortVTaps5,
7415           0 },
7416         { DowngradeViewPortVTaps3,
7417           0 },
7418         { DowngradeViewPortHTaps8,
7419           0 },
7420         { DowngradeViewPortHTaps5,
7421           0 },
7422         { DowngradeViewPortOverlayFormats,
7423           NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP },
7424         { DowngradeLayerVTaps5,
7425           0 },
7426         { DowngradeViewPortOverlayFormats,
7427           NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP },
7428         { DowngradeLayerVDownscaleFactor2X,
7429           0 },
7430         { DowngradeLayerHDownscaleFactor2X,
7431           0 },
7432         { DowngradeLayerVUpscaling,
7433           0 },
7434         { DowngradeViewPortOverlayFormats,
7435           NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP },
7436         { DowngradeViewPortBaseFormats,
7437           NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP },
7438         { DowngradeViewPortBaseFormats,
7439           NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP },
7440         { DowngradeViewPortBaseFormats,
7441           NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP },
7442     };
7443     int i;
7444 
7445     // XXX assume the heads have equal capabilities
7446     // XXX assume the gpus have equal capabilities
7447 
7448     const NVEvoHeadCaps *pHeadCaps =
7449         &pDevEvo->gpus[0].capabilities.head[0];
7450 
7452     for (i = 0; i < ARRAY_LEN(downgradeFuncs); i++) {
7453         int head;
7454         FOR_ALL_HEADS(head, downgradePossibleHeadsBitMask) {
7455             if (timingsParams[head].pTimings == NULL) {
7456                 continue;
7457             }
7458 
7459             if (downgradeFuncs[i].downgradeFunc(
7460                     pDevEvo,
7461                     head,
7462                     pHeadCaps,
7463                     &timingsParams[head].pTimings->viewPort,
7464                     downgradeFuncs[i].removeSurfaceMemoryFormats)) {
7465                 return TRUE;
7466             }
7467         }
7468     }
7469 
7470     /* Nothing else to downgrade */
7471     return FALSE;
7472 }
7473 
7474 NvBool nvAllocateDisplayBandwidth(
7475     NVDispEvoPtr pDispEvo,
7476     NvU32 newIsoBandwidthKBPS,
7477     NvU32 newDramFloorKBPS)
7478 {
7479     NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS params = { };
7480     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
7481     NvU32 ret;
7482 
7483     if (!pDevEvo->isSOCDisplay) {
7484         return TRUE;
7485     }
7486 
7487     params.subDeviceInstance = 0;
7488     params.averageBandwidthKBPS = newIsoBandwidthKBPS;
7489     params.floorBandwidthKBPS = newDramFloorKBPS;
7490 
7491     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
7492                          pDevEvo->displayCommonHandle,
7493                          NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH,
7494                          &params, sizeof(params));
    if (ret != NVOS_STATUS_SUCCESS) {
7496         nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
7497                     "Failed to allocate %u KBPS Iso and %u KBPS Dram",
7498                     newIsoBandwidthKBPS, newDramFloorKBPS);
7499         return FALSE;
7500     }
7501 
7502     pDispEvo->isoBandwidthKBPS = newIsoBandwidthKBPS;
7503     pDispEvo->dramFloorKBPS = newDramFloorKBPS;
7504 
7505     return TRUE;
7506 }
7507 
7508 static void AssignNVEvoIsModePossibleDispInput(
7509     NVDispEvoPtr                             pDispEvo,
7510     const NVValidateImpOneDispHeadParamsRec  timingsParams[NVKMS_MAX_HEADS_PER_DISP],
7511     NvBool                                   requireBootClocks,
7512     NVEvoReallocateBandwidthMode             reallocBandwidth,
7513     NVEvoIsModePossibleDispInput            *pImpInput)
7514 {
7515     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
7516     NvU32 head;
7517     NvU32 nextSorIndex = 0;
7518 
7519     nvkms_memset(pImpInput, 0, sizeof(*pImpInput));
7520 
7521     pImpInput->requireBootClocks = requireBootClocks;
7522     pImpInput->reallocBandwidth = reallocBandwidth;
7523 
7524     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
7525         const NVConnectorEvoRec *pConnectorEvo =
7526                     timingsParams[head].pConnectorEvo;
7527         NvU32 otherHead = 0;
7528 
7529         nvAssert((timingsParams[head].pTimings == NULL) ==
7530                  (timingsParams[head].pConnectorEvo == NULL));
7531 
7532         pImpInput->head[head].orIndex = NV_INVALID_OR;
7533 
7534         if (timingsParams[head].pTimings == NULL) {
7535             continue;
7536         }
7537 
7538         pImpInput->head[head].pTimings = timingsParams[head].pTimings;
7539         pImpInput->head[head].enableDsc = timingsParams[head].enableDsc;
7540         pImpInput->head[head].b2Heads1Or = timingsParams[head].b2Heads1Or;
7541         pImpInput->head[head].pixelDepth = timingsParams[head].pixelDepth;
7542         pImpInput->head[head].displayId = timingsParams[head].activeRmId;
7543         pImpInput->head[head].orType = pConnectorEvo->or.type;
7544         pImpInput->head[head].pUsage = timingsParams[head].pUsage;
7545 
7546         if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
7547                 NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED) ||
7548              pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
7549 
7550             nvAssert(pConnectorEvo->or.primary != NV_INVALID_OR);
7551 
7552             pImpInput->head[head].orIndex = pConnectorEvo->or.primary;
7553             continue;
7554         }
7555 
7556         /*
7557          * If more than one head is attached to the same connector, then make
7558          * sure that all of them use the same SOR index.
7559          */
7560         for (otherHead = 0; otherHead < head; otherHead++) {
7561             if (timingsParams[otherHead].pConnectorEvo == pConnectorEvo) {
7562                 pImpInput->head[head].orIndex = pImpInput->head[otherHead].orIndex;
7563                 break;
7564             }
7565         }
7566 
7567         /*
7568          * On GPUs with a full crossbar, the SORs are equally capable, so just
7569          * use next unused SOR.
7570          *
7571          * We assume there are as many SORs as there are heads.
7572          */
7573         if (pImpInput->head[head].orIndex == NV_INVALID_OR) {
7574             pImpInput->head[head].orIndex = nextSorIndex;
7575             nextSorIndex++;
7576         }
7577     }
7578 }
7579 
7580 /*!
7581  * Validate the described disp configuration through IMP.
7582 
7583  * \param[in]      pDispEvo        The disp of the dpyIdList.
7584  *
7585  * \param[in.out]  timingsParams[] The proposed configuration to use on each head
7586  *                                 includes -
7587  *
7588  *                                   pConnectorEvo -
7589  *                                     The proposed connector to drive on each head.
7590  *
7591  *                                   activeRmId -
7592  *                                     The display ID that we use to talk to RM
7593  *                                     about the dpy(s) on each head.
7594  *
7595  *                                   pTimings -
7596  *                                     The proposed timings to use on each head;
7597  *                                     note the usage bounds within pTimings
7598  *                                     may be altered by this function.
7599  *
7600  *                                   depth -
7601  *                                     The depth of the buffer to be displayed on
7602  *                                     each head.
7603  * \param[in]      requireBootClocks
7604  *                                 Only validate modes that will work at P8
7605  *                                 clocks.
7606  *
7607  * \param[in]      reallocBandwidth
7608  *                                 Try to allocate the required display
7609  *                                 bandwidth if IMP passes.
7610  *
7611  * \param[out]     pMinIsoBandwidthKBPS
7612  *                                 The ISO bandwidth that's required for the
7613  *                                 proposed disp configuration only. This value
7614  *                                 doesn't take the current display state into
7615  *                                 account.
7616  *
7617  * \param[out]     pMinDramFloorKBPS
7618  *                                 The DRAM floor that's required for the
7619  *                                 proposed disp configuration only. This value
7620  *                                 doesn't take the current display state into
7621  *                                 account.
7622  *
7623  * \return         Return TRUE if the proposed disp configuration is
7624  *                 considered valid for IMP purposes.
7625  */
7626 NvBool nvValidateImpOneDisp(
7627     NVDispEvoPtr                            pDispEvo,
7628     const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP],
7629     NvBool                                  requireBootClocks,
7630     NVEvoReallocateBandwidthMode            reallocBandwidth,
7631     NvU32                                   *pMinIsoBandwidthKBPS,
7632     NvU32                                   *pMinDramFloorKBPS)
7633 {
7634     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
7635     NVEvoIsModePossibleDispInput impInput = { };
7636     NVEvoIsModePossibleDispOutput impOutput = { };
7637     NvU32 newIsoBandwidthKBPS, newDramFloorKBPS;
7638     NvBool needToRealloc = FALSE;
7639 
7640     AssignNVEvoIsModePossibleDispInput(pDispEvo,
7641                                        timingsParams, requireBootClocks,
7642                                        reallocBandwidth,
7643                                        &impInput);
7644 
7645     pDevEvo->hal->IsModePossible(pDispEvo, &impInput, &impOutput);
7646     if (!impOutput.possible) {
7647         return FALSE;
7648     }
7649 
7650     switch (reallocBandwidth) {
7651         case NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE:
7652             needToRealloc = (impOutput.minRequiredBandwidthKBPS > pDispEvo->isoBandwidthKBPS) ||
7653                             (impOutput.floorBandwidthKBPS > pDispEvo->dramFloorKBPS);
7654             newIsoBandwidthKBPS =
7655                 NV_MAX(pDispEvo->isoBandwidthKBPS, impOutput.minRequiredBandwidthKBPS);
7656             newDramFloorKBPS =
7657                 NV_MAX(pDispEvo->dramFloorKBPS, impOutput.floorBandwidthKBPS);
7658 
7659             break;
7660         case NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST:
7661             needToRealloc = (impOutput.minRequiredBandwidthKBPS != pDispEvo->isoBandwidthKBPS) ||
7662                             (impOutput.floorBandwidthKBPS != pDispEvo->dramFloorKBPS);
7663             newIsoBandwidthKBPS = impOutput.minRequiredBandwidthKBPS;
7664             newDramFloorKBPS = impOutput.floorBandwidthKBPS;
7665 
7666             break;
7667         case NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE:
7668         default:
7669             break;
7670     }
7671 
7672     if (needToRealloc) {
7673         if (!nvAllocateDisplayBandwidth(pDispEvo,
7674                                         newIsoBandwidthKBPS,
7675                                         newDramFloorKBPS)) {
7676             return FALSE;
7677         }
7678     }
7679 
7680     if (pMinIsoBandwidthKBPS != NULL) {
7681         *pMinIsoBandwidthKBPS = impOutput.minRequiredBandwidthKBPS;
7682     }
7683 
7684     if (pMinDramFloorKBPS != NULL) {
7685         *pMinDramFloorKBPS = impOutput.floorBandwidthKBPS;
7686     }
7687 
7688     return TRUE;
7689 }
7690 
7691 NvBool nvValidateImpOneDispDowngrade(
7692     NVDispEvoPtr                            pDispEvo,
7693     const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP],
7694     NvBool                                  requireBootClocks,
7695     NVEvoReallocateBandwidthMode            reallocBandwidth,
7696     NvU32                                   downgradePossibleHeadsBitMask)
7697 {
7698     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
7699     NvBool impPassed = FALSE;
7700 
7701     do {
7702         impPassed = nvValidateImpOneDisp(pDispEvo,
7703                                          timingsParams,
7704                                          requireBootClocks,
7705                                          reallocBandwidth,
7706                                          NULL /* pMinIsoBandwidthKBPS */,
7707                                          NULL /* pMinDramFloorKBPS */);
7708         if (impPassed) {
7709             break;
7710         }
7711     } while (DownGradeMetaModeUsageBounds(pDevEvo, timingsParams,
7712                                           downgradePossibleHeadsBitMask));
7713 
7714     if (impPassed && !pDevEvo->isSOCDisplay) {
7715         NvU32 head;
7716 
7717         for (head = 0; head < pDevEvo->numHeads; head++) {
7718             if (timingsParams[head].pTimings != NULL) {
7719                 timingsParams[head].pTimings->viewPort.possibleUsage =
7720                     timingsParams[head].pTimings->viewPort.guaranteedUsage;
7721             }
7722         }
7723     }
7724 
7725     return impPassed;
7726 }
7727 
7728 /*
7729  * Return TRUE iff this display can be configured as a framelock
7730  * server given the current modetimings/framelock configuration, FALSE
7731  * o.w.
7732  */
7733 
7734 NvBool nvFrameLockServerPossibleEvo(const NVDpyEvoRec *pDpyEvo)
7735 {
7737     NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
7738     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
7739     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
7740 
7741     return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
7742                                      NV_EVO_ADD_FRAME_LOCK_SERVER,
7743                                      NULL);
7744 }
7745 
7746 /*
7747  * Return TRUE iff this display can be configured as a framelock client
7748  * given the current modetimings/framelock configuration, FALSE o.w.
7749  */
7750 
7751 NvBool nvFrameLockClientPossibleEvo(const NVDpyEvoRec *pDpyEvo)
7752 {
7753     NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
7754     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
7755     NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
7756 
7757     return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
7758                                      NV_EVO_ADD_FRAME_LOCK_CLIENT,
7759                                      NULL);
7760 }
7761 
7762 
7763 /*
7764  * FrameLockSli() - Helper function for nvEvoRefFrameLockSli() and
7765  * nvEvoUnRefFrameLockSli(), which are hooked into the EVO locking state
7766  * machine via custom rules.  This function will find the GPU acting as the
7767  * given GPU's SLI primary and perform the NV_EVO_{ADD,REM}_FRAMELOCK_REF action
7768  * to increment or decrement the refcount on that GPU.
7769  * If queryOnly, it also figures out which displays to pass into the EVO state
7770  * machine; otherwise, it passes NULLs to perform a query without affecting
7771  * state.
7772  */
7773 
7774 static NvBool FrameLockSli(NVDevEvoPtr pDevEvo,
7775                            NvU32 action,
7776                            NvBool queryOnly)
7777 {
7778     RasterLockGroup *pRasterLockGroups;
7779     NVEvoSubDevPtr pEvoSubDev;
7780     NVDispEvoPtr pDispEvo;
7781     unsigned int numRasterLockGroups;
7782 
7783     pRasterLockGroups = GetRasterLockGroups(pDevEvo, &numRasterLockGroups);
7784     if (!pRasterLockGroups) {
7785         return FALSE;
7786     }
7787 
7788     nvAssert(numRasterLockGroups == 1);
7789     if (numRasterLockGroups != 1) {
7790         nvFree(pRasterLockGroups);
7791         return FALSE;
7792     }
7793 
7794     /* Want to be framelock server */
7795     pDispEvo = pRasterLockGroups[0].pDispEvoOrder[0];
7796 
7797     nvFree(pRasterLockGroups);
7798 
7799     if (!pDispEvo) {
7800         return FALSE;
7801     }
7802 
7803     nvAssert(pDevEvo == pDispEvo->pDevEvo);
7804 
7805     pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
7806 
7807     if (queryOnly) {
7808         return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, NULL);
7809     } else {
7810         NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1] = { NV_INVALID_HEAD, };
7811         NvU32 i = 0;
7812         NvU32 head;
7813 
7814         for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
7815             if (nvHeadIsActive(pDispEvo, head)) {
7816                 pHeads[i++] = head;
7817             }
7818         }
7819         nvAssert(i > 0 && i <= NVKMS_MAX_HEADS_PER_DISP);
7820         pHeads[i] = NV_INVALID_HEAD;
7821 
7822         return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action,
7823                                          pHeads);
7824     }
7825 }
7826 
7827 
7828 /*
7829  * nvEvoRefFrameLockSli() - Attempt to set up framelock on the GPU's SLI
7830  * primary.  Hooked into EVO state machine via custom rules.
7831  * If pHeads is NULL, only perform a query.
7832  */
7833 
7834 NvBool nvEvoRefFrameLockSli(NVDispEvoPtr pDispEvo,
7835                             NVEvoSubDevPtr pEvoSubDev,
7836                             const NvU32 *pHeads)
7837 {
7838     return FrameLockSli(pDispEvo->pDevEvo, NV_EVO_ADD_FRAME_LOCK_REF,
7839                         pHeads == NULL);
7840 
7841 } /* nvEvoRefFrameLockSli */
7842 
7843 
7844 /*
7845  * nvEvoUnRefFrameLockSli() - Attempt to clean up framelock on the GPU's SLI
7846  * primary.  Hooked into EVO state machine via custom rules.
7847  * If pHeads is NULL, only perform a query.
7848  */
7849 
7850 NvBool nvEvoUnRefFrameLockSli(NVDispEvoPtr pDispEvo,
7851                               NVEvoSubDevPtr pEvoSubDev,
7852                               const NvU32 *pHeads)
7853 {
7854     return FrameLockSli(pDispEvo->pDevEvo, NV_EVO_REM_FRAME_LOCK_REF,
7855                         pHeads == NULL);
7856 
7857 } /* nvEvoUnRefFrameLockSli */
7858 
7859 
7860 /*
7861  * GetRasterLockPin() - Ask RM which lockpin to use in order to configure GPU0
7862  * be a server or client of GPU1, where GPUn is represented by the duple
7863  * (pDispn, headn) (or NV_EVO_LOCK_PIN_ERROR if the two cannot be locked).
7864  */
7865 static void GetRasterLockPin(NVDispEvoPtr pDispEvo0, NvU32 head0,
7866                              NVDispEvoPtr pDispEvo1, NvU32 head1,
7867                              NVEvoLockPin *serverPin, NVEvoLockPin *clientPin)
7868 {
7869     NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS params = { };
7870     NvU32 displayHandle0 = pDispEvo0->pDevEvo->displayHandle;
7871     NvU32 displayHandle1 = pDispEvo1->pDevEvo->displayHandle;
7872     NvU32 ret;
7873 
7874     params.base.subdeviceIndex = pDispEvo0->displayOwner;
7875     params.head = head0;
7876 
7877     params.peer.hDisplay = displayHandle1;
7878     params.peer.subdeviceIndex = pDispEvo1->displayOwner;
7879     params.peer.head = head1;
7880 
7881     ret = nvRmApiControl(nvEvoGlobal.clientHandle,
7882                          displayHandle0,
7883                          NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
7884                          &params, sizeof(params));
7885 
7886     if (ret != NVOS_STATUS_SUCCESS) {
7887         nvEvoLogDispDebug(pDispEvo0, EVO_LOG_ERROR,
7888                           "stateless lockpin query failed; ret: 0x%x", ret);
7889         if (serverPin) *serverPin = NV_EVO_LOCK_PIN_ERROR;
7890         if (clientPin) *clientPin = NV_EVO_LOCK_PIN_ERROR;
7891         return;
7892     }
7893 
7894     if (serverPin) {
7895         if (FLD_TEST_DRF(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
7896                                _MASTER_SCAN_LOCK_CONNECTED, _NO,
7897                                params.masterScanLock)) {
7898             *serverPin = NV_EVO_LOCK_PIN_ERROR;
7899         } else {
7900             int pin = DRF_VAL(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
7901                               _MASTER_SCAN_LOCK_PIN,
7902                               params.masterScanLock);
7903             *serverPin = NV_EVO_LOCK_PIN_0 + pin;
7904         }
7905     }
7906 
7907     if (clientPin) {
7908         if (FLD_TEST_DRF(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
7909                                _SLAVE_SCAN_LOCK_CONNECTED, _NO,
7910                                params.slaveScanLock)) {
7911             *clientPin = NV_EVO_LOCK_PIN_ERROR;
7912         } else {
7913             int pin = DRF_VAL(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
7914                               _SLAVE_SCAN_LOCK_PIN,
7915                               params.slaveScanLock);
7916             *clientPin = NV_EVO_LOCK_PIN_0 + pin;
7917         }
7918     }
7919 } /* GetRasterLockPin */
7920 
7921 static void UpdateLUTNotifierTracking(
7922     NVDispEvoPtr pDispEvo)
7923 {
7924     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
7925     const int dispIndex = pDispEvo->displayOwner;
7926     NvU32 i;
7927 
7928     for (i = 0; i < ARRAY_LEN(pDevEvo->lut.notifierState.sd[dispIndex].notifiers); i++) {
7929         int notifier = pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].notifier;
7930 
7931         if (!pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting) {
7932             continue;
7933         }
7934 
7935         if (!pDevEvo->hal->IsCompNotifierComplete(pDevEvo->pDispEvo[dispIndex],
7936                                                   notifier)) {
7937             continue;
7938         }
7939 
7940         pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask &=
7941             ~pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].apiHeadMask;
7942         pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting = FALSE;
7943     }
7944 }
7945 
7946 /*
7947  * Check whether there are any staged API head LUT notifiers that need to be
7948  * committed.
7949  */
7950 NvBool nvEvoLUTNotifiersNeedCommit(
7951     NVDispEvoPtr pDispEvo)
7952 {
7953     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
7954     const int dispIndex = pDispEvo->displayOwner;
7955     NvU32 apiHeadMask = pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask;
7956 
7957     return apiHeadMask != 0;
7958 }
7959 
7960 /*
7961  * Set up tracking for a LUT Notifier for the apiHeads in stagedApiHeadMask.
7962  *
7963  * The notifier returned by this function must be passed to a subsequent call to
7964  * EvoUpdateAndKickOffWithNotifier.
7965  *
7966  * Returns -1 if an error occurs or no apiHeads need a new LUT notifier. Passing
7967  * the -1 to EvoUpdateAndKickOffWithNotifier with its notify parameter set may
7968  * result in kernel panics.
7969  */
7970 int nvEvoCommitLUTNotifiers(
7971     NVDispEvoPtr pDispEvo)
7972 {
7973     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
7974     const int dispIndex = pDispEvo->displayOwner;
7975     NvU32 apiHeadMask = pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask;
7976     int i;
7977 
7978     pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask = 0;
7979 
7980     UpdateLUTNotifierTracking(pDispEvo);
7981 
7982     if (apiHeadMask == 0) {
7983         return -1;
7984     }
7985 
7986     if (pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask &
7987         apiHeadMask) {
7988         /*
7989          * an apiHead in the requested list is already waiting on a
7990          * notifier
7991          */
7992         nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, "A requested API head is already waiting on a notifier");
7993         return -1;
7994     }
7995 
7996     for (i = 0; i < ARRAY_LEN(pDevEvo->lut.notifierState.sd[dispIndex].notifiers); i++) {
7997         int notifier = (dispIndex * NVKMS_MAX_HEADS_PER_DISP) + i + 1;
7998 
7999         if (pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting) {
8000             continue;
8001         }
8002 
8003         /* use this notifier */
8004         pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].notifier = notifier;
8005         pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting = TRUE;
8006         pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].apiHeadMask = apiHeadMask;
8007 
8008         pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask |=
8009             apiHeadMask;
8010 
8011         return notifier;
8012     }
8013 
8014     /* slot not found */
8015     nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, "No remaining LUT notifier slots");
8016     return -1;
8017 }
8018 
8019 /*
 * Unstage any staged API heads' notifiers.
8021  */
8022 void nvEvoClearStagedLUTNotifiers(
8023     NVDispEvoPtr pDispEvo)
8024 {
8025     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
8026     const int dispIndex = pDispEvo->displayOwner;
8027 
8028     pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask = 0;
8029 }
8030 
8031 /*
8032  * Stage the API Head's notifier for tracking. In order to kickoff the staged
8033  * notifier, nvEvoCommitLUTNotifiers must be called and its return value
8034  * passed to EvoUpdateAndKickoffWithNotifier.
8035  *
8036  * This function and its siblings nvEvoIsLUTNotifierComplete and
8037  * nvEvoWaitForLUTNotifier can be used by callers of nvEvoSetLut to ensure the
8038  * triple-buffer for the color LUT is not overflowed even when nvEvoSetLut is
8039  * called with kickoff = FALSE.
8040  */
8041 void nvEvoStageLUTNotifier(
8042     NVDispEvoPtr pDispEvo,
8043     NvU32 apiHead)
8044 {
8045     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
8046     const int dispIndex = pDispEvo->displayOwner;
8047 
8048     nvAssert((pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask &
8049              NVBIT(apiHead)) == 0);
8050 
8051     pDevEvo->lut.notifierState.sd[dispIndex].stagedApiHeadMask |=
8052         NVBIT(apiHead);
8053 }
8054 
8055 /*
 * Check if the API head's LUT notifier is complete.
8057  */
8058 
8059 NvBool nvEvoIsLUTNotifierComplete(
8060     NVDispEvoPtr pDispEvo,
8061     NvU32 apiHead)
8062 {
8063     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
8064     const int dispIndex = pDispEvo->displayOwner;
8065 
8066     UpdateLUTNotifierTracking(pDispEvo);
8067 
8068     return (pDevEvo->lut.notifierState.sd[dispIndex].waitingApiHeadMask &
8069             NVBIT(apiHead)) == 0;
8070 }
8071 
8072 /*
 * Wait for the API head's LUT notifier to complete.
8074  *
8075  * This function blocks while waiting for the notifier.
8076  */
8077 
8078 void nvEvoWaitForLUTNotifier(
8079     const NVDispEvoPtr pDispEvo,
8080     NvU32 apiHead)
8081 {
8082     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
8083     const int dispIndex = pDispEvo->displayOwner;
8084     int i;
8085 
8086     if (nvEvoIsLUTNotifierComplete(pDispEvo, apiHead)) {
8087         return;
8088     }
8089 
8090     for (i = 0; i < ARRAY_LEN(pDevEvo->lut.notifierState.sd[dispIndex].notifiers); i++) {
8091         int notifier = pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].notifier;
8092 
8093         if (!pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].waiting) {
8094             continue;
8095         }
8096 
8097         if ((pDevEvo->lut.notifierState.sd[dispIndex].notifiers[i].apiHeadMask &
8098             NVBIT(apiHead)) == 0) {
8099 
8100             continue;
8101         }
8102 
8103         pDevEvo->hal->WaitForCompNotifier(pDispEvo, notifier);
8104         return;
8105     }
8106 }
8107 
8108 static void EvoIncrementCurrentLutIndex(NVDispEvoRec *pDispEvo,
8109                                         const NvU32 apiHead,
8110                                         const NvBool baseLutEnabled,
8111                                         const NvBool outputLutEnabled)
8112 {
8113     NvU32 head;
8114     const int dispIndex = pDispEvo->displayOwner;
8115     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
8116     const int numLUTs = ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT);
8117     NVDispApiHeadStateEvoRec *pApiHeadState =
8118         &pDispEvo->apiHeadState[apiHead];
8119 
8120     pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex++;
8121     pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex %= numLUTs;
8122     pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled = baseLutEnabled;
8123     pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled = outputLutEnabled;
8124 
8125     FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
8126         const NvU32 curLutIndex =
8127             pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex;
8128         NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
8129 
8130         pHeadState->lut.outputLutEnabled =
8131             pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled;
8132         pHeadState->lut.baseLutEnabled =
8133             pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled;
8134         pHeadState->lut.pCurrSurface =
8135             pDevEvo->lut.apiHead[apiHead].LUT[curLutIndex];
8136 
8137     }
8138 }
8139 
8140 static NvU32 UpdateLUTTimer(NVDispEvoPtr pDispEvo,
8141                             const NvU32 apiHead,
8142                             const NvBool baseLutEnabled,
8143                             const NvBool outputLutEnabled)
8144 {
8145     if (!nvEvoIsLUTNotifierComplete(pDispEvo, apiHead)) {
8146         // If the notifier is still pending, then the previous update is still
8147         // pending and further LUT changes should continue to go into the third
8148         // buffer.  Reschedule the timer for another 10 ms.
8149         return 10;
8150     }
8151 
8152     // Update the current LUT index and kick off an update.
8153     EvoIncrementCurrentLutIndex(pDispEvo, apiHead, baseLutEnabled,
8154                                 outputLutEnabled);
8155 
8156     EvoUpdateCurrentPalette(pDispEvo, apiHead);
8157 
8158     // Return 0 to cancel the timer.
8159     return 0;
8160 }
8161 
8162 static void UpdateLUTTimerNVKMS(void *dataPtr, NvU32 dataU32)
8163 {
8164     NVDispEvoPtr pDispEvo = dataPtr;
    const NvU32 apiHead = DRF_VAL(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD,
                                  dataU32);
8167     const NvBool baseLutEnabled = FLD_TEST_DRF(UPDATE_LUT_TIMER_NVKMS, _DATAU32,
8168                                                _BASE_LUT, _ENABLE, dataU32);
8169     const NvBool outputLutEnabled = FLD_TEST_DRF(UPDATE_LUT_TIMER_NVKMS, _DATAU32,
8170                                                  _OUTPUT_LUT, _ENABLE, dataU32);
8171     NvU32 ret = UpdateLUTTimer(pDispEvo, apiHead, baseLutEnabled,
8172                                outputLutEnabled);
8173 
8174     if (ret != 0) {
8175         ScheduleLutUpdate(pDispEvo, apiHead, dataU32, ret * 1000);
8176     }
8177 }
8178 
8179 static void ScheduleLutUpdate(NVDispEvoRec *pDispEvo,
8180                               const NvU32 apiHead, const NvU32 data,
8181                               const NvU64 usec)
8182 {
8183     NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
8184 
8185     /* Cancel previous update */
8186     nvCancelLutUpdateEvo(pDispEvo, apiHead);
8187 
8188     /* schedule a new timer */
8189     pDevEvo->lut.apiHead[apiHead].disp[pDispEvo->displayOwner].updateTimer =
8190         nvkms_alloc_timer(UpdateLUTTimerNVKMS,
8191                           pDispEvo, data,
8192                           usec);
8193 }
8194 
8195 /*
8196  * The gamma ramp, if specified, has a 16-bit range.  Convert it to EVO's 14-bit
8197  * shifted range and zero out the low 3 bits for bug 813188.
8198  */
8199 static inline NvU16 GammaToEvo(NvU16 gamma)
8200 {
8201     return ((gamma >> 2) & ~7) + 24576;
8202 }
8203 
8204 static NVEvoLutDataRec *GetNewLutBuffer(
8205     const NVDispEvoRec *pDispEvo,
8206     const struct NvKmsSetLutCommonParams *pParams)
8207 {
8208     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
8209     NVEvoLutDataRec *pLUTBuffer = NULL;
8210 
8211     // XXX NVKMS TODO: If only input or output are specified and the other one
8212     // is enabled in the hardware, this will zero out the one not specified. In
8213     // practice it isn't a problem today because the X driver always specifies
8214     // both, but we should fix this once we start always using the base channel,
8215     // where we have a separate base LUT ctxdma.
8216     //
8217     // This is also a problem if a partial update of the input LUT is attempted
8218     // (i.e. start != 0 or end != numberOfLutEntries-1).
8219     //
8220     // Filed bug: 2042919 to track removing this TODO.
8221 
8222     pLUTBuffer = nvCalloc(1, sizeof(*pLUTBuffer));
8223 
8224     if (pLUTBuffer == NULL) {
8225         goto done;
8226     }
8227 
8228     if (pParams->input.specified && pParams->input.end != 0) {
8229         const struct NvKmsLutRamps *pRamps =
8230             nvKmsNvU64ToPointer(pParams->input.pRamps);
8231         const NvU16 *red = pRamps->red;
8232         const NvU16 *green = pRamps->green;
8233         const NvU16 *blue = pRamps->blue;
8234 
8235         nvAssert(pRamps != NULL);
8236 
8237         // Update our shadow copy of the LUT.
8238         pDevEvo->hal->FillLUTSurface(pLUTBuffer->base,
8239                                      red, green, blue,
8240                                      pParams->input.end + 1,
8241                                      pParams->input.depth);
8242     }
8243 
8244     if (pParams->output.specified && pParams->output.enabled) {
8245         const struct NvKmsLutRamps *pRamps =
8246             nvKmsNvU64ToPointer(pParams->output.pRamps);
8247         int i;
8248 
8249         nvAssert(pRamps != NULL);
8250 
8251         if (pDevEvo->hal->caps.hasUnorm16OLUT) {
8252             for (i = 0; i < 1024; i++) {
8253                 // Copy the client's 16-bit ramp directly to the LUT buffer.
8254                 pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Red = pRamps->red[i];
8255                 pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Green = pRamps->green[i];
8256                 pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Blue = pRamps->blue[i];
8257             }
8258 
8259             pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + 1024] =
8260                 pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + 1023];
8261         } else {
8262             for (i = 0; i < 1024; i++) {
8263                 // Convert from the client's 16-bit range to the EVO 14-bit shifted
8264                 // range.
8265                 pLUTBuffer->output[i].Red = GammaToEvo(pRamps->red[i]);
8266                 pLUTBuffer->output[i].Green = GammaToEvo(pRamps->green[i]);
8267                 pLUTBuffer->output[i].Blue = GammaToEvo(pRamps->blue[i]);
8268             }
8269 
8270             pLUTBuffer->output[1024] = pLUTBuffer->output[1023];
8271         }
8272     }
8273 
8274     /* fall through */
8275 
8276 done:
8277     return pLUTBuffer;
8278 }
8279 
8280 
8281 /*
8282  * Update the api head's LUT with the given colors.
8283  *
8284  * The color LUT is triple-buffered.
8285  *
8286  * curLUTIndex indicates the buffer currently being updated.  What the other
8287  * two buffers are used for depends on whether the previous update has
8288  * completed.  If not (case 1):
8289  *   curLUTIndex + 1 (mod 3): currently being displayed
8290  *   curLUTIndex + 2 (mod 3): will be displayed at next vblank
8291  * If so (case 2):
8292  *   curLUTIndex + 1 (mod 3): unused
8293  *   curLUTIndex + 2 (mod 3): currently being displayed
8294  *
8295  * In case 1, just update the current buffer and kick off a timer to submit the
8296  * update from i+2 to i.  If more LUT changes come in before the first update
8297  * happens, kill the timer and start a new one.
8298  *
8299  * In case 2, kill the timer if it still hasn't gone off, update buffer i, and
8300  * kick off an update.  No new timer needs to be scheduled.
8301  */
8302 
8303 void nvEvoSetLut(NVDispEvoPtr pDispEvo, NvU32 apiHead, NvBool kickoff,
8304                  const struct NvKmsSetLutCommonParams *pParams)
8305 {
8306     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
8307     const int dispIndex = pDispEvo->displayOwner;
8308     const int curLUT = pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curLUTIndex;
8309     const NvBool waitForPreviousUpdate =
8310         pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate;
8311     const int numLUTs = ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT);
8312     const int lutToFill = (curLUT + 1) % numLUTs;
8313     NVLutSurfaceEvoPtr pSurfEvo = pDevEvo->lut.apiHead[apiHead].LUT[lutToFill];
    NvBool baseLutEnabled =
        pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled;
8316     NvBool outputLutEnabled =
8317         pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled;
8318 
8319     if (!pParams->input.specified && !pParams->output.specified) {
8320         return;
8321     }
8322 
8323     if (pParams->input.specified) {
8324         baseLutEnabled = (pParams->input.end != 0);
8325     }
8326 
8327     if (pParams->output.specified) {
8328         outputLutEnabled = pParams->output.enabled;
8329     }
8330 
8331     nvAssert(pSurfEvo != NULL);
8332 
8333     if ((pParams->input.specified && pParams->input.end != 0) ||
8334         (pParams->output.specified && pParams->output.enabled)) {
8335         NVEvoLutDataRec *pLUTBuffer = GetNewLutBuffer(pDispEvo, pParams);
8336 
8337         if (pLUTBuffer == NULL) {
8338             nvEvoLogDev(pDevEvo, EVO_LOG_WARN,
8339                         "LUT Allocation failure; skipping LUT update");
8340             return;
8341         }
8342 
8343         // Fill in the new LUT buffer.
8344         nvUploadDataToLutSurfaceEvo(pSurfEvo, pLUTBuffer, pDispEvo);
8345 
8346         nvFree(pLUTBuffer);
8347     }
8348 
8349     /* Kill a pending timer */
8350     nvCancelLutUpdateEvo(pDispEvo, apiHead);
8351 
8352     if (!kickoff) {
8353         EvoIncrementCurrentLutIndex(pDispEvo, apiHead, baseLutEnabled,
8354                                     outputLutEnabled);
8355         return;
8356     }
8357 
8358     // See if we can just fill the next LUT buffer and kick off an update now.
8359     // We can do that if this is the very first update, or if the previous
8360     // update is complete, or if we need to guarantee that this update
8361     // is synchronous.
8362     NvBool previousUpdateComplete =
8363         nvEvoIsLUTNotifierComplete(pDispEvo, apiHead);
8364     if (!waitForPreviousUpdate || previousUpdateComplete ||
8365         pParams->synchronous) {
8366 
8367         if (!previousUpdateComplete) {
8368             nvEvoWaitForLUTNotifier(pDispEvo, apiHead);
8369         }
8370 
8371         // Kick off an update now.
8372         EvoIncrementCurrentLutIndex(pDispEvo, apiHead, baseLutEnabled,
8373                                     outputLutEnabled);
8374         EvoUpdateCurrentPalette(pDispEvo, apiHead);
8375 
8376         // If this LUT update is synchronous, then sync before returning.
8377         if (pParams->synchronous &&
8378             pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate) {
8379 
8380             nvEvoWaitForLUTNotifier(pDispEvo, apiHead);
8381             pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate =
8382                 FALSE;
8383         }
8384     } else {
8385         // Schedule a timer to kick off an update later.
8386         // XXX 5 ms is a guess.  We could probably look at this pDpy's refresh
8387         // rate to come up with a more reasonable estimate.
8388         NvU32 dataU32 = DRF_NUM(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, apiHead);
8389 
8390         nvAssert((apiHead & ~0xff) == 0);
8391 
8392         if (baseLutEnabled) {
8393             dataU32 |= DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _BASE_LUT,
8394                                _ENABLE);
8395         }
8396 
8397         if (outputLutEnabled) {
8398             dataU32 |= DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _OUTPUT_LUT,
8399                                _ENABLE);
8400         }
8401 
8402         ScheduleLutUpdate(pDispEvo, apiHead, dataU32, 5 * 1000);
8403     }
8404 }
8405 
8406 NvBool nvValidateSetLutCommonParams(
8407     const NVDevEvoRec *pDevEvo,
8408     const struct NvKmsSetLutCommonParams *pParams)
8409 {
8410     NvU32 maxSize = 0;
8411 
8412     if (pParams->output.specified && pParams->output.enabled) {
8413         if (pParams->output.pRamps == 0) {
8414             return FALSE;
8415         }
8416     }
8417 
8418     if (!pParams->input.specified || pParams->input.end == 0) {
8419         return TRUE;
8420     }
8421 
8422     if (pParams->input.pRamps == 0) {
8423         return FALSE;
8424     }
8425 
8426     switch (pParams->input.depth) {
8427         case 8:  maxSize = 256;  break;
8428         case 15: maxSize = 32;   break;
8429         case 16: maxSize = 64;   break;
8430         case 24: maxSize = 256;  break;
8431         case 30: maxSize = 1024; break;
8432         default: return FALSE;
8433     }
8434 
    nvAssert(maxSize <= NVKMS_LUT_ARRAY_SIZE);
8438 
8439     /* Currently, the implementation assumes start==0. */
8440     if (pParams->input.start != 0) {
8441         return FALSE;
8442     }
8443 
8444     if (pParams->input.end >= maxSize) {
8445         return FALSE;
8446     }
8447 
8448     return TRUE;
8449 }
8450 
8451 static NvU32 GetSwapLockoutWindowUs(NVDispEvoPtr pDispEvo)
8452 {
8453     NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS params = { 0 };
8454     NvU32 ret;
8455 
8456     nvAssert(pDispEvo->pFrameLockEvo != NULL);
8457 
8458     ret = nvRmApiControl(
8459             nvEvoGlobal.clientHandle,
8460             pDispEvo->pFrameLockEvo->device,
8461             NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW,
8462             &params, sizeof(params));
8463 
8464     if (ret != NVOS_STATUS_SUCCESS) {
8465         nvAssert(!"NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW failed");
8466     }
8467 
8468     return params.tSwapRdyHi;
8469 }
8470 
8471 static NvU32 CalculateSwapLockoutStartP2060(NVDispEvoPtr pDispEvo,
8472                                             const NvU32 head,
8473                                             const NvU32 tSwapRdyHiUs)
8474 {
8475     const NVHwModeTimingsEvo *pTimings;
8476 
8477     nvAssert(head != NV_INVALID_HEAD);
8478     nvAssert(nvHeadIsActive(pDispEvo, head));
8479 
8480     pTimings = &pDispEvo->headState[head].timings;
8481 
8482     /*
8483      *  SWAP_LOCKOUT_START = Vtotal * TswapRdyHi * Refresh_Rate
8484      *
     * = Vtotal * TswapRdyHi * (pclk / (Vtotal * Htotal))
8488      * = TswapRdyHi * (pclk / Htotal)
8489      * = TswapRdyHiUs * 1e-6 * pclk / Htotal
8490      * = TswapRdyHiUs * pclk / (Htotal * 1000000)
8491      * = TswapRdyHiUs * (pclkKhz * 1000) / (Htotal * 1000000)
8492      * = TswapRdyHiUs * pclkKhz / (Htotal * 1000)
8493      *
8494      * Since SWAP_LOCKOUT_START must be higher than LSR_MIN_TIME, round this
8495      * result up to the nearest integer.
8496      */
8497 
8498     return NV_ROUNDUP_DIV(tSwapRdyHiUs * pTimings->pixelClock,
8499                           pTimings->rasterSize.x * 1000);
8500 }
8501 
8502 /**
8503  * Override the swap lockout start value on heads on this pDisp, or restore the
8504  * default value.
8505  *
8506  * This is called before (with isPre == TRUE) and after (with isPre == FALSE)
8507  * swap barriers are enabled on the G-Sync board.  In order to satisfy certain
8508  * timing criteria, we need to set a special value for SWAP_LOCKOUT_START for
8509  * the duration of swap barriers being enabled.
8510  */
8511 void nvSetSwapBarrierNotifyEvo(NVDispEvoPtr pDispEvo,
8512                                NvBool enable, NvBool isPre)
8513 {
8514     NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
8515     NvU32 tSwapRdyHiUs = 0;
8516     NvU32 head;
8517 
8518     if ((isPre && !enable) || (!isPre && enable)) {
8519         return;
8520     }
8521 
8522     if (enable) {
8523         tSwapRdyHiUs = GetSwapLockoutWindowUs(pDispEvo);
8524     }
8525 
8526     for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
8527         NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS params = { };
8528         NvU32 ret;
8529 
8530         if (!nvHeadIsActive(pDispEvo, head)) {
8531             continue;
8532         }
8533 
8534         params.maxSwapLockoutSkew =
8535             NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW_INIT;
8536 
8537         if (enable) {
8538             params.swapLockoutStart =
8539                 CalculateSwapLockoutStartP2060(pDispEvo, head, tSwapRdyHiUs);
8540         } else {
8541             params.swapLockoutStart =
8542                 NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START_INIT;
8543         }
8544 
8545         params.head = head;
8546 
8547         params.base.subdeviceIndex = pDispEvo->displayOwner;
8548 
8549         ret = nvRmApiControl(nvEvoGlobal.clientHandle,
8550                              pDevEvo->displayHandle,
8551                              NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP,
8552                              &params,
8553                              sizeof(params));
8554 
8555         if (ret != NVOS_STATUS_SUCCESS) {
8556             nvAssert(!"NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP failed");
8557         }
8558     }
8559 }
8560 
8561 /*!
8562  * Release a reference to a pDevEvo
8563  *
8564  * If the refcount of the device drops to 0, this frees the device.
8565  *
8566  * \return TRUE if the device was freed, FALSE otherwise.
8567  */
8568 NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo)
8569 {
8570     if (pDevEvo == NULL) {
8571         return FALSE;
8572     }
8573 
8574     pDevEvo->allocRefCnt--;
8575 
8576     if (pDevEvo->allocRefCnt > 0) {
8577         return FALSE;
8578     }
8579 
8580     if (pDevEvo->pDifrState) {
8581         nvRmUnregisterDIFREventHandler(pDevEvo);
8582         nvDIFRFree(pDevEvo->pDifrState);
8583         pDevEvo->pDifrState = NULL;
8584     }
8585 
8586     if (pDevEvo->pNvKmsOpenDev != NULL) {
8587         /*
8588          * DP-MST allows to attach more than one heads/stream to single DP
8589          * connector, and there is no way to convey that DP-MST configuration to
8590          * next driver load; therefore disallow DP-MST.
8591          */
8592         nvEvoRestoreConsole(pDevEvo, FALSE /* allowMST */);
8593 
8594         nvEvoUnregisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev,
8595                                pDevEvo->fbConsoleSurfaceHandle,
8596                                TRUE /* skipUpdate */);
8597         pDevEvo->fbConsoleSurfaceHandle = 0;
8598     }
8599 
    nvFreeCoreChannelEvo(pDevEvo);

    nvTeardownHdmiLibrary(pDevEvo);

    nvHsFreeDevice(pDevEvo);

    nvFreePerOpenDev(nvEvoGlobal.nvKmsPerOpen, pDevEvo->pNvKmsOpenDev);

    nvFreeFrameLocksEvo(pDevEvo);

    if (pDevEvo->hal) {
        pDevEvo->hal->FreeRmCtrlObject(pDevEvo);
    }

    nvRmDestroyDisplays(pDevEvo);

    nvkms_free_timer(pDevEvo->consoleRestoreTimer);
    pDevEvo->consoleRestoreTimer = NULL;

    nvPreallocFree(pDevEvo);

    nvRmFreeDeviceEvo(pDevEvo);

    nvListDel(&pDevEvo->devListEntry);

    nvkms_free_ref_ptr(pDevEvo->ref_ptr);

    nvFree(pDevEvo);
    return TRUE;
}

static void AssignNumberOfApiHeads(NVDevEvoRec *pDevEvo)
{
    pDevEvo->numApiHeads = pDevEvo->numHeads;
}

NVDevEvoPtr nvAllocDevEvo(const struct NvKmsAllocDeviceRequest *pRequest,
                          enum NvKmsAllocDeviceStatus *pStatus)
{
    NVDevEvoPtr pDevEvo = NULL;
    enum NvKmsAllocDeviceStatus status =
        NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
    NvU32 i;

    nvAssert(nvFindDevEvoByDeviceId(pRequest->deviceId) == NULL);

    pDevEvo = nvCalloc(1, sizeof(*pDevEvo));

    if (pDevEvo == NULL) {
        goto done;
    }

    pDevEvo->allocRefCnt = 1;

    pDevEvo->gpuLogIndex = NV_INVALID_GPU_LOG_INDEX;

    pDevEvo->gc6Allowed = TRUE;

    nvListAppend(&pDevEvo->devListEntry, &nvEvoGlobal.devList);

    pDevEvo->ref_ptr = nvkms_alloc_ref_ptr(pDevEvo);
    if (!pDevEvo->ref_ptr) {
        goto done;
    }

    for (i = 0; i < ARRAY_LEN(pDevEvo->openedGpuIds); i++) {
        pDevEvo->openedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
    }

    for (i = 0; i < ARRAY_LEN(pDevEvo->headForWindow); i++) {
        pDevEvo->headForWindow[i] = NV_INVALID_HEAD;
    }

    if (!nvRmAllocDeviceEvo(pDevEvo, pRequest)) {
        goto done;
    }

    status = nvAssignEvoCaps(pDevEvo);

    if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) {
        goto done;
    }

    if (!nvPreallocAlloc(pDevEvo)) {
        status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
        goto done;
    }

    /*
     * Copy the registry keys from the alloc device request to the device.
     *
     * This needs to be set before nvRmAllocDisplays, because nvRmAllocDisplays
     * will initialize the DP library, which may read registry keys that we
     * want to allow clients to override.
     */
    ct_assert(ARRAY_LEN(pRequest->registryKeys) ==
              ARRAY_LEN(pDevEvo->registryKeys));
    ct_assert(ARRAY_LEN(pRequest->registryKeys[0].name) ==
              ARRAY_LEN(pDevEvo->registryKeys[0].name));

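    /*
     * Copy each key name and value; the final byte of the name is forced to
     * '\0' in case the client-provided name was not NUL-terminated.
     */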
    for (i = 0; i < ARRAY_LEN(pRequest->registryKeys); i++) {
        const size_t nameLen = sizeof(pDevEvo->registryKeys[i].name);
        nvkms_memcpy(pDevEvo->registryKeys[i].name,
                     pRequest->registryKeys[i].name,
                     nameLen);
        pDevEvo->registryKeys[i].name[nameLen - 1] = '\0';
        pDevEvo->registryKeys[i].value = pRequest->registryKeys[i].value;
    }

    status = nvRmAllocDisplays(pDevEvo);

    if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) {
        goto done;
    }

    nvAllocFrameLocksEvo(pDevEvo);

    if (!pDevEvo->hal->AllocRmCtrlObject(pDevEvo)) {
        status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
        goto done;
    }

    AssignNumberOfApiHeads(pDevEvo);

    if (!nvAllocCoreChannelEvo(pDevEvo)) {
        status = NVKMS_ALLOC_DEVICE_STATUS_CORE_CHANNEL_ALLOC_FAILED;
        goto done;
    }

    pDevEvo->pNvKmsOpenDev = nvAllocPerOpenDev(nvEvoGlobal.nvKmsPerOpen,
                                               pDevEvo, TRUE /* isPrivileged */);
    if (!pDevEvo->pNvKmsOpenDev) {
        status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
        goto done;
    }

    nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);

    /*
     * Import the framebuffer console, if there is one,
     * as a surface we can flip to.
     */
    nvRmImportFbConsoleMemory(pDevEvo);

    /*
     * This check must be placed after nvAllocCoreChannelEvo() since it depends
     * on the HW capabilities that are read in that function.
     */
    if (!ValidateConnectorTypes(pDevEvo)) {
        status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
        goto done;
    }

    if (!nvHsAllocDevice(pDevEvo, pRequest)) {
        status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
        goto done;
    }

    if (!nvInitHdmiLibrary(pDevEvo)) {
        status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
        goto done;
    }

    nvRmMuxInit(pDevEvo);

    status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS;

    /*
     * DIFR state cannot be allocated if the hardware doesn't support DIFR.
     * Register event handlers only if DIFR state was allocated.
     */
    pDevEvo->pDifrState = nvDIFRAllocate(pDevEvo);
    if (pDevEvo->pDifrState) {
        if (!nvRmRegisterDIFREventHandler(pDevEvo)) {
            nvDIFRFree(pDevEvo->pDifrState);
            pDevEvo->pDifrState = NULL;
        }
    }

    /* fall through */

done:
    if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) {
        nvFreeDevEvo(pDevEvo);
        pDevEvo = NULL;
    }

    *pStatus = status;

    return pDevEvo;
}
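
/*
 * Illustrative pairing of the allocation entry points (hypothetical caller,
 * not part of this file):
 *
 *     enum NvKmsAllocDeviceStatus status;
 *     NVDevEvoPtr pDevEvo = nvAllocDevEvo(pRequest, &status);
 *
 *     if (status == NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) {
 *         ... use the device ...
 *         nvFreeDevEvo(pDevEvo); // frees it once the refcount reaches 0
 *     }
 */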


// How long before we time out waiting for lock?
// In microseconds.
#define LOCK_TIMEOUT 5000000

//
// EvoWaitForLock()
// Wait for raster or flip lock to complete
// Note that we use pDev and subdevice here instead of pDisp since this is used
// per-subdev in SLI (including the pDispEvo->numSubDevices > 1 case).
//
static NvBool EvoWaitForLock(const NVDevEvoRec *pDevEvo, const NvU32 sd,
                             const NvU32 head, const NvU32 type,
                             NvU64 *pStartTime)
{
    NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS status = { };
    NvU32 ret;

    nvAssert(type == EVO_RASTER_LOCK || type == EVO_FLIP_LOCK);

    if ((type == EVO_FLIP_LOCK) &&
        !pDevEvo->hal->caps.supportsFlipLockRGStatus) {
        return TRUE;
    }

    status.head             = head;
    status.base.subdeviceIndex = sd;
    status.scanLocked       = NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_NO;
    status.flipLocked       = NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_NO;

    // Just keep looping until we get what we want.
    do {
        ret = nvRmApiControl(nvEvoGlobal.clientHandle,
                             pDevEvo->displayHandle,
                             NV5070_CTRL_CMD_GET_RG_STATUS,
                             &status,
                             sizeof(status));
        if (ret != NVOS_STATUS_SUCCESS) {
            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                        "Unable to read SLI lock status");
            return FALSE;
        }

        if ((type == EVO_RASTER_LOCK) &&
            (status.scanLocked ==
                NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_YES)) {
            break;
        }
        if ((type == EVO_FLIP_LOCK) &&
            (status.flipLocked ==
                NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_YES)) {
            break;
        }

        if (nvExceedsTimeoutUSec(pDevEvo, pStartTime, LOCK_TIMEOUT)) {
            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                        "SLI lock timeout exceeded (type %d)", type);
            return FALSE;
        }

        nvkms_yield();

    } while (TRUE);

    // Once we've exited from the various loops above, we should be locked
    // as requested.
    return TRUE;
}

//
// EvoUpdateHeadParams()
// Send GPUs HeadParams updates; accounts for SLI.
//
static void EvoUpdateHeadParams(const NVDispEvoRec *pDispEvo, NvU32 head,
                                NVEvoUpdateState *updateState)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    nvPushEvoSubDevMaskDisp(pDispEvo);

    pDevEvo->hal->SetHeadControl(pDevEvo, pDispEvo->displayOwner, head, updateState);

    nvPopEvoSubDevMask(pDevEvo);
}

//
// nvReadCRC32Evo()
// Returns the last CRC32 value
//
NvBool nvReadCRC32Evo(NVDispEvoPtr pDispEvo, NvU32 head,
                      CRC32NotifierCrcOut *crcOut)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
    NVEvoDmaPtr dma = NULL;
    NVConnectorEvoPtr pConnectorEvo = NULL;
    NVEvoUpdateState updateState = { };
    NvU32 numCRC32 = 0;
    NvBool res = TRUE;
    NvBool found = FALSE;
    NvU32 ret;

    // Look up the head connector
    nvListForEachEntry(pConnectorEvo,
                       &pDispEvo->connectorList,
                       connectorListEntry) {
        NvU32 activeHeadMask =
            nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
        if (activeHeadMask & NVBIT(head)) {
            found = TRUE;
            break;
        }
    }

    if (!found) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Unable to find active connector for head %d", head);
        return FALSE;
    }

    // Allocate a temporary DMA notifier
    dma = nvCalloc(1, sizeof(NVEvoDma));
    if ((dma == NULL) ||
        !nvRmAllocEvoDma(pDevEvo,
                         dma,
                         NV_DMA_EVO_NOTIFIER_SIZE - 1,
                         DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER),
                         1 << pDispEvo->displayOwner)) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "CRC32 notifier DMA allocation failed");
        nvFree(dma);
        return FALSE;
    }

    // Bind the CRC32 notifier surface descriptor
    ret = pDevEvo->hal->BindSurfaceDescriptor(pDevEvo, pDevEvo->core, &dma->surfaceDesc);
    if (ret != NVOS_STATUS_SUCCESS) {
        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
                         "Failed to bind display engine CRC32 notify surface descriptor "
                         ": 0x%x (%s)", ret, nvstatusToString(ret));
        res = FALSE;
        goto done;
    }

    // Only set up the actual output for SLI primary.
    nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);

    /* CRC notifiers are similar to completion notifiers, but work slightly
     * differently:
     *
     *   1. In order to start CRC generation for a head, we need to:
     *
     *      - Point an EVO head at a block of memory with
     *        HEAD_SET_CONTEXT_DMA_CRC(head)
     *
     *      - Program the CRC control with HEAD_SET_CRC_CONTROL(head) to select
     *        what output we want to capture CRC values from, and kick off a
     *        core channel update (this already generates a CRC value for the
     *        last scanout buffer)
     *
     *      ----> hal->StartCRC32Capture()
     *
     *   2. From 1) on, a new CRC value is generated per vblank and written to
     *      an incrementing entry in the CRC notifier. With pre-nvdisplay chips,
     *      a CRC notifier can hold up to 256 entries. Once filled up, new CRC
     *      values are discarded. In either case, we are only interested in the
     *      last CRC32 value.
     *
     *   3. In order to stop CRC generation, we need to perform the inverse
     *      operation of 1):
     *
     *      - Program the CRC control with HEAD_SET_CRC_CONTROL(head) to
     *        unselect all outputs we were capturing CRC values from.
     *
     *      - Unset the CRC context DMA with HEAD_SET_CONTEXT_DMA_CRC(head)
     *
     *      ----> hal->StopCRC32Capture()
     *
     *   4. From 3) on, it is safe to wait for the CRC notifier and query all
     *      entries.
     *
     *      ----> hal->QueryCRC32()
     */
    pDevEvo->hal->StartCRC32Capture(pDevEvo,
                                    dma,
                                    pConnectorEvo,
                                    pTimings->protocol,
                                    pConnectorEvo->or.primary,
                                    head,
                                    pDispEvo->displayOwner,
                                    &updateState);

    // This update should generate one CRC value.
    nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE /* releaseElv */);

    pDevEvo->hal->StopCRC32Capture(pDevEvo,
                                   head,
                                   &updateState);

    nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE /* releaseElv */);

    if (!pDevEvo->hal->QueryCRC32(pDevEvo,
                                  dma,
                                  pDispEvo->displayOwner,
                                  1,
                                  crcOut,
                                  &numCRC32) ||
        (numCRC32 == 0)) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to query all CRC32 values");
    }

    nvPopEvoSubDevMask(pDevEvo);

done:
    // Clean-up
    nvRmFreeEvoDma(pDevEvo, dma);
    nvFree(dma);

    return res;
}
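
/*
 * Illustrative use of nvReadCRC32Evo() (hypothetical caller; the
 * CRC32NotifierCrcOut output structure is assumed to be filled in by the
 * HAL's QueryCRC32 implementation):
 *
 *     CRC32NotifierCrcOut crcOut = { };
 *
 *     if (nvReadCRC32Evo(pDispEvo, head, &crcOut)) {
 *         // crcOut now holds the most recent CRC32 value(s) for this head
 *     }
 */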

NvU32 nvGetActiveSorMask(const NVDispEvoRec *pDispEvo)
{
    NvU32 activeSorMask = 0;
    NvU32 head;

    for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
        NVConnectorEvoPtr pConnectorEvo =
            pDispEvo->headState[head].pConnectorEvo;

        if (pConnectorEvo != NULL &&
            pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
            NvU32 orIndex;
            nvAssert(pConnectorEvo->or.primary != NV_INVALID_OR);
            FOR_EACH_INDEX_IN_MASK(32, orIndex, nvConnectorGetORMaskEvo(pConnectorEvo)) {
                if (pConnectorEvo->or.ownerHeadMask[orIndex] == 0x0) {
                    continue;
                }
                activeSorMask |= NVBIT(orIndex);
            } FOR_EACH_INDEX_IN_MASK_END;
        }
    }

    return activeSorMask;
}

NvBool nvEvoPollForNoMethodPending(NVDevEvoPtr pDevEvo,
                                   const NvU32 sd,
                                   NVEvoChannelPtr pChannel,
                                   NvU64 *pStartTime,
                                   const NvU32 timeout)
{
    do
    {
        NvBool isMethodPending = TRUE;

        if (pDevEvo->hal->IsChannelMethodPending(
                                    pDevEvo,
                                    pChannel,
                                    sd,
                                    &isMethodPending) && !isMethodPending) {
            break;
        }

        if (nvExceedsTimeoutUSec(pDevEvo, pStartTime, timeout)) {
            return FALSE;
        }

        nvkms_yield();
    } while (TRUE);

    return TRUE;
}
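
/*
 * Illustrative call (hypothetical values): wait up to two seconds for the
 * core channel on subdevice 0 to drain:
 *
 *     NvU64 startTime = 0;
 *     const NvU32 timeoutUs = 2000000; // two seconds, in microseconds
 *
 *     if (!nvEvoPollForNoMethodPending(pDevEvo, 0, pDevEvo->core,
 *                                      &startTime, timeoutUs)) {
 *         // timed out with methods still pending
 *     }
 */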

static NvU32 SetSORFlushMode(NVDevEvoPtr pDevEvo,
                             NvU32 sorNumber,
                             NvU32 headMask,
                             NvBool enable)
{
    NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params = { };

    params.base.subdeviceIndex = 0;
    params.sorNumber = sorNumber;
    params.headMask = headMask;
    params.bEnable = enable;

    return nvRmApiControl(nvEvoGlobal.clientHandle,
                          pDevEvo->displayHandle,
                          NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE,
                          &params, sizeof(params));
}

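/*
 * Bring the DP serializer link up or down, or re-train it. With reTrain ==
 * TRUE, the link is re-trained behind SOR flush mode so that heads can stay
 * attached while the link clocks change.
 */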
static void DPSerializerLinkTrain(NVDispEvoPtr pDispEvo,
                                  NVConnectorEvoPtr pConnectorEvo,
                                  NvBool enableLink,
                                  NvBool reTrain)
{
    const NvU32 displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
    const NvU32 sorNumber = pConnectorEvo->or.primary;
    const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
    NvBool force = NV_FALSE;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    /*
     * The NV0073_CTRL_DP_DATA_SET_{LANE_COUNT, LINK_BW} defines are the same
     * as the actual DPCD values. As such, we can directly assign the
     * dpSerializerCaps here.
     */
    NvBool isMST = pConnectorEvo->dpSerializerCaps.supportsMST;
    NvU32 linkBW = pConnectorEvo->dpSerializerCaps.maxLinkBW;
    NvU32 laneCount = pConnectorEvo->dpSerializerCaps.maxLaneCount;

    nvAssert(nvConnectorIsDPSerializer(pConnectorEvo));

    if (sorNumber == NV_INVALID_OR) {
        return;
    }

    if (reTrain) {
        if (!pConnectorEvo->dpSerializerEnabled) {
            nvEvoLogDev(pDevEvo, EVO_LOG_INFO,
                        "Received expected HPD_IRQ during serializer shutdown");
            return;
        }
    } else if (enableLink) {
        pConnectorEvo->dpSerializerEnabled = NV_TRUE;
    } else {
        linkBW = 0;
        laneCount = NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0;
        pConnectorEvo->dpSerializerEnabled = NV_FALSE;
    }

    if (isMST) {
        NvU32 dpcdData = 0;

        dpcdData = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, dpcdData);
        dpcdData =
            FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, dpcdData);
        if (!nvWriteDPCDReg(pConnectorEvo, NV_DPCD_MSTM_CTRL, dpcdData)) {
            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to enable MST DPCD");
            return;
        }
    }

    /*
     * We cannot perform link training while the OR has an attached head
     * since we would be changing the OR clocks and link frequency while
     * it's actively encoding pixels, and this could lead to FIFO overflow/
     * underflow issues. Instead, the recommended, safe sequence is to enter
     * flush mode first, re-train the link, and exit flush mode after.
     */
    if (reTrain) {
        if (SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_TRUE) !=
            NVOS_STATUS_SUCCESS) {
            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                        "Failed to enter flush mode");
            return;
        }
    }

    do {
        NvU32 dpCtrlData = 0;
        NvU32 dpCtrlCmd = 0;
        NV0073_CTRL_DP_CTRL_PARAMS dpCtrlParams = { };

        dpCtrlCmd = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) |
                    DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE) |
                    DRF_DEF(0073_CTRL, _DP_CMD, _SET_ENHANCED_FRAMING, _TRUE);

        if (isMST) {
            dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_FORMAT_MODE, _MULTI_STREAM);
        }

        if (force) {
            dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAKE_LINK_TRAINING, _DONOT_TOGGLE_TRANSMISSION);
        }

        dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LINK_BW,
                                     linkBW, dpCtrlData);
        dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LANE_COUNT,
                                     laneCount, dpCtrlData);
        dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _TARGET,
                                     NV0073_CTRL_DP_DATA_TARGET_SINK,
                                     dpCtrlData);

        dpCtrlParams.subDeviceInstance = pDispEvo->displayOwner;
        dpCtrlParams.displayId = displayId;
        dpCtrlParams.cmd = dpCtrlCmd;
        dpCtrlParams.data = dpCtrlData;

        if (nvRmApiControl(nvEvoGlobal.clientHandle,
                           pDevEvo->displayCommonHandle,
                           NV0073_CTRL_CMD_DP_CTRL,
                           &dpCtrlParams, sizeof(dpCtrlParams)) == NVOS_STATUS_SUCCESS) {
            break;
        }

        if (force) {
            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Fake link training failed");
            break;
        }

        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Link training failed");

        /*
         * XXX Force the link config on the GPU side to avoid hanging the display
         * pipe during modeset. Eventually, we need to figure out how to deal
         * with/report these kinds of LT failures.
         */
        force = NV_TRUE;

    } while (NV_TRUE);

    if (reTrain) {
        if (SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_FALSE) !=
            NVOS_STATUS_SUCCESS) {
            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                        "Failed to exit flush mode");
        }
    }
}

void nvDPSerializerHandleDPIRQ(NVDispEvoPtr pDispEvo,
                               NVConnectorEvoPtr pConnectorEvo)
{
    DPSerializerLinkTrain(pDispEvo, pConnectorEvo,
                          NV_TRUE /* enableLink */,
                          NV_TRUE /* reTrain */);
}

void nvDPSerializerPreSetMode(NVDispEvoPtr pDispEvo,
                              NVConnectorEvoPtr pConnectorEvo)
{
    const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);

    if (!pConnectorEvo->dpSerializerEnabled && (headMask != 0)) {
        DPSerializerLinkTrain(pDispEvo, pConnectorEvo,
                              NV_TRUE /* enableLink */,
                              NV_FALSE /* reTrain */);
    }
}

void nvDPSerializerPostSetMode(NVDispEvoPtr pDispEvo,
                               NVConnectorEvoPtr pConnectorEvo)
{
    const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);

    if (pConnectorEvo->dpSerializerEnabled && (headMask == 0)) {
        DPSerializerLinkTrain(pDispEvo, pConnectorEvo,
                              NV_FALSE /* enableLink */,
                              NV_FALSE /* reTrain */);
    }
}

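/*
 * Return the source maximum luminance for a flip: prefer maxCLL when the
 * static metadata provides one, otherwise fall back to
 * maxDisplayMasteringLuminance; 0 means HDR static metadata is not enabled.
 */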
NvU32 nvGetHDRSrcMaxLum(const NVFlipChannelEvoHwState *pHwState)
{
    if (!pHwState->hdrStaticMetadata.enabled) {
        return 0;
    }

    if (pHwState->hdrStaticMetadata.val.maxCLL > 0) {
        return pHwState->hdrStaticMetadata.val.maxCLL;
    }

    return pHwState->hdrStaticMetadata.val.maxDisplayMasteringLuminance;
}

NvBool nvNeedsTmoLut(NVDevEvoPtr pDevEvo,
                     NVEvoChannelPtr pChannel,
                     const NVFlipChannelEvoHwState *pHwState,
                     NvU32 srcMaxLum,
                     NvU32 targetMaxCLL)
{
    const NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask);
    const NvU32 head = pDevEvo->headForWindow[win];
    const NvU32 sdMask = nvPeekEvoSubDevMask(pDevEvo);
    const NvU32 sd = (sdMask == 0) ? 0 : nv_ffs(sdMask) - 1;
    const NVDispHeadStateEvoRec *pHeadState =
        &pDevEvo->pDispEvo[sd]->headState[head];
    const NVEvoWindowCaps *pWinCaps =
        &pDevEvo->gpus[sd].capabilities.window[pChannel->instance];

    // Don't tone map if flipped to NULL.
    if (!pHwState->pSurfaceEvo[NVKMS_LEFT]) {
        return FALSE;
    }

    // Don't tone map if layer doesn't have static metadata.
    // XXX HDR TODO: Support tone mapping SDR surfaces to HDR
    if (!pHwState->hdrStaticMetadata.enabled) {
        return FALSE;
    }

    // Don't tone map if HDR infoframe isn't enabled
    // XXX HDR TODO: Support tone mapping HDR surfaces to SDR
    if (pHeadState->hdrInfoFrame.state != NVKMS_HDR_INFOFRAME_STATE_ENABLED) {
        return FALSE;
    }

    // Don't tone map if TMO not present
    if (!pWinCaps->tmoPresent) {
        return FALSE;
    }

    // Don't tone map if source or target max luminance is unspecified.
    if ((srcMaxLum == 0) || (targetMaxCLL == 0)) {
        return FALSE;
    }

    // Don't tone map unless source max luminance exceeds target by 10%.
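    // For example (illustrative numbers): srcMaxLum = 1000 nits is tone
    // mapped for targetMaxCLL = 600 nits (1000 > 660), but not for
    // targetMaxCLL = 950 nits (1000 <= 1045).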
    if (srcMaxLum <= ((targetMaxCLL * 110) / 100)) {
        return FALSE;
    }

    return TRUE;
}

NvBool nvIsCscMatrixIdentity(const struct NvKmsCscMatrix *matrix)
{
    const struct NvKmsCscMatrix identity = NVKMS_IDENTITY_CSC_MATRIX;

    int y;
    for (y = 0; y < 3; y++) {
        int x;

        for (x = 0; x < 4; x++) {
            if (matrix->m[y][x] != identity.m[y][x]) {
                return FALSE;
            }
        }
    }

    return TRUE;
}

enum nvKmsPixelDepth nvEvoColorSpaceBpcToPixelDepth(
    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
    const enum NvKmsDpyAttributeColorBpcValue colorBpc)
{
    switch (colorSpace) {
        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
            switch (colorBpc) {
                case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
                    return NVKMS_PIXEL_DEPTH_30_444;
                case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
                    return NVKMS_PIXEL_DEPTH_24_444;
                case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN: /* fallthrough */
                case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6:
                    return NVKMS_PIXEL_DEPTH_18_444;
            }
            break;
        case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
            nvAssert(colorBpc != NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6);
            switch (colorBpc) {
                case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_10:
                    return NVKMS_PIXEL_DEPTH_20_422;
                case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_6: /* fallthrough */
                case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_UNKNOWN: /* fallthrough */
                case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_BPC_8:
                    return NVKMS_PIXEL_DEPTH_16_422;
            }
            break;
    }

    return NVKMS_PIXEL_DEPTH_18_444;
}

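/*
 * Merge-mode bring-up is split across the modeset: the PreModeset function
 * below moves each head into the SETUP state and raster-locks the secondary
 * heads to the primary; the PostModeset function then waits for raster lock,
 * enables flip lock, and moves the heads to PRIMARY/SECONDARY.
 */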
void nvEvoEnableMergeModePreModeset(NVDispEvoRec *pDispEvo,
                                    const NvU32 headsMask,
                                    NVEvoUpdateState *pUpdateState)
{
    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    const NvU32 sd = pDispEvo->displayOwner;
    const NvU32 primaryHead = nvGetPrimaryHwHeadFromMask(headsMask);
    NvU32 head;

    nvAssert(pDevEvo->hal->caps.supportsMergeMode);
    nvAssert((nvPopCount32(headsMask) > 1) &&
                (primaryHead != NV_INVALID_HEAD));

    FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
        NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
        const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
        NVEvoHeadControl *pHC =
            &pDevEvo->gpus[sd].headControl[head];

        nvAssert(pHeadState->mergeMode ==
                    NV_EVO_MERGE_MODE_DISABLED);

        /*
         * Heads are required to be raster locked before they transition to
         * the PRIMARY/SECONDARY merge mode.
         *
         * SETUP is the intermediate state before a head transitions to the
         * PRIMARY/SECONDARY merge mode. During the SETUP state, there is no
         * pixel transmission from the secondary to the primary head: the RG
         * fetches and drops pixels, and the viewport gets filled with the
         * special gray/black pixels.
         */
        pHeadState->mergeMode = NV_EVO_MERGE_MODE_SETUP;
        pDevEvo->hal->SetMergeMode(pDispEvo, head, pHeadState->mergeMode,
                                   pUpdateState);

        nvAssert((pHC->serverLock == NV_EVO_NO_LOCK) &&
                    (pHC->clientLock == NV_EVO_NO_LOCK));

        pHC->mergeMode = TRUE;
        if (head == primaryHead) {
            pHC->serverLock = NV_EVO_RASTER_LOCK;
            pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
            pHC->setLockOffsetX = TRUE;
        } else {
            pHC->clientLock = NV_EVO_RASTER_LOCK;
            pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
            if (pTimings->vrr.type != NVKMS_DPY_VRR_TYPE_NONE) {
                pHC->clientLockoutWindow = 4;
                pHC->useStallLockPin = TRUE;
                pHC->stallLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
            } else {
                pHC->clientLockoutWindow = 2;
            }
        }

        if (pTimings->vrr.type != NVKMS_DPY_VRR_TYPE_NONE) {
            pHC->crashLockUnstallMode = TRUE;
        }
        pHC->stereoLocked = FALSE;

        EvoUpdateHeadParams(pDispEvo, head, pUpdateState);
    }
}

void nvEvoEnableMergeModePostModeset(NVDispEvoRec *pDispEvo,
                                     const NvU32 headsMask,
                                     NVEvoUpdateState *pUpdateState)
{
    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    const NvU32 sd = pDispEvo->displayOwner;
    const NvU32 primaryHead = nvGetPrimaryHwHeadFromMask(headsMask);
    NvU64 startTime = 0;
    NvU32 head;

    nvAssert(pDevEvo->hal->caps.supportsMergeMode);
    nvAssert((nvPopCount32(headsMask) > 1) &&
                (primaryHead != NV_INVALID_HEAD));

    FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
        nvAssert(pDispEvo->headState[head].mergeMode ==
                    NV_EVO_MERGE_MODE_SETUP);

        if (!EvoWaitForLock(pDevEvo, sd, head, EVO_RASTER_LOCK,
                            &startTime)) {
            nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, "Raster lock timeout");
            return;
        }
    }

    FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
        NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
        NVEvoHeadControl *pHC = &pDevEvo->gpus[sd].headControl[head];

        pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(primaryHead);
        pHC->flipLock = TRUE;

        EvoUpdateHeadParams(pDispEvo, head, pUpdateState);

        pHeadState->mergeMode = (head == primaryHead) ?
            NV_EVO_MERGE_MODE_PRIMARY : NV_EVO_MERGE_MODE_SECONDARY;
        pDevEvo->hal->SetMergeMode(pDispEvo, head, pHeadState->mergeMode,
                                   pUpdateState);
    }
}

void nvEvoDisableMergeMode(NVDispEvoRec *pDispEvo,
                           const NvU32 headsMask,
                           NVEvoUpdateState *pUpdateState)
{
    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    const NvU32 sd = pDispEvo->displayOwner;
    NvU32 head;

    nvAssert(pDevEvo->hal->caps.supportsMergeMode);
    nvAssert(nvPopCount32(headsMask) > 1);

    FOR_EACH_EVO_HW_HEAD_IN_MASK(headsMask, head) {
        NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
        NVEvoHeadControl *pHC =
            &pDevEvo->gpus[sd].headControl[head];

        pHeadState->mergeMode = NV_EVO_MERGE_MODE_DISABLED;
        pDevEvo->hal->SetMergeMode(pDispEvo, head, pHeadState->mergeMode,
                                   pUpdateState);

        pHC->mergeMode = FALSE;
        pHC->serverLock = NV_EVO_NO_LOCK;
        pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
        pHC->clientLock = NV_EVO_NO_LOCK;
        pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
        pHC->clientLockoutWindow = 0;
        pHC->setLockOffsetX = FALSE;
        pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
        pHC->flipLock = FALSE;
        pHC->useStallLockPin = FALSE;
        pHC->stallLockPin = NV_EVO_LOCK_PIN_INTERNAL(0);
        pHC->crashLockUnstallMode = FALSE;

        EvoUpdateHeadParams(pDispEvo, head, pUpdateState);
    }
}

NvBool nvEvoGetSingleTileHwModeTimings(const NVHwModeTimingsEvo *pSrc,
                                       const NvU32 numTiles,
                                       NVHwModeTimingsEvo *pDst)
{
    if (numTiles == 1) {
        *pDst = *pSrc;
        return TRUE;
    }

    if ((numTiles == 0) ||
            (pSrc->viewPort.out.xAdjust != 0) ||
            (pSrc->viewPort.out.width != nvEvoVisibleWidth(pSrc))) {
        return FALSE;
    }

    if (((pSrc->rasterSize.x % numTiles) != 0) ||
            (((pSrc->rasterSyncEnd.x + 1) % numTiles) != 0) ||
            (((pSrc->rasterBlankEnd.x + 1) % numTiles) != 0) ||
            (((pSrc->rasterBlankStart.x + 1) % numTiles) != 0) ||
            ((pSrc->pixelClock % numTiles) != 0) ||
            ((pSrc->viewPort.in.width % numTiles) != 0)) {
        return FALSE;
    }

    *pDst = *pSrc;

    pDst->rasterSize.x /= numTiles;
    pDst->rasterSyncEnd.x /= numTiles;
    pDst->rasterBlankEnd.x /= numTiles;
    pDst->rasterBlankStart.x /= numTiles;

    pDst->pixelClock /= numTiles;

    pDst->viewPort.out.width /= numTiles;
    pDst->viewPort.in.width /= numTiles;

    return TRUE;
}
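
/*
 * Worked example with illustrative numbers: splitting a mode with
 * rasterSize.x = 8800 and pixelClock = 2376000 kHz across numTiles = 2 yields
 * per-tile rasterSize.x = 4400 and pixelClock = 1188000 kHz. Note that the
 * rasterSyncEnd.x/rasterBlankEnd.x/rasterBlankStart.x values are 0-based, so
 * it is value + 1 that must divide evenly by numTiles.
 */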

NvBool nvEvoUse2Heads1OR(const NVDpyEvoRec *pDpyEvo,
                         const NVHwModeTimingsEvo *pTimings,
                         const struct NvKmsModeValidationParams *pParams)
{
    const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
    const NvU32 sd = pDispEvo->displayOwner;
    const NVEvoHeadCaps *pHeadCaps =
        &pDispEvo->pDevEvo->gpus[sd].capabilities.head[0];

    /* The 2Heads1OR mode cannot be used if the GPU does not
     * support merge mode, or */
    if (!pDispEvo->pDevEvo->hal->caps.supportsMergeMode ||
            /* the 2Heads1OR mode is forcibly disabled by the client, or */
            ((pParams->overrides &
              NVKMS_MODE_VALIDATION_MAX_ONE_HARDWARE_HEAD) != 0) ||
            /* the given dpy does not support display stream compression
             * and the given mode timings are not using the hardware YUV420
             * packer, or */
            (!nvDPDpyIsDscPossible(pDpyEvo) && !nvHdmiDpySupportsDsc(pDpyEvo) &&
                (pTimings->yuv420Mode != NV_YUV420_MODE_HW)) ||
            /* a non-centered viewport out does not work with 2Heads1OR mode,
             * and for simplicity all customized viewport outs are disallowed,
             * or */
            (pTimings->viewPort.out.width != nvEvoVisibleWidth(pTimings)) ||
            (pTimings->viewPort.out.xAdjust != 0) ||
            /* either HVisible, HSyncWidth, HBackPorch, HFrontPorch,
             * pixelClock, or viewPortIn width is odd and cannot be split
             * equally across two heads, or */
            ((pTimings->rasterSize.x & 1) != 0) ||
            ((pTimings->rasterSyncEnd.x & 1) != 1) ||
            ((pTimings->rasterBlankEnd.x & 1) != 1) ||
            ((pTimings->rasterBlankStart.x & 1) != 1) ||
            ((pTimings->pixelClock & 1) != 0) ||
            ((pTimings->viewPort.in.width & 1) != 0)) {
        return FALSE;
    }

    /* Use 2Heads1OR mode only if the required pixel clock is greater than the
     * maximum pixel clock supported by a head. */
    return (pTimings->pixelClock > pHeadCaps->maxPClkKHz);
}