/*
 * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvkms-dma.h"
#include "nvkms-evo.h"
#include "nvkms-flip.h"
#include "nvkms-hw-flip.h"
#include "nvkms-utils-flip.h"
#include "nvkms-prealloc.h"
#include "nvkms-private.h"
#include "nvkms-utils.h"
#include "nvkms-vrr.h"
#include "nvkms-dpy.h"
#include "nvkms-rm.h"

/*!
 * Check whether the flipPermissions for pOpenDev allow the flipping
 * requested by NvKmsFlipCommonParams.
 */
NvBool nvCheckFlipPermissions(
    const struct NvKmsPerOpenDev *pOpenDev,
    const NVDevEvoRec *pDevEvo,
    const NvU32 sd,
    const NvU32 apiHead,
    const struct NvKmsFlipCommonParams *pParams)
{
    const int dispIndex = pDevEvo->gpus[sd].pDispEvo->displayOwner;
    const struct NvKmsFlipPermissions *pFlipPermissions =
        nvGetFlipPermissionsFromOpenDev(pOpenDev);
    const struct NvKmsModesetPermissions *pModesetPermissions =
        nvGetModesetPermissionsFromOpenDev(pOpenDev);
    const NvU8 allLayersMask = NVBIT(pDevEvo->apiHead[apiHead].numLayers) - 1;
    NvU8 layerMask = 0;
    NvU32 layer;

    nvAssert(pOpenDev != NULL);
    nvAssert(pFlipPermissions != NULL);
    nvAssert(pModesetPermissions != NULL);

    layerMask = pFlipPermissions->disp[dispIndex].head[apiHead].layerMask;

    /*
     * If the client has modeset permissions for this disp+head, allow
     * the client to also perform flips on any layer.
     */
    if (!nvDpyIdListIsEmpty(pModesetPermissions->disp[dispIndex].
                            head[apiHead].dpyIdList)) {
        layerMask = allLayersMask;
    }

    /* Changing viewPortIn or LUT requires permission to alter all layers. */

    if ((layerMask != allLayersMask) && ((pParams->viewPortIn.specified) ||
                                         (pParams->lut.input.specified) ||
                                         (pParams->lut.output.specified))) {
        return FALSE;
    }

    for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
        if (nvIsLayerDirty(pParams, layer) && ((layerMask & NVBIT(layer)) == 0)) {
            return FALSE;
        }
    }

    return TRUE;
}

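/*!
 * Fill in the post-flip syncpt portion of the reply for one api-head, for
 * each layer that requested a syncpt, using the new flip state recorded in
 * the work area.  Do nothing if the api-head has no primary hardware head
 * or the device does not support syncpts.
 */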
static void FillPostSyncptReplyOneApiHead(
    NVDevEvoRec *pDevEvo,
    const NvU32 sd,
    const NvU32 apiHead,
    const struct NvKmsFlipCommonParams *pRequestParams,
    struct NvKmsFlipCommonReplyOneHead *pReplyParams,
    const struct NvKmsFlipWorkArea *pWorkArea)
{
    /* XXX[2Heads1OR] Return per hardware-head post syncpt */
    const NvU32 head = nvGetPrimaryHwHead(pDevEvo->gpus[sd].pDispEvo, apiHead);
    NvU32 layer;

    /* check for valid config */
    if ((head == NV_INVALID_HEAD) || !pDevEvo->supportsSyncpts) {
        return;
    }

    for (layer = 0; layer < ARRAY_LEN(pRequestParams->layer); layer++) {
        const NVFlipEvoHwState *pFlipState =
            &pWorkArea->sd[sd].head[head].newState;

        if (!pRequestParams->layer[layer].syncObjects.specified ||
            !pRequestParams->layer[layer].syncObjects.val.useSyncpt) {
            continue;
        }

        nvFillPostSyncptReplyOneChannel(
            pDevEvo->head[head].layer[layer],
            pRequestParams->layer[layer].syncObjects.val.u.syncpts.requestedPostType,
            &pReplyParams->layer[layer].postSyncpt,
            &pFlipState->layer[layer].syncObject);
    }
}

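/*!
 * Apply the HDR, colorimetry, viewport, and LUT fields of
 * NvKmsFlipCommonParams to the proposed flip state of one api-head, and
 * validate the resulting combination.
 *
 * \return FALSE if the proposed state is invalid; TRUE otherwise.
 */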
static NvBool UpdateProposedFlipStateOneApiHead(
    const NVDispEvoRec *pDispEvo,
    const NvU32 apiHead,
    const struct NvKmsFlipCommonParams *pParams,
    NVProposedFlipStateOneApiHead *pProposedApiHead)
{
    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    const NVDispApiHeadStateEvoRec *pApiHeadState =
        &pDispEvo->apiHeadState[apiHead];
    const NVDpyEvoRec *pDpyEvo =
        nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo);
    NvU32 layer;

    if (pParams->tf.specified) {
        pProposedApiHead->dirty.hdr = TRUE;
        pProposedApiHead->hdr.tf = pParams->tf.val;
    }

    if (pParams->colorimetry.specified) {
        pProposedApiHead->dirty.hdr = TRUE;
        pProposedApiHead->hdr.colorimetry = pParams->colorimetry.val;
    }

    if (pParams->hdrInfoFrame.specified) {
        pProposedApiHead->dirty.hdr = TRUE;
        pProposedApiHead->hdr.infoFrameOverride =
            pParams->hdrInfoFrame.enabled;
    }

    for (layer = 0; layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
        if (pParams->layer[layer].hdr.specified) {
            pProposedApiHead->dirty.hdr = TRUE;
            if (pParams->layer[layer].hdr.enabled) {
                pProposedApiHead->hdr.staticMetadataLayerMask |=
                    1 << layer;
            } else {
                pProposedApiHead->hdr.staticMetadataLayerMask &=
                    ~(1 << layer);
            }
        }
    }

    if (pProposedApiHead->dirty.hdr) {
        // If enabling HDR output TF...
        if (pProposedApiHead->hdr.tf == NVKMS_OUTPUT_TF_PQ) {
            // Cannot be an SLI configuration.
            // XXX HDR TODO: Test SLI Mosaic + HDR and remove this check
            if (pDevEvo->numSubDevices > 1) {
                return FALSE;
            }

            /* NVKMS_OUTPUT_TF_PQ requires the RGB color space */
            if (pProposedApiHead->hdr.colorSpace !=
                NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) {
                return FALSE;
            }
        }

        // If enabling HDR signaling...
        // XXX HDR TODO: Handle other colorimetries
        if (pProposedApiHead->hdr.infoFrameOverride ||
            (pProposedApiHead->hdr.staticMetadataLayerMask != 0) ||
            (pProposedApiHead->hdr.colorimetry == NVKMS_OUTPUT_COLORIMETRY_BT2100)) {
            const NVDpyEvoRec *pDpyEvoIter;

            // All dpys on apiHead must support HDR.
            FOR_ALL_EVO_DPYS(pDpyEvoIter,
                             pApiHeadState->activeDpys,
                             pDispEvo) {
                if (!nvDpyIsHDRCapable(pDpyEvoIter)) {
                    return FALSE;
                }
            }
        }

        if (!nvChooseColorRangeEvo(pProposedApiHead->hdr.colorimetry,
                                   pDpyEvo->requestedColorRange,
                                   pProposedApiHead->hdr.colorSpace,
                                   pProposedApiHead->hdr.colorBpc,
                                   &pProposedApiHead->hdr.colorRange)) {
            return FALSE;
        }
    }

    if (pParams->viewPortIn.specified) {
        pProposedApiHead->dirty.viewPortPointIn = TRUE;
        pProposedApiHead->viewPortPointIn = pParams->viewPortIn.point;
    }

    if (!nvValidateSetLutCommonParams(pDispEvo->pDevEvo, &pParams->lut)) {
        return FALSE;
    }
    pProposedApiHead->lut = pParams->lut;

    return TRUE;
}

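/*!
 * Decide whether VRR should be allowed for this flip request: deactivate
 * VRR unless every active api-head is both requested and flipping its main
 * layer.  Through pApplyAllowVrr, report whether the result should be
 * applied at all, which is the case only if at least one main layer is
 * dirty.
 */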
static NvBool GetAllowVrr(const NVDevEvoRec *pDevEvo,
                          const struct NvKmsFlipRequestOneHead *pFlipHead,
                          NvU32 numFlipHeads,
                          NvBool allowVrr,
                          NvBool *pApplyAllowVrr)
{
    NvU32 sd, i;
    const NVDispEvoRec *pDispEvo;
    const NvU32 requestedApiHeadCount = numFlipHeads;
    NvU32 activeApiHeadCount, dirtyMainLayerCount;

    *pApplyAllowVrr = FALSE;

    /*
     * Count active heads so we can make a decision about VRR
     * and register syncpts if specified.
     */
    activeApiHeadCount = dirtyMainLayerCount = 0;

    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
        NvU32 apiHead;
        for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
            if (nvApiHeadIsActive(pDispEvo, apiHead)) {
                activeApiHeadCount++;
            }
        }
    }

    for (i = 0; i < numFlipHeads; i++) {
        if (nvIsLayerDirty(&pFlipHead[i].flip, NVKMS_MAIN_LAYER)) {
            dirtyMainLayerCount++;
        }
    }

    /*
     * Deactivate VRR if only a subset of the heads are requested or
     * only a subset of the heads are being flipped.
     */
    if ((activeApiHeadCount != requestedApiHeadCount) ||
        (activeApiHeadCount != dirtyMainLayerCount)) {
        allowVrr = FALSE;
    }

    /*
     * Apply NvKmsFlipRequest::allowVrr only if at least one main layer
     * became dirty.
     */
    if (dirtyMainLayerCount > 0) {
        *pApplyAllowVrr = TRUE;
    }

    return allowVrr;
}

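/*!
 * Fill the NvKmsFlipReply: the post-flip syncpts for each flipped api-head,
 * plus the VRR flip type and semaphore index.
 */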
static void FillNvKmsFlipReply(NVDevEvoRec *pDevEvo,
                               struct NvKmsFlipWorkArea *pWorkArea,
                               const NvBool applyAllowVrr,
                               const NvS32 vrrSemaphoreIndex,
                               const struct NvKmsFlipRequestOneHead *pFlipHead,
                               NvU32 numFlipHeads,
                               struct NvKmsFlipReply *reply)
{
    NvU32 i;

    if (reply == NULL) {
        return;
    }

    for (i = 0; i < numFlipHeads; i++) {
        const NvU32 sd = pFlipHead[i].sd;
        const NvU32 apiHead = pFlipHead[i].head;

        FillPostSyncptReplyOneApiHead(pDevEvo,
                                      sd,
                                      apiHead,
                                      &pFlipHead[i].flip,
                                      &reply->flipHead[i],
                                      pWorkArea);
    }

    if (applyAllowVrr) {
        reply->vrrFlipType = nvGetActiveVrrType(pDevEvo);
        reply->vrrSemaphoreIndex = vrrSemaphoreIndex;
    } else {
        reply->vrrFlipType = NV_KMS_VRR_FLIP_NON_VRR;
        reply->vrrSemaphoreIndex = -1;
    }
}

static void InitNvKmsFlipWorkArea(const NVDevEvoRec *pDevEvo,
                                  struct NvKmsFlipWorkArea *pWorkArea)
{
    const NVDispEvoRec *pDispEvo;
    NvU32 sd, head, apiHead;

    nvkms_memset(pWorkArea, 0, sizeof(*pWorkArea));

    /*
     * Initialize the work area.  Note we take two snapshots of the
     * current headState: newState and oldState.  newState will
     * describe the new configuration.  After that is applied, we will
     * refer to oldState to identify any surfaces that are no longer
     * in use.
     */
    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
        for (head = 0; head < ARRAY_LEN(pWorkArea->sd[sd].head); head++) {
            nvInitFlipEvoHwState(pDevEvo, sd, head,
                                 &pWorkArea->sd[sd].head[head].newState);
            nvInitFlipEvoHwState(pDevEvo, sd, head,
                                 &pWorkArea->sd[sd].head[head].oldState);
        }

        for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
            NVProposedFlipStateOneApiHead *pProposedApiHead =
                &pWorkArea->disp[sd].apiHead[apiHead].proposedFlipState;
            const NVDispApiHeadStateEvoRec *pApiHeadState =
                &pDispEvo->apiHeadState[apiHead];

            pProposedApiHead->hdr.tf = pApiHeadState->tf;
            pProposedApiHead->hdr.colorimetry = pApiHeadState->colorimetry;
            pProposedApiHead->hdr.infoFrameOverride =
                pApiHeadState->hdrInfoFrameOverride;
            pProposedApiHead->hdr.staticMetadataLayerMask =
                pApiHeadState->hdrStaticMetadataLayerMask;
            pProposedApiHead->hdr.colorSpace =
                pApiHeadState->attributes.colorSpace;
            pProposedApiHead->hdr.colorBpc =
                pApiHeadState->attributes.colorBpc;
            pProposedApiHead->hdr.colorRange =
                pApiHeadState->attributes.colorRange;

            pProposedApiHead->viewPortPointIn =
                pApiHeadState->viewPortPointIn;

            pProposedApiHead->lut.input.specified = FALSE;
            pProposedApiHead->lut.output.specified = FALSE;
        }
    }
}

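/*!
 * Program the validated flip on one api-head: stage any LUT update, flip
 * each hardware head mapped to the api-head, and update the api-head's
 * software state and infoframes to match the proposed flip state.
 */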
static void FlipEvoOneApiHead(NVDispEvoRec *pDispEvo,
                              const NvU32 apiHead,
                              const struct NvKmsFlipWorkArea *pWorkArea,
                              const NvBool allowFlipLock,
                              NVEvoUpdateState *pUpdateState)
{
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    const NvU32 sd = pDispEvo->displayOwner;
    NvU32 head;
    const NVProposedFlipStateOneApiHead *pProposedApiHead =
        &pWorkArea->disp[sd].apiHead[apiHead].proposedFlipState;
    NVDispApiHeadStateEvoRec *pApiHeadState =
        &pDispEvo->apiHeadState[apiHead];
    NVDpyEvoRec *pDpyEvo =
        nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo);
    const NVT_EDID_INFO *pInfo = &pDpyEvo->parsedEdid.info;
    const NVT_HDR_STATIC_METADATA *pHdrInfo =
        &pInfo->hdr_static_metadata_info;

    nvAssert(nvApiHeadIsActive(pDispEvo, apiHead));

    if (pProposedApiHead->lut.input.specified ||
        pProposedApiHead->lut.output.specified) {
        /* Set LUT settings */
        nvEvoSetLut(pDispEvo, apiHead, FALSE /* kickoff */,
                    &pProposedApiHead->lut);
        nvEvoStageLUTNotifier(pDispEvo, apiHead);
    }

    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
        nvFlipEvoOneHead(pDevEvo, sd, head, pHdrInfo,
                         &pWorkArea->sd[sd].head[head].newState,
                         allowFlipLock,
                         pUpdateState);

        if (pProposedApiHead->dirty.hdr) {
            /* Update hardware's current colorSpace and colorRange */
            nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
                pDispEvo,
                head,
                pProposedApiHead->hdr.colorimetry,
                pProposedApiHead->hdr.colorSpace,
                pProposedApiHead->hdr.colorRange,
                pUpdateState);
        }

        if (pProposedApiHead->lut.input.specified ||
            pProposedApiHead->lut.output.specified) {
            /* Update current LUT to hardware */
            nvEvoSetLUTContextDma(pDispEvo, head, pUpdateState);
        }
    }

    if (pProposedApiHead->dirty.hdr) {
        pApiHeadState->attributes.colorSpace =
            pProposedApiHead->hdr.colorSpace;
        pApiHeadState->attributes.colorBpc =
            pProposedApiHead->hdr.colorBpc;
        pApiHeadState->attributes.colorRange =
            pProposedApiHead->hdr.colorRange;

        pApiHeadState->tf = pProposedApiHead->hdr.tf;

        pApiHeadState->colorimetry = pProposedApiHead->hdr.colorimetry;

        pApiHeadState->hdrInfoFrameOverride =
            pProposedApiHead->hdr.infoFrameOverride;
        pApiHeadState->hdrStaticMetadataLayerMask =
            pProposedApiHead->hdr.staticMetadataLayerMask;

        nvUpdateInfoFrames(pDpyEvo);
    }

    if (pProposedApiHead->dirty.viewPortPointIn) {
        pApiHeadState->viewPortPointIn =
            pProposedApiHead->viewPortPointIn;
    }
}

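/*!
 * Flip all api-heads on this disp that are in 2Heads1OR mode.  Each such
 * api-head is kicked off in its own update, because each one uses a
 * separate fliplock group.
 *
 * \return The mask of api-heads flipped here, so that the caller can skip
 * them.
 */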
static NvU32 FlipEvo2Head1OrOneDisp(NVDispEvoRec *pDispEvo,
                                    struct NvKmsFlipWorkArea *pWorkArea,
                                    const NvBool skipUpdate)
{
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    NvU32 flip2Heads1OrApiHeadsMask = 0x0;

    for (NvU32 apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) {
        NVDispApiHeadStateEvoRec *pApiHeadState =
            &pDispEvo->apiHeadState[apiHead];
        const NvBool b2Heads1Or =
            (nvPopCount32(pApiHeadState->hwHeadsMask) >= 2);

        if (!nvApiHeadIsActive(pDispEvo, apiHead) || !b2Heads1Or) {
            continue;
        }

        nvkms_memset(&pWorkArea->updateState, 0,
                     sizeof(pWorkArea->updateState));

        FlipEvoOneApiHead(pDispEvo, apiHead, pWorkArea,
                          TRUE /* allowFlipLock */, &pWorkArea->updateState);

        /*
         * An api-head that is using 2Heads1OR mode cannot be flipped
         * together with other api-heads in a single update: each api-head
         * using 2Heads1OR mode uses a different fliplock group, and kicking
         * off multiple fliplock groups as part of a single update call is
         * not supported yet.
         */
        pDevEvo->hal->Update(pDevEvo, &pWorkArea->updateState,
                             TRUE /* releaseElv */);
        nvAssert(!skipUpdate);

        flip2Heads1OrApiHeadsMask |= NVBIT(apiHead);
    }

    return flip2Heads1OrApiHeadsMask;
}

/*!
 * Program a flip on all requested layers on all requested heads on
 * all requested disps in NvKmsFlipRequest.
 *
 * \param[in]  skipUpdate     Update software state tracking, but don't kick
 *                            off or perform an UPDATE.
 *
 *                            Note that this should be used only when the
 *                            satellite channels (including the cursor) are
 *                            disabled -- only the core channel should be
 *                            displaying anything, and only the core surface
 *                            should be specified in a skipUpdate flip.
 * \param[in]  allowFlipLock  Whether this update should use fliplocked base
 *                            flips.  This is used on nvdisplay to set the
 *                            interlock mask to include all fliplocked
 *                            channels if necessary.  This should currently
 *                            only be set when this flip was initiated
 *                            through NVKMS_IOCTL_FLIP.
 */
NvBool nvFlipEvo(NVDevEvoPtr pDevEvo,
                 const struct NvKmsPerOpenDev *pOpenDev,
                 const struct NvKmsFlipRequestOneHead *pFlipHead,
                 NvU32 numFlipHeads,
                 NvBool commit,
                 NvBool requestAllowVrr,
                 struct NvKmsFlipReply *reply,
                 NvBool skipUpdate,
                 NvBool allowFlipLock)
{
    NvS32 vrrSemaphoreIndex = -1;
    NvU32 apiHead, sd;
    NvBool applyAllowVrr = FALSE;
    NvBool ret = FALSE;
    enum NvKmsFlipResult result = NV_KMS_FLIP_RESULT_INVALID_PARAMS;
    NvBool changed = FALSE;
    NVDispEvoPtr pDispEvo;
    const NvBool allowVrr =
        GetAllowVrr(pDevEvo, pFlipHead, numFlipHeads,
                    requestAllowVrr, &applyAllowVrr);
    struct NvKmsFlipWorkArea *pWorkArea =
        nvPreallocGet(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA,
                      sizeof(*pWorkArea));
    NvU32 i;

    /*
     * Do not execute NVKMS_IOCTL_FLIP if the display channel has not yet
     * been transitioned from vbios to driver; a modeset is required to make
     * that transition.
     *
     * The NVKMS client should do a modeset before initiating
     * NVKMS_IOCTL_FLIP requests.
     */
    if (pDevEvo->coreInitMethodsPending) {
        goto done;
    }

    InitNvKmsFlipWorkArea(pDevEvo, pWorkArea);

    /* Validate the flip parameters and update the work area. */
    for (i = 0; i < numFlipHeads; i++) {
        const NvU32 apiHead = pFlipHead[i].head;
        const NvU32 sd = pFlipHead[i].sd;
        NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[sd];
        NvU32 head;
        const NVDispApiHeadStateEvoRec *pApiHeadState =
            &pDispEvo->apiHeadState[apiHead];

        if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
            goto done;
        }

        if (!nvCheckFlipPermissions(pOpenDev, pDevEvo, sd, apiHead,
                                    &pFlipHead[i].flip)) {
            goto done;
        }

        if (!UpdateProposedFlipStateOneApiHead(
                pDispEvo,
                apiHead,
                &pFlipHead[i].flip,
                &pWorkArea->disp[sd].apiHead[apiHead].proposedFlipState)) {
            goto done;
        }

        FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
            if (!nvAssignNVFlipEvoHwState(pDevEvo,
                                          pOpenDev,
                                          sd,
                                          head,
                                          &pFlipHead[i].flip,
                                          allowVrr,
                                          &pWorkArea->sd[sd].head[head].newState)) {
                goto done;
            }
        }

        pWorkArea->sd[sd].changed = TRUE;
        changed = TRUE;
    }

    /* If nothing changed, fail. */

    if (!changed) {
        goto done;
    }

    ret = nvAllocatePreFlipBandwidth(pDevEvo, pWorkArea);
    if (!ret) {
        goto done;
    }

    /* XXX: Fail the flip if a LUT update is in progress.
     *
     * Really, we should have a more robust system for this, but currently
     * the only user of the LUT parameter to the flip IOCTL is nvidia-drm,
     * which waits for flips to complete anyway.  We should actually find a
     * way to properly queue as many LUT-changing flips as we support queued
     * flips in general.
     *
     * This failure returns NV_KMS_FLIP_RESULT_IN_PROGRESS rather than
     * NV_KMS_FLIP_RESULT_INVALID_PARAMS.
     *
     * See bug 4054546 for efforts to update this system.
     */
    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
        pDispEvo = pDevEvo->gpus[sd].pDispEvo;
        for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
            const NVProposedFlipStateOneApiHead *pProposedApiHead =
                &pWorkArea->disp[sd].apiHead[apiHead].proposedFlipState;

            if ((pProposedApiHead->lut.input.specified ||
                 pProposedApiHead->lut.output.specified) &&
                !nvEvoIsLUTNotifierComplete(pDispEvo, apiHead)) {

                if (commit) {
                    nvEvoLogDispDebug(
                        pDispEvo,
                        EVO_LOG_ERROR,
                        "Flip request with LUT parameter on API Head %d while LUT update outstanding",
                        apiHead);
                }

                result = NV_KMS_FLIP_RESULT_IN_PROGRESS;
                goto done;
            }
        }
    }

    if (!commit) {
        ret = NV_TRUE;
        result = NV_KMS_FLIP_RESULT_SUCCESS;
        goto done;
    }

    if (!nvPrepareToDoPreFlip(pDevEvo, pWorkArea)) {
        goto done;
    }

    /*
     * At this point, something changed on at least one head of one
     * subdevice, and has been validated.  Apply the request to our
     * hardware and software state.  We must not fail beyond this
     * point.
     */

    ret = TRUE;
    result = NV_KMS_FLIP_RESULT_SUCCESS;

    nvPreFlip(pDevEvo, pWorkArea, applyAllowVrr, allowVrr, skipUpdate);

    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
        NvU32 flip2Heads1OrApiHeadsMask = 0x0;

        if (!pWorkArea->sd[sd].changed) {
            continue;
        }

        pDispEvo = pDevEvo->gpus[sd].pDispEvo;

        flip2Heads1OrApiHeadsMask =
            FlipEvo2Head1OrOneDisp(pDispEvo, pWorkArea, skipUpdate);

        nvkms_memset(&pWorkArea->updateState, 0,
                     sizeof(pWorkArea->updateState));

        /*
         * Ensure that we only commit the LUT notifiers staged in this
         * nvFlipEvo call.
         */
        nvEvoClearStagedLUTNotifiers(pDispEvo);

        for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) {
            if (!nvApiHeadIsActive(pDispEvo, apiHead) ||
                ((NVBIT(apiHead) & flip2Heads1OrApiHeadsMask) != 0x0)) {
                continue;
            }

            FlipEvoOneApiHead(pDispEvo, apiHead, pWorkArea, allowFlipLock,
                              &pWorkArea->updateState);
        }

        if (!skipUpdate) {
            nvEvoFlipUpdate(pDispEvo, &pWorkArea->updateState);
        }
    }

    nvPostFlip(pDevEvo, pWorkArea, skipUpdate, applyAllowVrr, &vrrSemaphoreIndex);

    FillNvKmsFlipReply(pDevEvo, pWorkArea, applyAllowVrr, vrrSemaphoreIndex,
                       pFlipHead, numFlipHeads, reply);

    /* fall through */

done:

    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA);
    if (reply) {
        reply->flipResult = result;
    }

    return ret;
}

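/*!
 * Return the per-eye surfaces currently in use by the given layer of the
 * given api-head.  All hardware heads mapped to the api-head are expected
 * to use the same surfaces.
 */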
void nvApiHeadGetLayerSurfaceArray(const NVDispEvoRec *pDispEvo,
                                   const NvU32 apiHead,
                                   const NvU32 layer,
                                   NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES])
{
    const NvU32 sd = pDispEvo->displayOwner;
    const NVDispApiHeadStateEvoRec *pApiHeadState =
        &pDispEvo->apiHeadState[apiHead];
    NvU32 head, headCount;

    nvAssert(apiHead != NV_INVALID_HEAD);

    headCount = 0;
    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
        const NVEvoSubDevHeadStateRec *pSdHeadState =
            &pDispEvo->pDevEvo->gpus[sd].headState[head];
        NvU8 eye;

        if (headCount == 0) {
            for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
                pSurfaceEvos[eye] =
                    pSdHeadState->layer[layer].pSurfaceEvo[eye];
            }
        } else {
            for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
                nvAssert(pSurfaceEvos[eye] ==
                         pSdHeadState->layer[layer].pSurfaceEvo[eye]);
            }
        }

        headCount++;
    }
}

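/*!
 * Return the cursor surface and position of the given api-head.  All
 * hardware heads mapped to the api-head are expected to agree.
 */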
void nvApiHeadGetCursorInfo(const NVDispEvoRec *pDispEvo,
                            const NvU32 apiHead,
                            NVSurfaceEvoPtr *ppSurfaceEvo,
                            NvS16 *x, NvS16 *y)
{
    const NvU32 sd = pDispEvo->displayOwner;
    const NVDispApiHeadStateEvoRec *pApiHeadState =
        &pDispEvo->apiHeadState[apiHead];
    NvU32 head, headCount;

    nvAssert(apiHead != NV_INVALID_HEAD);

    headCount = 0;
    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
        const NVEvoSubDevHeadStateRec *pSdHeadState =
            &pDispEvo->pDevEvo->gpus[sd].headState[head];

        if (headCount == 0) {
            *ppSurfaceEvo = pSdHeadState->cursor.pSurfaceEvo;
            *x = pSdHeadState->cursor.x;
            *y = pSdHeadState->cursor.y;
        } else {
            nvAssert(*ppSurfaceEvo == pSdHeadState->cursor.pSurfaceEvo);
            nvAssert(*x == pSdHeadState->cursor.x);
            nvAssert(*y == pSdHeadState->cursor.y);
        }

        headCount++;
    }
}

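/*!
 * Program viewPortPointIn on each hardware head mapped to the api-head,
 * adjusting x by the head's tile position, and kick off the update.
 */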
void nvApiHeadSetViewportPointIn(const NVDispEvoRec *pDispEvo,
                                 const NvU32 apiHead,
                                 const NvU16 x,
                                 const NvU16 y)
{
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    NVEvoUpdateState updateState = { };
    const NVDispApiHeadStateEvoRec *pApiHeadState =
        &pDispEvo->apiHeadState[apiHead];
    NvU16 hwViewportInWidth;
    NvU32 head, headCount;

    nvAssert(apiHead != NV_INVALID_HEAD);

    headCount = 0;
    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
        const NVDispHeadStateEvoRec *pHeadState =
            &pDispEvo->headState[head];
        const NVHwModeTimingsEvo *pTimings =
            &pHeadState->timings;

        if (headCount == 0) {
            hwViewportInWidth = pTimings->viewPort.in.width;
        } else {
            nvAssert(hwViewportInWidth == pTimings->viewPort.in.width);
        }

        nvPushEvoSubDevMaskDisp(pDispEvo);
        pDevEvo->hal->SetViewportPointIn(pDevEvo, head,
            x + (hwViewportInWidth * pHeadState->tilePosition), y,
            &updateState);
        nvPopEvoSubDevMask(pDevEvo);

        headCount++;
    }

    if (headCount != 0) {
        nvEvoUpdateAndKickOff(pDispEvo, FALSE /* sync */, &updateState,
                              TRUE /* releaseElv */);
    }
}

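/*!
 * Query the active viewport offset of the api-head from the hardware.  All
 * hardware heads mapped to the api-head are expected to report the same
 * offset.
 */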
NvU32 nvApiHeadGetActiveViewportOffset(NVDispEvoRec *pDispEvo,
                                       NvU32 apiHead)
{
    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    const NVDispApiHeadStateEvoRec *pApiHeadState =
        &pDispEvo->apiHeadState[apiHead];
    NvU32 head, headCount;
    NvU32 offset = 0;

    nvAssert(apiHead != NV_INVALID_HEAD);

    headCount = 0;
    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
        if (headCount == 0) {
            offset = pDevEvo->hal->GetActiveViewportOffset(pDispEvo, head);
        } else {
            nvAssert(offset == pDevEvo->hal->GetActiveViewportOffset(pDispEvo, head));
        }
        headCount++;
    }

    return offset;
}

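/*!
 * Idle the main layer channels of the api-heads described by
 * apiHeadMaskPerSd, without forcing channels idle.
 */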
void nvApiHeadIdleMainLayerChannels(NVDevEvoRec *pDevEvo,
    const NvU32 apiHeadMaskPerSd[NVKMS_MAX_SUBDEVICES])
{
    NVEvoChannelMask idleChannelMaskPerSd[NVKMS_MAX_SUBDEVICES] = { };
    const NVDispEvoRec *pDispEvo;
    NvU32 dispIndex, apiHead;
    NvBool found = FALSE;

    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
            const NVDispApiHeadStateEvoRec *pApiHeadState =
                &pDispEvo->apiHeadState[apiHead];
            NvU32 head;

            if ((apiHeadMaskPerSd[pDispEvo->displayOwner] &
                 NVBIT(apiHead)) == 0x0) {
                continue;
            }

            FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
                NVEvoChannelPtr pMainLayerChannel =
                    pDevEvo->head[head].layer[NVKMS_MAIN_LAYER];
                idleChannelMaskPerSd[pDispEvo->displayOwner] |=
                    pMainLayerChannel->channelMask;
                found = TRUE;
            }
        }
    }

    if (!found) {
        return;
    }

    nvIdleMainLayerChannels(pDevEvo, idleChannelMaskPerSd,
                            FALSE /* allowForceIdle */);
}

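/*!
 * Enable or disable fliplock on the main layer channels of the api-heads
 * described by apiHeadMaskPerSd: idle the affected channels, then toggle
 * fliplock on each disp.
 */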
void nvApiHeadUpdateFlipLock(NVDevEvoRec *pDevEvo,
                             const NvU32 apiHeadMaskPerSd[NVKMS_MAX_SUBDEVICES],
                             const NvBool enable)
{
    NvU32 dispIndex;
    NVDispEvoPtr pDispEvo;
    NvU32 headMaskPerSd[NVKMS_MAX_SUBDEVICES] = { };
    NVEvoChannelMask channelMaskPerSd[NVKMS_MAX_SUBDEVICES] = { };
    NvBool found = FALSE;

    /* Determine which channels need to enable or disable fliplock. */
    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        NvU32 apiHead;
        for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
            const NVDispApiHeadStateEvoRec *pApiHeadState =
                &pDispEvo->apiHeadState[apiHead];
            NvU32 head;

            if ((apiHeadMaskPerSd[pDispEvo->displayOwner] &
                 NVBIT(apiHead)) == 0x0) {
                continue;
            }

            FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
                NVEvoChannelPtr pMainLayerChannel =
                    pDevEvo->head[head].layer[NVKMS_MAIN_LAYER];

                if (!nvNeedToToggleFlipLock(pDispEvo, head, enable)) {
                    continue;
                }

                if (enable) {
                    /*
                     * Override the prohibition of fliplock on pDispEvos with
                     * headsurface enabled (calculated earlier in
                     * HsConfigAllowFlipLock) to allow enabling fliplock for
                     * headSurface swapgroups.
                     */
                    nvAllowFlipLockEvo(pDispEvo, TRUE /* allowFlipLock */);
                }

                headMaskPerSd[pDispEvo->displayOwner] |= NVBIT(head);
                channelMaskPerSd[pDispEvo->displayOwner] |=
                    pMainLayerChannel->channelMask;
                found = TRUE;
            }
        }
    }

    if (!found) {
        return;
    }

    /*
     * Wait for all base channels that are enabling/disabling fliplock to be
     * idle.  This shouldn't timeout if we're enabling fliplock while bringing
     * up swapgroups on a new head.
     */
    nvIdleMainLayerChannels(pDevEvo, channelMaskPerSd, !enable /* forceIdle */);

    /* Now that all channels are idle, update fliplock. */
    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        nvToggleFlipLockPerDisp(pDispEvo,
                                headMaskPerSd[pDispEvo->displayOwner],
                                enable);
    }
}

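/*!
 * Check whether the main layer channels of all hardware heads mapped to the
 * api-head are idle.
 *
 * \return FALSE if any channel still has methods pending; TRUE otherwise.
 */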
NvBool nvIdleMainLayerChannelCheckIdleOneApiHead(NVDispEvoPtr pDispEvo,
                                                 NvU32 apiHead)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const NVDispApiHeadStateEvoRec *pApiHeadState =
        &pDispEvo->apiHeadState[apiHead];
    NvU32 head;

    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
        NVEvoChannelPtr pMainLayerChannel =
            pDevEvo->head[head].layer[NVKMS_MAIN_LAYER];
        NvBool isMethodPending = FALSE;
        NvBool ret;

        ret = pDevEvo->hal->IsChannelMethodPending(pDevEvo, pMainLayerChannel,
                                                   pDispEvo->displayOwner,
                                                   &isMethodPending);

        if (ret && isMethodPending) {
            return FALSE;
        }
    }

    return TRUE;
}

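/*!
 * Return a mask describing how pSurfaceEvo is currently used by the given
 * api-head: as a scanout, notifier, or semaphore surface on each layer, or
 * as the cursor surface.
 */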
NvU32 nvCollectSurfaceUsageMaskOneApiHead(const NVDispEvoRec *pDispEvo,
                                          const NvU32 apiHead,
                                          NVSurfaceEvoPtr pSurfaceEvo)
{
    NvU32 usageMaskOneHead = 0x0;
    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
    const NvU32 sd = pDispEvo->displayOwner;
    const NVDispApiHeadStateEvoRec *pApiHeadState =
        &pDispEvo->apiHeadState[apiHead];
    NvU32 head;

    if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
        return 0;
    }

    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
        const NVEvoSubDevHeadStateRec *pSdHeadState =
            &pDevEvo->gpus[sd].headState[head];
        NvU32 layer;

        for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
            const NVFlipChannelEvoHwState *pLayerFlipState =
                &pSdHeadState->layer[layer];
            const NVFlipSyncObjectEvoHwState *pSyncObject =
                &pLayerFlipState->syncObject;
            NvU32 usageMaskOneLayer = 0x0;

            if ((pSurfaceEvo == pLayerFlipState->pSurfaceEvo[NVKMS_LEFT]) ||
                (pSurfaceEvo == pLayerFlipState->pSurfaceEvo[NVKMS_RIGHT])) {
                usageMaskOneLayer = FLD_SET_DRF(_SURFACE, _USAGE_MASK_LAYER,
                                                _SCANOUT, _ENABLE,
                                                usageMaskOneLayer);
            }

            if (pSurfaceEvo ==
                pLayerFlipState->completionNotifier.surface.pSurfaceEvo) {
                usageMaskOneLayer = FLD_SET_DRF(_SURFACE, _USAGE_MASK_LAYER,
                                                _NOTIFIER, _ENABLE,
                                                usageMaskOneLayer);
            }

            if ((!pLayerFlipState->syncObject.usingSyncpt) &&
                (pSurfaceEvo ==
                 pSyncObject->u.semaphores.acquireSurface.pSurfaceEvo) &&
                (pSurfaceEvo ==
                 pSyncObject->u.semaphores.releaseSurface.pSurfaceEvo)) {
                usageMaskOneLayer = FLD_SET_DRF(_SURFACE, _USAGE_MASK_LAYER,
                                                _SEMAPHORE, _ENABLE,
                                                usageMaskOneLayer);
            }

            usageMaskOneHead = FLD_IDX_SET_DRF_NUM(_SURFACE, _USAGE_MASK,
                                                   _LAYER, layer,
                                                   usageMaskOneLayer,
                                                   usageMaskOneHead);
        }

        if (pSurfaceEvo == pSdHeadState->cursor.pSurfaceEvo) {
            usageMaskOneHead = FLD_SET_DRF(_SURFACE, _USAGE_MASK,
                                           _CURSOR, _ENABLE,
                                           usageMaskOneHead);
        }
    }

    return usageMaskOneHead;
}

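/*!
 * Wait for the layer channels described by layerMaskPerSdApiHead to become
 * idle, yielding until a timeout; force any channels that remain busy to be
 * idle.
 */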
void nvIdleLayerChannels(NVDevEvoRec *pDevEvo,
    NvU32 layerMaskPerSdApiHead[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP])
{
    NVEvoChannelMask channelMaskPerSd[NVKMS_MAX_SUBDEVICES] = { };
    const NVDispEvoRec *pDispEvo;
    NvU32 sd;
    NvU64 startTime = 0;
    const NvU32 timeout = 500000; // .5 seconds
    NvBool allIdle;

    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
        for (NvU32 apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
            const NVDispApiHeadStateEvoRec *pApiHeadState =
                &pDispEvo->apiHeadState[apiHead];
            NvU32 head;

            if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
                continue;
            }

            FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
                for (NvU32 layer = 0;
                     layer < pDevEvo->head[head].numLayers; layer++) {
                    if ((NVBIT(layer) &
                         layerMaskPerSdApiHead[sd][apiHead]) != 0x0) {
                        channelMaskPerSd[sd] |=
                            pDevEvo->head[head].layer[layer]->channelMask;
                    }
                }
            }
        }
    }

    do {
        allIdle = TRUE;
        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {

            for (NvU32 head = 0; head < pDevEvo->numHeads; head++) {
                NvU32 layer;

                if (!nvHeadIsActive(pDispEvo, head)) {
                    continue;
                }

                for (layer = 0;
                     layer < pDevEvo->head[head].numLayers; layer++) {
                    NVEvoChannelPtr pLayerChannel =
                        pDevEvo->head[head].layer[layer];
                    NvBool isMethodPending;

                    if ((pLayerChannel->channelMask &
                         channelMaskPerSd[sd]) == 0x0) {
                        continue;
                    }

                    if (pDevEvo->hal->IsChannelMethodPending(pDevEvo,
                            pLayerChannel, sd, &isMethodPending) &&
                        isMethodPending) {

                        allIdle = FALSE;
                    } else {
                        /* This has been completed, no need to keep trying */
                        channelMaskPerSd[sd] &= ~pLayerChannel->channelMask;
                    }
                }
            }
        }

        if (!allIdle) {
            if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) {
                break;
            }
            nvkms_yield();
        }
    } while (!allIdle);

    /* If we timed out above, force things to be idle. */
    if (!allIdle) {
        NVEvoIdleChannelState idleChannelState = { };
        NvBool tryToForceIdle = FALSE;

        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {

            for (NvU32 head = 0; head < pDevEvo->numHeads; head++) {
                NvU32 layer;

                if (!nvHeadIsActive(pDispEvo, head)) {
                    continue;
                }

                for (layer = 0;
                     layer < pDevEvo->head[head].numLayers; layer++) {
                    NVEvoChannelPtr pLayerChannel =
                        pDevEvo->head[head].layer[layer];

                    if ((pLayerChannel->channelMask &
                         channelMaskPerSd[sd]) != 0x0) {
                        idleChannelState.subdev[sd].channelMask |=
                            pLayerChannel->channelMask;
                        tryToForceIdle = TRUE;
                    }
                }
            }
        }

        if (tryToForceIdle) {
            NvBool ret = pDevEvo->hal->ForceIdleSatelliteChannel(pDevEvo,
                &idleChannelState);
            if (!ret) {
                nvAssert(ret);
            }
        }
    }
}

/*
 * XXX NVKMS TODO
 * Make the sync more efficient: we only need to sync if the
 * in-flight methods flip away from this surface.
 */
void nvEvoClearSurfaceUsage(NVDevEvoRec *pDevEvo,
                            NVSurfaceEvoPtr pSurfaceEvo)
{
    NvU32 head;

    /*
     * If the core channel is no longer allocated, we don't need to
     * sync.  This assumes the channels are allocated/deallocated
     * together.
     */
    if (pDevEvo->core) {

        if (pDevEvo->hal->ClearSurfaceUsage != NULL) {
            pDevEvo->hal->ClearSurfaceUsage(pDevEvo, pSurfaceEvo);
        }

        nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);

        for (head = 0; head < pDevEvo->numHeads; head++) {
            NvU32 layer;

            for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
                NVEvoChannelPtr pChannel =
                    pDevEvo->head[head].layer[layer];

                nvRMSyncEvoChannel(pDevEvo, pChannel, __LINE__);
            }
        }
    }
}

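/*!
 * Idle the base (main layer) channel of each hardware head mapped to the
 * api-head.  Through pStoppedBase, report whether any base channel had to
 * be stopped.
 */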
NvBool nvIdleBaseChannelOneApiHead(NVDispEvoRec *pDispEvo, NvU32 apiHead,
                                   NvBool *pStoppedBase)
{
    NvBool ret = TRUE;
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
    const NVDispApiHeadStateEvoRec *pApiHeadState =
        &pDispEvo->apiHeadState[apiHead];
    NvU32 head;

    *pStoppedBase = FALSE;
    FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) {
        NvBool stoppedBase = FALSE;
        if (!nvRMIdleBaseChannel(pDevEvo, head,
                                 pDispEvo->displayOwner, &stoppedBase)) {
            ret = FALSE;
        } else if (stoppedBase) {
            *pStoppedBase = TRUE;
        }
    }

    return ret;
}