1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2017-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "nvkms-types.h"
25 #include "nvkms-headsurface.h"
26 #include "nvkms-headsurface-3d.h"
27 #include "nvkms-headsurface-priv.h"
28 #include "nvkms-headsurface-swapgroup.h"
29 #include "nvkms-utils.h"
30 #include "nvkms-rmapi.h"
31 #include "nvkms-surface.h"
32 #include "nvkms-sync.h"
33 #include "nvkms-flip.h"
34 #include "nvkms-private.h"
35 #include "nvkms-evo.h"
36 #include "nvkms-dma.h"
37 #include "nvkms-modeset.h"
38 #include "nvkms-rm.h"
39 
40 #include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
41 
42 static NvBool AllocNotifiers(NVHsDeviceEvoRec *pHsDevice);
43 static void FreeNotifiers(NVHsDeviceEvoRec *pHsDevice);
44 static void HsProcFsRecordFullscreenSgFrames(NVHsChannelEvoPtr pHsChannel,
45                                              NvBool isFullscreen);
46 
47 static NvU32 GetLog2GobsPerBlockY(NvU32 height)
48 {
49     NvU32 log2GobsPerBlockY = 4; // 16 gobs/block
50 
51     const NvU64 heightAndOneHalf = (NvU64)height + ((NvU64)height/2ULL);
52     const NvU64 nvFermiBlockLinearGobHeight = NVKMS_BLOCK_LINEAR_GOB_HEIGHT;
53 
54     // If we're wasting too much memory, cap the block height
55     while ((log2GobsPerBlockY > 0U) &&
56            (((nvFermiBlockLinearGobHeight * ((NvU64)1ULL << log2GobsPerBlockY))) >
57             heightAndOneHalf)) {
58         log2GobsPerBlockY--;
59     }
60 
61     // If there is more than one gob per block,
62     if (log2GobsPerBlockY > 0U) {
63 
64         // Proposed shrunk block size.
65         // compute a new proposedBlockSize, based on a gob size that is half
66         // of the current value (log2 - 1).  the "if(log2 > 0)" above keeps this
67         // value always ">= 0".
68         NvU32 proposedBlockSize =
69             NVKMS_BLOCK_LINEAR_GOB_HEIGHT << (log2GobsPerBlockY - 1U);
70 
71         // While the proposedBlockSize is greater than the image size,
72         while (proposedBlockSize >= height) {
73             // It's safe to cut the gobs per block in half.
74             --log2GobsPerBlockY;
75 
76             // If we've hit 1 gob per block, stop.
77             if (log2GobsPerBlockY == 0U) {
78                 break;
79             }
80             // Otherwise, divide the proposed block dimension/size by two.
81             proposedBlockSize /= 2U;
82         }
83     }
84 
85     return log2GobsPerBlockY;
86 }
87 
88 static void GetLog2GobsPerBlock(
89     NvU32 bytesPerPixel,
90     NvU32 widthInPixels,
91     NvU32 heightInPixels,
92     NvU32 *pLog2GobsPerBlockY,
93     NvU32 *pitchInBlocks,
94     NvU64 *sizeInBytes)
95 {
96     NvU32 xAlign, yAlign, pitchInBytes, lines;
97 
98     NvU32 log2GobsPerBlockY = GetLog2GobsPerBlockY(heightInPixels);
99 
100     xAlign = NVKMS_BLOCK_LINEAR_GOB_WIDTH - 1;
101     yAlign = (NVKMS_BLOCK_LINEAR_GOB_HEIGHT << log2GobsPerBlockY) - 1;
102 
103     pitchInBytes = NV_ALIGN_UP(widthInPixels * bytesPerPixel, xAlign);
104     lines = NV_ALIGN_UP(heightInPixels, yAlign);
105 
106     *pLog2GobsPerBlockY = log2GobsPerBlockY;
107     *sizeInBytes = (NvU64)pitchInBytes * lines;
108     *pitchInBlocks = pitchInBytes / NVKMS_BLOCK_LINEAR_GOB_WIDTH;
109 }
110 
111 static NvU32 AllocSurfaceVidmem(
112     const NVDevEvoRec *pDevEvo,
113     NvU32 handle,
114     NvU64 sizeInBytes)
115 {
116     NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
117 
118     memAllocParams.owner = NVKMS_RM_HEAP_ID;
119     memAllocParams.size = sizeInBytes;
120     memAllocParams.type = NVOS32_TYPE_IMAGE;
121 
122     memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) |
123                           DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS) |
124                           DRF_DEF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR);
125 
126     memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT);
127 
128     memAllocParams.flags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN |
129                            NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
130 
131     memAllocParams.alignment = NV_EVO_SURFACE_ALIGNMENT;
132 
133     return nvRmApiAlloc(nvEvoGlobal.clientHandle,
134                         pDevEvo->deviceHandle,
135                         handle,
136                         NV01_MEMORY_LOCAL_USER,
137                         &memAllocParams);
138 }
139 
140 NvU64 nvHsMapSurfaceToDevice(
141     const NVDevEvoRec *pDevEvo,
142     const NvU32 rmHandle,
143     const NvU64 sizeInBytes,
144     const enum NvHsMapPermissions hsMapPermissions)
145 {
146     NvU32 ret;
147     NvU32 flags = DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE);
148     NvU64 gpuAddress = 0;
149 
150     /* pHsDevice could be NULL if we are in no3d mode. */
151 
152     if (pDevEvo->pHsDevice == NULL) {
153         return gpuAddress;
154     }
155 
156     switch (hsMapPermissions) {
157     case NvHsMapPermissionsNone:
158         return gpuAddress;
159     case NvHsMapPermissionsReadWrite:
160         flags |= DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_WRITE);
161         break;
162     case NvHsMapPermissionsReadOnly:
163         flags |= DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_ONLY);
164         break;
165     }
166 
167     ret = nvRmApiMapMemoryDma(nvEvoGlobal.clientHandle,
168                               pDevEvo->deviceHandle,
169                               pDevEvo->nvkmsGpuVASpace,
170                               rmHandle,
171                               0, /* offset */
172                               sizeInBytes,
173                               flags,
174                               &gpuAddress);
175 
176     if (ret == NVOS_STATUS_SUCCESS) {
177         return gpuAddress;
178     } else {
179         return NV_HS_BAD_GPU_ADDRESS;
180     }
181 }
182 
183 void nvHsUnmapSurfaceFromDevice(
184     const NVDevEvoRec *pDevEvo,
185     const NvU32 rmHandle,
186     const NvU64 gpuAddress)
187 {
188     if ((gpuAddress == 0) || (gpuAddress == NV_HS_BAD_GPU_ADDRESS)) {
189         return;
190     }
191 
192     if (pDevEvo->pHsDevice == NULL) {
193         return;
194     }
195 
196     nvRmApiUnmapMemoryDma(nvEvoGlobal.clientHandle,
197                           pDevEvo->deviceHandle,
198                           pDevEvo->nvkmsGpuVASpace,
199                           rmHandle,
200                           0, /* flags */
201                           gpuAddress);
202 }
203 
204 /*!
205  * Free an NVHsSurfaceRec, allocated by nvHsAllocSurface().
206  *
207  * \param[in]  pDevEvo     The device.
208  * \param[in]  pHsSurface  The NVHsSurfaceRec to free.
209  */
210 void nvHsFreeSurface(
211     NVDevEvoRec *pDevEvo,
212     NVHsSurfaceRec *pHsSurface)
213 {
214     if (pHsSurface == NULL) {
215         return;
216     }
217 
218     if (pHsSurface->rmHandle != 0) {
219         nvRmApiFree(nvEvoGlobal.clientHandle,
220                     pDevEvo->deviceHandle,
221                     pHsSurface->rmHandle);
222 
223         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pHsSurface->rmHandle);
224         pHsSurface->rmHandle = 0;
225     }
226 
227     if (pHsSurface->nvKmsHandle != 0) {
228         nvEvoUnregisterSurface(pDevEvo,
229                                pDevEvo->pNvKmsOpenDev,
230                                pHsSurface->nvKmsHandle,
231                                FALSE /* skipUpdate */);
232     }
233 
234     nvFree(pHsSurface);
235 }
236 
237 NVSurfaceEvoRec *nvHsGetNvKmsSurface(const NVDevEvoRec *pDevEvo,
238                                      NvKmsSurfaceHandle surfaceHandle,
239                                      const NvBool requireCtxDma)
240 {
241     const NVEvoApiHandlesRec *pNvKmsOpenDevSurfaceHandles;
242     NVSurfaceEvoRec *pKmsSurface;
243 
244     pNvKmsOpenDevSurfaceHandles =
245         nvGetSurfaceHandlesFromOpenDevConst(pDevEvo->pNvKmsOpenDev);
246 
247     nvAssert(pNvKmsOpenDevSurfaceHandles != NULL);
248 
249     pKmsSurface =
250         nvEvoGetSurfaceFromHandleNoCtxDmaOk(pDevEvo,
251                                             pNvKmsOpenDevSurfaceHandles,
252                                             surfaceHandle);
253     nvAssert(pKmsSurface != NULL);
254     nvAssert(pKmsSurface->requireCtxDma == requireCtxDma);
255 
256     return pKmsSurface;
257 }
258 
259 /*!
260  * Allocate an NVHsSurfaceRec, for use with headSurface.
261  *
262  * Video memory is allocated, mapped into the device's GPU virtual address
263  * space, and registered with NVKMS's pNvKmsOpenDev.
264  *
265  * Note the video memory is not cleared here, because the corresponding graphics
266  * channel may not be allocated, yet.
267  *
268  * \param[in]  pDevEvo         The device.
269  * \param[in]  requireCtxDma   Whether display hardware requires access.
270  * \param[in]  format          The format of the surface.
271  * \param[in]  widthInPixels   The width of the surface, in pixels.
272  * \param[in]  heightInPixels  The height of the surface, in pixels.
273  *
274  * \return  On success, an allocate NVHsSurfaceRec structure is returned.
275  *          On failure, NULL is returned.
276  */
277 NVHsSurfaceRec *nvHsAllocSurface(
278     NVDevEvoRec *pDevEvo,
279     const NvBool requireCtxDma,
280     const enum NvKmsSurfaceMemoryFormat format,
281     const NvU32 widthInPixels,
282     const NvU32 heightInPixels)
283 {
284     struct NvKmsRegisterSurfaceParams nvKmsParams = { };
285     const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
286         nvKmsGetSurfaceMemoryFormatInfo(format);
287     NvU32 pitchInBlocks = 0;
288     NvU64 sizeInBytes = 0;
289     NvU32 log2GobsPerBlockY = 0;
290     NvU32 ret = 0;
291     NVHsSurfaceRec *pHsSurface = nvCalloc(1, sizeof(*pHsSurface));
292 
293     if (pHsSurface == NULL) {
294         return NULL;
295     }
296 
297     GetLog2GobsPerBlock(pFormatInfo->rgb.bytesPerPixel,
298                         widthInPixels,
299                         heightInPixels,
300                         &log2GobsPerBlockY,
301                         &pitchInBlocks,
302                         &sizeInBytes);
303 
304     sizeInBytes = NV_ALIGN_UP(sizeInBytes, NV_EVO_SURFACE_ALIGNMENT);
305 
306     pHsSurface->rmHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
307 
308     if (pHsSurface->rmHandle == 0) {
309         goto fail;
310     }
311 
312     ret = AllocSurfaceVidmem(pDevEvo, pHsSurface->rmHandle, sizeInBytes);
313 
314     if (ret != NVOS_STATUS_SUCCESS) {
315         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pHsSurface->rmHandle);
316         pHsSurface->rmHandle = 0;
317 
318         goto fail;
319     }
320 
321     pHsSurface->gobsPerBlock.y = log2GobsPerBlockY;
322 
323     /*
324      * For blocklinear surfaces, the NVKMS pitch is in units of blocks, which
325      * matches what GetLog2GobsPerBlock() returned to us.
326      */
327     nvKmsParams.request.useFd = FALSE;
328     nvKmsParams.request.rmClient = nvEvoGlobal.clientHandle;
329     nvKmsParams.request.widthInPixels = widthInPixels;
330     nvKmsParams.request.heightInPixels = heightInPixels;
331     nvKmsParams.request.layout = NvKmsSurfaceMemoryLayoutBlockLinear;
332     nvKmsParams.request.format = format;
333     nvKmsParams.request.noDisplayHardwareAccess = !requireCtxDma;
334     nvKmsParams.request.log2GobsPerBlockY = log2GobsPerBlockY;
335 
336     nvKmsParams.request.planes[0].u.rmObject = pHsSurface->rmHandle;
337     nvKmsParams.request.planes[0].pitch = pitchInBlocks;
338     nvKmsParams.request.planes[0].rmObjectSizeInBytes = sizeInBytes;
339 
340     nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, &nvKmsParams,
341                          NvHsMapPermissionsReadWrite);
342 
343     if (nvKmsParams.reply.surfaceHandle == 0) {
344         goto fail;
345     }
346 
347     pHsSurface->nvKmsHandle = nvKmsParams.reply.surfaceHandle;
348 
349     pHsSurface->pSurfaceEvo =
350         nvHsGetNvKmsSurface(pDevEvo, pHsSurface->nvKmsHandle, requireCtxDma);
351 
352     if (pHsSurface->pSurfaceEvo == NULL) {
353         goto fail;
354     }
355 
356     return pHsSurface;
357 
358 fail:
359     nvHsFreeSurface(pDevEvo, pHsSurface);
360 
361     return NULL;
362 }
363 
364 NvBool nvHsAllocDevice(
365     NVDevEvoRec *pDevEvo,
366     const struct NvKmsAllocDeviceRequest *pRequest)
367 {
368     NVHsDeviceEvoRec *pHsDevice;
369 
370     nvAssert(pDevEvo->pHsDevice == NULL);
371 
372     if (!pDevEvo->isHeadSurfaceSupported) {
373         return TRUE;
374     }
375 
376     if (pRequest->no3d) {
377         return TRUE;
378     }
379 
380     pHsDevice = nvCalloc(1, sizeof(*pHsDevice));
381 
382     if (pHsDevice == NULL) {
383         goto fail;
384     }
385 
386     pDevEvo->pHsDevice = pHsDevice;
387     pHsDevice->pDevEvo = pDevEvo;
388 
389     nvAssert(pDevEvo->nvkmsGpuVASpace);
390 
391     if (!nvHs3dAllocDevice(pHsDevice)) {
392         goto fail;
393     }
394 
395     if (!AllocNotifiers(pHsDevice)) {
396         goto fail;
397     }
398 
399     return TRUE;
400 
401 fail:
402     nvHsFreeDevice(pDevEvo);
403 
404     return FALSE;
405 }
406 
407 void nvHsFreeDevice(NVDevEvoRec *pDevEvo)
408 {
409     NVHsDeviceEvoRec *pHsDevice = pDevEvo->pHsDevice;
410 
411     if (pHsDevice == NULL) {
412         return;
413     }
414 
415     FreeNotifiers(pHsDevice);
416 
417     nvHs3dFreeDevice(pHsDevice);
418 
419     nvFree(pHsDevice);
420 
421     pDevEvo->pHsDevice = NULL;
422 }
423 
424 NVHsChannelEvoPtr nvHsAllocChannel(NVDispEvoRec *pDispEvo, NvU32 apiHead)
425 {
426     NVHsChannelEvoRec *pHsChannel = nvCalloc(1, sizeof(*pHsChannel));
427 
428     if (pHsChannel == NULL) {
429         goto fail;
430     }
431 
432     pHsChannel->pDispEvo = pDispEvo;
433     pHsChannel->apiHead = apiHead;
434 
435     if (!nvHs3dAllocChannel(pHsChannel)) {
436         goto fail;
437     }
438 
439     return pHsChannel;
440 
441 fail:
442     nvHsFreeChannel(pHsChannel);
443 
444     return NULL;
445 }
446 
447 void nvHsFreeChannel(NVHsChannelEvoPtr pHsChannel)
448 {
449     if (pHsChannel == NULL) {
450         return;
451     }
452 
453     nvHs3dFreeChannel(pHsChannel);
454 
455     nvFree(pHsChannel);
456 }
457 
458 static NvU32 HsGetSemaphoreIndex(
459     const NVFlipNIsoSurfaceEvoHwState *pSemaSurface)
460 {
461     const NvU32 offsetInBytes = pSemaSurface->offsetInWords * 4;
462     const enum NvKmsNIsoFormat format = pSemaSurface->format;
463     const NvU32 sizeOfSemaphore = nvKmsSizeOfSemaphore(format);
464 
465     /*
466      * The semaphore size must be greater than zero.  Flip validation should
467      * prevent us from getting here with an invalid NvKmsNIsoFormat.
468      */
469     nvAssert(sizeOfSemaphore > 0);
470 
471     /* The semaphore offset should be a multiple of the semaphore size. */
472     nvAssert((offsetInBytes % sizeOfSemaphore) == 0);
473 
474     return offsetInBytes / sizeOfSemaphore;
475 }
476 
477 /*!
478  * Read the payload of the semaphore described in the pSemaSurface.
479  */
480 static NvU32 HsFlipQueueReadSemaphore(
481     const NVHsChannelEvoRec *pHsChannel,
482     const NVFlipNIsoSurfaceEvoHwState *pSemaSurface)
483 {
484     const enum NvKmsNIsoFormat format = pSemaSurface->format;
485     const NvU32 semaphoreIndex = HsGetSemaphoreIndex(pSemaSurface);
486     const NvU32 sd = pHsChannel->pDispEvo->displayOwner;
487     const void *ptr;
488     struct nvKmsParsedSemaphore parsedSemaphore = { };
489 
490     /* We should only get here if we have a valid semaphore surface. */
491     nvAssert(pSemaSurface->pSurfaceEvo != NULL);
492 
493     ptr = pSemaSurface->pSurfaceEvo->cpuAddress[sd];
494 
495     if (ptr == NULL) {
496         nvAssert(!"Semaphore surface without CPU mapping!");
497         return 0;
498     }
499 
500     nvKmsParseSemaphore(format, semaphoreIndex, ptr, &parsedSemaphore);
501 
502     return parsedSemaphore.payload;
503 }
504 
505 /*!
506  * Return whether the specified pFlipState is ready to flip.
507  */
508 static NvBool HsFlipQueueEntryIsReady(
509     const NVHsChannelEvoRec *pHsChannel,
510     const NVHsLayerRequestedFlipState *pFlipState)
511 {
512     const NVFlipNIsoSurfaceEvoHwState *pSemaSurface =
513         &pFlipState->syncObject.u.semaphores.acquireSurface;
514 
515     if (pFlipState->syncObject.usingSyncpt) {
516         return TRUE;
517     }
518 
519     /*
520      * If a semaphore surface was specified, check if the semaphore has reached
521      * the specified acquire value.
522      */
523     if (pSemaSurface->pSurfaceEvo != NULL) {
524         const NvU32 semaphoreValue =
525             HsFlipQueueReadSemaphore(pHsChannel, pSemaSurface);
526 
527         if (pHsChannel->swapGroupFlipping) {
528             // With swap group flipping, the client semaphore should be
529             // written before the non-stall interrupt kicking off the flip.
530             nvAssert(semaphoreValue == pFlipState->syncObject.u.semaphores.acquireValue);
531         } else {
532             if (semaphoreValue != pFlipState->syncObject.u.semaphores.acquireValue) {
533                 return FALSE;
534             }
535         }
536     }
537 
538     /*
539      * If a time stamp was specified for the flip, check if the time stamp has
540      * been satisfied.
541      *
542      * XXX NVKMS HEADSURFACE TODO: Implement time stamp flip check.
543      */
544 
545     return TRUE;
546 }
547 
548 /*!
549  * Update the reference count of all the surfaces described in the pFlipState.
550  */
551 static void HsUpdateFlipQueueEntrySurfaceRefCount(
552     const NVHsLayerRequestedFlipState *pFlipState,
553     NvBool increase)
554 {
555     HsChangeSurfaceFlipRefCount(
556         pFlipState->pSurfaceEvo[NVKMS_LEFT], increase);
557 
558     HsChangeSurfaceFlipRefCount(
559         pFlipState->pSurfaceEvo[NVKMS_RIGHT], increase);
560 
561     if (!pFlipState->syncObject.usingSyncpt) {
562         HsChangeSurfaceFlipRefCount(
563             pFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, increase);
564 
565         HsChangeSurfaceFlipRefCount(
566             pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, increase);
567     }
568 }
569 
570 /*!
571  * Update bookkeeping for "flipping away" from a pFlipState.
572  */
573 static void HsReleaseFlipQueueEntry(
574     NVDevEvoPtr pDevEvo,
575     NVHsChannelEvoPtr pHsChannel,
576     const NVHsLayerRequestedFlipState *pFlipState)
577 {
578     /*
579      * If a semaphore surface was specified, we can now write its release value.
580      */
581     if (!pFlipState->syncObject.usingSyncpt &&
582         pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo != NULL) {
583 
584         /*
585          * XXX NVKMS HEADSURFACE TODO: write the timestamp in the EVO/NVDisplay
586          * semaphore structure, based on NvKmsNIsoFormat.  The graphics channel
587          * doesn't support all the NvKmsNIsoFormats, so we would need to use a
588          * graphics channel semaphore release of STRUCTURE_SIZE = ONE_WORD with
589          * the timestamp as payload.  It would be unfortunate to read ptimer
590          * registers in order to compute the payload value.
591          */
592 
593         nvHs3dReleaseSemaphore(pHsChannel,
594                                pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo,
595                                pFlipState->syncObject.u.semaphores.releaseSurface.format,
596                                pFlipState->syncObject.u.semaphores.releaseSurface.offsetInWords,
597                                pFlipState->syncObject.u.semaphores.releaseValue,
598                                TRUE /* allPreceedingReads */);
599     }
600 
601     /*
602      * HeadSurface no longer needs to read from the surfaces in pFlipState;
603      * decrement their reference counts.
604      */
605     HsUpdateFlipQueueEntrySurfaceRefCount(pFlipState, FALSE);
606 }
607 
608 /*!
609  * "Fast forward" through flip queue entries that are ready.
610  *
611  * \param[in,out]  pHsChannel               The headSurface channel.
612  * \param[in]      layer                    The layer of the flip queue.
613  * \param[in]      honorIsReadyCriteria     Honor the isReady check for
614  *                                          flip queue entries.
615  * \param[in]      honorMinPresentInterval  Honor the minPresentInterval in
616  *                                          flip queue entries.
617  */
618 static void HsFastForwardFlipQueue(
619     NVHsChannelEvoPtr pHsChannel,
620     const NvU8 layer,
621     const NvBool honorIsReadyCriteria,
622     const NvBool honorMinPresentInterval)
623 {
624     NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo;
625     NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
626 
627     /*
628      * For swapgroup flips, every flip kicked off by the client needs to result
629      * in a real flip in hardware, so we can't fast forward through flips here.
630      */
631     if (pHsChannel->config.neededForSwapGroup) {
632         return;
633     }
634 
635     while (!nvListIsEmpty(pFlipQueue)) {
636 
637         NVHsChannelFlipQueueEntry *pEntry =
638             nvListFirstEntry(pFlipQueue,
639                              NVHsChannelFlipQueueEntry,
640                              flipQueueEntry);
641         /*
642          * Stop "fast forwarding" once we find a flip queue entry that is not
643          * ready: we must not release semaphores out of order, otherwise we
644          * could confuse client semaphore interlocking.
645          */
646         if (honorIsReadyCriteria &&
647             !HsFlipQueueEntryIsReady(pHsChannel, &pEntry->hwState)) {
648             break;
649         }
650 
651         /*
652          * Normally, we want to make sure that each MinPresentInterval > 0 flip
653          * is displayed for one frame, so we shouldn't fast forward past them.
654          */
655         if (honorMinPresentInterval &&
656             (pEntry->hwState.minPresentInterval != 0)) {
657             break;
658         }
659 
660         /*
661          * We are "flipping away" from the flip queue entry in current.  Release
662          * it, and replace it with the entry in pEntry.
663          */
664 
665         HsReleaseFlipQueueEntry(pDevEvo, pHsChannel,
666                                 &pHsChannel->flipQueue[layer].current);
667 
668         pHsChannel->flipQueue[layer].current = pEntry->hwState;
669 
670         nvListDel(&pEntry->flipQueueEntry);
671         nvFree(pEntry);
672     }
673 }
674 
675 /*!
676  * Push a new entry to the end of the headSurface channel's flip queue.
677  *
678  * \param[in,out]  pHsChannel  The headSurface channel.
679  * \param[in]      layer       The layer of the flip queue.
680  * \param[in]      pFlipState    The hwState to be pushed on the flip queue.
681  */
682 void nvHsPushFlipQueueEntry(
683     NVHsChannelEvoPtr pHsChannel,
684     const NvU8 layer,
685     const NVHsLayerRequestedFlipState *pFlipState)
686 {
687     NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
688     NVHsChannelFlipQueueEntry *pEntry = nvCalloc(1, sizeof(*pEntry));
689 
690     if (pEntry == NULL) {
691         /*
692          * XXX NVKMS HEADSURFACE TODO: we cannot fail at this point in the call
693          * chain (we've already committed to the flip).  Move the nvCalloc() call
694          * earlier in the call chain to a point where we can fail.
695          */
696         return;
697     }
698 
699     pEntry->hwState = *pFlipState;
700 
701     /* Increment the ref counts on the surfaces in the flip queue entry. */
702 
703     HsUpdateFlipQueueEntrySurfaceRefCount(&pEntry->hwState, TRUE);
704 
705     /* "Fast forward" through existing flip queue entries that are ready. */
706 
707     HsFastForwardFlipQueue(pHsChannel, layer,
708                            TRUE /* honorIsReadyCriteria */,
709                            TRUE /* honorMinPresentInterval */);
710 
711     /* Append the new entry. */
712 
713     nvListAppend(&pEntry->flipQueueEntry, pFlipQueue);
714 }
715 
716 /*!
717  * Remove the first entry in the flip queue and return it.
718  *
719  * If the first entry in the flipQueue is ready to be consumed by headSurface,
720  * remove it from the list and return it in the 'pFlipState' argument.
721  *
722  * If this function returns TRUE, it is the caller's responsibility to
723  * eventually call
724  *
725  *    HsUpdateFlipQueueEntrySurfaceRefCount(pFlipState, FALSE)
726  *
727  * for the returned pFlipState.
728  *
729  * \param[in,out]  pHsChannel  The headSurface channel.
730  * \param[in]      layer       The layer of the flip queue.
731  * \param[out]     pFlipState    The hwState that was popped off the flip queue.
732  *
733  * \return   Return TRUE if a flip queue entry was popped off the queue and
734  *           copied into pFlipState.
735  */
736 static NvBool HsPopFlipQueueEntry(
737     NVHsChannelEvoPtr pHsChannel,
738     const NvU8 layer,
739     NVHsLayerRequestedFlipState *pFlipState)
740 {
741     NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
742     NVHsChannelFlipQueueEntry *pEntry;
743 
744     if (nvListIsEmpty(pFlipQueue)) {
745         return FALSE;
746     }
747 
748     pEntry = nvListFirstEntry(pFlipQueue,
749                               NVHsChannelFlipQueueEntry,
750                               flipQueueEntry);
751 
752     if (!HsFlipQueueEntryIsReady(pHsChannel, &pEntry->hwState)) {
753         return FALSE;
754     }
755 
756     *pFlipState = pEntry->hwState;
757 
758     nvListDel(&pEntry->flipQueueEntry);
759     nvFree(pEntry);
760 
761     return TRUE;
762 }
763 
764 /*!
765  * Update the current flip queue entry for a new headSurface frame.
766  *
767  * To build a new frame of headSurface, we look at the flip queue of each layer.
768  * If there is an entry available, we pop it off the queue and replace .current
769  * with the entry.
770  */
771 static void HsUpdateFlipQueueCurrent(
772     NVHsChannelEvoPtr pHsChannel)
773 {
774     NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo;
775     NvU8 layer;
776 
777     for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
778 
779         NVHsLayerRequestedFlipState newCurrent = { };
780 
781         /*
782          * XXX NVKMS HEADSURFACE TODO: fast forward to the last ready flip queue
783          * entry.  Share code with similar functionality in
784          * nvHsPushFlipQueueEntry().
785          */
786 
787         if (!HsPopFlipQueueEntry(pHsChannel, layer, &newCurrent)) {
788             continue;
789         }
790 
791         /*
792          * We have a new flip queue entry to place in current.  Release the old
793          * current flip queue entry, and replace it with the popped entry.
794          */
795         HsReleaseFlipQueueEntry(pDevEvo, pHsChannel,
796                                 &pHsChannel->flipQueue[layer].current);
797 
798         pHsChannel->flipQueue[layer].current = newCurrent;
799     }
800 }
801 
802 /*!
803  * Drain the flip queue on each layer of pHsChannel.
804  *
805  * In preparation to disable headSurface, release the flip queue entry in
806  * .current, as well as all entries in the queue.
807  */
808 void nvHsDrainFlipQueue(
809     NVHsChannelEvoPtr pHsChannel)
810 {
811     NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo;
812     NvU8 layer;
813 
814     for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
815         NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
816 
817         HsReleaseFlipQueueEntry(pDevEvo, pHsChannel,
818                                 &pHsChannel->flipQueue[layer].current);
819 
820         nvkms_memset(&pHsChannel->flipQueue[layer].current, 0,
821                      sizeof(pHsChannel->flipQueue[layer].current));
822 
823         while (!nvListIsEmpty(pFlipQueue)) {
824 
825             NVHsChannelFlipQueueEntry *pEntry =
826                 nvListFirstEntry(pFlipQueue,
827                                  NVHsChannelFlipQueueEntry,
828                                  flipQueueEntry);
829 
830             HsReleaseFlipQueueEntry(pDevEvo, pHsChannel, &pEntry->hwState);
831 
832             nvListDel(&pEntry->flipQueueEntry);
833             nvFree(pEntry);
834         }
835     }
836 }
837 
838 /*!
839  * Return whether all flip queues on this pHsChannel are idle.
840  *
841  * As a side effect, attempt to "fast forward" through flip queue entries, in an
842  * effort to make the flip queues idle.  When fast forwarding, always ignore the
843  * client-requested minPresentInterval.  Optionally (when force == TRUE), also
844  * ignore the "IsReady" check.
845  *
846  * This is intended to be used in two scenarios:
847  *
848  * - First, call nvHsIdleFlipQueue(force=FALSE) in a loop with all other heads
849  *   we are trying to idle.  This should allow semaphore interlocking to
850  *   progress naturally.
851  *
852  * - If that loop times out, call nvHsIdleFlipQueue(force=TRUE), which will
853  *   ignore the IsReady conditions and forcibly make the flip queues idle.
854  */
855 NvBool nvHsIdleFlipQueue(
856     NVHsChannelEvoPtr pHsChannel,
857     NvBool force)
858 {
859     const NvBool honorIsReadyCriteria = !force;
860     NvBool ret = TRUE;
861     NvU8 layer;
862 
863     for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
864 
865         HsFastForwardFlipQueue(pHsChannel, layer,
866                                honorIsReadyCriteria,
867                                FALSE /* honorMinPresentInterval */);
868 
869         if (!nvListIsEmpty(&pHsChannel->flipQueue[layer].queue)) {
870             /* force should always result in an empty flip queue */
871             nvAssert(!force);
872             ret = FALSE;
873         }
874     }
875 
876     return ret;
877 }
878 
879 /*
880  * We use notifiers to know when headSurface frames are presented, so that we
881  * don't render to the visible buffer.
882  */
883 
884 static NvU32 AllocNotifierMemory(
885     const NVDevEvoRec *pDevEvo,
886     NvU32 handle)
887 {
888     NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
889 
890     memAllocParams.owner = NVKMS_RM_HEAP_ID;
891     memAllocParams.size = NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES;
892     memAllocParams.type = NVOS32_TYPE_DMA;
893 
894     memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) |
895                           DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS) |
896                           DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) |
897                           DRF_DEF(OS32, _ATTR, _COHERENCY, _UNCACHED);
898 
899     memAllocParams.flags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN |
900                            NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT |
901                            NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE;
902 
903     memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _ISO, _NO);
904 
905     return nvRmApiAlloc(nvEvoGlobal.clientHandle,
906                        pDevEvo->deviceHandle,
907                        handle,
908                        NV01_MEMORY_LOCAL_USER,
909                        &memAllocParams);
910 }
911 
912 static NvBool MapNotifiers(NVHsDeviceEvoRec *pHsDevice)
913 {
914     NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
915     NVHsNotifiersRec *pNotifiers = &pHsDevice->notifiers;
916     const NvU64 size = NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES;
917     NvU32 sd, ret;
918 
919     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
920         ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
921                                pDevEvo->pSubDevices[sd]->handle,
922                                pNotifiers->rmHandle,
923                                0,
924                                size,
925                                (void **)&pNotifiers->sd[sd].ptr,
926                                0);
927         if (ret != NVOS_STATUS_SUCCESS) {
928             return FALSE;
929         }
930 
931         /*
932          * Intentionally use NVMISC_MEMSET() rather than nvkms_memset(): some
933          * CPU architectures, notably ARM, may fault if streaming stores like in
934          * an optimized memset() implementation are used on a BAR1 mapping.
935          * NVMISC_MEMSET() is conveniently not optimized.
936          */
937         NVMISC_MEMSET((void *)pNotifiers->sd[sd].ptr, 0, size);
938     }
939 
940     return TRUE;
941 }
942 
943 static void UnmapNotifiers(NVHsDeviceEvoRec *pHsDevice)
944 {
945     NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
946     NVHsNotifiersRec *pNotifiers = &pHsDevice->notifiers;
947     NvU32 sd;
948 
949     if (pNotifiers->rmHandle == 0) {
950         return;
951     }
952 
953     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
954 
955         if (pNotifiers->sd[sd].ptr == NULL) {
956             continue;
957         }
958 
959         nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
960                            pDevEvo->pSubDevices[sd]->handle,
961                            pNotifiers->rmHandle,
962                            pNotifiers->sd[sd].ptr,
963                            0);
964 
965         pNotifiers->sd[sd].ptr = NULL;
966     }
967 }
968 
969 static NvBool RegisterNotifiersWithNvKms(NVHsDeviceEvoRec *pHsDevice)
970 {
971     struct NvKmsRegisterSurfaceParams params = { };
972     NVHsNotifiersRec *pNotifiers = &pHsDevice->notifiers;
973     NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
974     const NvBool requireCtxDma = TRUE;
975 
976     params.request.useFd       = FALSE;
977     params.request.rmClient    = nvEvoGlobal.clientHandle;
978 
979     params.request.layout      = NvKmsSurfaceMemoryLayoutPitch;
980     params.request.format      = NvKmsSurfaceMemoryFormatI8;
981 
982     params.request.isoType = NVKMS_MEMORY_NISO;
983 
984     params.request.planes[0].u.rmObject = pNotifiers->rmHandle;
985     params.request.planes[0].pitch = NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES;
986     params.request.planes[0].rmObjectSizeInBytes =
987         NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES;
988 
989     nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, &params,
990                          NvHsMapPermissionsReadWrite);
991 
992     pHsDevice->notifiers.nvKmsHandle = params.reply.surfaceHandle;
993 
994     if (pHsDevice->notifiers.nvKmsHandle == 0) {
995         return FALSE;
996     }
997 
998     pHsDevice->notifiers.pSurfaceEvo =
999         nvHsGetNvKmsSurface(pDevEvo,
1000                             pHsDevice->notifiers.nvKmsHandle,
1001                             requireCtxDma);
1002 
1003     return (pHsDevice->notifiers.pSurfaceEvo != NULL);
1004 }
1005 
1006 static void AssignNIsoFormat(NVHsDeviceEvoRec *pHsDevice)
1007 {
1008     const NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
1009 
1010     if (pDevEvo->caps.validNIsoFormatMask &
1011         NVBIT(NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) {
1012         /* If available, use the "nvdisplay" format. */
1013         pHsDevice->notifiers.nIsoFormat = NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY;
1014     } else {
1015         /* Otherwise, use the "legacy" format. */
1016         nvAssert((pDevEvo->caps.validNIsoFormatMask &
1017                   NVBIT(NVKMS_NISO_FORMAT_LEGACY)) != 0);
1018         pHsDevice->notifiers.nIsoFormat = NVKMS_NISO_FORMAT_LEGACY;
1019     }
1020 }
1021 
1022 static NvBool AllocNotifiers(NVHsDeviceEvoRec *pHsDevice)
1023 {
1024     NvU32 ret;
1025     NVDevEvoRec *pDevEvo;
1026 
1027     pDevEvo = pHsDevice->pDevEvo;
1028 
1029     pHsDevice->notifiers.rmHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
1030 
1031     if (pHsDevice->notifiers.rmHandle == 0) {
1032         goto fail;
1033     }
1034 
1035     ret = AllocNotifierMemory(pHsDevice->pDevEvo, pHsDevice->notifiers.rmHandle);
1036 
1037     if (ret != NVOS_STATUS_SUCCESS) {
1038         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
1039                            pHsDevice->notifiers.rmHandle);
1040         pHsDevice->notifiers.rmHandle = 0;
1041 
1042         goto fail;
1043     }
1044 
1045     if (!MapNotifiers(pHsDevice)) {
1046         goto fail;
1047     }
1048 
1049     if (!RegisterNotifiersWithNvKms(pHsDevice)) {
1050         goto fail;
1051     }
1052 
1053     AssignNIsoFormat(pHsDevice);
1054 
1055     return TRUE;
1056 
1057 fail:
1058     FreeNotifiers(pHsDevice);
1059 
1060     return FALSE;
1061 }
1062 
1063 static void FreeNotifiers(NVHsDeviceEvoRec *pHsDevice)
1064 {
1065     NVDevEvoRec *pDevEvo;
1066     NVHsNotifiersRec *pNotifiers;
1067 
1068     if (pHsDevice == NULL) {
1069         return;
1070     }
1071 
1072     pDevEvo = pHsDevice->pDevEvo;
1073     pNotifiers = &pHsDevice->notifiers;
1074 
1075     if (pNotifiers->nvKmsHandle != 0) {
1076         nvEvoUnregisterSurface(pDevEvo,
1077                                pDevEvo->pNvKmsOpenDev,
1078                                pNotifiers->nvKmsHandle,
1079                                FALSE /* skipUpdate */);
1080         pNotifiers->pSurfaceEvo = NULL;
1081     }
1082 
1083     UnmapNotifiers(pHsDevice);
1084 
1085     if (pHsDevice->notifiers.rmHandle != 0) {
1086         nvRmApiFree(nvEvoGlobal.clientHandle,
1087                     pDevEvo->deviceHandle,
1088                     pHsDevice->notifiers.rmHandle);
1089 
1090         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
1091                            pHsDevice->notifiers.rmHandle);
1092         pHsDevice->notifiers.rmHandle = 0;
1093     }
1094 }
1095 
1096 /*!
1097  * Reset headSurface notifiers for this channel to NOT_BEGUN.
1098  *
1099  * By the time the modeset completes to transition into a new headSurface
1100  * configuration, all headSurface flips from the previous completion should be
1101  * completed.  But, that would leave at least one notifier set to FINISHED.
1102  *
1103  * Initialize all notifiers for this channel to NOT_BEGUN, so that
1104  * HsVBlankCallbackDeferredWork() does not interpret notifier state from the
1105  * previous headSurface configuration as applying to the new headSurface
1106  * configuration.
1107  */
1108 static void HsInitNotifiers(
1109     NVHsDeviceEvoRec *pHsDevice,
1110     NVHsChannelEvoRec *pHsChannel)
1111 {
1112     const NvU32 apiHead = pHsChannel->apiHead;
1113     const NvU32 sd = pHsChannel->pDispEvo->displayOwner;
1114     NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
1115     NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
1116     NvU8 slot, buffer;
1117 
1118     for (slot = 0; slot < NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD; slot++) {
1119         nvKmsResetNotifier(pHsNotifiers->nIsoFormat,
1120                            FALSE /* overlay */,
1121                            slot,
1122                            pHsNotifiersOneSd->notifier[apiHead]);
1123     }
1124 
1125     for (buffer = 0; buffer < NVKMS_HEAD_SURFACE_MAX_BUFFERS; buffer++) {
1126         nvKmsResetSemaphore(pHsNotifiers->nIsoFormat,
1127                             buffer, pHsNotifiersOneSd->semaphore[apiHead],
1128                             NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_RENDERABLE);
1129     }
1130 }
1131 
1132 void nvHsInitNotifiers(
1133     NVHsDeviceEvoRec *pHsDevice,
1134     NVHsChannelEvoRec *pHsChannel)
1135 {
1136     if (pHsChannel->config.neededForSwapGroup) {
1137         /*
1138          * XXX NVKMS HEADSURFACE TODO: initialize tracking for ViewPortIn
1139          * flips.
1140          */
1141     } else {
1142         HsInitNotifiers(pHsDevice, pHsChannel);
1143     }
1144 }
1145 
1146 /*!
1147  * For the given head and sd, prepare the next notifier:
1148  *
1149  * - Look up the next notifier to use.
1150  * - Clear that notifier to STATUS_NOT_BEGUN.
1151  * - Update the slot bookkeeping for the (head,sd) pair.
1152  * - Return the dword offset of the notifier.
1153  */
1154 static NvU16 PrepareNextNotifier(
1155     NVHsNotifiersRec *pHsNotifiers,
1156     NvU32 sd,
1157     NvU32 apiHead)
1158 {
1159     const NvU32 notifierSize =
1160         nvKmsSizeOfNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */);
1161 
1162     const NvU8 nextSlot = pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot;
1163 
1164     NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
1165 
1166     const NvU8 *headBase = pHsNotifiersOneSd->notifier[apiHead];
1167 
1168     const NvU8 offsetInBytes =
1169         (headBase - ((const NvU8 *) pHsNotifiersOneSd)) +
1170         (notifierSize * nextSlot);
1171 
1172     nvAssert(notifierSize <= NVKMS_HEAD_SURFACE_MAX_NOTIFIER_SIZE);
1173 
1174     nvKmsResetNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */,
1175                        nextSlot, pHsNotifiersOneSd->notifier[apiHead]);
1176 
1177     pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot =
1178         (nextSlot + 1) % NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD;
1179 
1180     return offsetInBytes / 4;
1181 }
1182 
1183 /*!
1184  * Helper function for nvHsFlip(); populate NvKmsFlipRequest and call
1185  * nvFlipEvo().
1186  *
1187  * \param[in,out]  pHsDevice         The headSurface device.
1188  * \param[in,out]  pHsChannel        The headSurface channel.
1189  * \param[in]      perEyeStereoFlip  Whether to flip per-eye.
1190  * \param[in]      surfaceHandles    The surfaces to flip to.
1191  * \param[in]      isFirstFlip       Whether this is the first flip after
1192  *                                   enabling headsurface.
1193  * \param[in]      allowFlipLock     Whether to allow fliplock for this flip.
1194  */
1195 static void HsFlipHelper(
1196     NVHsDeviceEvoRec *pHsDevice,
1197     NVHsChannelEvoRec *pHsChannel,
1198     const NvBool perEyeStereoFlip,
1199     const NvKmsSurfaceHandle surfaceHandles[NVKMS_MAX_EYES],
1200     const NvBool isFirstFlip,
1201     const NvBool allowFlipLock)
1202 {
1203     NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
1204     struct NvKmsFlipCommonParams *pParamsOneHead;
1205     NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
1206     const NvU32 sd = pHsChannel->pDispEvo->displayOwner;
1207     const NvU32 apiHead = pHsChannel->apiHead;
1208     NvBool ret;
1209 
1210     /*
1211      * Use preallocated memory, so that we don't have to allocate
1212      * memory here (and deal with allocation failure).
1213      */
1214     struct NvKmsFlipRequestOneHead *pFlipHead = &pHsChannel->scratchParams;
1215 
1216     nvkms_memset(pFlipHead, 0, sizeof(*pFlipHead));
1217 
1218     pFlipHead->sd = sd;
1219     pFlipHead->head = apiHead;
1220     pParamsOneHead = &pFlipHead->flip;
1221 
1222     if (isFirstFlip) {
1223         /*
1224          * For the first flip after enabling headsurface
1225          * (NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME), the old viewport
1226          * (saved in HsConfigInitSwapGroupOneHead or HsConfigInitModesetOneHead
1227          * and restored in HsConfigRestoreMainLayerSurface) which may specify an
1228          * offset within a multi-head surface needs to be overridden to the
1229          * origin for the per-head headsurface surfaces.
1230          */
1231         pParamsOneHead->viewPortIn.specified = TRUE;
1232         pParamsOneHead->viewPortIn.point.x = 0;
1233         pParamsOneHead->viewPortIn.point.y = 0;
1234 
1235         pParamsOneHead->cursor.imageSpecified = TRUE;
1236 
1237         pParamsOneHead->cursor.positionSpecified = TRUE;
1238     }
1239 
1240     pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.handle[NVKMS_LEFT] =
1241         surfaceHandles[NVKMS_LEFT];
1242     pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.handle[NVKMS_RIGHT] =
1243         surfaceHandles[NVKMS_RIGHT];
1244     pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.specified = TRUE;
1245     pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.useSyncpt = FALSE;
1246     pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE;
1247     pParamsOneHead->layer[NVKMS_MAIN_LAYER].tearing = FALSE;
1248     pParamsOneHead->layer[NVKMS_MAIN_LAYER].perEyeStereoFlip = perEyeStereoFlip;
1249     pParamsOneHead->layer[NVKMS_MAIN_LAYER].minPresentInterval = 1;
1250     pParamsOneHead->layer[NVKMS_MAIN_LAYER].csc.specified = TRUE;
1251 
1252     /*
1253      * XXX NVKMS HEADSURFACE TODO: Work out in which cases we should use the
1254      * head's current CSC.
1255      */
1256     pParamsOneHead->layer[NVKMS_MAIN_LAYER].csc.matrix = NVKMS_IDENTITY_CSC_MATRIX;
1257 
1258     pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = TRUE;
1259 
1260     if (surfaceHandles[NVKMS_LEFT] != 0) {
1261         NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
1262             nvGetSurfaceHandlesFromOpenDev(pDevEvo->pNvKmsOpenDev);
1263         NVSurfaceEvoPtr pSurfaceEvo =
1264             nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, surfaceHandles[NVKMS_LEFT]);
1265         struct NvKmsSemaphore *pSema;
1266 
1267         pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.val.surface.surfaceHandle =
1268             pHsNotifiers->nvKmsHandle;
1269         pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.val.surface.format =
1270             pHsNotifiers->nIsoFormat;
1271         pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.val.surface.offsetInWords =
1272             PrepareNextNotifier(pHsNotifiers, sd, apiHead);
1273 
1274         pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.useSyncpt = FALSE;
1275         pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE;
1276 
1277         pSema = &pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.acquire;
1278         pSema->surface.surfaceHandle = pHsNotifiers->nvKmsHandle;
1279         pSema->surface.format = pHsNotifiers->nIsoFormat;
1280         pSema->surface.offsetInWords =
1281             HsGetFrameSemaphoreOffsetInWords(pHsChannel);
1282         pSema->value = NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_DISPLAYABLE;
1283 
1284         pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.release =
1285             pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.acquire;
1286 
1287         pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.release.value =
1288             NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_RENDERABLE;
1289 
1290         pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE;
1291         pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val.width =
1292             pSurfaceEvo->widthInPixels;
1293         pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val.height =
1294             pSurfaceEvo->heightInPixels;
1295 
1296         pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE;
1297         pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeOut.val =
1298             pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val;
1299     }
1300 
1301     ret = nvFlipEvo(pDevEvo,
1302                     pDevEvo->pNvKmsOpenDev,
1303                     pFlipHead,
1304                     1     /* numFlipHeads */,
1305                     TRUE  /* commit */,
1306                     FALSE /* allowVrr */,
1307                     NULL  /* pReply */,
1308                     FALSE /* skipUpdate */,
1309                     allowFlipLock);
1310 
1311     if (!ret) {
1312         nvAssert(!"headSurface flip failed?");
1313     }
1314 }
1315 
1316 /*!
1317  * Flip to the headSurface buffer specified by index.
1318  *
1319  * If pHsOneHeadAllDisps == NULL, disable headSurface by flipping to NULL.
1320  *
1321  * \param[in,out]  pHsDevice           The headSurface device.
1322  * \param[in,out]  pHsChannel          The headSurface channel.
1323  * \param[in]      eyeMask             The mask of which eyes to flip.
1324  * \param[in]      perEyeStereoFlip    Whether to flip per-eye.
1325  * \param[in]      index               Which buffer to flip to.
1326  * \param[in]      pHsOneHeadAllDisps  The headSurface config.
1327  * \param[in]      isFirstFlip         Whether this is the first flip after
1328  *                                     enabling headsurface.
1329  * \param[in]      allowFlipLock       Whether to allow fliplock for this flip.
1330  */
1331 void nvHsFlip(
1332     NVHsDeviceEvoRec *pHsDevice,
1333     NVHsChannelEvoRec *pHsChannel,
1334     const NvU8 eyeMask,
1335     const NvBool perEyeStereoFlip,
1336     const NvU8 index,
1337     const NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps,
1338     const NvBool isFirstFlip,
1339     const NvBool allowFlipLock)
1340 {
1341     NvKmsSurfaceHandle surfaceHandles[NVKMS_MAX_EYES] = { 0, 0 };
1342     const NvBool enable = (pHsOneHeadAllDisps != NULL);
1343 
1344     if (enable) {
1345         NvU8 eye;
1346 
1347         for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
1348 
1349             const NVHsSurfaceRec *pHsSurface =
1350                 pHsOneHeadAllDisps->surfaces[eye][index].pSurface;
1351 
1352             if ((eyeMask & NVBIT(eye)) == 0) {
1353                 continue;
1354             }
1355 
1356             nvAssert(pHsSurface != NULL);
1357 
1358             surfaceHandles[eye] = pHsSurface->nvKmsHandle;
1359             nvAssert(surfaceHandles[eye] != 0);
1360         }
1361     }
1362 
1363     HsFlipHelper(pHsDevice,
1364                  pHsChannel,
1365                  perEyeStereoFlip,
1366                  surfaceHandles,
1367                  isFirstFlip,
1368                  allowFlipLock);
1369 
1370     if (!enable) {
1371         /* XXX NVKMS HEADSURFACE TODO: disable stereo toggling, if necessary. */
1372     }
1373 }
1374 
1375 /*!
1376  * "Flip" using the core channel's ViewPortIn.
1377  */
1378 static void HsFlipViewPortIn(NVHsChannelEvoPtr pHsChannel, NvU16 x, NvU16 y)
1379 {
1380     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
1381 
1382     /*
1383      * XXX NVKMS HEADSURFACE TODO: use the panning NVKMS API request, rather
1384      * than call the low-level SetViewportPointIn() HAL proc.  But, to do that,
1385      * we would need to make the pan request much lighter weight, so that it is
1386      * usable for our needs here.
1387      */
1388     nvApiHeadSetViewportPointIn(pDispEvo, pHsChannel->apiHead, x, y);
1389 
1390     /*
1391      * XXX NVKMS HEADSURFACE TODO: Add tracking so that IsPreviousFrameDone()
1392      * can know if this update latched.
1393      */
1394 }
1395 
1396 static void HsPickSrcEyeAndPixelShift(
1397     const NVHsChannelEvoRec *pHsChannel,
1398     const NvU8 dstEye,
1399     NvU8 *pSrcEye,
1400     enum NvKmsPixelShiftMode *pPixelShift)
1401 {
1402     if (pHsChannel->config.pixelShift == NVKMS_PIXEL_SHIFT_8K) {
1403 
1404         if (dstEye == NVKMS_LEFT) {
1405             *pSrcEye = NVKMS_LEFT;
1406             *pPixelShift = NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT;
1407         }
1408 
1409         if (dstEye == NVKMS_RIGHT) {
1410             *pSrcEye = NVKMS_LEFT;
1411             *pPixelShift = NVKMS_PIXEL_SHIFT_4K_TOP_LEFT;
1412         }
1413     } else {
1414         *pSrcEye = dstEye;
1415         *pPixelShift = pHsChannel->config.pixelShift;
1416     }
1417 }
1418 
1419 /*!
1420  * Structure to drive the behavior of nvHsNextFrame().
1421  */
1422 struct NvHsNextFrameWorkArea {
1423 
1424     /*
1425      * The range of surface indices to render to.  Indices here are used as the
1426      * 'index' in NVHsStateOneHeadAllDisps::surfaces[eye][index]::pSurface.
1427      */
1428     NvU8 dstBufferIndexStart;
1429     NvU8 dstBufferIndexEnd;
1430 
1431     /* Whether to flip to the surface indicated by pHsChannel->nextIndex. */
1432     NvBool doFlipToNextIndex;
1433 
1434     /* Whether to allow fliplock on the flip to the next surface. */
1435     NvBool allowFlipLock;
1436 
1437     /* Whether to flip to the destRect region of the surface.*/
1438     NvBool doFlipToDestRect;
1439 
1440     /* Whether to increment nextIndex and/or nextOffset. */
1441     NvBool doIncrementNextIndex;
1442     NvBool doIncrementNextOffset;
1443 
1444     /*
1445      * On which dstBuffer indices to honor the SwapGroup's exclusive
1446      * clip list.
1447      */
1448     NvU8 honorSwapGroupClipListBufferMask;
1449 
1450     /* The region within the surface to render into.  */
1451     struct NvKmsRect destRect;
1452 
1453     /*
1454      * If perEyeStereo::override == TRUE, use perEyeStereo::value to control the
1455      * headSurface flip.
1456      */
1457     struct {
1458         NvBool override;
1459         NvBool value;
1460     } perEyeStereo;
1461 };
1462 
1463 /*!
1464  * Assign an NvHsNextFrameWorkArea structure, to drive execution of
1465  * nvHsNextFrame().
1466  */
1467 static struct NvHsNextFrameWorkArea HsAssignNextFrameWorkArea(
1468     const NVHsChannelEvoRec *pHsChannel,
1469     const NvHsNextFrameRequestType requestType)
1470 {
1471     struct NvHsNextFrameWorkArea workArea = { };
1472     NvU8 destOffset;
1473 
1474     if ((requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME) ||
1475         (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK)) {
1476 
1477         /*
1478          * The swapgroup first frame renders and flips both core and base to
1479          * the back index double height headsurface swapgroup surface, just
1480          * like a non-swapgroup headsurface flip.
1481          */
1482         if (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME ||
1483             !pHsChannel->config.neededForSwapGroup) {
1484 
1485             /*
1486              * In the non-SwapGroup case, headSurface should:
1487              * - only render to the 'nextIndex' surface,
1488              * - flip to the nextIndex surface,
1489              * - increment nextIndex.
1490              */
1491             workArea.dstBufferIndexStart   = pHsChannel->nextIndex;
1492             workArea.dstBufferIndexEnd     = pHsChannel->nextIndex;
1493 
1494             workArea.doFlipToNextIndex     = TRUE;
1495             workArea.allowFlipLock         = FALSE;
1496             workArea.doFlipToDestRect      = FALSE;
1497 
1498             workArea.doIncrementNextIndex  = TRUE;
1499             workArea.doIncrementNextOffset = FALSE;
1500 
1501         } else {
1502 
1503             /*
1504              * In the SwapGroup case, headSurface should:
1505              * - render to both surfaces,
1506              * - flip to the nextOffset,
1507              * - increment nextOffset.
1508              */
1509             workArea.dstBufferIndexStart   = 0;
1510             workArea.dstBufferIndexEnd     = NVKMS_HEAD_SURFACE_MAX_BUFFERS - 1;
1511 
1512             workArea.doFlipToNextIndex     = FALSE;
1513 
1514             workArea.allowFlipLock         = FALSE;
1515             workArea.doFlipToDestRect      = TRUE;
1516 
1517             workArea.doIncrementNextIndex  = FALSE;
1518             workArea.doIncrementNextOffset = TRUE;
1519 
1520             /*
1521              * For VBLANK-initiated frames of SwapGroup headSurface, we want the
1522              * surface indicated by pHsChannel->nextIndex to contain the new
1523              * SwapGroup content, and the non-nextIndex surface to contain the
1524              * old SwapGroup content.
1525              *
1526              * Therefore, set the non-nextIndex bit(s) in
1527              * honorSwapGroupClipListBufferMask, so that we leave the old
1528              * SwapGroup content in that case.  In all other cases, we will get
1529              * the new SwapGroup content.
1530              */
1531             workArea.honorSwapGroupClipListBufferMask =
1532                 ~NVBIT(pHsChannel->nextIndex);
1533         }
1534 
1535     } else {
1536         /*
1537          * SWAP_GROUP_READY-initiated headSurface frames are special: we render
1538          * a new frame to the nextIndex surface, using the previous destRect
1539          * (i.e., the location that ViewPortIn will use at the next vblank).
1540          * However, the flip may take indefinitely long to arrive: it will wait
1541          * for the rest of the SwapBarrier.  That is okay, because
1542          * nvHsNextFrame(VBLANK) calls between now and the flip actually
1543          * occurring will keep updating both surfaces, using ViewPortIn to
1544          * "flip" to the new content.
1545          *
1546          * Therefore, we do _not_ increment nextIndex here.  Instead, we update
1547          * nextIndex when we find that the flip completed.  Until then, we keep
1548          * nextIndex the same, so that nvHsNextFrame(VBLANK) frames know which
1549          * surface should receive the new SwapGroup content.
1550          */
1551 
1552         const NVSwapGroupRec *pSwapGroup =
1553             pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead];
1554 
1555         nvAssert(requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY);
1556         nvAssert(pHsChannel->config.neededForSwapGroup);
1557 
1558         workArea.dstBufferIndexStart   = pHsChannel->nextIndex;
1559         workArea.dstBufferIndexEnd     = pHsChannel->nextIndex;
1560 
1561         workArea.doFlipToNextIndex     = TRUE;
1562         workArea.allowFlipLock         = TRUE;
1563         workArea.doFlipToDestRect      = FALSE;
1564 
1565         workArea.doIncrementNextIndex  = FALSE;
1566         workArea.doIncrementNextOffset = FALSE;
1567 
1568         workArea.perEyeStereo.override = TRUE;
1569         workArea.perEyeStereo.value    =
1570             nvHsSwapGroupGetPerEyeStereo(pSwapGroup);
1571     }
1572 
1573     /*
1574      * Pick the rect within the destination surface that headSurface should
1575      * render into.
1576      *
1577      * For normal (!neededForSwapGroup) use, this should be simply:
1578      *   { 0, 0,                frameSize.width, frameSize.height }
1579      * When SwapGroups are enabled, the headSurface is allocated at
1580      * double height and we alternate between
1581      *   { 0, 0,                frameSize.width, frameSize.height }
1582      *   { 0, frameSize.height, frameSize.width, frameSize.height }
1583      * And use ViewPortIn to flip to the updated half.
1584      *
1585      * The 'nextOffset' field tracks which half headSurface should use for the
1586      * next frame.
1587      *
1588      * The exception to the above is SWAP_GROUP_READY: in that case, we will
1589      * flip between surfaces, but not change ViewPortIn, so we want to use the
1590      * _previous_ nextOffset value.
1591      */
1592     if (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY) {
1593         destOffset = HsGetPreviousOffset(pHsChannel);
1594     } else {
1595         destOffset = pHsChannel->nextOffset;
1596     }
1597 
1598     workArea.destRect.x      = 0;
1599     workArea.destRect.y      = pHsChannel->config.frameSize.height *
1600                                destOffset;
1601     workArea.destRect.width  = pHsChannel->config.frameSize.width;
1602     workArea.destRect.height = pHsChannel->config.frameSize.height;
1603 
1604     return workArea;
1605 }
1606 
1607 /*!
1608  * Produce the next headSurface frame.
1609  *
1610  * Render the frame, flip to it, and update next{Index,Offset} bookkeeping
1611  * as necessary.
1612  *
1613  * \param[in,out]  pHsDevice   The device to render on.
1614  * \param[in,out]  pHsChannel  The channel to use for rendering.
1615  * \param[in]      requestType This indicates the type of frame behavior
1616  *                             desired by the caller: when FIRST_FRAME, we need
1617  *                             to populate the surface in the core channel on
1618  *                             pre-NVDisplay.
1619  */
1620 void nvHsNextFrame(
1621     NVHsDeviceEvoPtr pHsDevice,
1622     NVHsChannelEvoPtr pHsChannel,
1623     const NvHsNextFrameRequestType requestType)
1624 {
1625     const NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
1626     const NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps =
1627         &pDevEvo->apiHeadSurfaceAllDisps[pHsChannel->apiHead];
1628     NvBool perEyeStereoFlip = FALSE;
1629     NvU8 dstEye;
1630     NvU8 eyeMask = 0;
1631 
1632     struct NvHsNextFrameWorkArea workArea =
1633         HsAssignNextFrameWorkArea(pHsChannel, requestType);
1634 
1635     HsUpdateFlipQueueCurrent(pHsChannel);
1636 
1637     for (dstEye = NVKMS_LEFT; dstEye < NVKMS_MAX_EYES; dstEye++) {
1638 
1639         const NVSurfaceEvoRec *pSurfaceEvo[NVKMS_MAX_LAYERS_PER_HEAD];
1640         NvBool surfacesPresent = FALSE;
1641         NvU8 layer, srcEye = dstEye;
1642         NvU8 dstBufferIndex;
1643         enum NvKmsPixelShiftMode pixelShift = pHsChannel->config.pixelShift;
1644         NvBool ret;
1645 
1646         HsPickSrcEyeAndPixelShift(pHsChannel, dstEye, &srcEye, &pixelShift);
1647 
1648         for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
1649             pSurfaceEvo[layer] =
1650                 pHsChannel->flipQueue[layer].current.pSurfaceEvo[srcEye];
1651 
1652             surfacesPresent = surfacesPresent || (pSurfaceEvo[layer] != NULL);
1653 
1654             perEyeStereoFlip = perEyeStereoFlip ||
1655                 pHsChannel->flipQueue[layer].current.perEyeStereoFlip;
1656         }
1657 
1658         /*
1659          * If there are no surfaces present for this srcEye, and the dstEye is
1660          * not LEFT, don't render it.
1661          *
1662          * This condition is limited to LEFT because:
1663          * - We need to perform _a_ flip even if no source surface is provided.
1664          * - We don't want to perform more rendering than absolutely
1665          *   unnecessarily.
1666          */
1667         if (!surfacesPresent && (dstEye != NVKMS_LEFT)) {
1668             continue;
1669         }
1670 
1671         for (dstBufferIndex = workArea.dstBufferIndexStart;
1672              dstBufferIndex <= workArea.dstBufferIndexEnd;
1673              dstBufferIndex++) {
1674 
1675             NvU8 thisEyeMask = 0;
1676             const NvBool honorSwapGroupClipList =
1677                 !!(workArea.honorSwapGroupClipListBufferMask &
1678                    NVBIT(dstBufferIndex));
1679 
1680             ret = nvHs3dRenderFrame(pHsChannel,
1681                                     requestType,
1682                                     honorSwapGroupClipList,
1683                                     dstEye,
1684                                     dstBufferIndex,
1685                                     pixelShift,
1686                                     workArea.destRect,
1687                                     pSurfaceEvo);
1688             /*
1689              * Record which eyes we've rendered, so that we only flip those
1690              * eyes.
1691              *
1692              * In the case that we're looping over multiple buffer indices, we
1693              * should get the same result across buffers.
1694              */
1695             if (ret) {
1696                 thisEyeMask = NVBIT(dstEye);
1697             }
1698 
1699             if (dstBufferIndex != workArea.dstBufferIndexStart) {
1700                 nvAssert((eyeMask & NVBIT(dstEye)) ==
1701                          (thisEyeMask & NVBIT(dstEye)));
1702             }
1703 
1704             eyeMask |= thisEyeMask;
1705         }
1706     }
1707 
1708     if (workArea.doFlipToNextIndex) {
1709 
1710         if (workArea.perEyeStereo.override) {
1711             perEyeStereoFlip = workArea.perEyeStereo.value;
1712         }
1713 
1714         nvHs3dReleaseSemaphore(
1715             pHsChannel,
1716             pHsDevice->notifiers.pSurfaceEvo,
1717             pHsDevice->notifiers.nIsoFormat,
1718             HsGetFrameSemaphoreOffsetInWords(pHsChannel),
1719             NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_DISPLAYABLE,
1720             FALSE /* allPreceedingReads */);
1721 
1722         nvHsFlip(
1723             pHsDevice,
1724             pHsChannel,
1725             eyeMask,
1726             perEyeStereoFlip,
1727             pHsChannel->nextIndex,
1728             pHsOneHeadAllDisps,
1729             requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME,
1730             workArea.allowFlipLock);
1731         HsIncrementFrameSemaphoreIndex(pHsChannel);
1732 
1733         // Record fullscreen/non-fullscreen swapgroup flip counts
1734         const NVSwapGroupRec *pSwapGroup =
1735             pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead];
1736 
1737         if (pSwapGroup) {
1738             HsProcFsRecordFullscreenSgFrames(pHsChannel,
1739                                              pSwapGroup->swapGroupIsFullscreen);
1740         }
1741 
1742         // Record the time of the last flip originating from client update
1743         if (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY) {
1744             pHsChannel->lastHsClientFlipTimeUs = nvkms_get_usec();
1745         }
1746     }
1747 
1748     if (workArea.doFlipToDestRect) {
1749         // Viewport fake flips are only used in swapgroup configurations.
1750         nvAssert(pHsChannel->config.neededForSwapGroup);
1751 
1752         if (pHsChannel->usingRgIntrForSwapGroups) {
1753             nvHs3dPushPendingViewportFlip(pHsChannel);
1754         } else {
1755             HsFlipViewPortIn(pHsChannel,
1756                              workArea.destRect.x, workArea.destRect.y);
1757         }
1758     }
1759 
1760     if (workArea.doIncrementNextIndex) {
1761         HsIncrementNextIndex(pHsDevice, pHsChannel);
1762     }
1763 
1764     if (workArea.doIncrementNextOffset) {
1765         HsIncrementNextOffset(pHsDevice, pHsChannel);
1766     }
1767 }
1768 
1769 /*!
1770  * In response to a non-stall interrupt, check if a headsurface channel has
1771  * completed a frame of non-swapgroup headsurface rendering and kick off a
1772  * viewport flip to the offset that was used for that rendering.
1773  */
1774 void nvHsProcessPendingViewportFlips(NVDevEvoPtr pDevEvo)
1775 {
1776     NVDispEvoPtr pDispEvo;
1777     NvU32 dispIndex;
1778 
1779     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1780         NvU32 apiHead;
1781         for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) {
1782             NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead];
1783             NvU32 lastRenderedOffset;
1784 
1785             if (pHsChannel == NULL) {
1786                 continue;
1787             }
1788 
1789             lastRenderedOffset = nvHs3dLastRenderedOffset(pHsChannel);
1790 
1791             /*
1792              * If this channel is marked as having kicked off a frame of
1793              * rendering, and the semaphore write of the render offset to
1794              * NVKMS_HEADSURFACE_VIEWPORT_OFFSET_SEMAPHORE_INDEX has completed,
1795              * then this channel is ready to make a viewport flip to that
1796              * offset.
1797              */
1798             if (pHsChannel->viewportFlipPending &&
1799                 (lastRenderedOffset == HsGetPreviousOffset(pHsChannel))) {
1800 
1801                 HsFlipViewPortIn(pHsChannel, 0 /* x */,
1802                                  lastRenderedOffset *
1803                                  pHsChannel->config.frameSize.height);
1804                 pHsChannel->viewportFlipPending = FALSE;
1805             }
1806         }
1807     }
1808 }
1809 
1810 /*!
1811  * Record the current scanline, for procfs statistics reporting.
1812  */
1813 static void HsProcFsRecordScanline(
1814     const NVDispEvoRec *pDispEvo,
1815     const NvU32 apiHead)
1816 {
1817 #if NVKMS_PROCFS_ENABLE
1818     NVHsChannelEvoRec *pHsChannel = pDispEvo->pHsChannel[apiHead];
1819     NvU16 scanLine = 0;
1820     NvBool inBlankingPeriod = FALSE;
1821 
1822     if (pHsChannel->statistics.scanLine.pHistogram == NULL) {
1823         return;
1824     }
1825 
1826     nvApiHeadGetScanLine(pDispEvo, apiHead, &scanLine, &inBlankingPeriod);
1827 
1828     if (inBlankingPeriod) {
1829         pHsChannel->statistics.scanLine.nInBlankingPeriod++;
1830     } else {
1831         pHsChannel->statistics.scanLine.nNotInBlankingPeriod++;
1832 
1833         if (scanLine <= pHsChannel->statistics.scanLine.vVisible) {
1834             pHsChannel->statistics.scanLine.pHistogram[scanLine]++;
1835         } else {
1836             nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
1837                 "HsProcFsRecordScanline(): scanLine (%d) > vVisible (%d)",
1838                 scanLine, pHsChannel->statistics.scanLine.vVisible);
1839         }
1840     }
1841 #endif /* NVKMS_PROCFS_ENABLE */
1842 }
1843 
1844 static void HsProcFsRecordPreviousFrameNotDone(
1845     NVHsChannelEvoPtr pHsChannel)
1846 {
1847 #if NVKMS_PROCFS_ENABLE
1848     pHsChannel->statistics.nPreviousFrameNotDone++;
1849 #endif
1850 }
1851 
1852 static void HsProcFsRecordFullscreenSgFrames(
1853     NVHsChannelEvoPtr pHsChannel,
1854     NvBool isFullscreen)
1855 {
1856 #if NVKMS_PROCFS_ENABLE
1857     if (isFullscreen) {
1858         pHsChannel->statistics.nFullscreenSgFrames++;
1859     } else {
1860         pHsChannel->statistics.nNonFullscreenSgFrames++;
1861     }
1862 #endif /* NVKMS_PROCFS_ENABLE */
1863 }
1864 
1865 static void HsProcFsRecordOmittedNonSgHsUpdate(
1866     NVHsChannelEvoPtr pHsChannel)
1867 {
1868 #if NVKMS_PROCFS_ENABLE
1869     pHsChannel->statistics.nOmittedNonSgHsUpdates++;
1870 #endif
1871 }
1872 
1873 /*!
1874  * Determine if we've flipped to the previous frame.
1875  *
1876  * When we program the flip method, we reset the notifier to NOT_BEGUN, and when
1877  * EVO peforms the flip, it changes the notifier to BEGUN.
1878  *
1879  * Find the notifier slot for the previous frame, parse its notifier, and return
1880  * whether it is BEGUN.
1881  */
1882 static NvBool IsPreviousFlipDone(NVHsChannelEvoPtr pHsChannel)
1883 {
1884     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
1885     const NvU32 apiHead = pHsChannel->apiHead;
1886     const NvU32 sd = pDispEvo->displayOwner;
1887     const NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
1888     const NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
1889     const NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
1890     const NvU8 nextSlot = pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot;
1891     struct nvKmsParsedNotifier parsed = { };
1892 
1893     const NvU8 prevSlot =
1894         A_minus_b_with_wrap_U8(nextSlot, 1,
1895                                NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD);
1896 
1897     nvKmsParseNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */,
1898                        prevSlot, pHsNotifiersOneSd->notifier[apiHead], &parsed);
1899 
1900     return parsed.status == NVKMS_NOTIFIER_STATUS_BEGUN;
1901 }
1902 
1903 /*!
1904  * Determine if we've flipped to the previous frame.
1905  */
1906 static NvBool IsPreviousFrameDone(NVHsChannelEvoPtr pHsChannel)
1907 {
1908     if (pHsChannel->config.neededForSwapGroup) {
1909         /*
1910          * XXX NVKMS HEADSURFACE TODO: Somehow determine if the previous
1911          * ViewPortIn update for this head was latched.
1912          */
1913 
1914         /*
1915          * XXX NVKMS HEADSURFACE TODO: In the absence of a mechanism to
1916          * determine if ViewPortIn was latched, we would normally rely on this
1917          * callback arriving once per vblank.  Unfortunately, bug 2086726 can
1918          * cause us to get called twice per vblank.  WAR this for now by
1919          * ignoring callbacks that arrive in a very small window of the previous
1920          * callback.
1921          *
1922          * Throttling is now implemented using the RG line 1 interrupt
1923          * headsurface rendering mechanism, so this limit can be lowered once
1924          * the old vblank-triggered viewport flipping mechanism is removed.
1925          */
1926 
1927         const NvU64 oldUSec = pHsChannel->lastCallbackUSec;
1928         const NvU64 newUSec = nvkms_get_usec();
1929 
1930         /*
1931          * This threshold is somewhat arbitrary.  In bug 2086726, we see the
1932          * callback get called from both the ISR and the bottom half, which are
1933          * usually within ~200 usec of each other on an idle system.  There
1934          * shouldn't be a danger of mistaking legitimate periodic callbacks with
1935          * this small threshold: 500 usec per refresh would require a 2000 Hz
1936          * mode.
1937          */
1938         const NvU64 thresholdUSec = 500;
1939 
1940         nvAssert(!pHsChannel->usingRgIntrForSwapGroups);
1941 
1942         if ((newUSec > oldUSec) &&
1943             (newUSec - oldUSec) < thresholdUSec) {
1944             return FALSE;
1945         }
1946 
1947         pHsChannel->lastCallbackUSec = newUSec;
1948 
1949         return TRUE;
1950     } else {
1951         return IsPreviousFlipDone(pHsChannel);
1952     }
1953 }
1954 
1955 /*!
1956  * If the client provided a notifier surface with a real flip
1957  * request while swap groups were enabled, write to that
1958  * notifier with the BEGUN status and the most recent
1959  * headsurface notifier timestamp to emulate what the client
1960  * would observe if their notifier was used in hardware.
1961  */
1962 static void HsUpdateClientNotifier(NVHsChannelEvoPtr pHsChannel)
1963 {
1964     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
1965     const NvU32 apiHead = pHsChannel->apiHead;
1966     const NvU32 sd = pDispEvo->displayOwner;
1967     const NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
1968     const NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
1969     const NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
1970     const NvU8 nextSlot = pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot;
1971     struct nvKmsParsedNotifier parsed = { };
1972     NVFlipNIsoSurfaceEvoHwState *pClientNotifier =
1973         &pHsChannel->flipQueue[NVKMS_MAIN_LAYER].current.completionNotifier.surface;
1974 
1975     if (pClientNotifier->pSurfaceEvo == NULL) {
1976         return;
1977     }
1978 
1979     const NvU8 prevSlot =
1980         A_minus_b_with_wrap_U8(nextSlot, 1,
1981                                NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD);
1982 
1983     nvKmsParseNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */,
1984                        prevSlot, pHsNotifiersOneSd->notifier[apiHead], &parsed);
1985 
1986     nvAssert(parsed.status == NVKMS_NOTIFIER_STATUS_BEGUN);
1987 
1988     /*
1989      * XXX NVKMS HEADSURFACE TODO: Get valid timestamp through other means to
1990      * support this on platforms with legacy HW semaphores without valid
1991      * HW notifier timestamps in the main channel.
1992      */
1993     nvAssert(parsed.timeStampValid);
1994 
1995     nvKmsSetNotifier(pClientNotifier->format,
1996                      FALSE /* overlay */,
1997                      pClientNotifier->offsetInWords / 4,
1998                      pClientNotifier->pSurfaceEvo->cpuAddress[sd],
1999                      parsed.timeStamp);
2000 }
2001 
2002 /*!
2003  * Check if all flips completed for this SwapGroup.  If so, release the
2004  * SwapGroup.
2005  */
2006 static void HsCheckSwapGroupFlipDone(
2007     NVDevEvoPtr pDevEvo,
2008     NVSwapGroupRec *pSwapGroup)
2009 {
2010     const NVHsDeviceEvoRec *pHsDevice = pDevEvo->pHsDevice;
2011     NVDispEvoPtr pDispEvo;
2012     NvU32 dispIndex;
2013 
2014     nvAssert(pSwapGroup != NULL);
2015 
2016     if (!pSwapGroup->pendingFlip) {
2017         return;
2018     }
2019 
2020     /*
2021      * Check if all active heads in the SwapGroup have completed their flips.
2022      * If any haven't, return early.
2023      */
2024     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
2025         NvU32 apiHead;
2026         for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) {
2027 
2028             if (pDispEvo->pSwapGroup[apiHead] == pSwapGroup) {
2029 
2030                 NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead];
2031 
2032                 if (pHsChannel == NULL) {
2033                     continue;
2034                 }
2035 
2036                 nvAssert(pHsChannel->config.neededForSwapGroup);
2037 
2038                 if (!IsPreviousFlipDone(pHsChannel)) {
2039                     return;
2040                 }
2041             }
2042         }
2043     }
2044 
2045     /*
2046      * The SwapGroup is ready: update client notifiers if necessary and
2047      * increment nextIndex for all active heads, so that subsequent frames of
2048      * headSurface render to the next buffer.
2049      */
2050     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
2051         NvU32 apiHead;
2052         for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) {
2053 
2054             if (pDispEvo->pSwapGroup[apiHead] == pSwapGroup) {
2055 
2056                 NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead];
2057 
2058                 if (pHsChannel == NULL) {
2059                     continue;
2060                 }
2061 
2062                 nvAssert(pHsChannel->config.neededForSwapGroup);
2063                 nvAssert(IsPreviousFlipDone(pHsChannel));
2064 
2065                 HsUpdateClientNotifier(pHsChannel);
2066                 HsIncrementNextIndex(pHsDevice, pHsChannel);
2067             }
2068         }
2069     }
2070 
2071     /*
2072      * The SwapGroup is ready: release all SwapGroup members so that they can
2073      * proceed.
2074      */
2075     nvHsSwapGroupRelease(pDevEvo, pSwapGroup);
2076 }
2077 
2078 /*
2079  * Called from RG line interrupt handler to determine whether rendering a
2080  * new frame could be skipped.
2081  */
2082 static NvBool HsCanOmitNonSgHsUpdate(NVHsChannelEvoPtr pHsChannel)
2083 {
2084     const NVSwapGroupRec *pHeadSwapGroup =
2085         pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead];
2086 
2087     /*
2088      * When fullscreen swapgroup flipping, updating
2089      * non-swapgroup content at vblank is unnecessary and
2090      * dangerous, since it results in releasing client
2091      * semaphores before their contents have actually been
2092      * displayed.
2093      */
2094     if (pHsChannel->swapGroupFlipping) {
2095         return NV_TRUE;
2096     }
2097 
2098     /*
2099      * In the case of a fullscreen swapgroup, we can generally omit updating
2100      * the headsurface entirely upon vblank as long as the client is
2101      * actively rendering. All the swapgroup content has already been
2102      * updated to the headsurface backbuffer at the client's swapbuffers
2103      * time and there's no need to update the backbuffer again on RG line 1
2104      * or vblank interrupt time.
2105      *
2106      * There is one exception to this. If the client isn't rendering
2107      * actively then updates to the cursor (and possibly overlays, head
2108      * config) still require rendering an updated frame to the backbuffer.
2109      * Thus, we will simply limit this optimization for frames that come
2110      * within one frame time after the last recorded flip.
2111      */
2112     if (pHeadSwapGroup &&
2113         pHeadSwapGroup->swapGroupIsFullscreen) {
2114 
2115         NvU64 nowUs = nvkms_get_usec();
2116         NvU64 frameTimeUs = nvEvoFrametimeUsFromTimings(
2117             &pHsChannel->pDispEvo->apiHeadState[pHsChannel->apiHead].timings);
2118 
2119         if (nowUs - pHsChannel->lastHsClientFlipTimeUs < frameTimeUs) {
2120             return NV_TRUE;
2121         }
2122     }
2123 
2124     return NV_FALSE;
2125 }
2126 
2127 /*!
2128  * Receive RG line 1 callback, in process context with nvkms_lock held.
2129  */
2130 static void HsRgLine1CallbackProc(NVDispEvoRec *pDispEvo,
2131                                   const NvU32 head,
2132                                   NVRgLine1CallbackPtr pCallback)
2133 {
2134     const NvU32 apiHead =
2135         (NvU32)(NvUPtr)pCallback->pUserData;
2136     NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead];
2137 
2138     /*
2139      * The pHsChannel may have been torn down between when the callback was
2140      * generated and when this was called.  Ignore spurious callbacks.
2141      */
2142     if (pHsChannel == NULL) {
2143         return;
2144     }
2145 
2146     if (pHsChannel->config.neededForSwapGroup) {
2147         /*
2148          * Update the non-swapgroup content on the back half of both
2149          * headsurface surfaces, and the swapgroup content on the back half of
2150          * the back headsurface surface, and perform a viewportoffset flip to
2151          * the back offset.
2152          *
2153          * Synchronization is achieved by the following mechanism:
2154          *
2155          * - Before rendering a new frame, check that we aren't still scanning
2156          *   out from that half of the surface.
2157          * - After rendering a frame, push a semaphore write with the render
2158          *   offset and a non-stall interrupt.
2159          * - In response to the non-stall interrupt, perform the viewport
2160          *   flip to the render offset.
2161          */
2162         NvU32 activeViewportOffset =
2163             nvApiHeadGetActiveViewportOffset(pDispEvo, apiHead);
2164 
2165         nvAssert((activeViewportOffset == 0) ||
2166                  (activeViewportOffset == pHsChannel->config.frameSize.height));
2167 
2168         activeViewportOffset /= pHsChannel->config.frameSize.height;
2169 
2170         if (activeViewportOffset == HsGetPreviousOffset(pHsChannel)) {
2171             /*
2172              * The active viewport is the same as the last one we pushed, so
2173              * it's safe to start rendering to pHsChannel->nextOffset; check if
2174              * rendering from a previous interrupt hasn't completed yet.
2175              */
2176             if (pHsChannel->viewportFlipPending) {
2177                 /*
2178                  * A non-stall interrupt hasn't been triggered since we kicked
2179                  * off the previous frame's rendering.
2180                  */
2181                 HsProcFsRecordPreviousFrameNotDone(pHsChannel);
2182             } else {
2183                 NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
2184 
2185                 HsProcFsRecordScanline(pDispEvo, apiHead);
2186 
2187                 if (HsCanOmitNonSgHsUpdate(pHsChannel)) {
2188                     HsProcFsRecordOmittedNonSgHsUpdate(pHsChannel);
2189                 } else {
2190                     nvHsNextFrame(pHsDevice, pHsChannel, NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK);
2191                 }
2192             }
2193         } else {
2194             /*
2195              * The viewport flip we pushed after the previous frame's rendering
2196              * hasn't been applied in hardware yet.
2197              */
2198             HsProcFsRecordPreviousFrameNotDone(pHsChannel);
2199         }
2200 
2201         HsCheckSwapGroupFlipDone(pDispEvo->pDevEvo, pDispEvo->pSwapGroup[apiHead]);
2202     }
2203 }
2204 
2205 /*!
2206  * Receive vblank callback, in process context with nvkms_lock held.
2207  *
2208  */
2209 static void HsVBlankCallback(NVDispEvoRec *pDispEvo,
2210                              NVVBlankCallbackPtr pCallbackData)
2211 {
2212     const NvU32 apiHead = pCallbackData->apiHead;
2213     NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead];
2214     NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
2215 
2216     /*
2217      * The pHsChannel may have been torn down between when the vblank was
2218      * generated and when this was called.  Ignore spurious callbacks.
2219      */
2220     if (pHsChannel == NULL) {
2221         return;
2222     }
2223 
2224     if (!pHsChannel->usingRgIntrForSwapGroups &&
2225         pHsChannel->config.neededForSwapGroup) {
2226         HsCheckSwapGroupFlipDone(pDispEvo->pDevEvo, pDispEvo->pSwapGroup[apiHead]);
2227     }
2228 
2229     if (pHsChannel->usingRgIntrForSwapGroups &&
2230         pHsChannel->config.neededForSwapGroup) {
2231         // The next frame will be rendered during the RG line 1 interrupt.
2232         return;
2233     }
2234 
2235     /*
2236      * If we have not flipped to the previous buffer, yet, we should not render
2237      * to the next buffer.  Wait until the next vblank callback.
2238      */
2239     if (!IsPreviousFrameDone(pHsChannel)) {
2240         HsProcFsRecordPreviousFrameNotDone(pHsChannel);
2241         return;
2242     }
2243 
2244     HsProcFsRecordScanline(pDispEvo, apiHead);
2245 
2246     /*
2247      * XXX NVKMS HEADSURFACE TODO: evaluate whether there has been
2248      * damage to the source buffer since the last headSurface frame.
2249      * Only if so, perform the headSurface transformation and flip to
2250      * the resulting headSurface buffer.
2251      *
2252      * For headSurface bringup purposes, just always flip to the next
2253      * headSurface buffer.
2254      */
2255 
2256     /*
2257      * When fullscreen swapgroup flipping, updating
2258      * non-swapgroup content at vblank is unnecessary and
2259      * dangerous, since it results in releasing client
2260      * semaphores before their contents have actually been
2261      * displayed.
2262      */
2263     if (!pHsChannel->swapGroupFlipping) {
2264         nvHsNextFrame(pHsDevice, pHsChannel,
2265                       NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK);
2266     }
2267 }
2268 
2269 /*!
2270  * Schedule vblank callbacks from resman on a specific head and subdevice.
2271  */
2272 void nvHsAddVBlankCallback(NVHsChannelEvoPtr pHsChannel)
2273 {
2274     NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2275 
2276     pHsChannel->vBlankCallback =
2277         nvApiHeadRegisterVBlankCallback(pDispEvo,
2278                                         pHsChannel->apiHead,
2279                                         HsVBlankCallback,
2280                                         NULL);
2281 }
2282 
2283 /*!
2284  * Add an RG line 1 callback to check the swapgroup flip notifier and release
2285  * its associated deferred request fifo.
2286  *
2287  * This is done in an RG line 1 callback instead of the vblank callback to WAR
2288  * an issue where certain mode timings cause the vblank callback to fire
2289  * slightly before LOADV causes the notifier to transition from NOT_BEGUN
2290  * to BEGUN, causing an extra frame of delay before the next vblank occurs and
2291  * the deferred request fifo can be released.
2292  */
2293 void nvHsAddRgLine1Callback(NVHsChannelEvoPtr pHsChannel)
2294 {
2295     NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2296     NvBool found;
2297     NvU32 val;
2298 
2299     /*
2300      * Use the RG line 1 interrupt to check swapgroup completion by default,
2301      * but allow setting NVKMS_DELAY_SWAPGROUP_CHECK=0 by regkey to revert to
2302      * the old method of checking during vblank for debugging purposes.
2303      */
2304     found = nvGetRegkeyValue(pDispEvo->pDevEvo, "NVKMS_DELAY_SWAPGROUP_CHECK",
2305                              &val);
2306 
2307     if (found && (val == 0)) {
2308         return;
2309     }
2310 
2311     pHsChannel->pRgIntrCallback =
2312         nvApiHeadAddRgLine1Callback(pDispEvo,
2313                                     pHsChannel->apiHead,
2314                                     HsRgLine1CallbackProc,
2315                                     (void*)(NvUPtr)pHsChannel->apiHead);
2316 
2317     if (pHsChannel->pRgIntrCallback == NULL) {
2318         nvAssert(!"Failed to register headSurface RG line 1 interrupt");
2319     } else {
2320         pHsChannel->usingRgIntrForSwapGroups = TRUE;
2321     }
2322 }
2323 
2324 /*!
2325  * Cancel RG line 1 callbacks from resman on the specified head and subdevice.
2326  *
2327  * The same limitations regarding leftover vblank callbacks after vblank
2328  * callbacks are disabled in nvHsRemoveVblankCallback apply to RG callbacks.
2329  */
2330 void nvHsRemoveRgLine1Callback(NVHsChannelEvoPtr pHsChannel)
2331 {
2332     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2333 
2334     if (pHsChannel->usingRgIntrForSwapGroups) {
2335         nvRmRemoveRgLine1Callback(pDispEvo,
2336                                   pHsChannel->pRgIntrCallback);
2337         pHsChannel->pRgIntrCallback = NULL;
2338     }
2339 }
2340 
2341 /*!
2342  * Cancel vblank callbacks from resman on the specified head and subdevice.
2343  *
2344  * Note that there could currently be callbacks in flight.  We should be
2345  * prepared to handle a spurious callback after cancelling the callbacks here.
2346  *
2347  * XXX NVKMS HEADSURFACE TODO: It would be better to:
2348  *
2349  * (a) Remove the vblank callback before the modeset that disables headSurface.
2350  * (b) Drain/cancel any in flight callbacks while holding the nvkms_lock.
2351  *
2352  * A mechanism like that should avoid spurious callbacks.
2353  */
2354 void nvHsRemoveVBlankCallback(NVHsChannelEvoPtr pHsChannel)
2355 {
2356     NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2357 
2358     nvApiHeadUnregisterVBlankCallback(pDispEvo,
2359                                       pHsChannel->vBlankCallback);
2360     pHsChannel->vBlankCallback = NULL;
2361 }
2362 
2363 void nvHsAllocStatistics(
2364     NVHsChannelEvoRec *pHsChannel)
2365 {
2366 #if NVKMS_PROCFS_ENABLE
2367     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2368     const NvU32 apiHead = pHsChannel->apiHead;
2369     const NVHwModeTimingsEvo *pTimings =
2370         &pDispEvo->apiHeadState[apiHead].timings;
2371     NvU32 n;
2372 
2373     nvkms_memset(&pHsChannel->statistics, 0, sizeof(pHsChannel->statistics));
2374 
2375     pHsChannel->statistics.scanLine.vVisible = nvEvoVisibleHeight(pTimings);
2376 
2377     n = pHsChannel->statistics.scanLine.vVisible + 1;
2378 
2379     pHsChannel->statistics.scanLine.pHistogram = nvCalloc(1, sizeof(NvU64) * n);
2380 #endif /* NVKMS_PROCFS_ENABLE */
2381 }
2382 
2383 void nvHsFreeStatistics(
2384     NVHsChannelEvoRec *pHsChannel)
2385 {
2386 #if NVKMS_PROCFS_ENABLE
2387     nvFree(pHsChannel->statistics.scanLine.pHistogram);
2388     nvkms_memset(&pHsChannel->statistics, 0, sizeof(pHsChannel->statistics));
2389 #endif /* NVKMS_PROCFS_ENABLE */
2390 }
2391 
2392 #if NVKMS_PROCFS_ENABLE
2393 
2394 static const struct {
2395     const char *before;
2396     const char *after;
2397 } HsProcFsIndentTable[] = {
2398     [0] = { .before = "", .after = "    " },
2399     [1] = { .before = " ", .after = "   " },
2400     [2] = { .before = "  ", .after = "  " },
2401     [3] = { .before = "   ", .after = " " },
2402     [5] = { .before = "    ", .after = "" },
2403 };
2404 
2405 static const char *HsProcFsIndentBefore(NvU8 indent)
2406 {
2407     nvAssert(indent < ARRAY_LEN(HsProcFsIndentTable));
2408 
2409     return HsProcFsIndentTable[indent].before;
2410 }
2411 
2412 static const char *HsProcFsIndentAfter(NvU8 indent)
2413 {
2414     nvAssert(indent < ARRAY_LEN(HsProcFsIndentTable));
2415 
2416     return HsProcFsIndentTable[indent].after;
2417 }
2418 
2419 static void HsProcFsGpuTime(
2420     NVEvoInfoStringRec *pInfoString,
2421     const NvU64 nFrames,
2422     const NvU64 gpuTimeSpent,
2423     const NvU8 indent)
2424 {
2425     /*
2426      * Use nFrames - 1 to compute averageGpuTimeNs: the nvHs3dRenderFrame() path
2427      * increments nFrames at the end of rendering a frame, but it only updates
2428      * gpuTimeSpent at the start of rendering the _next_ frame.  I.e.,
2429      * gpuTimeSpent has time for nFrames - 1 frames.
2430      */
2431     const NvU64 averageGpuTimeNs =
2432         (nFrames <= 1) ? 0 : (gpuTimeSpent / (nFrames - 1));
2433     const NvU64 averageGpuTimeUs = (averageGpuTimeNs + 500) / 1000;
2434     const NvU64 nFramesToReport = (nFrames <= 1) ? 0 : nFrames - 1;
2435 
2436     nvEvoLogInfoString(
2437         pInfoString, "   %savg GPU time / frame%s   : "
2438         "%" NvU64_fmtu ".%03" NvU64_fmtu " msec "
2439         "(%" NvU64_fmtu " nsec / %" NvU64_fmtu " frames)",
2440         HsProcFsIndentBefore(indent),
2441         HsProcFsIndentAfter(indent),
2442         averageGpuTimeUs / 1000,
2443         averageGpuTimeUs % 1000,
2444         gpuTimeSpent,
2445         nFramesToReport);
2446 }
2447 
2448 static void HsProcFsFrameStatisticsOneEye(
2449     NVEvoInfoStringRec *pInfoString,
2450     const NVHsChannelEvoRec *pHsChannel,
2451     const NvU8 eye,
2452     const NvU8 slot,
2453     const NvU8 indent)
2454 {
2455     const NVHsChannelStatisticsOneEyeRec *pPerEye =
2456         &pHsChannel->statistics.perEye[eye][slot];
2457 
2458     const NvU64 framesPerMs = pPerEye->fps.framesPerMs;
2459 
2460     nvEvoLogInfoString(
2461         pInfoString,
2462         "   %snFrames%s                : %" NvU64_fmtu,
2463         HsProcFsIndentBefore(indent),
2464         HsProcFsIndentAfter(indent),
2465         pPerEye->nFrames);
2466 
2467     nvEvoLogInfoString(
2468         pInfoString, "   %sFPS (computed every 5s)%s: "
2469         "%" NvU64_fmtu ".%03" NvU64_fmtu,
2470         HsProcFsIndentBefore(indent),
2471         HsProcFsIndentAfter(indent),
2472         framesPerMs / 1000,
2473         framesPerMs % 1000);
2474 
2475     HsProcFsGpuTime(
2476         pInfoString,
2477         pPerEye->nFrames,
2478         pPerEye->gpuTimeSpent,
2479         indent);
2480 }
2481 
2482 static void HsProcFsFrameStatisticsOneSlot(
2483     NVEvoInfoStringRec *pInfoString,
2484     const NVHsChannelEvoRec *pHsChannel,
2485     const NvU8 slot,
2486     const NvU8 indent)
2487 {
2488     const char *eyeLabel[] = {
2489         [NVKMS_LEFT]  = "Left Eye ",
2490         [NVKMS_RIGHT] = "Right Eye",
2491     };
2492 
2493     const NvBool needEyeLabel =
2494         pHsChannel->statistics.perEye[NVKMS_RIGHT][slot].nFrames != 0;
2495     NvU8 eye;
2496 
2497     for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
2498 
2499         NvU8 localIndent = 0;
2500 
2501         if (pHsChannel->statistics.perEye[eye][slot].nFrames == 0) {
2502             continue;
2503         }
2504 
2505         if (needEyeLabel) {
2506             nvEvoLogInfoString(
2507                 pInfoString, "   %s%s%s              :",
2508                 HsProcFsIndentBefore(indent),
2509                 eyeLabel[eye],
2510                 HsProcFsIndentAfter(indent));
2511             localIndent++;
2512         }
2513 
2514         HsProcFsFrameStatisticsOneEye(
2515             pInfoString,
2516             pHsChannel,
2517             eye,
2518             slot,
2519             indent + localIndent);
2520     }
2521 }
2522 
2523 static void HsProcFsFrameStatistics(
2524     NVEvoInfoStringRec *pInfoString,
2525     const NVHsChannelEvoRec *pHsChannel)
2526 {
2527     NvU8 slot;
2528 
2529     if (pHsChannel->config.neededForSwapGroup) {
2530         nvEvoLogInfoString(pInfoString,
2531                            "   VBLANK frames              :");
2532 
2533         nvEvoLogInfoString(pInfoString,
2534                            "    Old swapGroup content     :");
2535 
2536         slot = Hs3dStatisticsGetSlot(
2537                     pHsChannel,
2538                     NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK, 0,
2539                     TRUE /* honorSwapGroupClipList */);
2540 
2541         HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, 2);
2542 
2543         nvEvoLogInfoString(pInfoString,
2544                            "    New swapGroup content     :");
2545 
2546         slot = Hs3dStatisticsGetSlot(
2547                     pHsChannel,
2548                     NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK, 0,
2549                     FALSE /* honorSwapGroupClipList */);
2550 
2551         HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, 2);
2552 
2553         nvEvoLogInfoString(pInfoString,
2554                            "   SWAP_GROUP_READY frames    :");
2555 
2556         slot = Hs3dStatisticsGetSlot(
2557                     pHsChannel,
2558                     NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY, 0,
2559                     FALSE /* honorSwapGroupClipList */);
2560 
2561         HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, 1);
2562 
2563     } else {
2564         const NvU8 indent = 0; /* start with no indentation */
2565 
2566         slot = Hs3dStatisticsGetSlot(
2567                     pHsChannel,
2568                     NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK, 0,
2569                     FALSE);
2570 
2571         HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, indent);
2572     }
2573 }
2574 
2575 static void HsProcFsScanLine(
2576     NVEvoInfoStringRec *pInfoString,
2577     const NVHsChannelEvoRec *pHsChannel)
2578 {
2579     NvU16 i;
2580 
2581     nvEvoLogInfoString(pInfoString,
2582                        "   scanLine information       :");
2583 
2584     nvEvoLogInfoString(pInfoString,
2585                        "    nInBlankingPeriod         : %" NvU64_fmtu,
2586                        pHsChannel->statistics.scanLine.nInBlankingPeriod);
2587     nvEvoLogInfoString(pInfoString,
2588                        "    nNotInBlankingPeriod      : %" NvU64_fmtu,
2589                        pHsChannel->statistics.scanLine.nNotInBlankingPeriod);
2590     nvEvoLogInfoString(pInfoString,
2591                        "    vVisible                  : %d",
2592                        pHsChannel->statistics.scanLine.vVisible);
2593 
2594     if (pHsChannel->statistics.scanLine.pHistogram == NULL) {
2595 
2596         nvEvoLogInfoString(pInfoString,
2597                            "    scanline histogram        : failed allocation");
2598     } else {
2599 
2600         nvEvoLogInfoString(pInfoString,
2601                            "    scanline histogram        :");
2602 
2603         for (i = 0; i <= pHsChannel->statistics.scanLine.vVisible; i++) {
2604 
2605             if (pHsChannel->statistics.scanLine.pHistogram[i] != 0) {
2606                 nvEvoLogInfoString(pInfoString,
2607                     "     scanLine[%04d]           : %" NvU64_fmtu,
2608                     i, pHsChannel->statistics.scanLine.pHistogram[i]);
2609             }
2610         }
2611     }
2612 }
2613 
2614 static void HsProcFsFlipQueueOneEntry(
2615     NVEvoInfoStringRec *pInfoString,
2616     const NVHsLayerRequestedFlipState *pFlipState)
2617 {
2618     /*
2619      * Print the pointers by casting to NvUPtr and formatting with NvUPtr_fmtx,
2620      * so that NULL is printed as "0x0", rather than "(null)".
2621      */
2622 
2623     nvEvoLogInfoString(pInfoString,
2624         "        pSurfaceEvo(L,R)      : 0x%" NvUPtr_fmtx ", 0x%" NvUPtr_fmtx,
2625         (NvUPtr)pFlipState->pSurfaceEvo[NVKMS_LEFT],
2626         (NvUPtr)pFlipState->pSurfaceEvo[NVKMS_RIGHT]);
2627 
2628     if (!pFlipState->syncObject.usingSyncpt) {
2629         nvEvoLogInfoString(pInfoString,
2630             "        semaphore             : "
2631             "acquire pSurfaceEvo: 0x%" NvUPtr_fmtx ", "
2632             "release pSurfaceEvo: 0x%" NvUPtr_fmtx ", "
2633             "acquire value: 0x%08x, "
2634             "release value: 0x%08x",
2635             (NvUPtr)pFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo,
2636             (NvUPtr)pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo,
2637             pFlipState->syncObject.u.semaphores.acquireValue,
2638             pFlipState->syncObject.u.semaphores.releaseValue);
2639     }
2640 }
2641 
2642 static void HsProcFsFlipQueue(
2643     NVEvoInfoStringRec *pInfoString,
2644     const NVHsChannelEvoRec *pHsChannel)
2645 {
2646     const NVHsChannelFlipQueueEntry *pEntry;
2647     NvU8 layer;
2648 
2649     for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
2650 
2651         const char *layerString[NVKMS_MAX_LAYERS_PER_HEAD] = {
2652             [NVKMS_MAIN_LAYER]    = "(main)   ",
2653             [NVKMS_OVERLAY_LAYER] = "(overlay)",
2654         };
2655 
2656         nvEvoLogInfoString(pInfoString,
2657             "   flipQueue%s         :", layerString[layer]);
2658 
2659         nvEvoLogInfoString(pInfoString,
2660             "     current                  :");
2661 
2662         HsProcFsFlipQueueOneEntry(pInfoString,
2663                                   &pHsChannel->flipQueue[layer].current);
2664 
2665         nvListForEachEntry(pEntry,
2666                            &pHsChannel->flipQueue[layer].queue,
2667                            flipQueueEntry) {
2668 
2669             nvEvoLogInfoString(pInfoString,
2670                 "     pending                  :");
2671 
2672             HsProcFsFlipQueueOneEntry(pInfoString, &pEntry->hwState);
2673         }
2674     }
2675 }
2676 
2677 static const char *HsGetEyeMaskString(const NvU8 eyeMask)
2678 {
2679     if (eyeMask == NVBIT(NVKMS_LEFT)) {
2680         return "L";
2681     } else {
2682         nvAssert(eyeMask == (NVBIT(NVKMS_LEFT) | NVBIT(NVKMS_RIGHT)));
2683         return "L|R";
2684     }
2685 }
2686 
2687 static const char *HsGetPixelShiftString(
2688     const enum NvKmsPixelShiftMode pixelShift)
2689 {
2690     switch (pixelShift) {
2691     case NVKMS_PIXEL_SHIFT_NONE:            return "none";
2692     case NVKMS_PIXEL_SHIFT_4K_TOP_LEFT:     return "4kTopLeft";
2693     case NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT: return "4kBottomRight";
2694     case NVKMS_PIXEL_SHIFT_8K:              return "8k";
2695     }
2696 
2697     return "unknown";
2698 }
2699 
2700 static void HsProcFsTransform(
2701     NVEvoInfoStringRec *pInfoString,
2702     const NVHsChannelEvoRec *pHsChannel)
2703 {
2704     nvEvoLogInfoString(pInfoString,
2705                        "   transform matrix           : "
2706                        "{ { 0x%08x, 0x%08x, 0x%08x },",
2707                        F32viewAsNvU32(pHsChannel->config.transform.m[0][0]),
2708                        F32viewAsNvU32(pHsChannel->config.transform.m[0][1]),
2709                        F32viewAsNvU32(pHsChannel->config.transform.m[0][2]));
2710 
2711     nvEvoLogInfoString(pInfoString,
2712                        "                              : "
2713                        "  { 0x%08x, 0x%08x, 0x%08x },",
2714                        F32viewAsNvU32(pHsChannel->config.transform.m[1][0]),
2715                        F32viewAsNvU32(pHsChannel->config.transform.m[1][1]),
2716                        F32viewAsNvU32(pHsChannel->config.transform.m[1][2]));
2717 
2718     nvEvoLogInfoString(pInfoString,
2719                        "                              : "
2720                        "  { 0x%08x, 0x%08x, 0x%08x } }",
2721                        F32viewAsNvU32(pHsChannel->config.transform.m[2][0]),
2722                        F32viewAsNvU32(pHsChannel->config.transform.m[2][1]),
2723                        F32viewAsNvU32(pHsChannel->config.transform.m[2][2]));
2724 }
2725 
2726 static void HsProcFsStaticWarpMesh(
2727     NVEvoInfoStringRec *pInfoString,
2728     const NVHsChannelEvoRec *pHsChannel)
2729 {
2730     nvEvoLogInfoString(pInfoString,
2731                        "   staticWarpMesh             : "
2732                        "{ { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x },",
2733                        pHsChannel->config.staticWarpMesh.vertex[0].x,
2734                        pHsChannel->config.staticWarpMesh.vertex[0].y,
2735                        pHsChannel->config.staticWarpMesh.vertex[0].u,
2736                        pHsChannel->config.staticWarpMesh.vertex[0].v,
2737                        pHsChannel->config.staticWarpMesh.vertex[0].r,
2738                        pHsChannel->config.staticWarpMesh.vertex[0].q);
2739 
2740     nvEvoLogInfoString(pInfoString,
2741                        "                              : "
2742                        "  { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x },",
2743                        pHsChannel->config.staticWarpMesh.vertex[1].x,
2744                        pHsChannel->config.staticWarpMesh.vertex[1].y,
2745                        pHsChannel->config.staticWarpMesh.vertex[1].u,
2746                        pHsChannel->config.staticWarpMesh.vertex[1].v,
2747                        pHsChannel->config.staticWarpMesh.vertex[1].r,
2748                        pHsChannel->config.staticWarpMesh.vertex[1].q);
2749 
2750     nvEvoLogInfoString(pInfoString,
2751                        "                              : "
2752                        "  { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x },",
2753                        pHsChannel->config.staticWarpMesh.vertex[2].x,
2754                        pHsChannel->config.staticWarpMesh.vertex[2].y,
2755                        pHsChannel->config.staticWarpMesh.vertex[2].u,
2756                        pHsChannel->config.staticWarpMesh.vertex[2].v,
2757                        pHsChannel->config.staticWarpMesh.vertex[2].r,
2758                        pHsChannel->config.staticWarpMesh.vertex[2].q);
2759 
2760     nvEvoLogInfoString(pInfoString,
2761                        "                              : "
2762                        "  { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x } }",
2763                        pHsChannel->config.staticWarpMesh.vertex[3].x,
2764                        pHsChannel->config.staticWarpMesh.vertex[3].y,
2765                        pHsChannel->config.staticWarpMesh.vertex[3].u,
2766                        pHsChannel->config.staticWarpMesh.vertex[3].v,
2767                        pHsChannel->config.staticWarpMesh.vertex[3].r,
2768                        pHsChannel->config.staticWarpMesh.vertex[3].q);
2769 }
2770 
2771 static const char *HsProcFsGetNeededForString(
2772     const NVHsChannelEvoRec *pHsChannel)
2773 {
2774     if (pHsChannel->config.neededForModeset &&
2775         pHsChannel->config.neededForSwapGroup) {
2776         return "modeset, swapgroup";
2777     }
2778 
2779     if (pHsChannel->config.neededForModeset &&
2780         !pHsChannel->config.neededForSwapGroup) {
2781         return "modeset";
2782     }
2783 
2784     if (!pHsChannel->config.neededForModeset &&
2785         pHsChannel->config.neededForSwapGroup) {
2786         return "swapgroup";
2787     }
2788 
2789     return "unknown";
2790 }
2791 
2792 static void HsProcFsFrameSemaphores(
2793     NVEvoInfoStringRec *pInfoString,
2794     const NVHsChannelEvoRec *pHsChannel)
2795 {
2796     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2797     const NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
2798     const NvU32 sd = pDispEvo->displayOwner;
2799     const NVHsNotifiersOneSdRec *p = pHsDevice->notifiers.sd[sd].ptr;
2800     const NvGpuSemaphore *pSema =
2801         (const NvGpuSemaphore *)p->semaphore[pHsChannel->apiHead];
2802 
2803     NvU8 buffer;
2804 
2805     for (buffer = 0; buffer < NVKMS_HEAD_SURFACE_MAX_BUFFERS; buffer++) {
2806         nvEvoLogInfoString(pInfoString,
2807                            "   frameSemaphore[%d]          : 0x%0x",
2808                            buffer,
2809                            pSema[buffer].data[0]);
2810     }
2811 }
2812 
2813 void nvHsProcFs(
2814     NVEvoInfoStringRec *pInfoString,
2815     NVDevEvoRec *pDevEvo,
2816     NvU32 dispIndex,
2817     NvU32 apiHead)
2818 {
2819     NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[dispIndex];
2820     const NVHsChannelEvoRec *pHsChannel = pDispEvo->pHsChannel[apiHead];
2821     const NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps =
2822         &pDevEvo->apiHeadSurfaceAllDisps[apiHead];
2823 
2824     if (pHsChannel == NULL) {
2825         nvEvoLogInfoString(pInfoString,
2826                            "  headSurface[head:%02d]        : disabled", apiHead);
2827         return;
2828     }
2829 
2830     nvEvoLogInfoString(pInfoString,
2831                        "  headSurface[head:%02d]        : "
2832                        "enabled (needed for: %s)",
2833                        apiHead, HsProcFsGetNeededForString(pHsChannel));
2834 
2835     HsProcFsFrameStatistics(pInfoString, pHsChannel);
2836 
2837     nvEvoLogInfoString(pInfoString,
2838                        "   nextIndex                  : %d",
2839                        pHsChannel->nextIndex);
2840 
2841     nvEvoLogInfoString(pInfoString,
2842                        "   nextOffset                 : %d",
2843                        pHsChannel->nextOffset);
2844 
2845     nvEvoLogInfoString(pInfoString,
2846                        "   nPreviousFrameNotDone      : %" NvU64_fmtu,
2847                        pHsChannel->statistics.nPreviousFrameNotDone);
2848 
2849     nvEvoLogInfoString(pInfoString,
2850                        "   nOmittedNonSgHsUpdates     : %" NvU64_fmtu,
2851                        pHsChannel->statistics.nOmittedNonSgHsUpdates);
2852 
2853     nvEvoLogInfoString(pInfoString,
2854                        "   nFullscreenSgFrames        : %" NvU64_fmtu,
2855                        pHsChannel->statistics.nFullscreenSgFrames);
2856 
2857     nvEvoLogInfoString(pInfoString,
2858                        "   nNonFullscreenSgFrames     : %" NvU64_fmtu,
2859                        pHsChannel->statistics.nNonFullscreenSgFrames);
2860 
2861     nvEvoLogInfoString(pInfoString,
2862                        "   viewPortIn                 : %d x %d +%d +%d",
2863                        pHsChannel->config.viewPortIn.width,
2864                        pHsChannel->config.viewPortIn.height,
2865                        pHsChannel->config.viewPortIn.x,
2866                        pHsChannel->config.viewPortIn.y);
2867 
2868     nvEvoLogInfoString(pInfoString,
2869                        "   viewPortOut                : %d x %d +%d +%d",
2870                        pHsChannel->config.viewPortOut.width,
2871                        pHsChannel->config.viewPortOut.height,
2872                        pHsChannel->config.viewPortOut.x,
2873                        pHsChannel->config.viewPortOut.y);
2874 
2875     nvEvoLogInfoString(pInfoString,
2876                        "   frameSize                  : %d x %d",
2877                        pHsChannel->config.frameSize.width,
2878                        pHsChannel->config.frameSize.height);
2879 
2880     nvEvoLogInfoString(pInfoString,
2881                        "   surfaceSize                : %d x %d",
2882                        pHsChannel->config.surfaceSize.width,
2883                        pHsChannel->config.surfaceSize.height);
2884 
2885     nvEvoLogInfoString(pInfoString,
2886                        "   stagingSurfaceSize         : %d x %d",
2887                        pHsChannel->config.stagingSurfaceSize.width,
2888                        pHsChannel->config.stagingSurfaceSize.height);
2889 
2890     nvEvoLogInfoString(pInfoString,
2891                        "   allDispsSurfaceSize        : %d x %d",
2892                        pHsOneHeadAllDisps->size.width,
2893                        pHsOneHeadAllDisps->size.height);
2894 
2895     nvEvoLogInfoString(pInfoString,
2896                        "   allDispsStagingSize        : %d x %d",
2897                        pHsOneHeadAllDisps->stagingSize.width,
2898                        pHsOneHeadAllDisps->stagingSize.height);
2899 
2900     nvEvoLogInfoString(pInfoString,
2901                        "   allDispsSurfaceCount       : %d",
2902                        pHsOneHeadAllDisps->surfaceCount);
2903 
2904     nvEvoLogInfoString(pInfoString,
2905                        "   eyeMask                    : %s",
2906                        HsGetEyeMaskString(pHsChannel->config.eyeMask));
2907 
2908     nvEvoLogInfoString(pInfoString,
2909                        "   pixelShift                 : %s",
2910                        HsGetPixelShiftString(pHsChannel->config.pixelShift));
2911 
2912     HsProcFsTransform(pInfoString, pHsChannel);
2913 
2914     HsProcFsStaticWarpMesh(pInfoString, pHsChannel);
2915 
2916     HsProcFsFlipQueue(pInfoString, pHsChannel);
2917 
2918     HsProcFsFrameSemaphores(pInfoString, pHsChannel);
2919 
2920     HsProcFsScanLine(pInfoString, pHsChannel);
2921 }
2922 #endif /* NVKMS_PROCFS_ENABLE */
2923