1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2017-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "nvkms-types.h"
25 #include "nvkms-headsurface.h"
26 #include "nvkms-headsurface-3d.h"
27 #include "nvkms-headsurface-priv.h"
28 #include "nvkms-headsurface-swapgroup.h"
29 #include "nvkms-utils.h"
30 #include "nvkms-rmapi.h"
31 #include "nvkms-surface.h"
32 #include "nvkms-sync.h"
33 #include "nvkms-flip.h"
34 #include "nvkms-private.h"
35 #include "nvkms-evo.h"
36 #include "nvkms-dma.h"
37 #include "nvkms-modeset.h"
38 #include "nvkms-rm.h"
39 
40 #include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
41 
42 static NvBool AllocNotifiers(NVHsDeviceEvoRec *pHsDevice);
43 static void FreeNotifiers(NVHsDeviceEvoRec *pHsDevice);
44 static void HsProcFsRecordFullscreenSgFrames(NVHsChannelEvoPtr pHsChannel,
45                                              NvBool isFullscreen);
46 
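/*!
 * Choose log2(gobs-per-block) in Y for a blocklinear headSurface allocation.
 *
 * Start at 16 gobs per block and shrink the block height until a block is no
 * taller than roughly 1.5x the surface height; then keep halving while half a
 * block would still cover the full surface height.
 *
 * Illustrative example (assuming NVKMS_BLOCK_LINEAR_GOB_HEIGHT is 8): for
 * height = 16, heightAndOneHalf = 24, so the first loop lowers
 * log2GobsPerBlockY from 4 to 1 (8 << 1 = 16 <= 24), and the second loop
 * leaves it unchanged (proposedBlockSize = 8 < 16), i.e. 2 gobs (16 rows)
 * per block.
 */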
47 static NvU32 GetLog2GobsPerBlockY(NvU32 height)
48 {
49     NvU32 log2GobsPerBlockY = 4; // 16 gobs/block
50 
51     const NvU64 heightAndOneHalf = (NvU64)height + ((NvU64)height/2ULL);
52     const NvU64 nvFermiBlockLinearGobHeight = NVKMS_BLOCK_LINEAR_GOB_HEIGHT;
53 
54     // If we're wasting too much memory, cap the block height
55     while ((log2GobsPerBlockY > 0U) &&
56            (((nvFermiBlockLinearGobHeight * ((NvU64)1ULL << log2GobsPerBlockY))) >
57             heightAndOneHalf)) {
58         log2GobsPerBlockY--;
59     }
60 
61     // If there is more than one gob per block,
62     if (log2GobsPerBlockY > 0U) {
63 
64         // Proposed shrunk block size:
65         // compute a new proposedBlockSize, based on a gobs-per-block count that
66         // is half of the current value (log2 - 1).  The "if (log2 > 0)" above
67         // keeps this value always >= 0.
68         NvU32 proposedBlockSize =
69             NVKMS_BLOCK_LINEAR_GOB_HEIGHT << (log2GobsPerBlockY - 1U);
70 
71         // While the proposedBlockSize is greater than the image size,
72         while (proposedBlockSize >= height) {
73             // It's safe to cut the gobs per block in half.
74             --log2GobsPerBlockY;
75 
76             // If we've hit 1 gob per block, stop.
77             if (log2GobsPerBlockY == 0U) {
78                 break;
79             }
80             // Otherwise, divide the proposed block dimension/size by two.
81             proposedBlockSize /= 2U;
82         }
83     }
84 
85     return log2GobsPerBlockY;
86 }
87 
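/*!
 * Compute the blocklinear layout parameters for a headSurface allocation:
 * the Y gobs-per-block exponent, the pitch in block (GOB-width) units, and
 * the total surface size in bytes, padded for blocklinear alignment.
 */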
88 static void GetLog2GobsPerBlock(
89     NvU32 bytesPerPixel,
90     NvU32 widthInPixels,
91     NvU32 heightInPixels,
92     NvU32 *pLog2GobsPerBlockY,
93     NvU32 *pitchInBlocks,
94     NvU64 *sizeInBytes)
95 {
96     NvU32 xAlign, yAlign, pitchInBytes, lines;
97 
98     NvU32 log2GobsPerBlockY = GetLog2GobsPerBlockY(heightInPixels);
99 
100     xAlign = NVKMS_BLOCK_LINEAR_GOB_WIDTH - 1;
101     yAlign = (NVKMS_BLOCK_LINEAR_GOB_HEIGHT << log2GobsPerBlockY) - 1;
102 
103     pitchInBytes = NV_ALIGN_UP(widthInPixels * bytesPerPixel, xAlign);
104     lines = NV_ALIGN_UP(heightInPixels, yAlign);
105 
106     *pLog2GobsPerBlockY = log2GobsPerBlockY;
107     *sizeInBytes = (NvU64)pitchInBytes * lines;
108     *pitchInBlocks = pitchInBytes / NVKMS_BLOCK_LINEAR_GOB_WIDTH;
109 }
110 
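/*!
 * Allocate contiguous, blocklinear video memory for a headSurface surface
 * through the resource manager, aligned to NV_EVO_SURFACE_ALIGNMENT.
 * Returns the nvRmApiAlloc() status.
 */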
111 static NvU32 AllocSurfaceVidmem(
112     const NVDevEvoRec *pDevEvo,
113     NvU32 handle,
114     NvU64 sizeInBytes)
115 {
116     NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
117 
118     memAllocParams.owner = NVKMS_RM_HEAP_ID;
119     memAllocParams.size = sizeInBytes;
120     memAllocParams.type = NVOS32_TYPE_IMAGE;
121 
122     memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) |
123                           DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS) |
124                           DRF_DEF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR);
125 
126     memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT);
127 
128     memAllocParams.flags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN |
129                            NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
130 
131     memAllocParams.alignment = NV_EVO_SURFACE_ALIGNMENT;
132 
133     return nvRmApiAlloc(nvEvoGlobal.clientHandle,
134                         pDevEvo->deviceHandle,
135                         handle,
136                         NV01_MEMORY_LOCAL_USER,
137                         &memAllocParams);
138 }
139 
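/*!
 * Map an RM memory allocation into the device's NVKMS GPU virtual address
 * space for headSurface use.
 *
 * \param[in]  pDevEvo           The device.
 * \param[in]  rmHandle          The RM memory handle to map.
 * \param[in]  sizeInBytes       The size of the mapping.
 * \param[in]  hsMapPermissions  The access permissions for the mapping.
 *
 * \return  The GPU virtual address of the mapping; 0 if the device has no
 *          headSurface (no3d mode) or no access was requested; or
 *          NV_HS_BAD_GPU_ADDRESS if the mapping failed.
 */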
140 NvU64 nvHsMapSurfaceToDevice(
141     const NVDevEvoRec *pDevEvo,
142     const NvU32 rmHandle,
143     const NvU64 sizeInBytes,
144     const enum NvHsMapPermissions hsMapPermissions)
145 {
146     NvU32 ret;
147     NvU32 flags = DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE);
148     NvU64 gpuAddress = 0;
149 
150     /* pHsDevice could be NULL if we are in no3d mode. */
151 
152     if (pDevEvo->pHsDevice == NULL) {
153         return gpuAddress;
154     }
155 
156     switch (hsMapPermissions) {
157     case NvHsMapPermissionsNone:
158         return gpuAddress;
159     case NvHsMapPermissionsReadWrite:
160         flags |= DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_WRITE);
161         break;
162     case NvHsMapPermissionsReadOnly:
163         flags |= DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_ONLY);
164         break;
165     }
166 
167     ret = nvRmApiMapMemoryDma(nvEvoGlobal.clientHandle,
168                               pDevEvo->deviceHandle,
169                               pDevEvo->nvkmsGpuVASpace,
170                               rmHandle,
171                               0, /* offset */
172                               sizeInBytes,
173                               flags,
174                               &gpuAddress);
175 
176     if (ret == NVOS_STATUS_SUCCESS) {
177         return gpuAddress;
178     } else {
179         return NV_HS_BAD_GPU_ADDRESS;
180     }
181 }
182 
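/*!
 * Unmap a mapping created by nvHsMapSurfaceToDevice(); gpuAddress values of
 * 0 and NV_HS_BAD_GPU_ADDRESS are tolerated and ignored.
 */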
183 void nvHsUnmapSurfaceFromDevice(
184     const NVDevEvoRec *pDevEvo,
185     const NvU32 rmHandle,
186     const NvU64 gpuAddress)
187 {
188     if ((gpuAddress == 0) || (gpuAddress == NV_HS_BAD_GPU_ADDRESS)) {
189         return;
190     }
191 
192     if (pDevEvo->pHsDevice == NULL) {
193         return;
194     }
195 
196     nvRmApiUnmapMemoryDma(nvEvoGlobal.clientHandle,
197                           pDevEvo->deviceHandle,
198                           pDevEvo->nvkmsGpuVASpace,
199                           rmHandle,
200                           0, /* flags */
201                           gpuAddress);
202 }
203 
204 /*!
205  * Free an NVHsSurfaceRec, allocated by nvHsAllocSurface().
206  *
207  * \param[in]  pDevEvo     The device.
208  * \param[in]  pHsSurface  The NVHsSurfaceRec to free.
209  */
210 void nvHsFreeSurface(
211     NVDevEvoRec *pDevEvo,
212     NVHsSurfaceRec *pHsSurface)
213 {
214     if (pHsSurface == NULL) {
215         return;
216     }
217 
218     if (pHsSurface->rmHandle != 0) {
219         nvRmApiFree(nvEvoGlobal.clientHandle,
220                     pDevEvo->deviceHandle,
221                     pHsSurface->rmHandle);
222 
223         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pHsSurface->rmHandle);
224         pHsSurface->rmHandle = 0;
225     }
226 
227     if (pHsSurface->nvKmsHandle != 0) {
228         nvEvoUnregisterSurface(pDevEvo,
229                                pDevEvo->pNvKmsOpenDev,
230                                pHsSurface->nvKmsHandle,
231                                FALSE /* skipUpdate */);
232     }
233 
234     nvFree(pHsSurface);
235 }
236 
237 NVSurfaceEvoRec *nvHsGetNvKmsSurface(const NVDevEvoRec *pDevEvo,
238                                      NvKmsSurfaceHandle surfaceHandle,
239                                      const NvBool requireCtxDma)
240 {
241     const NVEvoApiHandlesRec *pNvKmsOpenDevSurfaceHandles;
242     NVSurfaceEvoRec *pKmsSurface;
243 
244     pNvKmsOpenDevSurfaceHandles =
245         nvGetSurfaceHandlesFromOpenDevConst(pDevEvo->pNvKmsOpenDev);
246 
247     nvAssert(pNvKmsOpenDevSurfaceHandles != NULL);
248 
249     pKmsSurface =
250         nvEvoGetSurfaceFromHandleNoCtxDmaOk(pDevEvo,
251                                             pNvKmsOpenDevSurfaceHandles,
252                                             surfaceHandle);
253     nvAssert(pKmsSurface != NULL);
254     nvAssert(pKmsSurface->requireCtxDma == requireCtxDma);
255 
256     return pKmsSurface;
257 }
258 
259 /*!
260  * Allocate an NVHsSurfaceRec, for use with headSurface.
261  *
262  * Video memory is allocated, mapped into the device's GPU virtual address
263  * space, and registered with NVKMS's pNvKmsOpenDev.
264  *
265  * Note the video memory is not cleared here, because the corresponding graphics
266  * channel may not be allocated yet.
267  *
268  * \param[in]  pDevEvo         The device.
269  * \param[in]  requireCtxDma   Whether display hardware requires access.
270  * \param[in]  format          The format of the surface.
271  * \param[in]  widthInPixels   The width of the surface, in pixels.
272  * \param[in]  heightInPixels  The height of the surface, in pixels.
273  *
274  * \return  On success, an allocated NVHsSurfaceRec structure is returned.
275  *          On failure, NULL is returned.
276  */
277 NVHsSurfaceRec *nvHsAllocSurface(
278     NVDevEvoRec *pDevEvo,
279     const NvBool requireCtxDma,
280     const enum NvKmsSurfaceMemoryFormat format,
281     const NvU32 widthInPixels,
282     const NvU32 heightInPixels)
283 {
284     struct NvKmsRegisterSurfaceParams nvKmsParams = { };
285     const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
286         nvKmsGetSurfaceMemoryFormatInfo(format);
287     NvU32 pitchInBlocks = 0;
288     NvU64 sizeInBytes = 0;
289     NvU32 log2GobsPerBlockY = 0;
290     NvU32 ret = 0;
291     NVHsSurfaceRec *pHsSurface = nvCalloc(1, sizeof(*pHsSurface));
292 
293     if (pHsSurface == NULL) {
294         return NULL;
295     }
296 
297     GetLog2GobsPerBlock(pFormatInfo->rgb.bytesPerPixel,
298                         widthInPixels,
299                         heightInPixels,
300                         &log2GobsPerBlockY,
301                         &pitchInBlocks,
302                         &sizeInBytes);
303 
304     sizeInBytes = NV_ALIGN_UP(sizeInBytes, NV_EVO_SURFACE_ALIGNMENT);
305 
306     pHsSurface->rmHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
307 
308     if (pHsSurface->rmHandle == 0) {
309         goto fail;
310     }
311 
312     ret = AllocSurfaceVidmem(pDevEvo, pHsSurface->rmHandle, sizeInBytes);
313 
314     if (ret != NVOS_STATUS_SUCCESS) {
315         nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pHsSurface->rmHandle);
316         pHsSurface->rmHandle = 0;
317 
318         goto fail;
319     }
320 
321     pHsSurface->gobsPerBlock.y = log2GobsPerBlockY;
322 
323     /*
324      * For blocklinear surfaces, the NVKMS pitch is in units of blocks, which
325      * matches what GetLog2GobsPerBlock() returned to us.
326      */
327     nvKmsParams.request.useFd = FALSE;
328     nvKmsParams.request.rmClient = nvEvoGlobal.clientHandle;
329     nvKmsParams.request.widthInPixels = widthInPixels;
330     nvKmsParams.request.heightInPixels = heightInPixels;
331     nvKmsParams.request.layout = NvKmsSurfaceMemoryLayoutBlockLinear;
332     nvKmsParams.request.format = format;
333     nvKmsParams.request.noDisplayHardwareAccess = !requireCtxDma;
334     nvKmsParams.request.log2GobsPerBlockY = log2GobsPerBlockY;
335 
336     nvKmsParams.request.planes[0].u.rmObject = pHsSurface->rmHandle;
337     nvKmsParams.request.planes[0].pitch = pitchInBlocks;
338     nvKmsParams.request.planes[0].rmObjectSizeInBytes = sizeInBytes;
339 
340     nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, &nvKmsParams,
341                          NvHsMapPermissionsReadWrite);
342 
343     if (nvKmsParams.reply.surfaceHandle == 0) {
344         goto fail;
345     }
346 
347     pHsSurface->nvKmsHandle = nvKmsParams.reply.surfaceHandle;
348 
349     pHsSurface->pSurfaceEvo =
350         nvHsGetNvKmsSurface(pDevEvo, pHsSurface->nvKmsHandle, requireCtxDma);
351 
352     if (pHsSurface->pSurfaceEvo == NULL) {
353         goto fail;
354     }
355 
356     return pHsSurface;
357 
358 fail:
359     nvHsFreeSurface(pDevEvo, pHsSurface);
360 
361     return NULL;
362 }
363 
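/*!
 * Allocate the per-device headSurface state: the 3d rendering state and the
 * completion notifier memory.
 *
 * Returns TRUE without allocating anything if headSurface is not supported
 * on this device, or if the client requested no3d operation.  On failure,
 * everything is torn down and FALSE is returned.
 */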
364 NvBool nvHsAllocDevice(
365     NVDevEvoRec *pDevEvo,
366     const struct NvKmsAllocDeviceRequest *pRequest)
367 {
368     NVHsDeviceEvoRec *pHsDevice;
369 
370     nvAssert(pDevEvo->pHsDevice == NULL);
371 
372     if (!pDevEvo->isHeadSurfaceSupported) {
373         return TRUE;
374     }
375 
376     if (pRequest->no3d) {
377         return TRUE;
378     }
379 
380     pHsDevice = nvCalloc(1, sizeof(*pHsDevice));
381 
382     if (pHsDevice == NULL) {
383         goto fail;
384     }
385 
386     pDevEvo->pHsDevice = pHsDevice;
387     pHsDevice->pDevEvo = pDevEvo;
388 
389     nvAssert(pDevEvo->nvkmsGpuVASpace);
390 
391     if (!nvHs3dAllocDevice(pHsDevice)) {
392         goto fail;
393     }
394 
395     if (!AllocNotifiers(pHsDevice)) {
396         goto fail;
397     }
398 
399     return TRUE;
400 
401 fail:
402     nvHsFreeDevice(pDevEvo);
403 
404     return FALSE;
405 }
406 
407 void nvHsFreeDevice(NVDevEvoRec *pDevEvo)
408 {
409     NVHsDeviceEvoRec *pHsDevice = pDevEvo->pHsDevice;
410 
411     if (pHsDevice == NULL) {
412         return;
413     }
414 
415     FreeNotifiers(pHsDevice);
416 
417     nvHs3dFreeDevice(pHsDevice);
418 
419     nvFree(pHsDevice);
420 
421     pDevEvo->pHsDevice = NULL;
422 }
423 
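/*!
 * Allocate the per-(pDispEvo, apiHead) headSurface channel state, including
 * its 3d rendering channel.  Returns NULL on failure.
 */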
424 NVHsChannelEvoPtr nvHsAllocChannel(NVDispEvoRec *pDispEvo, NvU32 apiHead)
425 {
426     NVHsChannelEvoRec *pHsChannel = nvCalloc(1, sizeof(*pHsChannel));
427 
428     if (pHsChannel == NULL) {
429         goto fail;
430     }
431 
432     pHsChannel->pDispEvo = pDispEvo;
433     pHsChannel->apiHead = apiHead;
434 
435     if (!nvHs3dAllocChannel(pHsChannel)) {
436         goto fail;
437     }
438 
439     return pHsChannel;
440 
441 fail:
442     nvHsFreeChannel(pHsChannel);
443 
444     return NULL;
445 }
446 
447 void nvHsFreeChannel(NVHsChannelEvoPtr pHsChannel)
448 {
449     if (pHsChannel == NULL) {
450         return;
451     }
452 
453     nvHs3dFreeChannel(pHsChannel);
454 
455     nvFree(pHsChannel);
456 }
457 
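/*!
 * Convert the NISO surface's semaphore offset (in words) into a semaphore
 * index, i.e. the offset in units of whole semaphores of the given format.
 *
 * Illustrative example (the semaphore size depends on the NvKmsNIsoFormat;
 * the figure below is only an assumption for illustration): with a 16-byte,
 * four-word semaphore format and offsetInWords = 12, the offset is 48 bytes
 * and the returned index is 48 / 16 = 3.
 */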
458 static NvU32 HsGetSemaphoreIndex(
459     const NVFlipNIsoSurfaceEvoHwState *pSemaSurface)
460 {
461     const NvU32 offsetInBytes = pSemaSurface->offsetInWords * 4;
462     const enum NvKmsNIsoFormat format = pSemaSurface->format;
463     const NvU32 sizeOfSemaphore = nvKmsSizeOfSemaphore(format);
464 
465     /*
466      * The semaphore size must be greater than zero.  Flip validation should
467      * prevent us from getting here with an invalid NvKmsNIsoFormat.
468      */
469     nvAssert(sizeOfSemaphore > 0);
470 
471     /* The semaphore offset should be a multiple of the semaphore size. */
472     nvAssert((offsetInBytes % sizeOfSemaphore) == 0);
473 
474     return offsetInBytes / sizeOfSemaphore;
475 }
476 
477 /*!
478  * Read the payload of the semaphore described in the pHwState.
479  */
480 static NvU32 HsFlipQueueReadSemaphore(
481     const NVHsChannelEvoRec *pHsChannel,
482     const NVFlipNIsoSurfaceEvoHwState *pSemaSurface)
483 {
484     const enum NvKmsNIsoFormat format = pSemaSurface->format;
485     const NvU32 semaphoreIndex = HsGetSemaphoreIndex(pSemaSurface);
486     const NvU32 sd = pHsChannel->pDispEvo->displayOwner;
487     const void *ptr;
488     struct nvKmsParsedSemaphore parsedSemaphore = { };
489 
490     /* We should only get here if we have a valid semaphore surface. */
491     nvAssert(pSemaSurface->pSurfaceEvo != NULL);
492 
493     ptr = pSemaSurface->pSurfaceEvo->cpuAddress[sd];
494 
495     if (ptr == NULL) {
496         nvAssert(!"Semaphore surface without CPU mapping!");
497         return 0;
498     }
499 
500     nvKmsParseSemaphore(format, semaphoreIndex, ptr, &parsedSemaphore);
501 
502     return parsedSemaphore.payload;
503 }
504 
505 /*!
506  * Return whether the specified pHwState is ready to flip.
507  */
508 static NvBool HsFlipQueueEntryIsReady(
509     const NVHsChannelEvoRec *pHsChannel,
510     const NVFlipChannelEvoHwState *pHwState)
511 {
512     const NVFlipNIsoSurfaceEvoHwState *pSemaSurface =
513         &pHwState->syncObject.u.semaphores.acquireSurface;
514 
515     if (pHwState->syncObject.usingSyncpt) {
516         return TRUE;
517     }
518 
519     /*
520      * If a semaphore surface was specified, check if the semaphore has reached
521      * the specified acquire value.
522      */
523     if (pSemaSurface->pSurfaceEvo != NULL) {
524         const NvU32 semaphoreValue =
525             HsFlipQueueReadSemaphore(pHsChannel, pSemaSurface);
526 
527         if (pHsChannel->swapGroupFlipping) {
528             // With swap group flipping, the client semaphore should be
529             // written before the non-stall interrupt kicking off the flip.
530             nvAssert(semaphoreValue == pHwState->syncObject.u.semaphores.acquireValue);
531         } else {
532             if (semaphoreValue != pHwState->syncObject.u.semaphores.acquireValue) {
533                 return FALSE;
534             }
535         }
536     }
537 
538     /*
539      * If a time stamp was specified for the flip, check if the time stamp has
540      * been satisfied.
541      *
542      * XXX NVKMS HEADSURFACE TODO: Implement time stamp flip check.
543      */
544 
545     return TRUE;
546 }
547 
548 /*!
549  * Update the reference count of all the surfaces described in the pHwState.
550  */
551 static void HsUpdateFlipQueueEntrySurfaceRefCount(
552     const NVFlipChannelEvoHwState *pHwState,
553     NvBool increase)
554 {
555     HsChangeSurfaceFlipRefCount(
556         pHwState->pSurfaceEvo[NVKMS_LEFT], increase);
557 
558     HsChangeSurfaceFlipRefCount(
559         pHwState->pSurfaceEvo[NVKMS_RIGHT], increase);
560 
561     HsChangeSurfaceFlipRefCount(
562         pHwState->completionNotifier.surface.pSurfaceEvo, increase);
563 
564     if (!pHwState->syncObject.usingSyncpt) {
565         HsChangeSurfaceFlipRefCount(
566             pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, increase);
567 
568         HsChangeSurfaceFlipRefCount(
569             pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, increase);
570     }
571 }
572 
573 /*!
574  * Update bookkeeping for "flipping away" from a pHwState.
575  */
576 static void HsReleaseFlipQueueEntry(
577     NVDevEvoPtr pDevEvo,
578     NVHsChannelEvoPtr pHsChannel,
579     const NVFlipChannelEvoHwState *pHwState)
580 {
581     /*
582      * If a semaphore surface was specified, we can now write its release value.
583      */
584     if (!pHwState->syncObject.usingSyncpt &&
585         pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo != NULL) {
586 
587         /*
588          * XXX NVKMS HEADSURFACE TODO: write the timestamp in the EVO/NVDisplay
589          * semaphore structure, based on NvKmsNIsoFormat.  The graphics channel
590          * doesn't support all the NvKmsNIsoFormats, so we would need to use a
591          * graphics channel semaphore release of STRUCTURE_SIZE = ONE_WORD with
592          * the timestamp as payload.  It would be unfortunate to read ptimer
593          * registers in order to compute the payload value.
594          */
595 
596         nvHs3dReleaseSemaphore(pHsChannel,
597                                pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo,
598                                pHwState->syncObject.u.semaphores.releaseSurface.format,
599                                pHwState->syncObject.u.semaphores.releaseSurface.offsetInWords,
600                                pHwState->syncObject.u.semaphores.releaseValue,
601                                TRUE /* allPreceedingReads */);
602     }
603 
604     /*
605      * HeadSurface no longer needs to read from the surfaces in pHwState;
606      * decrement their reference counts.
607      */
608     HsUpdateFlipQueueEntrySurfaceRefCount(pHwState, FALSE);
609 }
610 
611 /*!
612  * "Fast forward" through flip queue entries that are ready.
613  *
614  * \param[in,out]  pHsChannel               The headSurface channel.
615  * \param[in]      layer                    The layer of the flip queue.
616  * \param[in]      honorIsReadyCriteria     Honor the isReady check for
617  *                                          flip queue entries.
618  * \param[in]      honorMinPresentInterval  Honor the minPresentInterval in
619  *                                          flip queue entries.
620  */
621 static void HsFastForwardFlipQueue(
622     NVHsChannelEvoPtr pHsChannel,
623     const NvU8 layer,
624     const NvBool honorIsReadyCriteria,
625     const NvBool honorMinPresentInterval)
626 {
627     NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo;
628     NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
629 
630     /*
631      * For swapgroup flips, every flip kicked off by the client needs to result
632      * in a real flip in hardware, so we can't fast forward through flips here.
633      */
634     if (pHsChannel->config.neededForSwapGroup) {
635         return;
636     }
637 
638     while (!nvListIsEmpty(pFlipQueue)) {
639 
640         NVHsChannelFlipQueueEntry *pEntry =
641             nvListFirstEntry(pFlipQueue,
642                              NVHsChannelFlipQueueEntry,
643                              flipQueueEntry);
644         /*
645          * Stop "fast forwarding" once we find a flip queue entry that is not
646          * ready: we must not release semaphores out of order, otherwise we
647          * could confuse client semaphore interlocking.
648          */
649         if (honorIsReadyCriteria &&
650             !HsFlipQueueEntryIsReady(pHsChannel, &pEntry->hwState)) {
651             break;
652         }
653 
654         /*
655          * Normally, we want to make sure that each MinPresentInterval > 0 flip
656          * is displayed for one frame, so we shouldn't fast forward past them.
657          */
658         if (honorMinPresentInterval &&
659             (pEntry->hwState.minPresentInterval != 0)) {
660             break;
661         }
662 
663         /*
664          * We are "flipping away" from the flip queue entry in current.  Release
665          * it, and replace it with the entry in pEntry.
666          */
667 
668         HsReleaseFlipQueueEntry(pDevEvo, pHsChannel,
669                                 &pHsChannel->flipQueue[layer].current);
670 
671         pHsChannel->flipQueue[layer].current = pEntry->hwState;
672 
673         nvListDel(&pEntry->flipQueueEntry);
674         nvFree(pEntry);
675     }
676 }
677 
678 /*!
679  * Push a new entry to the end of the headSurface channel's flip queue.
680  *
681  * \param[in,out]  pHsChannel  The headSurface channel.
682  * \param[in]      layer       The layer of the flip queue.
683  * \param[in]      pHwState    The hwState to be pushed on the flip queue.
684  */
685 void nvHsPushFlipQueueEntry(
686     NVHsChannelEvoPtr pHsChannel,
687     const NvU8 layer,
688     const NVFlipChannelEvoHwState *pHwState)
689 {
690     NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
691     NVHsChannelFlipQueueEntry *pEntry = nvCalloc(1, sizeof(*pEntry));
692 
693     if (pEntry == NULL) {
694         /*
695          * XXX NVKMS HEADSURFACE TODO: we cannot fail at this point in the call
696          * chain (we've already committed to the flip).  Move the nvCalloc() call
697          * earlier in the call chain to a point where we can fail.
698          */
699         return;
700     }
701 
702     pEntry->hwState = *pHwState;
703 
704     /* Increment the ref counts on the surfaces in the flip queue entry. */
705 
706     HsUpdateFlipQueueEntrySurfaceRefCount(&pEntry->hwState, TRUE);
707 
708     /* "Fast forward" through existing flip queue entries that are ready. */
709 
710     HsFastForwardFlipQueue(pHsChannel, layer,
711                            TRUE /* honorIsReadyCriteria */,
712                            TRUE /* honorMinPresentInterval */);
713 
714     /* Append the new entry. */
715 
716     nvListAppend(&pEntry->flipQueueEntry, pFlipQueue);
717 }
718 
719 /*!
720  * Remove the first entry in the flip queue and return it.
721  *
722  * If the first entry in the flipQueue is ready to be consumed by headSurface,
723  * remove it from the list and return it in the 'pHwState' argument.
724  *
725  * If this function returns TRUE, it is the caller's responsibility to
726  * eventually call
727  *
728  *    HsUpdateFlipQueueEntrySurfaceRefCount(pHwState, FALSE)
729  *
730  * for the returned pHwState.
731  *
732  * \param[in,out]  pHsChannel  The headSurface channel.
733  * \param[in]      layer       The layer of the flip queue.
734  * \param[out]     pHwState    The hwState that was popped off the flip queue.
735  *
736  * \return   Return TRUE if a flip queue entry was popped off the queue and
737  *           copied into pHwState.
738  */
739 static NvBool HsPopFlipQueueEntry(
740     NVHsChannelEvoPtr pHsChannel,
741     const NvU8 layer,
742     NVFlipChannelEvoHwState *pHwState)
743 {
744     NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
745     NVHsChannelFlipQueueEntry *pEntry;
746 
747     if (nvListIsEmpty(pFlipQueue)) {
748         return FALSE;
749     }
750 
751     pEntry = nvListFirstEntry(pFlipQueue,
752                               NVHsChannelFlipQueueEntry,
753                               flipQueueEntry);
754 
755     if (!HsFlipQueueEntryIsReady(pHsChannel, &pEntry->hwState)) {
756         return FALSE;
757     }
758 
759     *pHwState = pEntry->hwState;
760 
761     nvListDel(&pEntry->flipQueueEntry);
762     nvFree(pEntry);
763 
764     return TRUE;
765 }
766 
767 /*!
768  * Update the current flip queue entry for a new headSurface frame.
769  *
770  * To build a new frame of headSurface, we look at the flip queue of each layer.
771  * If there is an entry available, we pop it off the queue and replace .current
772  * with the entry.
773  */
774 static void HsUpdateFlipQueueCurrent(
775     NVHsChannelEvoPtr pHsChannel)
776 {
777     NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo;
778     NvU8 layer;
779 
780     for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
781 
782         NVFlipChannelEvoHwState newCurrent = { };
783 
784         /*
785          * XXX NVKMS HEADSURFACE TODO: fast forward to the last ready flip queue
786          * entry.  Share code with similar functionality in
787          * nvHsPushFlipQueueEntry().
788          */
789 
790         if (!HsPopFlipQueueEntry(pHsChannel, layer, &newCurrent)) {
791             continue;
792         }
793 
794         /*
795          * We have a new flip queue entry to place in current.  Release the old
796          * current flip queue entry, and replace it with the popped entry.
797          */
798         HsReleaseFlipQueueEntry(pDevEvo, pHsChannel,
799                                 &pHsChannel->flipQueue[layer].current);
800 
801         pHsChannel->flipQueue[layer].current = newCurrent;
802     }
803 }
804 
805 /*!
806  * Drain the flip queue on each layer of pHsChannel.
807  *
808  * In preparation to disable headSurface, release the flip queue entry in
809  * .current, as well as all entries in the queue.
810  */
811 void nvHsDrainFlipQueue(
812     NVHsChannelEvoPtr pHsChannel)
813 {
814     NVDevEvoPtr pDevEvo = pHsChannel->pDispEvo->pDevEvo;
815     NvU8 layer;
816 
817     for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
818         NVListRec *pFlipQueue = &pHsChannel->flipQueue[layer].queue;
819 
820         HsReleaseFlipQueueEntry(pDevEvo, pHsChannel,
821                                 &pHsChannel->flipQueue[layer].current);
822 
823         nvkms_memset(&pHsChannel->flipQueue[layer].current, 0,
824                      sizeof(pHsChannel->flipQueue[layer].current));
825 
826         while (!nvListIsEmpty(pFlipQueue)) {
827 
828             NVHsChannelFlipQueueEntry *pEntry =
829                 nvListFirstEntry(pFlipQueue,
830                                  NVHsChannelFlipQueueEntry,
831                                  flipQueueEntry);
832 
833             HsReleaseFlipQueueEntry(pDevEvo, pHsChannel, &pEntry->hwState);
834 
835             nvListDel(&pEntry->flipQueueEntry);
836             nvFree(pEntry);
837         }
838     }
839 }
840 
841 /*!
842  * Return whether all flip queues on this pHsChannel are idle.
843  *
844  * As a side effect, attempt to "fast forward" through flip queue entries, in an
845  * effort to make the flip queues idle.  When fast forwarding, always ignore the
846  * client-requested minPresentInterval.  Optionally (when force == TRUE), also
847  * ignore the "IsReady" check.
848  *
849  * This is intended to be used in two scenarios:
850  *
851  * - First, call nvHsIdleFlipQueue(force=FALSE) in a loop with all other heads
852  *   we are trying to idle.  This should allow semaphore interlocking to
853  *   progress naturally.
854  *
855  * - If that loop times out, call nvHsIdleFlipQueue(force=TRUE), which will
856  *   ignore the IsReady conditions and forcibly make the flip queues idle.
857  */
858 NvBool nvHsIdleFlipQueue(
859     NVHsChannelEvoPtr pHsChannel,
860     NvBool force)
861 {
862     const NvBool honorIsReadyCriteria = !force;
863     NvBool ret = TRUE;
864     NvU8 layer;
865 
866     for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
867 
868         HsFastForwardFlipQueue(pHsChannel, layer,
869                                honorIsReadyCriteria,
870                                FALSE /* honorMinPresentInterval */);
871 
872         if (!nvListIsEmpty(&pHsChannel->flipQueue[layer].queue)) {
873             /* force should always result in an empty flip queue */
874             nvAssert(!force);
875             ret = FALSE;
876         }
877     }
878 
879     return ret;
880 }
881 
882 /*
883  * We use notifiers to know when headSurface frames are presented, so that we
884  * don't render to the visible buffer.
885  */
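/*
 * The notifier memory is one NVHsNotifiersOneSdRec per subdevice.  For each
 * api head it holds a small ring of completion notifiers (see
 * PrepareNextNotifier()), plus one frame semaphore per headSurface buffer,
 * which the flip acquires at NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_DISPLAYABLE
 * and releases back to NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_RENDERABLE (see
 * HsFlipHelper()).
 */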
886 
887 static NvU32 AllocNotifierMemory(
888     const NVDevEvoRec *pDevEvo,
889     NvU32 handle)
890 {
891     NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
892 
893     memAllocParams.owner = NVKMS_RM_HEAP_ID;
894     memAllocParams.size = NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES;
895     memAllocParams.type = NVOS32_TYPE_DMA;
896 
897     memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) |
898                           DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS) |
899                           DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) |
900                           DRF_DEF(OS32, _ATTR, _COHERENCY, _UNCACHED);
901 
902     memAllocParams.flags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN |
903                            NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT |
904                            NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE;
905 
906     memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _ISO, _NO);
907 
908     return nvRmApiAlloc(nvEvoGlobal.clientHandle,
909                        pDevEvo->deviceHandle,
910                        handle,
911                        NV01_MEMORY_LOCAL_USER,
912                        &memAllocParams);
913 }
914 
915 static NvBool MapNotifiers(NVHsDeviceEvoRec *pHsDevice)
916 {
917     NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
918     NVHsNotifiersRec *pNotifiers = &pHsDevice->notifiers;
919     const NvU64 size = NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES;
920     NvU32 sd, ret;
921 
922     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
923         ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
924                                pDevEvo->pSubDevices[sd]->handle,
925                                pNotifiers->rmHandle,
926                                0,
927                                size,
928                                (void **)&pNotifiers->sd[sd].ptr,
929                                0);
930         if (ret != NVOS_STATUS_SUCCESS) {
931             return FALSE;
932         }
933 
934         /*
935          * Intentionally use NVMISC_MEMSET() rather than nvkms_memset(): some
936          * CPU architectures, notably ARM, may fault if streaming stores, like
937          * those in an optimized memset() implementation, are used on a BAR1 mapping.
938          * NVMISC_MEMSET() is conveniently not optimized.
939          */
940         NVMISC_MEMSET((void *)pNotifiers->sd[sd].ptr, 0, size);
941     }
942 
943     return TRUE;
944 }
945 
946 static void UnmapNotifiers(NVHsDeviceEvoRec *pHsDevice)
947 {
948     NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
949     NVHsNotifiersRec *pNotifiers = &pHsDevice->notifiers;
950     NvU32 sd;
951 
952     if (pNotifiers->rmHandle == 0) {
953         return;
954     }
955 
956     for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
957 
958         if (pNotifiers->sd[sd].ptr == NULL) {
959             continue;
960         }
961 
962         nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
963                            pDevEvo->pSubDevices[sd]->handle,
964                            pNotifiers->rmHandle,
965                            pNotifiers->sd[sd].ptr,
966                            0);
967 
968         pNotifiers->sd[sd].ptr = NULL;
969     }
970 }
971 
972 static NvBool RegisterNotifiersWithNvKms(NVHsDeviceEvoRec *pHsDevice)
973 {
974     struct NvKmsRegisterSurfaceParams params = { };
975     NVHsNotifiersRec *pNotifiers = &pHsDevice->notifiers;
976     NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
977     const NvBool requireCtxDma = TRUE;
978 
979     params.request.useFd       = FALSE;
980     params.request.rmClient    = nvEvoGlobal.clientHandle;
981 
982     params.request.layout      = NvKmsSurfaceMemoryLayoutPitch;
983     params.request.format      = NvKmsSurfaceMemoryFormatI8;
984 
985     params.request.isoType = NVKMS_MEMORY_NISO;
986 
987     params.request.planes[0].u.rmObject = pNotifiers->rmHandle;
988     params.request.planes[0].pitch = NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES;
989     params.request.planes[0].rmObjectSizeInBytes =
990         NVKMS_HEAD_SURFACE_NOTIFIERS_SIZE_IN_BYTES;
991 
992     nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, &params,
993                          NvHsMapPermissionsReadWrite);
994 
995     pHsDevice->notifiers.nvKmsHandle = params.reply.surfaceHandle;
996 
997     if (pHsDevice->notifiers.nvKmsHandle == 0) {
998         return FALSE;
999     }
1000 
1001     pHsDevice->notifiers.pSurfaceEvo =
1002         nvHsGetNvKmsSurface(pDevEvo,
1003                             pHsDevice->notifiers.nvKmsHandle,
1004                             requireCtxDma);
1005 
1006     return (pHsDevice->notifiers.pSurfaceEvo != NULL);
1007 }
1008 
1009 static void AssignNIsoFormat(NVHsDeviceEvoRec *pHsDevice)
1010 {
1011     const NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
1012 
1013     if (pDevEvo->caps.validNIsoFormatMask &
1014         NVBIT(NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) {
1015         /* If available, use the "nvdisplay" format. */
1016         pHsDevice->notifiers.nIsoFormat = NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY;
1017     } else {
1018         /* Otherwise, use the "legacy" format. */
1019         nvAssert((pDevEvo->caps.validNIsoFormatMask &
1020                   NVBIT(NVKMS_NISO_FORMAT_LEGACY)) != 0);
1021         pHsDevice->notifiers.nIsoFormat = NVKMS_NISO_FORMAT_LEGACY;
1022     }
1023 }
1024 
1025 static NvBool AllocNotifiers(NVHsDeviceEvoRec *pHsDevice)
1026 {
1027     NvU32 ret;
1028     NVDevEvoRec *pDevEvo;
1029 
1030     pDevEvo = pHsDevice->pDevEvo;
1031 
1032     pHsDevice->notifiers.rmHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
1033 
1034     if (pHsDevice->notifiers.rmHandle == 0) {
1035         goto fail;
1036     }
1037 
1038     ret = AllocNotifierMemory(pHsDevice->pDevEvo, pHsDevice->notifiers.rmHandle);
1039 
1040     if (ret != NVOS_STATUS_SUCCESS) {
1041         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
1042                            pHsDevice->notifiers.rmHandle);
1043         pHsDevice->notifiers.rmHandle = 0;
1044 
1045         goto fail;
1046     }
1047 
1048     if (!MapNotifiers(pHsDevice)) {
1049         goto fail;
1050     }
1051 
1052     if (!RegisterNotifiersWithNvKms(pHsDevice)) {
1053         goto fail;
1054     }
1055 
1056     AssignNIsoFormat(pHsDevice);
1057 
1058     return TRUE;
1059 
1060 fail:
1061     FreeNotifiers(pHsDevice);
1062 
1063     return FALSE;
1064 }
1065 
1066 static void FreeNotifiers(NVHsDeviceEvoRec *pHsDevice)
1067 {
1068     NVDevEvoRec *pDevEvo;
1069     NVHsNotifiersRec *pNotifiers;
1070 
1071     if (pHsDevice == NULL) {
1072         return;
1073     }
1074 
1075     pDevEvo = pHsDevice->pDevEvo;
1076     pNotifiers = &pHsDevice->notifiers;
1077 
1078     if (pNotifiers->nvKmsHandle != 0) {
1079         nvEvoUnregisterSurface(pDevEvo,
1080                                pDevEvo->pNvKmsOpenDev,
1081                                pNotifiers->nvKmsHandle,
1082                                FALSE /* skipUpdate */);
1083         pNotifiers->pSurfaceEvo = NULL;
1084     }
1085 
1086     UnmapNotifiers(pHsDevice);
1087 
1088     if (pHsDevice->notifiers.rmHandle != 0) {
1089         nvRmApiFree(nvEvoGlobal.clientHandle,
1090                     pDevEvo->deviceHandle,
1091                     pHsDevice->notifiers.rmHandle);
1092 
1093         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
1094                            pHsDevice->notifiers.rmHandle);
1095         pHsDevice->notifiers.rmHandle = 0;
1096     }
1097 }
1098 
1099 /*!
1100  * Reset headSurface notifiers for this channel to NOT_BEGUN.
1101  *
1102  * By the time the modeset completes to transition into a new headSurface
1103  * configuration, all headSurface flips from the previous configuration should
1104  * be completed.  But that would leave at least one notifier set to FINISHED.
1105  *
1106  * Initialize all notifiers for this channel to NOT_BEGUN, so that
1107  * HsVBlankCallbackDeferredWork() does not interpret notifier state from the
1108  * previous headSurface configuration as applying to the new headSurface
1109  * configuration.
1110  */
1111 static void HsInitNotifiers(
1112     NVHsDeviceEvoRec *pHsDevice,
1113     NVHsChannelEvoRec *pHsChannel)
1114 {
1115     const NvU32 apiHead = pHsChannel->apiHead;
1116     const NvU32 sd = pHsChannel->pDispEvo->displayOwner;
1117     NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
1118     NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
1119     NvU8 slot, buffer;
1120 
1121     for (slot = 0; slot < NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD; slot++) {
1122         nvKmsResetNotifier(pHsNotifiers->nIsoFormat,
1123                            FALSE /* overlay */,
1124                            slot,
1125                            pHsNotifiersOneSd->notifier[apiHead]);
1126     }
1127 
1128     for (buffer = 0; buffer < NVKMS_HEAD_SURFACE_MAX_BUFFERS; buffer++) {
1129         nvKmsResetSemaphore(pHsNotifiers->nIsoFormat,
1130                             buffer, pHsNotifiersOneSd->semaphore[apiHead],
1131                             NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_RENDERABLE);
1132     }
1133 }
1134 
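/*!
 * Reset this channel's notifiers and frame semaphores.  For swapgroup
 * configurations, frame completion is instead tracked through ViewPortIn
 * flips; that tracking is not yet initialized here (see the XXX below).
 */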
1135 void nvHsInitNotifiers(
1136     NVHsDeviceEvoRec *pHsDevice,
1137     NVHsChannelEvoRec *pHsChannel)
1138 {
1139     if (pHsChannel->config.neededForSwapGroup) {
1140         /*
1141          * XXX NVKMS HEADSURFACE TODO: initialize tracking for ViewPortIn
1142          * flips.
1143          */
1144     } else {
1145         HsInitNotifiers(pHsDevice, pHsChannel);
1146     }
1147 }
1148 
1149 /*!
1150  * For the given head and sd, prepare the next notifier:
1151  *
1152  * - Look up the next notifier to use.
1153  * - Clear that notifier to STATUS_NOT_BEGUN.
1154  * - Update the slot bookkeeping for the (head,sd) pair.
1155  * - Return the dword offset of the notifier.
1156  */
1157 static NvU16 PrepareNextNotifier(
1158     NVHsNotifiersRec *pHsNotifiers,
1159     NvU32 sd,
1160     NvU32 apiHead)
1161 {
1162     const NvU32 notifierSize =
1163         nvKmsSizeOfNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */);
1164 
1165     const NvU8 nextSlot = pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot;
1166 
1167     NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
1168 
1169     const NvU8 *headBase = pHsNotifiersOneSd->notifier[apiHead];
1170 
1171     const NvU8 offsetInBytes =
1172         (headBase - ((const NvU8 *) pHsNotifiersOneSd)) +
1173         (notifierSize * nextSlot);
1174 
1175     nvAssert(notifierSize <= NVKMS_HEAD_SURFACE_MAX_NOTIFIER_SIZE);
1176 
1177     nvKmsResetNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */,
1178                        nextSlot, pHsNotifiersOneSd->notifier[apiHead]);
1179 
1180     pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot =
1181         (nextSlot + 1) % NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD;
1182 
1183     return offsetInBytes / 4;
1184 }
1185 
1186 /*!
1187  * Helper function for nvHsFlip(); populate NvKmsFlipRequest and call
1188  * nvFlipEvo().
1189  *
1190  * \param[in,out]  pHsDevice         The headSurface device.
1191  * \param[in,out]  pHsChannel        The headSurface channel.
1192  * \param[in]      perEyeStereoFlip  Whether to flip per-eye.
1193  * \param[in]      surfaceHandles    The surfaces to flip to.
1194  * \param[in]      isFirstFlip       Whether this is the first flip after
1195  *                                   enabling headsurface.
1196  * \param[in]      allowFlipLock     Whether to allow fliplock for this flip.
1197  */
1198 static void HsFlipHelper(
1199     NVHsDeviceEvoRec *pHsDevice,
1200     NVHsChannelEvoRec *pHsChannel,
1201     const NvBool perEyeStereoFlip,
1202     const NvKmsSurfaceHandle surfaceHandles[NVKMS_MAX_EYES],
1203     const NvBool isFirstFlip,
1204     const NvBool allowFlipLock)
1205 {
1206     NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
1207     struct NvKmsFlipRequest *pRequest;
1208     struct NvKmsFlipParams *pFlipParams;
1209     struct NvKmsFlipCommonParams *pParamsOneHead;
1210     NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
1211     const NvU32 sd = pHsChannel->pDispEvo->displayOwner;
1212     const NvU32 apiHead = pHsChannel->apiHead;
1213     NvBool ret;
1214 
1215     /*
1216      * Use a preallocated NvKmsFlipParams, so that we don't have to allocate
1217      * memory here (and deal with allocation failure).
1218      */
1219     pFlipParams = &pHsChannel->scratchParams;
1220 
1221     nvkms_memset(pFlipParams, 0, sizeof(*pFlipParams));
1222 
1223     pRequest = &pFlipParams->request;
1224 
1225     pParamsOneHead = &pRequest->sd[sd].head[apiHead];
1226 
1227     pRequest->commit = NV_TRUE;
1228 
1229     if (isFirstFlip) {
1230         /*
1231          * For the first flip after enabling headsurface
1232          * (NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME), the old viewport
1233          * (saved in HsConfigInitSwapGroupOneHead or HsConfigInitModesetOneHead
1234          * and restored in HsConfigRestoreMainLayerSurface) which may specify an
1235          * and restored in HsConfigRestoreMainLayerSurface), which may specify an
1236          * offset within a multi-head surface, needs to be overridden to the
1237          */
1238         pParamsOneHead->viewPortIn.specified = TRUE;
1239         pParamsOneHead->viewPortIn.point.x = 0;
1240         pParamsOneHead->viewPortIn.point.y = 0;
1241 
1242         pParamsOneHead->cursor.imageSpecified = TRUE;
1243 
1244         pParamsOneHead->cursor.positionSpecified = TRUE;
1245     }
1246 
1247     pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.handle[NVKMS_LEFT] =
1248         surfaceHandles[NVKMS_LEFT];
1249     pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.handle[NVKMS_RIGHT] =
1250         surfaceHandles[NVKMS_RIGHT];
1251     pParamsOneHead->layer[NVKMS_MAIN_LAYER].surface.specified = TRUE;
1252     pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.useSyncpt = FALSE;
1253     pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE;
1254     pParamsOneHead->layer[NVKMS_MAIN_LAYER].tearing = FALSE;
1255     pParamsOneHead->layer[NVKMS_MAIN_LAYER].perEyeStereoFlip = perEyeStereoFlip;
1256     pParamsOneHead->layer[NVKMS_MAIN_LAYER].minPresentInterval = 1;
1257     pParamsOneHead->layer[NVKMS_MAIN_LAYER].csc.specified = TRUE;
1258 
1259     /*
1260      * XXX NVKMS HEADSURFACE TODO: Work out in which cases we should use the
1261      * head's current CSC.
1262      */
1263     pParamsOneHead->layer[NVKMS_MAIN_LAYER].csc.matrix = NVKMS_IDENTITY_CSC_MATRIX;
1264 
1265     pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = TRUE;
1266 
1267     pRequest->sd[sd].requestedHeadsBitMask = NVBIT(apiHead);
1268 
1269     if (surfaceHandles[NVKMS_LEFT] != 0) {
1270         NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
1271             nvGetSurfaceHandlesFromOpenDev(pDevEvo->pNvKmsOpenDev);
1272         NVSurfaceEvoPtr pSurfaceEvo =
1273             nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, surfaceHandles[NVKMS_LEFT]);
1274         struct NvKmsSemaphore *pSema;
1275 
1276         pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.val.surface.surfaceHandle =
1277             pHsNotifiers->nvKmsHandle;
1278         pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.val.surface.format =
1279             pHsNotifiers->nIsoFormat;
1280         pParamsOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.val.surface.offsetInWords =
1281             PrepareNextNotifier(pHsNotifiers, sd, apiHead);
1282 
1283         pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.useSyncpt = FALSE;
1284         pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE;
1285 
1286         pSema = &pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.acquire;
1287         pSema->surface.surfaceHandle = pHsNotifiers->nvKmsHandle;
1288         pSema->surface.format = pHsNotifiers->nIsoFormat;
1289         pSema->surface.offsetInWords =
1290             HsGetFrameSemaphoreOffsetInWords(pHsChannel);
1291         pSema->value = NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_DISPLAYABLE;
1292 
1293         pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.release =
1294             pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.acquire;
1295 
1296         pParamsOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.u.semaphores.release.value =
1297             NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_RENDERABLE;
1298 
1299         pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE;
1300         pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val.width =
1301             pSurfaceEvo->widthInPixels;
1302         pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val.height =
1303             pSurfaceEvo->heightInPixels;
1304 
1305         pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE;
1306         pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeOut.val =
1307             pParamsOneHead->layer[NVKMS_MAIN_LAYER].sizeIn.val;
1308     }
1309 
1310     ret = nvFlipEvo(pDevEvo,
1311                     pDevEvo->pNvKmsOpenDev,
1312                     pRequest,
1313                     &pFlipParams->reply,
1314                     FALSE /* skipUpdate */,
1315                     allowFlipLock);
1316 
1317     if (!ret) {
1318         nvAssert(!"headSurface flip failed?");
1319     }
1320 }
1321 
1322 /*!
1323  * Flip to the headSurface buffer specified by index.
1324  *
1325  * If pHsOneHeadAllDisps == NULL, disable headSurface by flipping to NULL.
1326  *
1327  * \param[in,out]  pHsDevice           The headSurface device.
1328  * \param[in,out]  pHsChannel          The headSurface channel.
1329  * \param[in]      eyeMask             The mask of which eyes to flip.
1330  * \param[in]      perEyeStereoFlip    Whether to flip per-eye.
1331  * \param[in]      index               Which buffer to flip to.
1332  * \param[in]      pHsOneHeadAllDisps  The headSurface config.
1333  * \param[in]      isFirstFlip         Whether this is the first flip after
1334  *                                     enabling headsurface.
1335  * \param[in]      allowFlipLock       Whether to allow fliplock for this flip.
1336  */
1337 void nvHsFlip(
1338     NVHsDeviceEvoRec *pHsDevice,
1339     NVHsChannelEvoRec *pHsChannel,
1340     const NvU8 eyeMask,
1341     const NvBool perEyeStereoFlip,
1342     const NvU8 index,
1343     const NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps,
1344     const NvBool isFirstFlip,
1345     const NvBool allowFlipLock)
1346 {
1347     NvKmsSurfaceHandle surfaceHandles[NVKMS_MAX_EYES] = { 0, 0 };
1348     const NvBool enable = (pHsOneHeadAllDisps != NULL);
1349 
1350     if (enable) {
1351         NvU8 eye;
1352 
1353         for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
1354 
1355             const NVHsSurfaceRec *pHsSurface =
1356                 pHsOneHeadAllDisps->surfaces[eye][index].pSurface;
1357 
1358             if ((eyeMask & NVBIT(eye)) == 0) {
1359                 continue;
1360             }
1361 
1362             nvAssert(pHsSurface != NULL);
1363 
1364             surfaceHandles[eye] = pHsSurface->nvKmsHandle;
1365             nvAssert(surfaceHandles[eye] != 0);
1366         }
1367     }
1368 
1369     HsFlipHelper(pHsDevice,
1370                  pHsChannel,
1371                  perEyeStereoFlip,
1372                  surfaceHandles,
1373                  isFirstFlip,
1374                  allowFlipLock);
1375 
1376     if (!enable) {
1377         /* XXX NVKMS HEADSURFACE TODO: disable stereo toggling, if necessary. */
1378     }
1379 }
1380 
1381 /*!
1382  * "Flip" using the core channel's ViewPortIn.
1383  */
1384 static void HsFlipViewPortIn(NVHsChannelEvoPtr pHsChannel, NvU16 x, NvU16 y)
1385 {
1386     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
1387 
1388     /*
1389      * XXX NVKMS HEADSURFACE TODO: use the panning NVKMS API request, rather
1390      * than call the low-level SetViewportPointIn() HAL proc.  But, to do that,
1391      * we would need to make the pan request much lighter weight, so that it is
1392      * usable for our needs here.
1393      */
1394     nvApiHeadSetViewportPointIn(pDispEvo, pHsChannel->apiHead, x, y);
1395 
1396     /*
1397      * XXX NVKMS HEADSURFACE TODO: Add tracking so that IsPreviousFrameDone()
1398      * can know if this update latched.
1399      */
1400 }
1401 
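/*!
 * Choose the source eye and pixel shift mode for rendering one destination
 * eye.  In NVKMS_PIXEL_SHIFT_8K mode, both destination eyes sample from the
 * LEFT source eye with complementary 4K pixel shift offsets (bottom-right
 * for the left eye, top-left for the right eye); otherwise the source eye
 * and the configured pixel shift are used unchanged.
 */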
1402 static void HsPickSrcEyeAndPixelShift(
1403     const NVHsChannelEvoRec *pHsChannel,
1404     const NvU8 dstEye,
1405     NvU8 *pSrcEye,
1406     enum NvKmsPixelShiftMode *pPixelShift)
1407 {
1408     if (pHsChannel->config.pixelShift == NVKMS_PIXEL_SHIFT_8K) {
1409 
1410         if (dstEye == NVKMS_LEFT) {
1411             *pSrcEye = NVKMS_LEFT;
1412             *pPixelShift = NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT;
1413         }
1414 
1415         if (dstEye == NVKMS_RIGHT) {
1416             *pSrcEye = NVKMS_LEFT;
1417             *pPixelShift = NVKMS_PIXEL_SHIFT_4K_TOP_LEFT;
1418         }
1419     } else {
1420         *pSrcEye = dstEye;
1421         *pPixelShift = pHsChannel->config.pixelShift;
1422     }
1423 }
1424 
1425 /*!
1426  * Structure to drive the behavior of nvHsNextFrame().
1427  */
1428 struct NvHsNextFrameWorkArea {
1429 
1430     /*
1431      * The range of surface indices to render to.  Indices here are used as the
1432      * 'index' in NVHsStateOneHeadAllDisps::surfaces[eye][index]::pSurface.
1433      */
1434     NvU8 dstBufferIndexStart;
1435     NvU8 dstBufferIndexEnd;
1436 
1437     /* Whether to flip to the surface indicated by pHsChannel->nextIndex. */
1438     NvBool doFlipToNextIndex;
1439 
1440     /* Whether to allow fliplock on the flip to the next surface. */
1441     NvBool allowFlipLock;
1442 
1443     /* Whether to flip to the destRect region of the surface. */
1444     NvBool doFlipToDestRect;
1445 
1446     /* Whether to increment nextIndex and/or nextOffset. */
1447     NvBool doIncrementNextIndex;
1448     NvBool doIncrementNextOffset;
1449 
1450     /*
1451      * On which dstBuffer indices to honor the SwapGroup's exclusive
1452      * clip list.
1453      */
1454     NvU8 honorSwapGroupClipListBufferMask;
1455 
1456     /* The region within the surface to render into.  */
1457     struct NvKmsRect destRect;
1458 
1459     /*
1460      * If perEyeStereo::override == TRUE, use perEyeStereo::value to control the
1461      * headSurface flip.
1462      */
1463     struct {
1464         NvBool override;
1465         NvBool value;
1466     } perEyeStereo;
1467 };
1468 
1469 /*!
1470  * Assign an NvHsNextFrameWorkArea structure, to drive execution of
1471  * nvHsNextFrame().
1472  */
1473 static struct NvHsNextFrameWorkArea HsAssignNextFrameWorkArea(
1474     const NVHsChannelEvoRec *pHsChannel,
1475     const NvHsNextFrameRequestType requestType)
1476 {
1477     struct NvHsNextFrameWorkArea workArea = { };
1478     NvU8 destOffset;
1479 
1480     if ((requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME) ||
1481         (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK)) {
1482 
1483         /*
1484          * The swapgroup first frame renders and flips both core and base to
1485          * the back index double height headsurface swapgroup surface, just
1486          * like a non-swapgroup headsurface flip.
1487          */
1488         if (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME ||
1489             !pHsChannel->config.neededForSwapGroup) {
1490 
1491             /*
1492              * In the non-SwapGroup case, headSurface should:
1493              * - only render to the 'nextIndex' surface,
1494              * - flip to the nextIndex surface,
1495              * - increment nextIndex.
1496              */
1497             workArea.dstBufferIndexStart   = pHsChannel->nextIndex;
1498             workArea.dstBufferIndexEnd     = pHsChannel->nextIndex;
1499 
1500             workArea.doFlipToNextIndex     = TRUE;
1501             workArea.allowFlipLock         = FALSE;
1502             workArea.doFlipToDestRect      = FALSE;
1503 
1504             workArea.doIncrementNextIndex  = TRUE;
1505             workArea.doIncrementNextOffset = FALSE;
1506 
1507         } else {
1508 
1509             /*
1510              * In the SwapGroup case, headSurface should:
1511              * - render to both surfaces,
1512              * - flip to the nextOffset,
1513              * - increment nextOffset.
1514              */
1515             workArea.dstBufferIndexStart   = 0;
1516             workArea.dstBufferIndexEnd     = NVKMS_HEAD_SURFACE_MAX_BUFFERS - 1;
1517 
1518             workArea.doFlipToNextIndex     = FALSE;
1519 
1520             workArea.allowFlipLock         = FALSE;
1521             workArea.doFlipToDestRect      = TRUE;
1522 
1523             workArea.doIncrementNextIndex  = FALSE;
1524             workArea.doIncrementNextOffset = TRUE;
1525 
1526             /*
1527              * For VBLANK-initiated frames of SwapGroup headSurface, we want the
1528              * surface indicated by pHsChannel->nextIndex to contain the new
1529              * SwapGroup content, and the non-nextIndex surface to contain the
1530              * old SwapGroup content.
1531              *
1532              * Therefore, set the non-nextIndex bit(s) in
1533              * honorSwapGroupClipListBufferMask, so that we leave the old
1534              * SwapGroup content in that case.  In all other cases, we will get
1535              * the new SwapGroup content.
1536              */
1537             workArea.honorSwapGroupClipListBufferMask =
1538                 ~NVBIT(pHsChannel->nextIndex);
1539         }
1540 
1541     } else {
1542         /*
1543          * SWAP_GROUP_READY-initiated headSurface frames are special: we render
1544          * a new frame to the nextIndex surface, using the previous destRect
1545          * (i.e., the location that ViewPortIn will use at the next vblank).
1546          * However, the flip may take an indefinitely long time to arrive: it
1547          * will wait for the rest of the SwapBarrier.  That is okay, because
1548          * nvHsNextFrame(VBLANK) calls between now and the flip actually
1549          * occurring will keep updating both surfaces, using ViewPortIn to
1550          * "flip" to the new content.
1551          *
1552          * Therefore, we do _not_ increment nextIndex here.  Instead, we update
1553          * nextIndex when we find that the flip completed.  Until then, we keep
1554          * nextIndex the same, so that nvHsNextFrame(VBLANK) frames know which
1555          * surface should receive the new SwapGroup content.
1556          */
1557 
1558         const NVSwapGroupRec *pSwapGroup =
1559             pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead];
1560 
1561         nvAssert(requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY);
1562         nvAssert(pHsChannel->config.neededForSwapGroup);
1563 
1564         workArea.dstBufferIndexStart   = pHsChannel->nextIndex;
1565         workArea.dstBufferIndexEnd     = pHsChannel->nextIndex;
1566 
1567         workArea.doFlipToNextIndex     = TRUE;
1568         workArea.allowFlipLock         = TRUE;
1569         workArea.doFlipToDestRect      = FALSE;
1570 
1571         workArea.doIncrementNextIndex  = FALSE;
1572         workArea.doIncrementNextOffset = FALSE;
1573 
1574         workArea.perEyeStereo.override = TRUE;
1575         workArea.perEyeStereo.value    =
1576             nvHsSwapGroupGetPerEyeStereo(pSwapGroup);
1577     }
1578 
1579     /*
1580      * Pick the rect within the destination surface that headSurface should
1581      * render into.
1582      *
1583      * For normal (!neededForSwapGroup) use, this should be simply:
1584      *   { 0, 0,                frameSize.width, frameSize.height }
1585      * When SwapGroups are enabled, the headSurface is allocated at
1586      * double height and we alternate between
1587      *   { 0, 0,                frameSize.width, frameSize.height }
1588      *   { 0, frameSize.height, frameSize.width, frameSize.height }
1589      * and use ViewPortIn to flip to the updated half.
1590      *
1591      * The 'nextOffset' field tracks which half headSurface should use for the
1592      * next frame.
1593      *
1594      * The exception to the above is SWAP_GROUP_READY: in that case, we will
1595      * flip between surfaces, but not change ViewPortIn, so we want to use the
1596      * _previous_ nextOffset value.
1597      */
1598     if (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY) {
1599         destOffset = HsGetPreviousOffset(pHsChannel);
1600     } else {
1601         destOffset = pHsChannel->nextOffset;
1602     }
1603 
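    /*
     * Illustrative example: with a 1920x1080 frameSize and destOffset == 1
     * (the bottom half of a double-height SwapGroup surface), destRect below
     * is { x = 0, y = 1080, width = 1920, height = 1080 }.
     */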
1604     workArea.destRect.x      = 0;
1605     workArea.destRect.y      = pHsChannel->config.frameSize.height *
1606                                destOffset;
1607     workArea.destRect.width  = pHsChannel->config.frameSize.width;
1608     workArea.destRect.height = pHsChannel->config.frameSize.height;
1609 
1610     return workArea;
1611 }
1612 
1613 /*!
1614  * Produce the next headSurface frame.
1615  *
1616  * Render the frame, flip to it, and update next{Index,Offset} bookkeeping
1617  * as necessary.
1618  *
1619  * \param[in,out]  pHsDevice   The device to render on.
1620  * \param[in,out]  pHsChannel  The channel to use for rendering.
1621  * \param[in]      requestType This indicates the type of frame behavior
1622  *                             desired by the caller: when FIRST_FRAME, we need
1623  *                             to populate the surface in the core channel on
1624  *                             pre-NVDisplay.
1625  */
1626 void nvHsNextFrame(
1627     NVHsDeviceEvoPtr pHsDevice,
1628     NVHsChannelEvoPtr pHsChannel,
1629     const NvHsNextFrameRequestType requestType)
1630 {
1631     const NVDevEvoRec *pDevEvo = pHsDevice->pDevEvo;
1632     const NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps =
1633         &pDevEvo->apiHeadSurfaceAllDisps[pHsChannel->apiHead];
1634     NvBool perEyeStereoFlip = FALSE;
1635     NvU8 dstEye;
1636     NvU8 eyeMask = 0;
1637 
1638     struct NvHsNextFrameWorkArea workArea =
1639         HsAssignNextFrameWorkArea(pHsChannel, requestType);
1640 
1641     HsUpdateFlipQueueCurrent(pHsChannel);
1642 
1643     for (dstEye = NVKMS_LEFT; dstEye < NVKMS_MAX_EYES; dstEye++) {
1644 
1645         const NVSurfaceEvoRec *pSurfaceEvo[NVKMS_MAX_LAYERS_PER_HEAD];
1646         NvBool surfacesPresent = FALSE;
1647         NvU8 layer, srcEye = dstEye;
1648         NvU8 dstBufferIndex;
1649         enum NvKmsPixelShiftMode pixelShift = pHsChannel->config.pixelShift;
1650         NvBool ret;
1651 
1652         HsPickSrcEyeAndPixelShift(pHsChannel, dstEye, &srcEye, &pixelShift);
1653 
1654         for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
1655             pSurfaceEvo[layer] =
1656                 pHsChannel->flipQueue[layer].current.pSurfaceEvo[srcEye];
1657 
1658             surfacesPresent = surfacesPresent || (pSurfaceEvo[layer] != NULL);
1659 
1660             perEyeStereoFlip = perEyeStereoFlip ||
1661                 pHsChannel->flipQueue[layer].current.perEyeStereoFlip;
1662         }
1663 
1664         /*
1665          * If there are no surfaces present for this srcEye, and the dstEye is
1666          * not LEFT, don't render it.
1667          *
1668          * This condition is limited to LEFT because:
1669          * - We need to perform _a_ flip even if no source surface is provided.
1670          * - We don't want to perform more rendering than absolutely
1671          *   necessary.
1672          */
1673         if (!surfacesPresent && (dstEye != NVKMS_LEFT)) {
1674             continue;
1675         }
1676 
1677         for (dstBufferIndex = workArea.dstBufferIndexStart;
1678              dstBufferIndex <= workArea.dstBufferIndexEnd;
1679              dstBufferIndex++) {
1680 
1681             NvU8 thisEyeMask = 0;
1682             const NvBool honorSwapGroupClipList =
1683                 !!(workArea.honorSwapGroupClipListBufferMask &
1684                    NVBIT(dstBufferIndex));
1685 
1686             ret = nvHs3dRenderFrame(pHsChannel,
1687                                     requestType,
1688                                     honorSwapGroupClipList,
1689                                     dstEye,
1690                                     dstBufferIndex,
1691                                     pixelShift,
1692                                     workArea.destRect,
1693                                     pSurfaceEvo);
1694             /*
1695              * Record which eyes we've rendered, so that we only flip those
1696              * eyes.
1697              *
1698              * In the case that we're looping over multiple buffer indices, we
1699              * should get the same result across buffers.
1700              */
1701             if (ret) {
1702                 thisEyeMask = NVBIT(dstEye);
1703             }
1704 
1705             if (dstBufferIndex != workArea.dstBufferIndexStart) {
1706                 nvAssert((eyeMask & NVBIT(dstEye)) ==
1707                          (thisEyeMask & NVBIT(dstEye)));
1708             }
1709 
1710             eyeMask |= thisEyeMask;
1711         }
1712     }
1713 
1714     if (workArea.doFlipToNextIndex) {
1715 
1716         if (workArea.perEyeStereo.override) {
1717             perEyeStereoFlip = workArea.perEyeStereo.value;
1718         }
1719 
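        /*
         * Release this frame's semaphore with the DISPLAYABLE value before
         * requesting the flip; the flip programmed below is expected to
         * synchronize against this semaphore, so that scanout does not begin
         * before rendering has completed.
         */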
1720         nvHs3dReleaseSemaphore(
1721             pHsChannel,
1722             pHsDevice->notifiers.pSurfaceEvo,
1723             pHsDevice->notifiers.nIsoFormat,
1724             HsGetFrameSemaphoreOffsetInWords(pHsChannel),
1725             NVKMS_HEAD_SURFACE_FRAME_SEMAPHORE_DISPLAYABLE,
1726             FALSE /* allPreceedingReads */);
1727 
1728         nvHsFlip(
1729             pHsDevice,
1730             pHsChannel,
1731             eyeMask,
1732             perEyeStereoFlip,
1733             pHsChannel->nextIndex,
1734             pHsOneHeadAllDisps,
1735             requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_FIRST_FRAME,
1736             workArea.allowFlipLock);
1737         HsIncrementFrameSemaphoreIndex(pHsChannel);
1738 
1739         // Record fullscreen/non-fullscreen swapgroup flip counts
1740         const NVSwapGroupRec *pSwapGroup =
1741             pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead];
1742 
1743         if (pSwapGroup) {
1744             HsProcFsRecordFullscreenSgFrames(pHsChannel,
1745                                              pSwapGroup->swapGroupIsFullscreen);
1746         }
1747 
1748         // Record the time of the last flip originating from client update
1749         if (requestType == NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY) {
1750             pHsChannel->lastHsClientFlipTimeUs = nvkms_get_usec();
1751         }
1752     }
1753 
1754     if (workArea.doFlipToDestRect) {
1755         // Viewport fake flips are only used in swapgroup configurations.
1756         nvAssert(pHsChannel->config.neededForSwapGroup);
1757 
1758         if (pHsChannel->usingRgIntrForSwapGroups) {
1759             nvHs3dPushPendingViewportFlip(pHsChannel);
1760         } else {
1761             HsFlipViewPortIn(pHsChannel,
1762                              workArea.destRect.x, workArea.destRect.y);
1763         }
1764     }
1765 
1766     if (workArea.doIncrementNextIndex) {
1767         HsIncrementNextIndex(pHsDevice, pHsChannel);
1768     }
1769 
1770     if (workArea.doIncrementNextOffset) {
1771         HsIncrementNextOffset(pHsDevice, pHsChannel);
1772     }
1773 }
1774 
1775 /*!
1776  * In response to a non-stall interrupt, check if a headsurface channel has
1777  * completed a frame of non-swapgroup headsurface rendering and kick off a
1778  * viewport flip to the offset that was used for that rendering.
1779  */
1780 void nvHsProcessPendingViewportFlips(NVDevEvoPtr pDevEvo)
1781 {
1782     NVDispEvoPtr pDispEvo;
1783     NvU32 dispIndex;
1784 
1785     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
1786         NvU32 apiHead;
1787         for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) {
1788             NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead];
1789             NvU32 lastRenderedOffset;
1790 
1791             if (pHsChannel == NULL) {
1792                 continue;
1793             }
1794 
1795             lastRenderedOffset = nvHs3dLastRenderedOffset(pHsChannel);
1796 
1797             /*
1798              * If this channel is marked as having kicked off a frame of
1799              * rendering, and the semaphore write of the render offset to
1800              * NVKMS_HEADSURFACE_VIEWPORT_OFFSET_SEMAPHORE_INDEX has completed,
1801              * then this channel is ready to make a viewport flip to that
1802              * offset.
1803              */
1804             if (pHsChannel->viewportFlipPending &&
1805                 (lastRenderedOffset == HsGetPreviousOffset(pHsChannel))) {
1806 
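                /*
                 * Flip ViewPortIn to the half of the double-height surface
                 * that was just rendered: y = offset * frame height.
                 */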
1807                 HsFlipViewPortIn(pHsChannel, 0 /* x */,
1808                                  lastRenderedOffset *
1809                                  pHsChannel->config.frameSize.height);
1810                 pHsChannel->viewportFlipPending = FALSE;
1811             }
1812         }
1813     }
1814 }
1815 
1816 /*!
1817  * Record the current scanline, for procfs statistics reporting.
1818  */
1819 static void HsProcFsRecordScanline(
1820     const NVDispEvoRec *pDispEvo,
1821     const NvU32 head)
1822 {
1823 #if NVKMS_PROCFS_ENABLE
1824     const NvU32 apiHead = nvHardwareHeadToApiHead(head);
1825     const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
1826     NVHsChannelEvoRec *pHsChannel = pDispEvo->pHsChannel[apiHead];
1827     NvU16 scanLine = 0;
1828     NvBool inBlankingPeriod = FALSE;
1829 
1830     if (pHsChannel->statistics.scanLine.pHistogram == NULL) {
1831         return;
1832     }
1833 
1834     pDevEvo->hal->GetScanLine(pDispEvo, head, &scanLine, &inBlankingPeriod);
1835 
1836     if (inBlankingPeriod) {
1837         pHsChannel->statistics.scanLine.nInBlankingPeriod++;
1838     } else {
1839         pHsChannel->statistics.scanLine.nNotInBlankingPeriod++;
1840 
1841         if (scanLine <= pHsChannel->statistics.scanLine.vVisible) {
1842             pHsChannel->statistics.scanLine.pHistogram[scanLine]++;
1843         } else {
1844             nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR,
1845                 "HsProcFsRecordScanline(): scanLine (%d) > vVisible (%d)",
1846                 scanLine, pHsChannel->statistics.scanLine.vVisible);
1847         }
1848     }
1849 #endif /* NVKMS_PROCFS_ENABLE */
1850 }
1851 
1852 static void HsProcFsRecordPreviousFrameNotDone(
1853     NVHsChannelEvoPtr pHsChannel)
1854 {
1855 #if NVKMS_PROCFS_ENABLE
1856     pHsChannel->statistics.nPreviousFrameNotDone++;
1857 #endif
1858 }
1859 
1860 static void HsProcFsRecordFullscreenSgFrames(
1861     NVHsChannelEvoPtr pHsChannel,
1862     NvBool isFullscreen)
1863 {
1864 #if NVKMS_PROCFS_ENABLE
1865     if (isFullscreen) {
1866         pHsChannel->statistics.nFullscreenSgFrames++;
1867     } else {
1868         pHsChannel->statistics.nNonFullscreenSgFrames++;
1869     }
1870 #endif /* NVKMS_PROCFS_ENABLE */
1871 }
1872 
1873 static void HsProcFsRecordOmittedNonSgHsUpdate(
1874     NVHsChannelEvoPtr pHsChannel)
1875 {
1876 #if NVKMS_PROCFS_ENABLE
1877     pHsChannel->statistics.nOmittedNonSgHsUpdates++;
1878 #endif
1879 }
1880 
1881 /*!
1882  * Determine if we've flipped to the previous frame.
1883  *
1884  * When we program the flip method, we reset the notifier to NOT_BEGUN, and when
1885  * EVO performs the flip, it changes the notifier to BEGUN.
1886  *
1887  * Find the notifier slot for the previous frame, parse its notifier, and return
1888  * whether it is BEGUN.
1889  */
1890 static NvBool IsPreviousFlipDone(NVHsChannelEvoPtr pHsChannel)
1891 {
1892     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
1893     const NvU32 apiHead = pHsChannel->apiHead;
1894     const NvU32 sd = pDispEvo->displayOwner;
1895     const NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
1896     const NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
1897     const NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
1898     const NvU8 nextSlot = pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot;
1899     struct nvKmsParsedNotifier parsed = { };
1900 
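    /*
     * nextSlot is where the notifier for the next flip will be written, so
     * the notifier for the most recently programmed flip is one slot back,
     * wrapping within NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD.
     */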
1901     const NvU8 prevSlot =
1902         A_minus_b_with_wrap_U8(nextSlot, 1,
1903                                NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD);
1904 
1905     nvKmsParseNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */,
1906                        prevSlot, pHsNotifiersOneSd->notifier[apiHead], &parsed);
1907 
1908     return parsed.status == NVKMS_NOTIFIER_STATUS_BEGUN;
1909 }
1910 
1911 /*!
1912  * Determine if we've flipped to the previous frame.
1913  */
1914 static NvBool IsPreviousFrameDone(NVHsChannelEvoPtr pHsChannel)
1915 {
1916     if (pHsChannel->config.neededForSwapGroup) {
1917         /*
1918          * XXX NVKMS HEADSURFACE TODO: Somehow determine if the previous
1919          * ViewPortIn update for this head was latched.
1920          */
1921 
1922         /*
1923          * XXX NVKMS HEADSURFACE TODO: In the absence of a mechanism to
1924          * determine if ViewPortIn was latched, we would normally rely on this
1925          * callback arriving once per vblank.  Unfortunately, bug 2086726 can
1926          * cause us to get called twice per vblank.  WAR this for now by
1927          * ignoring callbacks that arrive in a very small window of the previous
1928          * callback.
1929          *
1930          * Throttling is now implemented using the RG line 1 interrupt
1931          * headsurface rendering mechanism, so this limit can be lowered once
1932          * the old vblank-triggered viewport flipping mechanism is removed.
1933          */
1934 
1935         const NvU64 oldUSec = pHsChannel->lastCallbackUSec;
1936         const NvU64 newUSec = nvkms_get_usec();
1937 
1938         /*
1939          * This threshold is somewhat arbitrary.  In bug 2086726, we see the
1940          * callback get called from both the ISR and the bottom half, which are
1941          * usually within ~200 usec of each other on an idle system.  There
1942          * shouldn't be a danger of mistaking legitimate periodic callbacks with
1943          * this small threshold: 500 usec per refresh would require a 2000 Hz
1944          * mode.
1945          */
1946         const NvU64 thresholdUSec = 500;
1947 
1948         nvAssert(!pHsChannel->usingRgIntrForSwapGroups);
1949 
1950         if ((newUSec > oldUSec) &&
1951             (newUSec - oldUSec) < thresholdUSec) {
1952             return FALSE;
1953         }
1954 
1955         pHsChannel->lastCallbackUSec = newUSec;
1956 
1957         return TRUE;
1958     } else {
1959         return IsPreviousFlipDone(pHsChannel);
1960     }
1961 }
1962 
1963 /*!
1964  * If the client provided a notifier surface with a real flip
1965  * request while swap groups were enabled, write to that
1966  * notifier with the BEGUN status and the most recent
1967  * headsurface notifier timestamp to emulate what the client
1968  * would observe if their notifier was used in hardware.
1969  */
1970 static void HsUpdateClientNotifier(NVHsChannelEvoPtr pHsChannel)
1971 {
1972     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
1973     const NvU32 apiHead = pHsChannel->apiHead;
1974     const NvU32 sd = pDispEvo->displayOwner;
1975     const NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
1976     const NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
1977     const NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
1978     const NvU8 nextSlot = pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot;
1979     struct nvKmsParsedNotifier parsed = { };
1980     NVFlipNIsoSurfaceEvoHwState *pClientNotifier =
1981         &pHsChannel->flipQueue[NVKMS_MAIN_LAYER].current.completionNotifier.surface;
1982 
1983     if (pClientNotifier->pSurfaceEvo == NULL) {
1984         return;
1985     }
1986 
1987     const NvU8 prevSlot =
1988         A_minus_b_with_wrap_U8(nextSlot, 1,
1989                                NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD);
1990 
1991     nvKmsParseNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */,
1992                        prevSlot, pHsNotifiersOneSd->notifier[apiHead], &parsed);
1993 
1994     nvAssert(parsed.status == NVKMS_NOTIFIER_STATUS_BEGUN);
1995 
1996     /*
1997      * XXX NVKMS HEADSURFACE TODO: Get valid timestamp through other means to
1998      * support this on platforms with legacy HW semaphores without valid
1999      * HW notifier timestamps in the main channel.
2000      */
2001     nvAssert(parsed.timeStampValid);
2002 
2003     nvKmsSetNotifier(pClientNotifier->format,
2004                      FALSE /* overlay */,
2005                      pClientNotifier->offsetInWords / 4,
2006                      pClientNotifier->pSurfaceEvo->cpuAddress[sd],
2007                      parsed.timeStamp);
2008 }
2009 
2010 /*!
2011  * Check if all flips completed for this SwapGroup.  If so, release the
2012  * SwapGroup.
2013  */
2014 static void HsCheckSwapGroupFlipDone(
2015     NVDevEvoPtr pDevEvo,
2016     NVSwapGroupRec *pSwapGroup)
2017 {
2018     const NVHsDeviceEvoRec *pHsDevice = pDevEvo->pHsDevice;
2019     NVDispEvoPtr pDispEvo;
2020     NvU32 dispIndex;
2021 
2022     nvAssert(pSwapGroup != NULL);
2023 
2024     if (!pSwapGroup->pendingFlip) {
2025         return;
2026     }
2027 
2028     /*
2029      * Check if all active heads in the SwapGroup have completed their flips.
2030      * If any haven't, return early.
2031      */
2032     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
2033         NvU32 apiHead;
2034         for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) {
2035 
2036             if (pDispEvo->pSwapGroup[apiHead] == pSwapGroup) {
2037 
2038                 NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead];
2039 
2040                 if (pHsChannel == NULL) {
2041                     continue;
2042                 }
2043 
2044                 nvAssert(pHsChannel->config.neededForSwapGroup);
2045 
2046                 if (!IsPreviousFlipDone(pHsChannel)) {
2047                     return;
2048                 }
2049             }
2050         }
2051     }
2052 
2053     /*
2054      * The SwapGroup is ready: update client notifiers if necessary and
2055      * increment nextIndex for all active heads, so that subsequent frames of
2056      * headSurface render to the next buffer.
2057      */
2058     FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
2059         NvU32 apiHead;
2060         for (apiHead = 0; apiHead < ARRAY_LEN(pDispEvo->pSwapGroup); apiHead++) {
2061 
2062             if (pDispEvo->pSwapGroup[apiHead] == pSwapGroup) {
2063 
2064                 NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead];
2065 
2066                 if (pHsChannel == NULL) {
2067                     continue;
2068                 }
2069 
2070                 nvAssert(pHsChannel->config.neededForSwapGroup);
2071                 nvAssert(IsPreviousFlipDone(pHsChannel));
2072 
2073                 HsUpdateClientNotifier(pHsChannel);
2074                 HsIncrementNextIndex(pHsDevice, pHsChannel);
2075             }
2076         }
2077     }
2078 
2079     /*
2080      * The SwapGroup is ready: release all SwapGroup members so that they can
2081      * proceed.
2082      */
2083     nvHsSwapGroupRelease(pDevEvo, pSwapGroup);
2084 }
2085 
2086 /*
2087  * Called from the RG line interrupt handler to determine whether rendering a
2088  * new frame can be skipped.
2089  */
2090 static NvBool HsCanOmitNonSgHsUpdate(NVHsChannelEvoPtr pHsChannel)
2091 {
2092     const NVSwapGroupRec *pHeadSwapGroup =
2093         pHsChannel->pDispEvo->pSwapGroup[pHsChannel->apiHead];
2094 
2095     /*
2096      * In the case of a fullscreen swapgroup, we can generally omit updating
2097      * the headsurface entirely upon vblank as long as the client is
2098      * actively rendering. All the swapgroup content has already been
2099      * updated to the headsurface backbuffer at the client's swapbuffers
2100      * time and there's no need to update the backbuffer again on RG line 1
2101      * or vblank interrupt time.
2102      *
2103      * There is one exception to this. If the client isn't rendering
2104      * actively then updates to the cursor (and possibly overlays, head
2105      * config) still require rendering an updated frame to the backbuffer.
2106      * Thus, we will simply limit this optimization to frames that come
2107      * within one frame time after the last recorded flip.
2108      */
2109     if (pHeadSwapGroup &&
2110         pHeadSwapGroup->swapGroupIsFullscreen) {
2111 
2112         NvU64 nowUs = nvkms_get_usec();
2113         NvU64 frameTimeUs = nvEvoFrametimeUsFromTimings(
2114             &pHsChannel->pDispEvo->apiHeadState[pHsChannel->apiHead].timings);
2115 
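        /*
         * Illustrative example: for a 60 Hz mode, frameTimeUs is roughly
         * 16667 usec, so the headSurface update is omitted only when the
         * client's last recorded flip happened within the last ~16.7 ms.
         */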
2116         if (nowUs - pHsChannel->lastHsClientFlipTimeUs < frameTimeUs) {
2117             return NV_TRUE;
2118         }
2119     }
2120 
2121     return NV_FALSE;
2122 }
2123 
2124 /*!
2125  * Receive RG line 1 callback, in process context with nvkms_lock held.
2126  */
2127 static void HsServiceRGLineInterrupt(void *dataPtr, NvU32 dataU32)
2128 {
2129     NVDispEvoRec *pDispEvo = (NVDispEvoRec *)dataPtr;
2130     NvU32 head = dataU32;
2131     const NvU32 apiHead = nvHardwareHeadToApiHead(head);
2132     NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead];
2133 
2134     /*
2135      * The pHsChannel may have been torn down between when the callback was
2136      * generated and when this was called.  Ignore spurious callbacks.
2137      */
2138     if (pHsChannel == NULL) {
2139         return;
2140     }
2141 
2142     if (pHsChannel->config.neededForSwapGroup) {
2143         /*
2144          * Update the non-swapgroup content on the back half of both
2145          * headsurface surfaces, and the swapgroup content on the back half of
2146          * the back headsurface surface, and perform a viewport offset flip to
2147          * the back offset.
2148          *
2149          * Synchronization is achieved by the following mechanism:
2150          *
2151          * - Before rendering a new frame, check that we aren't still scanning
2152          *   out from that half of the surface.
2153          * - After rendering a frame, push a semaphore write with the render
2154          *   offset and a non-stall interrupt.
2155          * - In response to the non-stall interrupt, perform the viewport
2156          *   flip to the render offset.
2157          */
2158         NvU32 activeViewportOffset =
2159             pDispEvo->pDevEvo->hal->GetActiveViewportOffset(pDispEvo, head);
2160 
2161         nvAssert((activeViewportOffset == 0) ||
2162                  (activeViewportOffset == pHsChannel->config.frameSize.height));
2163 
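        /*
         * Normalize the offset in scanlines to a buffer-half index:
         * 0 (top half) or 1 (bottom half).
         */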
2164         activeViewportOffset /= pHsChannel->config.frameSize.height;
2165 
2166         if (activeViewportOffset == HsGetPreviousOffset(pHsChannel)) {
2167             /*
2168              * The active viewport is the same as the last one we pushed, so
2169              * it's safe to start rendering to pHsChannel->nextOffset; check if
2170              * rendering from a previous interrupt hasn't completed yet.
2171              */
2172             if (pHsChannel->viewportFlipPending) {
2173                 /*
2174                  * A non-stall interrupt hasn't been triggered since we kicked
2175                  * off the previous frame's rendering.
2176                  */
2177                 HsProcFsRecordPreviousFrameNotDone(pHsChannel);
2178             } else {
2179                 NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
2180 
2181                 HsProcFsRecordScanline(pDispEvo, head);
2182 
2183                 if (HsCanOmitNonSgHsUpdate(pHsChannel)) {
2184                     HsProcFsRecordOmittedNonSgHsUpdate(pHsChannel);
2185                 } else {
2186                     nvHsNextFrame(pHsDevice, pHsChannel, NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK);
2187                 }
2188             }
2189         } else {
2190             /*
2191              * The viewport flip we pushed after the previous frame's rendering
2192              * hasn't been applied in hardware yet.
2193              */
2194             HsProcFsRecordPreviousFrameNotDone(pHsChannel);
2195         }
2196 
2197         HsCheckSwapGroupFlipDone(pDispEvo->pDevEvo, pDispEvo->pSwapGroup[apiHead]);
2198     }
2199 }
2200 
2201 /*!
2202  * Receive vblank callback, in process context with nvkms_lock held.
2203  *
2204  */
2205 static void HsVBlankCallback(NVDispEvoRec *pDispEvo,
2206                              const NvU32 head,
2207                              NVVBlankCallbackPtr pCallbackData)
2208 {
2209     const NvU32 apiHead = nvHardwareHeadToApiHead(head);
2210     NVHsChannelEvoPtr pHsChannel = pDispEvo->pHsChannel[apiHead];
2211     NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
2212 
2213     /*
2214      * The pHsChannel may have been torn down between when the vblank was
2215      * generated and when this was called.  Ignore spurious callbacks.
2216      */
2217     if (pHsChannel == NULL) {
2218         return;
2219     }
2220 
2221     if (!pHsChannel->usingRgIntrForSwapGroups &&
2222         pHsChannel->config.neededForSwapGroup) {
2223         HsCheckSwapGroupFlipDone(pDispEvo->pDevEvo, pDispEvo->pSwapGroup[apiHead]);
2224     }
2225 
2226     if (pHsChannel->usingRgIntrForSwapGroups &&
2227         pHsChannel->config.neededForSwapGroup) {
2228         // The next frame will be rendered during the RG line 1 interrupt.
2229         return;
2230     }
2231 
2232     /*
2233      * If we have not flipped to the previous buffer, yet, we should not render
2234      * to the next buffer.  Wait until the next vblank callback.
2235      */
2236     if (!IsPreviousFrameDone(pHsChannel)) {
2237         HsProcFsRecordPreviousFrameNotDone(pHsChannel);
2238         return;
2239     }
2240 
2241     HsProcFsRecordScanline(pDispEvo, head);
2242 
2243     /*
2244      * XXX NVKMS HEADSURFACE TODO: evaluate whether there has been
2245      * damage to the source buffer since the last headSurface frame.
2246      * Only if so, perform the headSurface transformation and flip to
2247      * the resulting headSurface buffer.
2248      *
2249      * For headSurface bringup purposes, just always flip to the next
2250      * headSurface buffer.
2251      */
2252 
2253     /*
2254      * When fullscreen swapgroup flipping, we don't need to update
2255      * non-swapgroup content at vblank.
2256      */
2257     if (!pHsChannel->swapGroupFlipping) {
2258         nvHsNextFrame(pHsDevice, pHsChannel,
2259                       NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK);
2260     }
2261 }
2262 
2263 /*!
2264  * Receive RG line 1 interrupt notification from resman.
2265  *
2266  * This function is registered as the kernel callback function from resman when
2267  * the RG line 1 interrupt is generated.
2268  *
2269  * This function is called within resman's context, so we schedule a zero-delay
2270  * timer callback to process the swapgroup check and release without holding the
2271  * resman lock.
2272  */
2273 static void HsRGLineInterruptCallback(NvU32 rgIntrLine, void *param1,
2274                                       NvBool bIsIrqlIsr /* unused */)
2275 {
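    /*
     * param1 packs both a pDispEvo ref_ptr and a head index: the pointer is
     * assumed to be aligned to at least NVKMS_MAX_HEADS_PER_DISP, so the
     * head index can be carried in, and masked back out of, its low bits.
     */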
2276     void *pDispEvoRefPtr = (void *)((NvUPtr)param1 &
2277                                     ~(NVKMS_MAX_HEADS_PER_DISP-1));
2278     NvU32 head = (NvUPtr)param1 & (NVKMS_MAX_HEADS_PER_DISP-1);
2279     (void) nvkms_alloc_timer_with_ref_ptr(
2280         HsServiceRGLineInterrupt, /* callback */
2281         pDispEvoRefPtr, /* ref_ptr */
2282         head,  /* dataU32 */
2283         0); /* usec */
2284 }
2285 
2286 /*!
2287  * Schedule vblank callbacks from resman on a specific head and subdevice.
2288  */
2289 void nvHsAddVBlankCallback(NVHsChannelEvoPtr pHsChannel)
2290 {
2291     NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2292 
2293     pHsChannel->vBlankCallback =
2294         nvApiHeadRegisterVBlankCallback(pDispEvo,
2295                                         pHsChannel->apiHead,
2296                                         HsVBlankCallback,
2297                                         NULL);
2298 }
2299 
2300 /*!
2301  * Add an RG line 1 callback to check the swapgroup flip notifier and release
2302  * its associated deferred request fifo.
2303  *
2304  * This is done in an RG line 1 callback instead of the vblank callback to WAR
2305  * an issue where certain mode timings cause the vblank callback to fire
2306  * slightly before LOADV causes the notifier to transition from NOT_BEGUN
2307  * to BEGUN, causing an extra frame of delay before the next vblank occurs and
2308  * the deferred request fifo can be released.
2309  */
2310 void nvHsAddRgLine1Callback(NVHsChannelEvoPtr pHsChannel)
2311 {
2312     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2313     NvBool found;
2314     NvU32 val;
2315 
2316     /*
2317      * Use the RG line 1 interrupt to check swapgroup completion by default,
2318      * but allow setting NVKMS_DELAY_SWAPGROUP_CHECK=0 by regkey to revert to
2319      * the old method of checking during vblank for debugging purposes.
2320      */
2321     found = nvGetRegkeyValue(pDispEvo->pDevEvo, "NVKMS_DELAY_SWAPGROUP_CHECK",
2322                              &val);
2323 
2324     if (found && (val == 0)) {
2325         return;
2326     }
2327 
2328     pHsChannel->rgIntrCallbackObjectHandle =
2329         nvApiHeadAddRgLine1Callback(pDispEvo,
2330                                     pHsChannel->apiHead,
2331                                     HsRGLineInterruptCallback);
2332 
2333     if (pHsChannel->rgIntrCallbackObjectHandle == 0) {
2334         nvAssert(!"Failed to register headSurface RG line 1 interrupt");
2335     } else {
2336         pHsChannel->usingRgIntrForSwapGroups = TRUE;
2337     }
2338 }
2339 
2340 /*!
2341  * Cancel RG line 1 callbacks from resman on the specified head and subdevice.
2342  *
2343  * The same limitations regarding leftover vblank callbacks after vblank
2344  * callbacks are disabled in nvHsRemoveVBlankCallback() apply to RG callbacks.
2345  */
2346 void nvHsRemoveRgLine1Callback(NVHsChannelEvoPtr pHsChannel)
2347 {
2348     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2349 
2350     if (pHsChannel->usingRgIntrForSwapGroups) {
2351         nvRmRemoveRgLine1Callback(pDispEvo,
2352                                   pHsChannel->rgIntrCallbackObjectHandle);
2353         pHsChannel->rgIntrCallbackObjectHandle = 0;
2354     }
2355 }
2356 
2357 /*!
2358  * Cancel vblank callbacks from resman on the specified head and subdevice.
2359  *
2360  * Note that there could currently be callbacks in flight.  We should be
2361  * prepared to handle a spurious callback after cancelling the callbacks here.
2362  *
2363  * XXX NVKMS HEADSURFACE TODO: It would be better to:
2364  *
2365  * (a) Remove the vblank callback before the modeset that disables headSurface.
2366  * (b) Drain/cancel any in flight callbacks while holding the nvkms_lock.
2367  *
2368  * A mechanism like that should avoid spurious callbacks.
2369  */
2370 void nvHsRemoveVBlankCallback(NVHsChannelEvoPtr pHsChannel)
2371 {
2372     NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2373 
2374     nvApiHeadUnregisterVBlankCallback(pDispEvo,
2375                                       pHsChannel->apiHead,
2376                                       pHsChannel->vBlankCallback);
2377     pHsChannel->vBlankCallback = NULL;
2378 }
2379 
2380 void nvHsAllocStatistics(
2381     NVHsChannelEvoRec *pHsChannel)
2382 {
2383 #if NVKMS_PROCFS_ENABLE
2384     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2385     const NvU32 apiHead = pHsChannel->apiHead;
2386     const NVHwModeTimingsEvo *pTimings =
2387         &pDispEvo->apiHeadState[apiHead].timings;
2388     NvU32 n;
2389 
2390     nvkms_memset(&pHsChannel->statistics, 0, sizeof(pHsChannel->statistics));
2391 
2392     pHsChannel->statistics.scanLine.vVisible = nvEvoVisibleHeight(pTimings);
2393 
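    /* One histogram bucket per possible scanline value, 0 through vVisible
     * inclusive. */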
2394     n = pHsChannel->statistics.scanLine.vVisible + 1;
2395 
2396     pHsChannel->statistics.scanLine.pHistogram = nvCalloc(1, sizeof(NvU64) * n);
2397 #endif /* NVKMS_PROCFS_ENABLE */
2398 }
2399 
2400 void nvHsFreeStatistics(
2401     NVHsChannelEvoRec *pHsChannel)
2402 {
2403 #if NVKMS_PROCFS_ENABLE
2404     nvFree(pHsChannel->statistics.scanLine.pHistogram);
2405     nvkms_memset(&pHsChannel->statistics, 0, sizeof(pHsChannel->statistics));
2406 #endif /* NVKMS_PROCFS_ENABLE */
2407 }
2408 
2409 #if NVKMS_PROCFS_ENABLE
2410 
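/*
 * Indentation helpers for aligning procfs output: 'before' shifts a label to
 * the right by 'indent' spaces and 'after' pads it back out, so that
 * before + after is always four columns and the ':' separators line up.
 */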
2411 static const struct {
2412     const char *before;
2413     const char *after;
2414 } HsProcFsIndentTable[] = {
2415     [0] = { .before = "", .after = "    " },
2416     [1] = { .before = " ", .after = "   " },
2417     [2] = { .before = "  ", .after = "  " },
2418     [3] = { .before = "   ", .after = " " },
2419     [4] = { .before = "    ", .after = "" },
2420 };
2421 
2422 static const char *HsProcFsIndentBefore(NvU8 indent)
2423 {
2424     nvAssert(indent < ARRAY_LEN(HsProcFsIndentTable));
2425 
2426     return HsProcFsIndentTable[indent].before;
2427 }
2428 
2429 static const char *HsProcFsIndentAfter(NvU8 indent)
2430 {
2431     nvAssert(indent < ARRAY_LEN(HsProcFsIndentTable));
2432 
2433     return HsProcFsIndentTable[indent].after;
2434 }
2435 
2436 static void HsProcFsGpuTime(
2437     NVEvoInfoStringRec *pInfoString,
2438     const NvU64 nFrames,
2439     const NvU64 gpuTimeSpent,
2440     const NvU8 indent)
2441 {
2442     /*
2443      * Use nFrames - 1 to compute averageGpuTimeNs: the nvHs3dRenderFrame() path
2444      * increments nFrames at the end of rendering a frame, but it only updates
2445      * gpuTimeSpent at the start of rendering the _next_ frame.  I.e.,
2446      * gpuTimeSpent has time for nFrames - 1 frames.
2447      */
2448     const NvU64 averageGpuTimeNs =
2449         (nFrames <= 1) ? 0 : (gpuTimeSpent / (nFrames - 1));
2450     const NvU64 averageGpuTimeUs = (averageGpuTimeNs + 500) / 1000;
2451     const NvU64 nFramesToReport = (nFrames <= 1) ? 0 : nFrames - 1;
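    /*
     * Illustrative example: gpuTimeSpent = 33,000,000 nsec with nFrames = 12
     * gives averageGpuTimeNs = 33,000,000 / 11 = 3,000,000, which rounds to
     * averageGpuTimeUs = 3000 and is reported as "3.000 msec".
     */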
2452 
2453     nvEvoLogInfoString(
2454         pInfoString, "   %savg GPU time / frame%s   : "
2455         "%" NvU64_fmtu ".%03" NvU64_fmtu " msec "
2456         "(%" NvU64_fmtu " nsec / %" NvU64_fmtu " frames)",
2457         HsProcFsIndentBefore(indent),
2458         HsProcFsIndentAfter(indent),
2459         averageGpuTimeUs / 1000,
2460         averageGpuTimeUs % 1000,
2461         gpuTimeSpent,
2462         nFramesToReport);
2463 }
2464 
2465 static void HsProcFsFrameStatisticsOneEye(
2466     NVEvoInfoStringRec *pInfoString,
2467     const NVHsChannelEvoRec *pHsChannel,
2468     const NvU8 eye,
2469     const NvU8 slot,
2470     const NvU8 indent)
2471 {
2472     const NVHsChannelStatisticsOneEyeRec *pPerEye =
2473         &pHsChannel->statistics.perEye[eye][slot];
2474 
2475     const NvU64 framesPerMs = pPerEye->fps.framesPerMs;
2476 
2477     nvEvoLogInfoString(
2478         pInfoString,
2479         "   %snFrames%s                : %" NvU64_fmtu,
2480         HsProcFsIndentBefore(indent),
2481         HsProcFsIndentAfter(indent),
2482         pPerEye->nFrames);
2483 
2484     nvEvoLogInfoString(
2485         pInfoString, "   %sFPS (computed every 5s)%s: "
2486         "%" NvU64_fmtu ".%03" NvU64_fmtu,
2487         HsProcFsIndentBefore(indent),
2488         HsProcFsIndentAfter(indent),
2489         framesPerMs / 1000,
2490         framesPerMs % 1000);
2491 
2492     HsProcFsGpuTime(
2493         pInfoString,
2494         pPerEye->nFrames,
2495         pPerEye->gpuTimeSpent,
2496         indent);
2497 }
2498 
2499 static void HsProcFsFrameStatisticsOneSlot(
2500     NVEvoInfoStringRec *pInfoString,
2501     const NVHsChannelEvoRec *pHsChannel,
2502     const NvU8 slot,
2503     const NvU8 indent)
2504 {
2505     const char *eyeLabel[] = {
2506         [NVKMS_LEFT]  = "Left Eye ",
2507         [NVKMS_RIGHT] = "Right Eye",
2508     };
2509 
2510     const NvBool needEyeLabel =
2511         pHsChannel->statistics.perEye[NVKMS_RIGHT][slot].nFrames != 0;
2512     NvU8 eye;
2513 
2514     for (eye = NVKMS_LEFT; eye < NVKMS_MAX_EYES; eye++) {
2515 
2516         NvU8 localIndent = 0;
2517 
2518         if (pHsChannel->statistics.perEye[eye][slot].nFrames == 0) {
2519             continue;
2520         }
2521 
2522         if (needEyeLabel) {
2523             nvEvoLogInfoString(
2524                 pInfoString, "   %s%s%s              :",
2525                 HsProcFsIndentBefore(indent),
2526                 eyeLabel[eye],
2527                 HsProcFsIndentAfter(indent));
2528             localIndent++;
2529         }
2530 
2531         HsProcFsFrameStatisticsOneEye(
2532             pInfoString,
2533             pHsChannel,
2534             eye,
2535             slot,
2536             indent + localIndent);
2537     }
2538 }
2539 
2540 static void HsProcFsFrameStatistics(
2541     NVEvoInfoStringRec *pInfoString,
2542     const NVHsChannelEvoRec *pHsChannel)
2543 {
2544     NvU8 slot;
2545 
2546     if (pHsChannel->config.neededForSwapGroup) {
2547         nvEvoLogInfoString(pInfoString,
2548                            "   VBLANK frames              :");
2549 
2550         nvEvoLogInfoString(pInfoString,
2551                            "    Old swapGroup content     :");
2552 
2553         slot = Hs3dStatisticsGetSlot(
2554                     pHsChannel,
2555                     NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK, 0,
2556                     TRUE /* honorSwapGroupClipList */);
2557 
2558         HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, 2);
2559 
2560         nvEvoLogInfoString(pInfoString,
2561                            "    New swapGroup content     :");
2562 
2563         slot = Hs3dStatisticsGetSlot(
2564                     pHsChannel,
2565                     NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK, 0,
2566                     FALSE /* honorSwapGroupClipList */);
2567 
2568         HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, 2);
2569 
2570         nvEvoLogInfoString(pInfoString,
2571                            "   SWAP_GROUP_READY frames    :");
2572 
2573         slot = Hs3dStatisticsGetSlot(
2574                     pHsChannel,
2575                     NV_HS_NEXT_FRAME_REQUEST_TYPE_SWAP_GROUP_READY, 0,
2576                     FALSE /* honorSwapGroupClipList */);
2577 
2578         HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, 1);
2579 
2580     } else {
2581         const NvU8 indent = 0; /* start with no indentation */
2582 
2583         slot = Hs3dStatisticsGetSlot(
2584                     pHsChannel,
2585                     NV_HS_NEXT_FRAME_REQUEST_TYPE_VBLANK, 0,
2586                     FALSE);
2587 
2588         HsProcFsFrameStatisticsOneSlot(pInfoString, pHsChannel, slot, indent);
2589     }
2590 }
2591 
2592 static void HsProcFsScanLine(
2593     NVEvoInfoStringRec *pInfoString,
2594     const NVHsChannelEvoRec *pHsChannel)
2595 {
2596     NvU16 i;
2597 
2598     nvEvoLogInfoString(pInfoString,
2599                        "   scanLine information       :");
2600 
2601     nvEvoLogInfoString(pInfoString,
2602                        "    nInBlankingPeriod         : %" NvU64_fmtu,
2603                        pHsChannel->statistics.scanLine.nInBlankingPeriod);
2604     nvEvoLogInfoString(pInfoString,
2605                        "    nNotInBlankingPeriod      : %" NvU64_fmtu,
2606                        pHsChannel->statistics.scanLine.nNotInBlankingPeriod);
2607     nvEvoLogInfoString(pInfoString,
2608                        "    vVisible                  : %d",
2609                        pHsChannel->statistics.scanLine.vVisible);
2610 
2611     if (pHsChannel->statistics.scanLine.pHistogram == NULL) {
2612 
2613         nvEvoLogInfoString(pInfoString,
2614                            "    scanline histogram        : failed allocation");
2615     } else {
2616 
2617         nvEvoLogInfoString(pInfoString,
2618                            "    scanline histogram        :");
2619 
2620         for (i = 0; i <= pHsChannel->statistics.scanLine.vVisible; i++) {
2621 
2622             if (pHsChannel->statistics.scanLine.pHistogram[i] != 0) {
2623                 nvEvoLogInfoString(pInfoString,
2624                     "     scanLine[%04d]           : %" NvU64_fmtu,
2625                     i, pHsChannel->statistics.scanLine.pHistogram[i]);
2626             }
2627         }
2628     }
2629 }
2630 
2631 static void HsProcFsFlipQueueOneEntry(
2632     NVEvoInfoStringRec *pInfoString,
2633     const NVFlipChannelEvoHwState *pFlipState)
2634 {
2635     /*
2636      * Print the pointers by casting to NvUPtr and formatting with NvUPtr_fmtx,
2637      * so that NULL is printed as "0x0", rather than "(null)".
2638      */
2639 
2640     nvEvoLogInfoString(pInfoString,
2641         "        pSurfaceEvo(L,R)      : 0x%" NvUPtr_fmtx ", 0x%" NvUPtr_fmtx,
2642         (NvUPtr)pFlipState->pSurfaceEvo[NVKMS_LEFT],
2643         (NvUPtr)pFlipState->pSurfaceEvo[NVKMS_RIGHT]);
2644 
2645     if (!pFlipState->syncObject.usingSyncpt) {
2646         nvEvoLogInfoString(pInfoString,
2647             "        semaphore             : "
2648             "acquire pSurfaceEvo: 0x%" NvUPtr_fmtx ", "
2649             "release pSurfaceEvo: 0x%" NvUPtr_fmtx ", "
2650             "acquire value: 0x%08x, "
2651             "release value: 0x%08x",
2652             (NvUPtr)pFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo,
2653             (NvUPtr)pFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo,
2654             pFlipState->syncObject.u.semaphores.acquireValue,
2655             pFlipState->syncObject.u.semaphores.releaseValue);
2656     }
2657 }
2658 
2659 static void HsProcFsFlipQueue(
2660     NVEvoInfoStringRec *pInfoString,
2661     const NVHsChannelEvoRec *pHsChannel)
2662 {
2663     const NVHsChannelFlipQueueEntry *pEntry;
2664     NvU8 layer;
2665 
2666     for (layer = 0; layer < ARRAY_LEN(pHsChannel->flipQueue); layer++) {
2667 
2668         const char *layerString[NVKMS_MAX_LAYERS_PER_HEAD] = {
2669             [NVKMS_MAIN_LAYER]    = "(main)   ",
2670             [NVKMS_OVERLAY_LAYER] = "(overlay)",
2671         };
2672 
2673         nvEvoLogInfoString(pInfoString,
2674             "   flipQueue%s         :", layerString[layer]);
2675 
2676         nvEvoLogInfoString(pInfoString,
2677             "     current                  :");
2678 
2679         HsProcFsFlipQueueOneEntry(pInfoString,
2680                                   &pHsChannel->flipQueue[layer].current);
2681 
2682         nvListForEachEntry(pEntry,
2683                            &pHsChannel->flipQueue[layer].queue,
2684                            flipQueueEntry) {
2685 
2686             nvEvoLogInfoString(pInfoString,
2687                 "     pending                  :");
2688 
2689             HsProcFsFlipQueueOneEntry(pInfoString, &pEntry->hwState);
2690         }
2691     }
2692 }
2693 
2694 static const char *HsGetEyeMaskString(const NvU8 eyeMask)
2695 {
2696     if (eyeMask == NVBIT(NVKMS_LEFT)) {
2697         return "L";
2698     } else {
2699         nvAssert(eyeMask == (NVBIT(NVKMS_LEFT) | NVBIT(NVKMS_RIGHT)));
2700         return "L|R";
2701     }
2702 }
2703 
2704 static const char *HsGetPixelShiftString(
2705     const enum NvKmsPixelShiftMode pixelShift)
2706 {
2707     switch (pixelShift) {
2708     case NVKMS_PIXEL_SHIFT_NONE:            return "none";
2709     case NVKMS_PIXEL_SHIFT_4K_TOP_LEFT:     return "4kTopLeft";
2710     case NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT: return "4kBottomRight";
2711     case NVKMS_PIXEL_SHIFT_8K:              return "8k";
2712     }
2713 
2714     return "unknown";
2715 }
2716 
2717 static void HsProcFsTransform(
2718     NVEvoInfoStringRec *pInfoString,
2719     const NVHsChannelEvoRec *pHsChannel)
2720 {
2721     nvEvoLogInfoString(pInfoString,
2722                        "   transform matrix           : "
2723                        "{ { 0x%08x, 0x%08x, 0x%08x },",
2724                        F32viewAsNvU32(pHsChannel->config.transform.m[0][0]),
2725                        F32viewAsNvU32(pHsChannel->config.transform.m[0][1]),
2726                        F32viewAsNvU32(pHsChannel->config.transform.m[0][2]));
2727 
2728     nvEvoLogInfoString(pInfoString,
2729                        "                              : "
2730                        "  { 0x%08x, 0x%08x, 0x%08x },",
2731                        F32viewAsNvU32(pHsChannel->config.transform.m[1][0]),
2732                        F32viewAsNvU32(pHsChannel->config.transform.m[1][1]),
2733                        F32viewAsNvU32(pHsChannel->config.transform.m[1][2]));
2734 
2735     nvEvoLogInfoString(pInfoString,
2736                        "                              : "
2737                        "  { 0x%08x, 0x%08x, 0x%08x } }",
2738                        F32viewAsNvU32(pHsChannel->config.transform.m[2][0]),
2739                        F32viewAsNvU32(pHsChannel->config.transform.m[2][1]),
2740                        F32viewAsNvU32(pHsChannel->config.transform.m[2][2]));
2741 }
2742 
2743 static void HsProcFsStaticWarpMesh(
2744     NVEvoInfoStringRec *pInfoString,
2745     const NVHsChannelEvoRec *pHsChannel)
2746 {
2747     nvEvoLogInfoString(pInfoString,
2748                        "   staticWarpMesh             : "
2749                        "{ { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x },",
2750                        pHsChannel->config.staticWarpMesh.vertex[0].x,
2751                        pHsChannel->config.staticWarpMesh.vertex[0].y,
2752                        pHsChannel->config.staticWarpMesh.vertex[0].u,
2753                        pHsChannel->config.staticWarpMesh.vertex[0].v,
2754                        pHsChannel->config.staticWarpMesh.vertex[0].r,
2755                        pHsChannel->config.staticWarpMesh.vertex[0].q);
2756 
2757     nvEvoLogInfoString(pInfoString,
2758                        "                              : "
2759                        "  { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x },",
2760                        pHsChannel->config.staticWarpMesh.vertex[1].x,
2761                        pHsChannel->config.staticWarpMesh.vertex[1].y,
2762                        pHsChannel->config.staticWarpMesh.vertex[1].u,
2763                        pHsChannel->config.staticWarpMesh.vertex[1].v,
2764                        pHsChannel->config.staticWarpMesh.vertex[1].r,
2765                        pHsChannel->config.staticWarpMesh.vertex[1].q);
2766 
2767     nvEvoLogInfoString(pInfoString,
2768                        "                              : "
2769                        "  { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x },",
2770                        pHsChannel->config.staticWarpMesh.vertex[2].x,
2771                        pHsChannel->config.staticWarpMesh.vertex[2].y,
2772                        pHsChannel->config.staticWarpMesh.vertex[2].u,
2773                        pHsChannel->config.staticWarpMesh.vertex[2].v,
2774                        pHsChannel->config.staticWarpMesh.vertex[2].r,
2775                        pHsChannel->config.staticWarpMesh.vertex[2].q);
2776 
2777     nvEvoLogInfoString(pInfoString,
2778                        "                              : "
2779                        "  { 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x } }",
2780                        pHsChannel->config.staticWarpMesh.vertex[3].x,
2781                        pHsChannel->config.staticWarpMesh.vertex[3].y,
2782                        pHsChannel->config.staticWarpMesh.vertex[3].u,
2783                        pHsChannel->config.staticWarpMesh.vertex[3].v,
2784                        pHsChannel->config.staticWarpMesh.vertex[3].r,
2785                        pHsChannel->config.staticWarpMesh.vertex[3].q);
2786 }
2787 
2788 static const char *HsProcFsGetNeededForString(
2789     const NVHsChannelEvoRec *pHsChannel)
2790 {
2791     if (pHsChannel->config.neededForModeset &&
2792         pHsChannel->config.neededForSwapGroup) {
2793         return "modeset, swapgroup";
2794     }
2795 
2796     if (pHsChannel->config.neededForModeset &&
2797         !pHsChannel->config.neededForSwapGroup) {
2798         return "modeset";
2799     }
2800 
2801     if (!pHsChannel->config.neededForModeset &&
2802         pHsChannel->config.neededForSwapGroup) {
2803         return "swapgroup";
2804     }
2805 
2806     return "unknown";
2807 }
2808 
2809 static void HsProcFsFrameSemaphores(
2810     NVEvoInfoStringRec *pInfoString,
2811     const NVHsChannelEvoRec *pHsChannel)
2812 {
2813     const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
2814     const NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
2815     const NvU32 sd = pDispEvo->displayOwner;
2816     const NVHsNotifiersOneSdRec *p = pHsDevice->notifiers.sd[sd].ptr;
2817     const NvGpuSemaphore *pSema =
2818         (const NvGpuSemaphore *)p->semaphore[pHsChannel->apiHead];
2819 
2820     NvU8 buffer;
2821 
2822     for (buffer = 0; buffer < NVKMS_HEAD_SURFACE_MAX_BUFFERS; buffer++) {
2823         nvEvoLogInfoString(pInfoString,
2824                            "   frameSemaphore[%d]          : 0x%0x",
2825                            buffer,
2826                            pSema[buffer].data[0]);
2827     }
2828 }
2829 
2830 void nvHsProcFs(
2831     NVEvoInfoStringRec *pInfoString,
2832     NVDevEvoRec *pDevEvo,
2833     NvU32 dispIndex,
2834     NvU32 head)
2835 {
2836     NVDispEvoPtr pDispEvo = pDevEvo->pDispEvo[dispIndex];
2837     const NvU32 apiHead = nvHardwareHeadToApiHead(head);
2838     const NVHsChannelEvoRec *pHsChannel = pDispEvo->pHsChannel[apiHead];
2839     const NVHsStateOneHeadAllDisps *pHsOneHeadAllDisps =
2840         &pDevEvo->apiHeadSurfaceAllDisps[apiHead];
2841 
2842     if (pHsChannel == NULL) {
2843         nvEvoLogInfoString(pInfoString,
2844                            "  headSurface[head:%02d]        : disabled", head);
2845         return;
2846     }
2847 
2848     nvEvoLogInfoString(pInfoString,
2849                        "  headSurface[head:%02d]        : "
2850                        "enabled (needed for: %s)",
2851                        head, HsProcFsGetNeededForString(pHsChannel));
2852 
2853     HsProcFsFrameStatistics(pInfoString, pHsChannel);
2854 
2855     nvEvoLogInfoString(pInfoString,
2856                        "   nextIndex                  : %d",
2857                        pHsChannel->nextIndex);
2858 
2859     nvEvoLogInfoString(pInfoString,
2860                        "   nextOffset                 : %d",
2861                        pHsChannel->nextOffset);
2862 
2863     nvEvoLogInfoString(pInfoString,
2864                        "   nPreviousFrameNotDone      : %" NvU64_fmtu,
2865                        pHsChannel->statistics.nPreviousFrameNotDone);
2866 
2867     nvEvoLogInfoString(pInfoString,
2868                        "   nOmittedNonSgHsUpdates     : %" NvU64_fmtu,
2869                        pHsChannel->statistics.nOmittedNonSgHsUpdates);
2870 
2871     nvEvoLogInfoString(pInfoString,
2872                        "   nFullscreenSgFrames        : %" NvU64_fmtu,
2873                        pHsChannel->statistics.nFullscreenSgFrames);
2874 
2875     nvEvoLogInfoString(pInfoString,
2876                        "   nNonFullscreenSgFrames     : %" NvU64_fmtu,
2877                        pHsChannel->statistics.nNonFullscreenSgFrames);
2878 
2879     nvEvoLogInfoString(pInfoString,
2880                        "   viewPortIn                 : %d x %d +%d +%d",
2881                        pHsChannel->config.viewPortIn.width,
2882                        pHsChannel->config.viewPortIn.height,
2883                        pHsChannel->config.viewPortIn.x,
2884                        pHsChannel->config.viewPortIn.y);
2885 
2886     nvEvoLogInfoString(pInfoString,
2887                        "   viewPortOut                : %d x %d +%d +%d",
2888                        pHsChannel->config.viewPortOut.width,
2889                        pHsChannel->config.viewPortOut.height,
2890                        pHsChannel->config.viewPortOut.x,
2891                        pHsChannel->config.viewPortOut.y);
2892 
2893     nvEvoLogInfoString(pInfoString,
2894                        "   frameSize                  : %d x %d",
2895                        pHsChannel->config.frameSize.width,
2896                        pHsChannel->config.frameSize.height);
2897 
2898     nvEvoLogInfoString(pInfoString,
2899                        "   surfaceSize                : %d x %d",
2900                        pHsChannel->config.surfaceSize.width,
2901                        pHsChannel->config.surfaceSize.height);
2902 
2903     nvEvoLogInfoString(pInfoString,
2904                        "   stagingSurfaceSize         : %d x %d",
2905                        pHsChannel->config.stagingSurfaceSize.width,
2906                        pHsChannel->config.stagingSurfaceSize.height);
2907 
2908     nvEvoLogInfoString(pInfoString,
2909                        "   allDispsSurfaceSize        : %d x %d",
2910                        pHsOneHeadAllDisps->size.width,
2911                        pHsOneHeadAllDisps->size.height);
2912 
2913     nvEvoLogInfoString(pInfoString,
2914                        "   allDispsStagingSize        : %d x %d",
2915                        pHsOneHeadAllDisps->stagingSize.width,
2916                        pHsOneHeadAllDisps->stagingSize.height);
2917 
2918     nvEvoLogInfoString(pInfoString,
2919                        "   allDispsSurfaceCount       : %d",
2920                        pHsOneHeadAllDisps->surfaceCount);
2921 
2922     nvEvoLogInfoString(pInfoString,
2923                        "   eyeMask                    : %s",
2924                        HsGetEyeMaskString(pHsChannel->config.eyeMask));
2925 
2926     nvEvoLogInfoString(pInfoString,
2927                        "   pixelShift                 : %s",
2928                        HsGetPixelShiftString(pHsChannel->config.pixelShift));
2929 
2930     HsProcFsTransform(pInfoString, pHsChannel);
2931 
2932     HsProcFsStaticWarpMesh(pInfoString, pHsChannel);
2933 
2934     HsProcFsFlipQueue(pInfoString, pHsChannel);
2935 
2936     HsProcFsFrameSemaphores(pInfoString, pHsChannel);
2937 
2938     HsProcFsScanLine(pInfoString, pHsChannel);
2939 }
2940 #endif /* NVKMS_PROCFS_ENABLE */
2941