1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "nvkms-surface.h"
25 #include "nvkms-rm.h"
26 #include "nvkms-rmapi.h"
27 #include "nvkms-utils.h"
28 #include "nvkms-flip.h"
29 #include "nvkms-private.h"
30 #include "nvkms-headsurface.h"
31 #include "nvkms-headsurface-swapgroup.h"
32 #include "nvos.h"
33 
34 // NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD
35 #include "ctrl/ctrl0000/ctrl0000unix.h"
36 
37 /* NV01_MEMORY_SYSTEM_OS_DESCRIPTOR */
38 #include "class/cl0071.h"
39 
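/*
 * Free the NVSurfaceEvoRec structure itself.  The caller must ensure that no
 * NvKmsPerOpen still references the surface and that both reference counts
 * have already dropped to zero; the RM resources are expected to have been
 * released separately (see FreeSurfaceEvoRm()).
 */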
40 static void FreeSurfaceEvoStruct(NVSurfaceEvoPtr pSurfaceEvo)
41 {
42     if (pSurfaceEvo == NULL) {
43         return;
44     }
45 
46     nvAssert(!nvSurfaceEvoInAnyOpens(pSurfaceEvo));
47 
48     nvAssert(pSurfaceEvo->structRefCnt == 0);
49     nvAssert(pSurfaceEvo->rmRefCnt == 0);
50 
51     nvFree(pSurfaceEvo);
52 }
53 
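/*
 * Release the RM resources associated with pSurfaceEvo: the per-plane display
 * context DMAs, any per-subdevice CPU mappings, the headSurface GPU mapping,
 * and the per-plane RM memory handles.  The structure itself is preserved
 * (only structRefCnt survives the memset below) so that an orphaned surface
 * can still be unregistered later.
 */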
54 static void FreeSurfaceEvoRm(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfaceEvo)
55 {
56     NvU64 structRefCnt;
57     NvU32 firstPlaneRmHandle;
58     NvU8 planeIndex;
59 
60     if ((pDevEvo == NULL) || (pSurfaceEvo == NULL)) {
61         return;
62     }
63 
64     nvAssert(pSurfaceEvo->rmRefCnt == 0);
65 
66     FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) {
67         nvRmEvoFreeDispContextDMA(pDevEvo,
68                                   &pSurfaceEvo->planes[planeIndex].ctxDma);
69     }
70 
71     firstPlaneRmHandle = pSurfaceEvo->planes[0].rmHandle;
72 
73     if (firstPlaneRmHandle != 0) {
74 
75         NvU32 sd;
76 
77         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
78 
79             if (pSurfaceEvo->cpuAddress[sd] != NULL) {
80                 nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
81                                    pDevEvo->pSubDevices[sd]->handle,
82                                    firstPlaneRmHandle,
83                                    pSurfaceEvo->cpuAddress[sd],
84                                    0);
85                 pSurfaceEvo->cpuAddress[sd] = NULL;
86             }
87         }
88 
89         nvHsUnmapSurfaceFromDevice(pDevEvo,
90                                    firstPlaneRmHandle,
91                                    pSurfaceEvo->gpuAddress);
92     }
93 
94     FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) {
95 
96         if (pSurfaceEvo->planes[planeIndex].rmHandle == 0) {
97             break;
98         }
99 
100         nvRmApiFree(nvEvoGlobal.clientHandle,
101                     pDevEvo->deviceHandle,
102                     pSurfaceEvo->planes[planeIndex].rmHandle);
103 
104         nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
105                            pSurfaceEvo->planes[planeIndex].rmHandle);
106 
107         pSurfaceEvo->planes[planeIndex].rmHandle = 0;
108     }
109 
110     /*
     * The surface is now an orphan: clear everything in the pSurfaceEvo
     * except its structRefCnt.  The only operation that can still be
     * performed on it is unregistration.
114      */
115     structRefCnt = pSurfaceEvo->structRefCnt;
116     nvkms_memset(pSurfaceEvo, 0, sizeof(*pSurfaceEvo));
117     pSurfaceEvo->structRefCnt = structRefCnt;
118 }
119 
120 void nvEvoIncrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo)
121 {
122     nvAssert(!nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo));
123 
124     pSurfaceEvo->structRefCnt++;
125 }
126 
127 void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo)
128 {
129     nvAssert(pSurfaceEvo->structRefCnt >= 1);
130     pSurfaceEvo->structRefCnt--;
131 
132     if (pSurfaceEvo->structRefCnt == 0) {
133         FreeSurfaceEvoStruct(pSurfaceEvo);
134     }
135 }
136 
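/*
 * Validate the per-plane parameters of a surface registration request: each
 * plane's pitch and RM object size must be non-zero, the plane offset must be
 * suitably aligned, a full line of pixels must fit within the pitch, and the
 * memory region described by (offset, pitch, effective lines) must fall
 * within the underlying RM allocation.
 */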
137 static NvBool ValidatePlaneProperties(
138     NVDevEvoPtr pDevEvo,
139     const struct NvKmsRegisterSurfaceRequest *pRequest)
140 {
141     const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
142         nvKmsGetSurfaceMemoryFormatInfo(pRequest->format);
143     NvU8 planeIndex;
144 
145     /*
     * Reject all registration requests for multi-planar NISO surfaces;
     * such a request is nonsensical.
148      */
149     if ((pRequest->isoType == NVKMS_MEMORY_NISO) &&
150         (pFormatInfo->numPlanes > 1)) {
151         return FALSE;
152     }
153 
154     for (planeIndex = 0; planeIndex < pFormatInfo->numPlanes; planeIndex++) {
155 
156         const NvU64 planeOffset = pRequest->planes[planeIndex].offset;
157         NvU64 planePitch = pRequest->planes[planeIndex].pitch;
158         NvU64 rmObjectSizeInBytes =
159             pRequest->planes[planeIndex].rmObjectSizeInBytes;
160         NvU64 widthInBytes;
161         NvU64 planeSizeInBytes;
162         NvU32 planeEffectiveLines = pRequest->heightInPixels;
163         NvU32 widthInPixels = pRequest->widthInPixels;
164 
165         if ((planePitch == 0U) || (rmObjectSizeInBytes == 0U))
166         {
            nvEvoLog(EVO_LOG_ERROR, "Invalid planePitch or rmObjectSizeInBytes passed during surface registration");
168             return FALSE;
169         }
170 
171         if ((pRequest->isoType == NVKMS_MEMORY_ISO) &&
172             ((planeEffectiveLines == 0U) || (widthInPixels == 0U)))
173         {
            nvEvoLog(EVO_LOG_ERROR, "Invalid heightInPixels or widthInPixels passed during ISO surface registration");
175             return FALSE;
176         }
177 
178         /* The offset must be 1KB-aligned. */
179         if ((planeOffset &
180             ((1 << NV_SURFACE_OFFSET_ALIGNMENT_SHIFT) - 1)) != 0) {
181             return FALSE;
182         }
183 
184         /*
185          * Convert planePitch to units of bytes if it's currently specified in
         * units of blocks.  Each block is 64 bytes wide.
187          */
188         if (pRequest->layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
189             planePitch <<= NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH;
190         }
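        /*
         * For example (illustrative): assuming NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH
         * is 6 (64-byte-wide blocks), a block-linear pitch of 8 blocks becomes
         * 8 << 6 = 512 bytes.
         */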
191 
192         /*
193          * Convert width to bytes.
194          */
195         widthInBytes = widthInPixels;
196 
197         if (pFormatInfo->isYUV) {
198             NvU8 divisor = 1;
199             NvU8 bytesPerBlock = pFormatInfo->yuv.storageBitsPerComponent >> 3;
200 
201             switch (pFormatInfo->numPlanes) {
202             case 3:
203                 /* planar */
204                 if (planeIndex > 0) {
205                     divisor = pFormatInfo->yuv.horizChromaDecimationFactor;
206                 }
207                 break;
208 
209             case 2:
210                 /* semi-planar */
211                 if (planeIndex > 0) {
212                     divisor = pFormatInfo->yuv.horizChromaDecimationFactor;
213                     bytesPerBlock *= 2;
214                 }
215                 break;
216 
217             case 1:
218                 /* 4:2:2 packed */
219                 bytesPerBlock *= 2;
220             }
221 
222             widthInBytes *= bytesPerBlock;
223             /* Dimensions of decimated planes of odd-width YUV surfaces are
224              * supposed to be rounded up */
225             widthInBytes = (widthInBytes + (divisor - 1)) / divisor;
226         } else {
227             widthInBytes *= pFormatInfo->rgb.bytesPerPixel;
228         }
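        /*
         * For example (illustrative): for a semi-planar 4:2:0 format with
         * 8 storage bits per component and a horizontal chroma decimation
         * factor of 2, plane 1 has bytesPerBlock = 2 and divisor = 2, so a
         * width of 101 pixels yields widthInBytes = (101 * 2 + 1) / 2 = 101.
         */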
229 
230         /*
231          * Check that an entire line of pixels will fit in the pitch value
232          * specified.
233          */
234         if (widthInBytes > planePitch) {
235             return FALSE;
236         }
237 
238         /*
239          * Check that the entire memory region occupied by this plane falls
240          * within the size of the underlying memory allocation.
241          *
         * Force planeEffectiveLines to be even before dividing by
         * vertChromaDecimationFactor. The height of the source fetch rectangle
         * must be even anyway if there's vertical decimation.
245          */
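        /*
         * For example (illustrative): with a vertical chroma decimation factor
         * of 2 and heightInPixels == 481, a chroma plane is treated as
         * (481 & ~1) / 2 = 240 effective lines.
         */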
246         if (planeIndex != 0 && pFormatInfo->isYUV &&
247             pFormatInfo->yuv.vertChromaDecimationFactor > 1) {
248             planeEffectiveLines = planeEffectiveLines & ~(0x1);
249             planeEffectiveLines /= pFormatInfo->yuv.vertChromaDecimationFactor;
250         }
251 
252         planeSizeInBytes = planeEffectiveLines * planePitch;
253 
254         if ((pRequest->isoType == NVKMS_MEMORY_ISO) &&
255             (planeSizeInBytes == 0U))
256         {
257             nvEvoLog(EVO_LOG_ERROR, "Plane size calculated during ISO surface registration is 0");
258             return FALSE;
259         }
260 
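        /*
         * For example (illustrative): a plane with 1080 effective lines and a
         * 4096-byte pitch occupies 1080 * 4096 = 4423680 bytes, which must fit
         * within rmObjectSizeInBytes starting at planeOffset.
         */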
261         if ((planeSizeInBytes > rmObjectSizeInBytes) ||
262             (planeOffset > (rmObjectSizeInBytes - planeSizeInBytes))) {
263             return FALSE;
264         }
265     }
266 
267     return TRUE;
268 }
269 
270 static NvBool ValidateRegisterSurfaceRequest(
271     NVDevEvoPtr pDevEvo,
272     const struct NvKmsRegisterSurfaceRequest *pRequest)
273 {
274     const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
275         nvKmsGetSurfaceMemoryFormatInfo(pRequest->format);
276 
277     /*
     * Make sure the given format is valid and not some garbage number.  This
     * check is needed even when noDisplayHardwareAccess is TRUE, since the
     * per-channel format checks below are skipped in that case.
281      */
282     if (pFormatInfo->depth == 0) {
283         return FALSE;
284     }
285 
286     /*
     * NvKmsSurfaceMemoryFormat includes a few formats that we will never
     * display.  HeadSurface wants to texture from several of them, but we
     * won't (and can't) display surfaces with those formats.  Reject any
     * attempt to register a surface that is marked for display but uses one
     * of those formats.
292      */
293     if (!pRequest->noDisplayHardwareAccess) {
294         /*
295          * This isn't a perfect check since we can't predict which channel this
296          * surface will be used on, but we should definitely reject a format if
297          * it isn't usable on any channel.
298          */
299         NvBool usableOnAnyChannel = FALSE;
300         NvU8 layer;
301 
302         for (layer = 0;
303              layer < ARRAY_LEN(pDevEvo->caps.layerCaps);
304              layer++) {
305 
306             if (NVBIT64(pRequest->format) &
307                 pDevEvo->caps.layerCaps[layer].supportedSurfaceMemoryFormats) {
308                 usableOnAnyChannel = TRUE;
309                 break;
310             }
311         }
312 
313         if (!usableOnAnyChannel) {
314             return FALSE;
315         }
316 
317         if (!pDevEvo->hal->ValidateWindowFormat(pRequest->format, NULL, NULL)) {
318             return FALSE;
319         }
320     }
321 
322     if (!ValidatePlaneProperties(pDevEvo, pRequest)) {
323         return FALSE;
324     }
325 
326     /* XXX Validate surface properties. */
327 
328     return TRUE;
329 }
330 
331 
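/*
 * Register a surface on behalf of the client described by pOpenDev: validate
 * the request, allocate an NVSurfaceEvoRec and an API handle for it, import
 * or duplicate the per-plane memory into nvkms's RM client, bind display
 * context DMAs when display hardware access is required, create the
 * headSurface GPU mapping (and, when needed, per-subdevice CPU mappings), and
 * return the new handle in pParams->reply.  On failure, everything allocated
 * so far is torn down and pParams->reply.surfaceHandle is left as zero.
 */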
332 void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo,
333                           struct NvKmsPerOpenDev *pOpenDev,
334                           struct NvKmsRegisterSurfaceParams *pParams,
335                           enum NvHsMapPermissions hsMapPermissions)
336 {
337     NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
338         nvGetSurfaceHandlesFromOpenDev(pOpenDev);
339     const struct NvKmsRegisterSurfaceRequest *pRequest = &pParams->request;
340     NVSurfaceEvoPtr pSurfaceEvo = NULL;
341     NvKmsSurfaceHandle surfaceHandle = 0;
342     NvU32 result;
343     NvU8 planeIndex;
344     NvBool nisoMemory = (pRequest->isoType == NVKMS_MEMORY_NISO);
345 
346     /*
347      * HeadSurface needs a CPU mapping of surfaces containing semaphores, in
348      * order to check, from the CPU, if a semaphore-interlocked flip is ready.
349      */
350     const NvBool needCpuMapping = nisoMemory && pDevEvo->isHeadSurfaceSupported;
351 
352     nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
353 
354     if (!ValidateRegisterSurfaceRequest(pDevEvo, pRequest)) {
355         goto fail;
356     }
357 
358     pSurfaceEvo = nvCalloc(1, sizeof(*pSurfaceEvo));
359 
360     if (pSurfaceEvo == NULL) {
361         goto fail;
362     }
363 
364     pSurfaceEvo->format = pRequest->format;
365 
366     surfaceHandle = nvEvoCreateApiHandle(pOpenDevSurfaceHandles, pSurfaceEvo);
367 
368     if (surfaceHandle == 0) {
369         goto fail;
370     }
371 
372     FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) {
373 
374         const NvU32 planeRmHandle =
375             nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
376 
377         if (planeRmHandle == 0) {
378             goto fail;
379         }
380 
381         pSurfaceEvo->planes[planeIndex].rmHandle = planeRmHandle;
382 
383         if (pRequest->useFd) {
            /*
             * On T234, the 'fd' provided is allocated outside of RM, whereas
             * on dGPU it is allocated by RM.  Check whether the fd is
             * associated with an NVIDIA character device: if it is, consider
             * it to belong to RM.  Based on whether or not the fd belongs to
             * RM, a different mechanism is needed to import it.
             */
391             if (nvkms_fd_is_nvidia_chardev(pRequest->planes[planeIndex].u.fd)) {
392                 NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS importParams = { };
393                 importParams.fd = pRequest->planes[planeIndex].u.fd;
394                 importParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
395                 importParams.object.data.rmObject.hDevice = pDevEvo->deviceHandle;
396                 importParams.object.data.rmObject.hParent = pDevEvo->deviceHandle;
397                 importParams.object.data.rmObject.hObject = planeRmHandle;
398 
399                 result = nvRmApiControl(nvEvoGlobal.clientHandle,
400                                         nvEvoGlobal.clientHandle,
401                                         NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD,
402                                         &importParams,
403                                         sizeof(importParams));
404             } else {
405                 /*
                 * If the 'fd' doesn't belong to resman, assume that it was
                 * allocated by some other dma-buf allocator (such as nvmap).
408                  */
409                 NV_OS_DESC_MEMORY_ALLOCATION_PARAMS allocParams = { };
410 
411                 allocParams.type = NVOS32_TYPE_IMAGE;
412                 allocParams.descriptor =
413                     (NvP64)(NvU64)(pRequest->planes[planeIndex].u.fd);
414                 allocParams.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE;
415                 allocParams.limit = pRequest->planes[planeIndex].rmObjectSizeInBytes - 1;
416 
417                 allocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI,
418                                                allocParams.attr);
419                 allocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE,
420                                                 _NO, allocParams.attr2);
421 
422                 /*
                 * The NVKMS client performing the import doesn't know the
                 * original CPU cache attributes, so assume WRITE_BACK.  We
                 * only need RM to IOVA-map the memory into the display's
                 * address space, so the CPU cache attributes shouldn't really
                 * matter in this case.
427                  */
428                 allocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY,
429                                                _WRITE_BACK, allocParams.attr);
430                 allocParams.flags = NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED;
431 
432                 switch (pRequest->layout) {
433                     case NvKmsSurfaceMemoryLayoutBlockLinear:
434                         allocParams.attr =
435                             FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR,
436                                         allocParams.attr);
437                         break;
438 
439                     case NvKmsSurfaceMemoryLayoutPitch:
440                         allocParams.attr =
441                             FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH,
442                                         allocParams.attr);
443                         break;
444 
445                     default:
446                         nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Unknown layout");
447                         goto fail;
448                 }
449 
450                 if (nisoMemory) {
451                     allocParams.attr2 =
452                         FLD_SET_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES,
453                                     allocParams.attr2);
454                 }
455 
456                 result = nvRmApiAlloc(nvEvoGlobal.clientHandle,
457                                       pDevEvo->deviceHandle,
458                                       planeRmHandle,
459                                       NV01_MEMORY_SYSTEM_OS_DESCRIPTOR,
460                                       &allocParams);
461 
462                 /*
                 * Bug 200614156: RM doesn't support mapping osdesc objects
                 * into the CPU's address space.
465                  */
466                 nvAssert(!needCpuMapping);
467             }
468         } else {
469             /*
470              * If 'useFd' is not specified, the (rmClient, rmObject) tuple from
471              * the request is an object in the caller's RM client space.
472              * Call RM to dup the memory into nvkms's RM client.
473              */
474             result = nvRmApiDupObject(nvEvoGlobal.clientHandle,
475                                       pDevEvo->deviceHandle,
476                                       planeRmHandle,
477                                       pRequest->rmClient,
478                                       pRequest->planes[planeIndex].u.rmObject,
479                                       0);
480         }
481 
482         if (result != NVOS_STATUS_SUCCESS) {
483             goto fail;
484         }
485 
486         /* XXX Validate sizeInBytes: can we query the surface size from RM? */
487 
488         if (!pRequest->noDisplayHardwareAccess) {
489 
490             const NvU32 planeCtxDma =
491                 nvRmEvoAllocateAndBindDispContextDMA(
492                     pDevEvo,
493                     planeRmHandle,
494                     pRequest->layout,
495                     pRequest->planes[planeIndex].rmObjectSizeInBytes - 1);
496             if (!planeCtxDma) {
497                 goto fail;
498             }
499 
500             pSurfaceEvo->planes[planeIndex].ctxDma = planeCtxDma;
501         }
502 
503         pSurfaceEvo->planes[planeIndex].pitch =
504                             pRequest->planes[planeIndex].pitch;
505         pSurfaceEvo->planes[planeIndex].offset =
506                             pRequest->planes[planeIndex].offset;
507         pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes =
508                             pRequest->planes[planeIndex].rmObjectSizeInBytes;
509     }
510 
511     pSurfaceEvo->requireCtxDma = !pRequest->noDisplayHardwareAccess;
512     pSurfaceEvo->noDisplayCaching = pRequest->noDisplayCaching;
513 
514     /*
     * Map the surface into the GPU's virtual address space, for use with
     * headSurface.  If the surface may be used for semaphores, headSurface
     * will need to write to it through the graphics channel, so force a
     * writable GPU mapping.
     *
     * Only the first plane of the surface is mapped into the GPU's address
     * space; multi-planar semaphore requests were already rejected earlier.
522      */
523     if (nisoMemory) {
524         hsMapPermissions = NvHsMapPermissionsReadWrite;
525     }
526 
527     pSurfaceEvo->gpuAddress = nvHsMapSurfaceToDevice(
528                     pDevEvo,
529                     pSurfaceEvo->planes[0].rmHandle,
530                     pRequest->planes[0].rmObjectSizeInBytes,
531                     hsMapPermissions);
532 
533     if (pSurfaceEvo->gpuAddress == NV_HS_BAD_GPU_ADDRESS) {
534         goto fail;
535     }
536 
537     /*
     * Map only the first plane of the surface into the CPU's address space.
     * This is the only valid plane, since multi-planar semaphore requests
     * were already rejected earlier.
541      */
542     if (needCpuMapping) {
543 
544         NvU32 sd;
545 
546         for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
547 
548             result = nvRmApiMapMemory(
549                     nvEvoGlobal.clientHandle,
550                     pDevEvo->pSubDevices[sd]->handle,
551                     pSurfaceEvo->planes[0].rmHandle,
552                     0,
553                     pRequest->planes[0].rmObjectSizeInBytes,
554                     (void **) &pSurfaceEvo->cpuAddress[sd],
555                     0);
556 
557             if (result != NVOS_STATUS_SUCCESS) {
558                 goto fail;
559             }
560         }
561     }
562 
563     pSurfaceEvo->widthInPixels        = pRequest->widthInPixels;
564     pSurfaceEvo->heightInPixels       = pRequest->heightInPixels;
565     pSurfaceEvo->layout               = pRequest->layout;
566     pSurfaceEvo->log2GobsPerBlockY    = pRequest->log2GobsPerBlockY;
567     pSurfaceEvo->isoType              = pRequest->isoType;
568 
569     pSurfaceEvo->rmRefCnt = 1;
570     pSurfaceEvo->structRefCnt = 1;
571 
572     pSurfaceEvo->owner.pOpenDev = pOpenDev;
573     pSurfaceEvo->owner.surfaceHandle = surfaceHandle;
574 
575     pParams->reply.surfaceHandle = surfaceHandle;
576 
577     return;
578 
579 fail:
580     nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
581 
582     FreeSurfaceEvoRm(pDevEvo, pSurfaceEvo);
583     FreeSurfaceEvoStruct(pSurfaceEvo);
584 }
585 
586 /* Temporary storage used by ClearSurfaceUsage{Collect,Apply}. */
587 struct ClearSurfaceUsageCache {
588     struct {
589         struct {
590             NvBool flipToNull           : 1;
591             NvBool flipSemaphoreToNull  : 1;
592 
593             NvBool needToIdle           : 1;
594         } layer[NVKMS_MAX_LAYERS_PER_HEAD];
595 
596         NvBool flipCursorToNull         : 1;
597     } apiHead[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP];
598 };
599 
600 /*
 * Search for heads where the given surface is used, and populate the
 * structure pointed to by 'pCache' to indicate which channels need to be
 * updated.
603  */
604 static void
605 ClearSurfaceUsageCollect(NVDevEvoPtr pDevEvo,
606                          NVSurfaceEvoPtr pSurfaceEvo,
607                          struct ClearSurfaceUsageCache *pCache)
608 {
609     NVDispEvoPtr pDispEvo;
610     NvU32 apiHead, sd;
611 
612     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
613 
614         for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
615             NvU32 usageMaskOneHead = nvCollectSurfaceUsageMaskOneApiHead(pDispEvo,
616                 apiHead, pSurfaceEvo);
617             NvU32 usageMaskMainLayer = DRF_IDX_VAL(_SURFACE,
618                 _USAGE_MASK, _LAYER, NVKMS_MAIN_LAYER, usageMaskOneHead);
619             NvU32 layer;
620 
621             /*
622              * XXX NVKMS TODO: flip across heads/subdevices for all scenarios
623              * that are flip locked.
624              */
625 
626             if (FLD_TEST_DRF(_SURFACE, _USAGE_MASK_LAYER, _SEMAPHORE,
627                     _ENABLE, usageMaskMainLayer)) {
628                 pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].
629                     flipSemaphoreToNull = TRUE;
630             }
631 
632             if (FLD_TEST_DRF(_SURFACE, _USAGE_MASK_LAYER, _NOTIFIER,
633                     _ENABLE, usageMaskMainLayer) ||
634                     FLD_TEST_DRF(_SURFACE, _USAGE_MASK_LAYER, _SCANOUT,
635                         _ENABLE, usageMaskMainLayer)) {
636                 pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].
637                     flipToNull = TRUE;
638             }
639 
640             for (layer = 0;
641                  layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
642                 NvU32 usageMaskOneLayer = DRF_IDX_VAL(_SURFACE,
643                     _USAGE_MASK, _LAYER, layer, usageMaskOneHead);
644 
645                 if (layer == NVKMS_MAIN_LAYER) {
646                     continue;
647                 }
648 
649                 if (usageMaskOneLayer != 0x0) {
650                     pCache->apiHead[sd][apiHead].layer[layer].
651                         flipToNull = TRUE;
                } else if (pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].
                        flipToNull) {
                    NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES] = { };
                    /*
                     * EVO requires that, when flipping the base channel
                     * (aka main layer) to NULL, the overlay channel is also
                     * flipped to NULL.
                     */
                    if ((pSurfaceEvos[NVKMS_LEFT] != NULL) ||
                            (pSurfaceEvos[NVKMS_RIGHT] != NULL)) {
                        pCache->apiHead[sd][apiHead].layer[layer].
                            flipToNull = TRUE;
                    }
665                 }
666             }
667 
668             if (FLD_TEST_DRF(_SURFACE, _USAGE_MASK, _CURSOR,
669                     _ENABLE, usageMaskOneHead) != 0x0) {
670                 pCache->apiHead[sd][apiHead].flipCursorToNull = TRUE;
671             }
672         }
673     }
674 }
675 
676 /*
 * Do the hard work of babysitting the hardware to ensure that any channels
 * which need clearing have actually been cleared, before proceeding to free
 * memory and remove ctxdmas from the hash table.
680  *
681  * This is achieved in several steps:
682  * 1. Issue a flip of any overlay layer to NULL -- these are processed
683  *    separately since using one Flip request would interlock them, potentially
684  *    exacerbating stuck channels by getting other channels stuck too.
 *    Pre-NVDisplay hardware requires that, when flipping the core channel to
 *    NULL, all satellite channels are also flipped to NULL.  The EVO2 hal
 *    takes care to enable/disable the core surface along with the base
 *    surface; therefore, flip the overlay to NULL before the base.
689  * 2. Issue a flip of any main layer to NULL
690  * 3. Wait for any base/overlay layer that we expect to be idle to actually
691  *    be idle.  If they don't idle in a timely fashion, apply accelerators to
692  *    forcibly idle any problematic channels.
693  * 4. Issue a flip of any core channels to NULL.
694  */
695 static void
696 ClearSurfaceUsageApply(NVDevEvoPtr pDevEvo,
697                        struct ClearSurfaceUsageCache *pCache,
698                        NvBool skipUpdate)
699 {
700     NVDispEvoPtr pDispEvo;
701     NvU32 apiHead, sd;
702     const NvU32 maxApiHeads = pDevEvo->numApiHeads * pDevEvo->numSubDevices;
703     struct NvKmsFlipRequestOneHead *pFlipApiHead =
704         nvCalloc(1, sizeof(*pFlipApiHead) * maxApiHeads);
705     NvU32 numFlipApiHeads = 0;
706 
707     if (pFlipApiHead == NULL) {
708         nvAssert(!"Failed to allocate memory");
709         return;
710     }
711 
712     /* 1. Issue a flip of any overlay layer to NULL */
713     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
714 
715         for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
716 
717             struct NvKmsFlipCommonParams *pRequestOneApiHead =
718                 &pFlipApiHead[numFlipApiHeads].flip;
719             NvU32 layer;
720             NvBool found = FALSE;
721 
722             if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
723                 continue;
724             }
725 
726             for (layer = 0;
727                  layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
728 
729                 if (layer == NVKMS_MAIN_LAYER) {
730                     continue;
731                 }
732 
733                 if (pCache->apiHead[sd][apiHead].layer[layer].flipToNull) {
734                     pRequestOneApiHead->layer[layer].surface.specified = TRUE;
                    // No need to specify sizeIn/sizeOut as we are flipping to a NULL surface.
736                     pRequestOneApiHead->layer[layer].compositionParams.specified = TRUE;
737                     pRequestOneApiHead->layer[layer].syncObjects.specified = TRUE;
738                     pRequestOneApiHead->layer[layer].completionNotifier.specified = TRUE;
739 
740                     found = TRUE;
741 
742                     pCache->apiHead[sd][apiHead].layer[layer].needToIdle = TRUE;
743                 }
744             }
745 
746             if (found) {
747                 pFlipApiHead[numFlipApiHeads].sd = sd;
748                 pFlipApiHead[numFlipApiHeads].head = apiHead;
749                 numFlipApiHeads++;
750                 nvAssert(numFlipApiHeads <= maxApiHeads);
751             }
752         }
753     }
754 
755     if (numFlipApiHeads > 0) {
756         nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev,
757                   pFlipApiHead,
758                   numFlipApiHeads,
759                   TRUE  /* commit */,
760                   FALSE /* allowVrr */,
761                   NULL  /* pReply */,
762                   skipUpdate,
763                   FALSE /* allowFlipLock */);
764 
765         nvkms_memset(pFlipApiHead, 0,
766             sizeof(pFlipApiHead[0]) * numFlipApiHeads);
767         numFlipApiHeads = 0;
768     }
769 
770     /*
     * There is no need to idle the overlay layer before flipping the main
     * channel to NULL: the FlipOverlay90() function in the EVO2 hal makes sure
     * that the overlay's flip to NULL is always interlocked with the core
     * channel, and the base (main layer) channel's flip to NULL can proceed
     * only after the overlay's flip to NULL has completed (the base channel's
     * flip to NULL interlocks with the core channel's flip to NULL).
777      */
778 
779     /* 2. Issue a flip of any main layer to NULL */
780     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
781 
782         for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
783 
784             struct NvKmsFlipCommonParams *pRequestOneApiHead =
785                 &pFlipApiHead[numFlipApiHeads].flip;
786             NvBool found = FALSE;
787 
788             if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
789                 continue;
790             }
791 
792             if (pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].flipToNull ||
793                 pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].flipSemaphoreToNull) {
794 
795                 if (pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].flipToNull) {
796                     pRequestOneApiHead->layer[NVKMS_MAIN_LAYER].surface.specified = TRUE;
                    // No need to specify sizeIn/sizeOut as we are flipping to a NULL surface.
798                     pRequestOneApiHead->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = TRUE;
799 
800                     pCache->apiHead[sd][apiHead].layer[NVKMS_MAIN_LAYER].needToIdle = TRUE;
801                 }
802 
803                 /* XXX arguably we should also idle for this case, but we
804                  * don't currently have a way to do so without also
805                  * clearing the ISO surface */
806                 pRequestOneApiHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.useSyncpt = FALSE;
807                 pRequestOneApiHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE;
808 
809                 found = TRUE;
810             }
811 
812             if (found) {
813                 pFlipApiHead[numFlipApiHeads].sd = sd;
814                 pFlipApiHead[numFlipApiHeads].head = apiHead;
815                 numFlipApiHeads++;
816                 nvAssert(numFlipApiHeads <= maxApiHeads);
817             }
818         }
819     }
820 
821     if (numFlipApiHeads > 0) {
822         nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev,
823                   pFlipApiHead,
824                   numFlipApiHeads,
825                   TRUE  /* commit */,
826                   FALSE /* allowVrr */,
827                   NULL  /* pReply */,
828                   skipUpdate,
829                   FALSE /* allowFlipLock */);
830 
831         nvkms_memset(pFlipApiHead, 0,
832             sizeof(pFlipApiHead[0]) * numFlipApiHeads);
833         numFlipApiHeads = 0;
834     }
835 
836     /*
837      * 3. Wait for any base/overlay layer that we expect to be idle to actually
838      *    be idle.  If they don't idle in a timely fashion, apply accelerators to
839      *    forcibly idle any problematic channels.
840      */
841     if (!skipUpdate) {
842         NvU32 layerMaskPerSdApiHead[NVKMS_MAX_SUBDEVICES]
843             [NVKMS_MAX_HEADS_PER_DISP] = { };
844         FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
845             for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
846                 for (NvU32 layer = 0;
847                      layer < pDevEvo->apiHead[apiHead].numLayers; layer++) {
848                     if (pCache->apiHead[sd][apiHead].layer[layer].needToIdle) {
849                         layerMaskPerSdApiHead[sd][apiHead] |= NVBIT(layer);
850                     }
851                 }
852             }
853         }
854         nvIdleLayerChannels(pDevEvo, layerMaskPerSdApiHead);
855     }
856 
857     /* 4. Issue a flip of any core channels to NULL */
858     FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
859 
860         for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
861 
862             if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
863                 continue;
864             }
865 
866             if (pCache->apiHead[sd][apiHead].flipCursorToNull) {
867                 pFlipApiHead[numFlipApiHeads].flip.cursor.imageSpecified = TRUE;
868                 pFlipApiHead[numFlipApiHeads].sd = sd;
869                 pFlipApiHead[numFlipApiHeads].head = apiHead;
870                 numFlipApiHeads++;
871                 nvAssert(numFlipApiHeads <= maxApiHeads);
872             }
873         }
874     }
875 
876     if (numFlipApiHeads > 0) {
877         nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev,
878                   pFlipApiHead,
879                   numFlipApiHeads,
880                   TRUE  /* commit */,
881                   FALSE /* allowVrr */,
882                   NULL  /* pReply */,
883                   skipUpdate,
884                   FALSE /* allowFlipLock */);
885     }
886 
887     nvFree(pFlipApiHead);
888 }
889 
890 /*
891  * This function unregisters/releases all of the surface handles remaining for
892  * the given pOpenDev.
893  *
894  * It duplicates some functionality of nvEvoUnregisterSurface() and
895  * nvEvoReleaseSurface(), but with an important difference: it processes the
896  * "clear surface usage" step for all surfaces up front, and only once that is
 * complete does it proceed with freeing the surfaces.
898  *
899  * In practice, this makes teardown much smoother than invoking those functions
900  * individually for each surface, particularly in the case that the hardware is
901  * stuck and needs accelerators.  Consider the case where a client has
902  * registered several surfaces, and is flipping between two of them, and the
903  * hardware is stuck on a semaphore acquire that will never complete with
904  * several frames pending in the pushbuffer.  If the first surface processed
905  * by nvEvoUnregisterSurface() happens to be the current "back buffer" (i.e.,
906  * not the most recently pushed surface to be displayed), then
907  * nvEvoUnregisterSurface() will call ClearSurfaceUsage(), but it will find no
908  * channels to clear, and will proceed with nvEvoDecrementSurfaceRefCnts()
909  * which will call nvRMSyncEvoChannel() to drain any outstanding methods.  Due
 * to the stalled semaphore, nvRMSyncEvoChannel() will stall for 2 seconds and
 * time out with a nasty message to the kernel log, and then we'll free the
 * surface and remove its entry from the display hash table anyway.  And that
913  * may happen several times until we finally call nvEvoUnregisterSurface() on
914  * the surface which is the most recently requested flip, where
915  * ClearSurfaceUsage() will finally get a chance to tear down the channel
916  * forcefully by using accelerators to skip the semaphore acquire.  But, some
917  * of the methods which were outstanding and now get processed may reference a
918  * ctxdma which was already freed, triggering nasty Xid messages.
919  *
920  * By gathering up all the channels we can to find which ones to clear first,
921  * we have a much higher chance of avoiding these timeouts.
922  */
923 void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo,
924                              struct NvKmsPerOpenDev *pOpenDev,
925                              NVEvoApiHandlesRec *pOpenDevSurfaceHandles)
926 {
927     NvKmsGenericHandle surfaceHandle;
928     NVSurfaceEvoPtr pSurfaceEvo;
929     struct ClearSurfaceUsageCache cache = { };
930     NvBool needApply = FALSE;
931 
932     FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pOpenDevSurfaceHandles,
933                                         pSurfaceEvo, surfaceHandle) {
934 
935         if (nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) {
936             /*
937              * If something besides the owner has an rmRefCnt reference,
938              * the surface might be in use by EVO; flip to NULL to attempt
939              * to free it.
940              */
941             if (pSurfaceEvo->rmRefCnt > 1) {
942                 ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache);
943                 needApply = TRUE;
944             }
945         }
946     }
947 
948     if (needApply) {
949         ClearSurfaceUsageApply(pDevEvo, &cache, FALSE);
950     }
951 
952     FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pOpenDevSurfaceHandles,
953                                         pSurfaceEvo, surfaceHandle) {
954         const NvBool isOwner =
955             nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle);
956 
957         /* Remove the handle from the calling client's namespace. */
958         nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
959 
960         if (isOwner) {
961             nvEvoDecrementSurfaceRefCnts(pSurfaceEvo);
962         } else {
963             nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo);
964         }
965     }
}
968 
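/*
 * Unregister a surface on behalf of its owner: if anything besides the owner
 * still holds an RM reference, first flip any display usage of the surface to
 * NULL, then remove the handle from the owner's namespace and drop the RM and
 * struct references taken at registration.  Non-owners must use
 * nvEvoReleaseSurface() instead.
 */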
969 void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo,
970                             struct NvKmsPerOpenDev *pOpenDev,
971                             NvKmsSurfaceHandle surfaceHandle,
972                             NvBool skipUpdate)
973 {
974     NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
975         nvGetSurfaceHandlesFromOpenDev(pOpenDev);
976     NVSurfaceEvoPtr pSurfaceEvo;
977 
978     pSurfaceEvo = nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles,
979                                                surfaceHandle);
980     if (pSurfaceEvo == NULL) {
981         return;
982     }
983 
984     if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) {
985         nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN,
986                          "Surface unregister attempted by non-owner; "
987                          "non-owners must release the surface.");
988         return;
989     }
990 
991     /*
992      * If something besides the owner has an rmRefCnt reference,
993      * the surface might be in use by EVO; flip to NULL to attempt
994      * to free it.
995      */
996     if (pSurfaceEvo->rmRefCnt > 1) {
997         struct ClearSurfaceUsageCache cache = { };
998 
999         ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache);
1000         ClearSurfaceUsageApply(pDevEvo, &cache, skipUpdate);
1001     }
1002 
1003     /* Remove the handle from the calling client's namespace. */
1004     nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
1005 
1006     nvEvoDecrementSurfaceRefCnts(pSurfaceEvo);
1007 }
1008 
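/*
 * Release a non-owner's reference to a surface (for example, one obtained by
 * acquiring a granted surface): remove the handle from the caller's namespace
 * and drop only the struct reference count.  Owners must use
 * nvEvoUnregisterSurface() instead.
 */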
1009 void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo,
1010                          struct NvKmsPerOpenDev *pOpenDev,
1011                          NvKmsSurfaceHandle surfaceHandle)
1012 {
1013     NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
1014         nvGetSurfaceHandlesFromOpenDev(pOpenDev);
1015     NVSurfaceEvoPtr pSurfaceEvo;
1016 
1017     pSurfaceEvo = nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles,
1018                                                surfaceHandle);
1019     if (pSurfaceEvo == NULL) {
1020         return;
1021     }
1022 
1023     if (nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) {
1024         nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN,
1025                          "Surface release attempted by owner; "
1026                          "owners must unregister the surface.");
1027         return;
1028     }
1029 
1030     /* Remove the handle from the calling client's namespace. */
1031     nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
1032 
1033     nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo);
1034 }
1035 
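/*
 * Take both an RM reference and a struct reference on the surface.  rmRefCnt
 * keeps the RM memory objects and ctxdmas alive; structRefCnt keeps the
 * NVSurfaceEvoRec itself alive.  A surface with rmRefCnt == 0 but
 * structRefCnt > 0 is an orphan: the only operation still allowed on it is
 * unregistration.
 */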
1036 void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo)
1037 {
1038     nvAssert(!nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo));
1039 
1040     pSurfaceEvo->rmRefCnt++;
1041     pSurfaceEvo->structRefCnt++;
1042 }
1043 
1044 void nvEvoDecrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo)
1045 {
1046     nvAssert(pSurfaceEvo->rmRefCnt >= 1);
1047     pSurfaceEvo->rmRefCnt--;
1048 
1049     if (pSurfaceEvo->rmRefCnt == 0) {
1050         NVDevEvoPtr pDevEvo =
1051             nvGetDevEvoFromOpenDev(pSurfaceEvo->owner.pOpenDev);
1052 
1053         /*
         * Don't sync if this surface was registered as not requiring display
         * hardware access.  This works around (WARs) timeouts that result
         * from OGL unregistering a deferred request fifo: that unregistration
         * would cause a sync here, which may time out if GLS hasn't had the
         * opportunity to release semaphores with pending flips. (Bug 2050970)
1059          */
1060         if (pSurfaceEvo->requireCtxDma) {
1061             nvEvoClearSurfaceUsage(pDevEvo, pSurfaceEvo);
1062         }
1063 
1064         FreeSurfaceEvoRm(pDevEvo, pSurfaceEvo);
1065     }
1066 
1067     nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo);
1068 }
1069 
1070 NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo)
1071 {
1072     return ((pSurfaceEvo->rmRefCnt == NV_U64_MAX) ||
1073             (pSurfaceEvo->structRefCnt == NV_U64_MAX));
1074 }
1075 
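/*
 * Look up a surface by handle and validate it for the intended use.  Reject
 * orphaned surfaces, surfaces registered with noDisplayHardwareAccess when a
 * ctxdma is required, surfaces that the HAL rejects as cursor images, and
 * (for layer or texture use) surfaces whose pitch cannot be programmed into
 * the display hardware's storage pitch field.
 */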
1076 static NVSurfaceEvoPtr GetSurfaceFromHandle(
1077     const NVDevEvoRec *pDevEvo,
1078     const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
1079     const NvKmsSurfaceHandle surfaceHandle,
1080     const NvBool isUsedByCursorChannel,
1081     const NvBool isUsedByLayerChannel,
1082     const NvBool requireCtxDma)
1083 {
1084     NVSurfaceEvoPtr pSurfaceEvo =
1085         nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, surfaceHandle);
1086 
1087     nvAssert(requireCtxDma || (!isUsedByCursorChannel && !isUsedByLayerChannel));
1088 
1089     if (pSurfaceEvo == NULL) {
1090         return NULL;
1091     }
1092 
1093     if (pSurfaceEvo->rmRefCnt == 0) { /* orphan */
1094         return NULL;
1095     }
1096 
1097     if (requireCtxDma && !pSurfaceEvo->requireCtxDma) {
1098         return NULL;
1099     }
1100 
1101     /* Validate that the surface can be used as a cursor image */
1102     if (isUsedByCursorChannel &&
1103         !pDevEvo->hal->ValidateCursorSurface(pDevEvo, pSurfaceEvo)) {
1104         return NULL;
1105     }
1106 
1107     /*
1108      * XXX If !requireCtxDma, fetched surfaces aren't going to be accessed by
1109      * the display hardware, so they shouldn't need to be checked by
1110      * nvEvoGetHeadSetStoragePitchValue(). These surfaces will be used as a
1111      * texture by the 3d engine. But previously all surfaces were checked by
1112      * nvEvoGetHeadSetStoragePitchValue() at registration time, and we don't
1113      * know if nvEvoGetHeadSetStoragePitchValue() was protecting us from any
1114      * surface dimensions that could cause trouble for the 3d engine.
1115      */
1116     if (isUsedByLayerChannel || !requireCtxDma) {
1117         NvU8 planeIndex;
1118 
1119         FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) {
1120             if (nvEvoGetHeadSetStoragePitchValue(
1121                                 pDevEvo,
1122                                 pSurfaceEvo->layout,
1123                                 pSurfaceEvo->planes[planeIndex].pitch) == 0) {
1124                 return NULL;
1125             }
1126         }
1127     }
1128 
1129     return pSurfaceEvo;
1130 }
1131 
1132 NVSurfaceEvoPtr nvEvoGetSurfaceFromHandle(
1133     const NVDevEvoRec *pDevEvo,
1134     const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
1135     const NvKmsSurfaceHandle surfaceHandle,
1136     const NvBool isUsedByCursorChannel,
1137     const NvBool isUsedByLayerChannel)
1138 {
1139     return GetSurfaceFromHandle(pDevEvo,
1140                                 pOpenDevSurfaceHandles,
1141                                 surfaceHandle,
1142                                 isUsedByCursorChannel,
1143                                 isUsedByLayerChannel,
1144                                 TRUE /* requireCtxDma */);
1145 }
1146 
1147 NVSurfaceEvoPtr nvEvoGetSurfaceFromHandleNoCtxDmaOk(
1148     const NVDevEvoRec *pDevEvo,
1149     const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
1150     NvKmsSurfaceHandle surfaceHandle)
1151 {
1152     return GetSurfaceFromHandle(pDevEvo,
1153                                 pOpenDevSurfaceHandles,
1154                                 surfaceHandle,
1155                                 FALSE /* isUsedByCursorChannel */,
1156                                 FALSE /* isUsedByLayerChannel */,
1157                                 FALSE /* requireCtxDma */);
1158 }
1159 
1160 /*!
1161  * Create a deferred request fifo, using the specified pSurfaceEvo.
1162  */
1163 NVDeferredRequestFifoRec *nvEvoRegisterDeferredRequestFifo(
1164     NVDevEvoPtr pDevEvo,
1165     NVSurfaceEvoPtr pSurfaceEvo)
1166 {
1167     NVDeferredRequestFifoRec *pDeferredRequestFifo;
1168     NvU32 ret;
1169 
1170     if (pSurfaceEvo->planes[0].rmObjectSizeInBytes <
1171         sizeof(struct NvKmsDeferredRequestFifo)) {
1172         return NULL;
1173     }
1174 
1175     /*
1176      * XXX validate that the surface is in sysmem; can we query that from
1177      * resman?
1178      */
1179 
1180     pDeferredRequestFifo = nvCalloc(1, sizeof(*pDeferredRequestFifo));
1181 
1182     if (pDeferredRequestFifo == NULL) {
1183         return NULL;
1184     }
1185 
1186     /* Get a CPU mapping of the surface. */
1187 
1188     ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle,
1189                            pDevEvo->deviceHandle,
1190                            pSurfaceEvo->planes[0].rmHandle,
1191                            0,
1192                            sizeof(*pDeferredRequestFifo->fifo),
1193                            (void **) &pDeferredRequestFifo->fifo,
1194                            0);
1195 
1196     if (ret != NVOS_STATUS_SUCCESS) {
1197         nvFree(pDeferredRequestFifo);
1198         return NULL;
1199     }
1200 
1201     pDeferredRequestFifo->pSurfaceEvo = pSurfaceEvo;
1202 
1203     nvEvoIncrementSurfaceRefCnts(pSurfaceEvo);
1204 
1205     return pDeferredRequestFifo;
1206 }
1207 
1208 /*!
1209  * Free the deferred request fifo.
1210  */
1211 void nvEvoUnregisterDeferredRequestFifo(
1212     NVDevEvoPtr pDevEvo,
1213     NVDeferredRequestFifoRec *pDeferredRequestFifo)
1214 {
1215     nvAssert(pDeferredRequestFifo->fifo != NULL);
1216     nvAssert(pDeferredRequestFifo->pSurfaceEvo != NULL);
1217 
1218     nvHsLeaveSwapGroup(pDevEvo, pDeferredRequestFifo, FALSE /* teardown */);
1219 
1220     nvRmApiUnmapMemory(
1221                     nvEvoGlobal.clientHandle,
1222                     pDevEvo->deviceHandle,
1223                     pDeferredRequestFifo->pSurfaceEvo->planes[0].rmHandle,
1224                     pDeferredRequestFifo->fifo,
1225                     0);
1226 
1227     nvEvoDecrementSurfaceRefCnts(pDeferredRequestFifo->pSurfaceEvo);
1228 
1229     nvFree(pDeferredRequestFifo);
1230 }
1231