/*
 * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

#include "nvidia-drm-helper.h"
#include "nvidia-drm-priv.h"
#include "nvidia-drm-crtc.h"
#include "nvidia-drm-connector.h"
#include "nvidia-drm-encoder.h"
#include "nvidia-drm-utils.h"
#include "nvidia-drm-fb.h"
#include "nvidia-drm-ioctl.h"
#include "nvidia-drm-format.h"

#include "nvmisc.h"

#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
#include <linux/nvhost.h>
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
#include <linux/host1x-next.h>
#endif

#if defined(NV_DRM_DRM_COLOR_MGMT_H_PRESENT)
#include <drm/drm_color_mgmt.h>
#endif


#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
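/*
 * Look up the property blob for 'blob_id' (if non-zero), optionally validate
 * its length against 'expected_size', and replace '*blob' with it, adjusting
 * reference counts.  A blob_id of 0 clears the reference.
 */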
static int
nv_drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
                                            struct drm_property_blob **blob,
                                            uint64_t blob_id,
                                            ssize_t expected_size)
{
    struct drm_property_blob *new_blob = NULL;

    if (blob_id != 0) {
        new_blob = drm_property_lookup_blob(dev, blob_id);
        if (new_blob == NULL) {
            return -EINVAL;
        }

        if ((expected_size > 0) &&
            (new_blob->length != expected_size)) {
            drm_property_blob_put(new_blob);
            return -EINVAL;
        }
    }

    drm_property_replace_blob(blob, new_blob);
    drm_property_blob_put(new_blob);

    return 0;
}
#endif

static void nv_drm_plane_destroy(struct drm_plane *plane)
{
    struct nv_drm_plane *nv_plane = to_nv_plane(plane);

    /* plane->state gets freed here */
    drm_plane_cleanup(plane);

    nv_drm_free(nv_plane);
}

static inline void
plane_config_clear(struct NvKmsKapiLayerConfig *layerConfig)
{
    if (layerConfig == NULL) {
        return;
    }

    memset(layerConfig, 0, sizeof(*layerConfig));
    layerConfig->csc = NVKMS_IDENTITY_CSC_MATRIX;
}

static inline void
plane_req_config_disable(struct NvKmsKapiLayerRequestedConfig *req_config)
{
    /* Clear layer config */
    plane_config_clear(&req_config->config);

    /* Set flags to get cleared layer config applied */
    req_config->flags.surfaceChanged = NV_TRUE;
    req_config->flags.srcXYChanged = NV_TRUE;
    req_config->flags.srcWHChanged = NV_TRUE;
    req_config->flags.dstXYChanged = NV_TRUE;
    req_config->flags.dstWHChanged = NV_TRUE;
}

static inline void
cursor_req_config_disable(struct NvKmsKapiCursorRequestedConfig *req_config)
{
    req_config->surface = NULL;
    req_config->flags.surfaceChanged = NV_TRUE;
}

#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
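/*
 * Convert a DRM CTM (3x3, S31.32 sign-magnitude) into an NvKms CSC matrix
 * (3x4, S15.16 two's complement), dropping precision that does not fit.
 */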
static void color_mgmt_config_ctm_to_csc(struct NvKmsCscMatrix *nvkms_csc,
                                         struct drm_color_ctm  *drm_ctm)
{
    int y;

    /* CTM is a 3x3 matrix while ours is 3x4. Zero out the last column. */
    nvkms_csc->m[0][3] = nvkms_csc->m[1][3] = nvkms_csc->m[2][3] = 0;

    for (y = 0; y < 3; y++) {
        int x;

        for (x = 0; x < 3; x++) {
            /*
             * Values in the CTM are encoded in S31.32 sign-magnitude fixed-
             * point format, while NvKms CSC values are signed 2's-complement
             * S15.16 (Ssign-extend12-3.16?) fixed-point format.
             */
            NvU64 ctmVal = drm_ctm->matrix[y*3 + x];
            NvU64 signBit = ctmVal & (1ULL << 63);
            NvU64 magnitude = ctmVal & ~signBit;

            /*
             * Drop the low 16 bits of the fractional part and the high 17 bits
             * of the integral part. Drop 17 bits to avoid corner cases where
             * the highest resulting bit is a 1, causing the `cscVal = -cscVal`
             * line to result in a positive number.
             */
            NvS32 cscVal = (magnitude >> 16) & ((1ULL << 31) - 1);
            if (signBit) {
                cscVal = -cscVal;
            }

            nvkms_csc->m[y][x] = cscVal;
        }
    }
}
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */

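/*
 * Translate the cursor plane's DRM state into an
 * NvKmsKapiCursorRequestedConfig, and set the 'changed' flags based on the
 * difference from the old config.
 */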
static void
cursor_plane_req_config_update(struct drm_plane *plane,
                               struct drm_plane_state *plane_state,
                               struct NvKmsKapiCursorRequestedConfig *req_config)
{
    struct nv_drm_plane *nv_plane = to_nv_plane(plane);
    struct NvKmsKapiCursorRequestedConfig old_config = *req_config;

    if (plane_state->fb == NULL) {
        cursor_req_config_disable(req_config);
        return;
    }

    *req_config = (struct NvKmsKapiCursorRequestedConfig) {
        .surface = to_nv_framebuffer(plane_state->fb)->pSurface,

        .dstX = plane_state->crtc_x,
        .dstY = plane_state->crtc_y,
    };

#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE)
    if (plane->blend_mode_property != NULL && plane->alpha_property != NULL) {

        switch (plane_state->pixel_blend_mode) {
            case DRM_MODE_BLEND_PREMULTI:
                req_config->compParams.compMode =
                    NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA;
                break;
            case DRM_MODE_BLEND_COVERAGE:
                req_config->compParams.compMode =
                    NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA;
                break;
            default:
                /*
                 * We should not hit this, because
                 * plane_state->pixel_blend_mode should only have values
                 * registered in
                 * __nv_drm_plane_create_alpha_blending_properties().
                 */
                WARN_ON("Unsupported blending mode");
                break;

        }

        req_config->compParams.surfaceAlpha =
            plane_state->alpha >> 8;

    } else if (plane->blend_mode_property != NULL) {

        switch (plane_state->pixel_blend_mode) {
            case DRM_MODE_BLEND_PREMULTI:
                req_config->compParams.compMode =
                    NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA;
                break;
            case DRM_MODE_BLEND_COVERAGE:
                req_config->compParams.compMode =
                    NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA;
                break;
            default:
                /*
                 * We should not hit this, because
                 * plane_state->pixel_blend_mode should only have values
                 * registered in
                 * __nv_drm_plane_create_alpha_blending_properties().
                 */
                WARN_ON("Unsupported blending mode");
                break;

        }

    } else {
        req_config->compParams.compMode =
            nv_plane->defaultCompositionMode;
    }
#else
    req_config->compParams.compMode = nv_plane->defaultCompositionMode;
#endif

    /*
     * Unconditionally mark the surface as changed, even if nothing changed,
     * so that we always get a flip event: a DRM client may flip with
     * the same surface and wait for a flip event.
     */
    req_config->flags.surfaceChanged = NV_TRUE;

    if (old_config.surface == NULL &&
        old_config.surface != req_config->surface) {
        req_config->flags.dstXYChanged = NV_TRUE;
        return;
    }

    req_config->flags.dstXYChanged =
        old_config.dstX != req_config->dstX ||
        old_config.dstY != req_config->dstY;
}

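/*
 * Translate a primary/overlay plane's DRM state (surface, src/dst rectangles,
 * rotation/reflection, blending, syncpts, HDR metadata) into an
 * NvKmsKapiLayerRequestedConfig, and set the 'changed' flags based on the
 * difference from the old config.  Returns 0 on success or a negative value
 * on failure.
 */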
static int
plane_req_config_update(struct drm_plane *plane,
                        struct drm_plane_state *plane_state,
                        struct NvKmsKapiLayerRequestedConfig *req_config)
{
    struct nv_drm_plane *nv_plane = to_nv_plane(plane);
    struct NvKmsKapiLayerConfig old_config = req_config->config;
    struct nv_drm_device *nv_dev = to_nv_device(plane->dev);
    struct nv_drm_plane_state *nv_drm_plane_state =
        to_nv_drm_plane_state(plane_state);

    if (plane_state->fb == NULL) {
        plane_req_config_disable(req_config);
        return 0;
    }

    *req_config = (struct NvKmsKapiLayerRequestedConfig) {
        .config = {
            .surface = to_nv_framebuffer(plane_state->fb)->pSurface,

            /* Source values are 16.16 fixed point */
            .srcX = plane_state->src_x >> 16,
            .srcY = plane_state->src_y >> 16,
            .srcWidth  = plane_state->src_w >> 16,
            .srcHeight = plane_state->src_h >> 16,

            .dstX = plane_state->crtc_x,
            .dstY = plane_state->crtc_y,
            .dstWidth  = plane_state->crtc_w,
            .dstHeight = plane_state->crtc_h,

            .csc = old_config.csc
        },
    };

#if defined(NV_DRM_ROTATION_AVAILABLE)
    /*
     * plane_state->rotation is only valid when plane->rotation_property
     * is non-NULL.
     */
    if (plane->rotation_property != NULL) {
        if (plane_state->rotation & DRM_MODE_REFLECT_X) {
            req_config->config.rrParams.reflectionX = true;
        }

        if (plane_state->rotation & DRM_MODE_REFLECT_Y) {
            req_config->config.rrParams.reflectionY = true;
        }

        switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
            case DRM_MODE_ROTATE_0:
                req_config->config.rrParams.rotation = NVKMS_ROTATION_0;
                break;
            case DRM_MODE_ROTATE_90:
                req_config->config.rrParams.rotation = NVKMS_ROTATION_90;
                break;
            case DRM_MODE_ROTATE_180:
                req_config->config.rrParams.rotation = NVKMS_ROTATION_180;
                break;
            case DRM_MODE_ROTATE_270:
                req_config->config.rrParams.rotation = NVKMS_ROTATION_270;
                break;
            default:
                /*
                 * We should not hit this, because
                 * plane_state->rotation should only have values
                 * registered in
                 * __nv_drm_plane_create_rotation_property().
                 */
                WARN_ON("Unsupported rotation");
                break;
        }
    }
#endif

#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE)
    if (plane->blend_mode_property != NULL && plane->alpha_property != NULL) {

        switch (plane_state->pixel_blend_mode) {
            case DRM_MODE_BLEND_PREMULTI:
                req_config->config.compParams.compMode =
                    NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA;
                break;
            case DRM_MODE_BLEND_COVERAGE:
                req_config->config.compParams.compMode =
                    NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA;
                break;
            default:
                /*
                 * We should not hit this, because
                 * plane_state->pixel_blend_mode should only have values
                 * registered in
                 * __nv_drm_plane_create_alpha_blending_properties().
                 */
                WARN_ON("Unsupported blending mode");
                break;

        }

        req_config->config.compParams.surfaceAlpha =
            plane_state->alpha >> 8;

    } else if (plane->blend_mode_property != NULL) {

        switch (plane_state->pixel_blend_mode) {
            case DRM_MODE_BLEND_PREMULTI:
                req_config->config.compParams.compMode =
                    NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA;
                break;
            case DRM_MODE_BLEND_COVERAGE:
                req_config->config.compParams.compMode =
                    NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA;
                break;
            default:
                /*
                 * We should not hit this, because
                 * plane_state->pixel_blend_mode should only have values
                 * registered in
                 * __nv_drm_plane_create_alpha_blending_properties().
                 */
                WARN_ON("Unsupported blending mode");
                break;

        }

    } else {
        req_config->config.compParams.compMode =
            nv_plane->defaultCompositionMode;
    }
#else
    req_config->config.compParams.compMode =
        nv_plane->defaultCompositionMode;
#endif

    req_config->config.inputColorSpace =
        nv_drm_plane_state->input_colorspace;

    req_config->config.syncptParams.preSyncptSpecified = false;
    req_config->config.syncptParams.postSyncptRequested = false;

    if (plane_state->fence != NULL || nv_drm_plane_state->fd_user_ptr) {
        if (!nv_dev->supportsSyncpts) {
            return -1;
        }

#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
#if defined(NV_NVHOST_DMA_FENCE_UNPACK_PRESENT)
        if (plane_state->fence != NULL) {
            int ret = nvhost_dma_fence_unpack(
                          plane_state->fence,
                          &req_config->config.syncptParams.preSyncptId,
                          &req_config->config.syncptParams.preSyncptValue);
            if (ret != 0) {
                return ret;
            }
            req_config->config.syncptParams.preSyncptSpecified = true;
        }
#endif

        if (nv_drm_plane_state->fd_user_ptr) {
            req_config->config.syncptParams.postSyncptRequested = true;
        }
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
        if (plane_state->fence != NULL) {
            int ret = host1x_fence_extract(
                      plane_state->fence,
                      &req_config->config.syncptParams.preSyncptId,
                      &req_config->config.syncptParams.preSyncptValue);
            if (ret != 0) {
                return ret;
            }
            req_config->config.syncptParams.preSyncptSpecified = true;
        }

        if (nv_drm_plane_state->fd_user_ptr) {
            req_config->config.syncptParams.postSyncptRequested = true;
        }
#else
        return -1;
#endif
    }

#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    if (nv_drm_plane_state->hdr_output_metadata != NULL) {
        struct hdr_output_metadata *hdr_metadata =
            nv_drm_plane_state->hdr_output_metadata->data;
        struct hdr_metadata_infoframe *info_frame =
            &hdr_metadata->hdmi_metadata_type1;
        struct nv_drm_device *nv_dev = to_nv_device(plane->dev);
        uint32_t i;

        if (hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1) {
            NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported Metadata Type");
            return -1;
        }

        for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i ++) {
            req_config->config.hdrMetadata.val.displayPrimaries[i].x =
                info_frame->display_primaries[i].x;
            req_config->config.hdrMetadata.val.displayPrimaries[i].y =
                info_frame->display_primaries[i].y;
        }

        req_config->config.hdrMetadata.val.whitePoint.x =
            info_frame->white_point.x;
        req_config->config.hdrMetadata.val.whitePoint.y =
            info_frame->white_point.y;
        req_config->config.hdrMetadata.val.maxDisplayMasteringLuminance =
            info_frame->max_display_mastering_luminance;
        req_config->config.hdrMetadata.val.minDisplayMasteringLuminance =
            info_frame->min_display_mastering_luminance;
        req_config->config.hdrMetadata.val.maxCLL =
            info_frame->max_cll;
        req_config->config.hdrMetadata.val.maxFALL =
            info_frame->max_fall;

        switch (info_frame->eotf) {
            case HDMI_EOTF_SMPTE_ST2084:
                req_config->config.tf = NVKMS_OUTPUT_TF_PQ;
                break;
            case HDMI_EOTF_TRADITIONAL_GAMMA_SDR:
                req_config->config.tf =
                    NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR;
                break;
            default:
                NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported EOTF");
                return -1;
        }

        req_config->config.hdrMetadata.enabled = true;
    } else {
        req_config->config.hdrMetadata.enabled = false;
        req_config->config.tf = NVKMS_OUTPUT_TF_NONE;
    }

    req_config->flags.hdrMetadataChanged =
        ((old_config.hdrMetadata.enabled !=
          req_config->config.hdrMetadata.enabled) ||
         memcmp(&old_config.hdrMetadata.val,
                &req_config->config.hdrMetadata.val,
                sizeof(struct NvKmsHDRStaticMetadata)));

    req_config->flags.tfChanged = (old_config.tf != req_config->config.tf);
#endif

    /*
     * Unconditionally mark the surface as changed, even if nothing changed,
     * so that we always get a flip event: a DRM client may flip with
     * the same surface and wait for a flip event.
     */
    req_config->flags.surfaceChanged = NV_TRUE;

    if (old_config.surface == NULL &&
        old_config.surface != req_config->config.surface) {
        req_config->flags.srcXYChanged = NV_TRUE;
        req_config->flags.srcWHChanged = NV_TRUE;
        req_config->flags.dstXYChanged = NV_TRUE;
        req_config->flags.dstWHChanged = NV_TRUE;
        return 0;
    }

    req_config->flags.srcXYChanged =
        old_config.srcX != req_config->config.srcX ||
        old_config.srcY != req_config->config.srcY;

    req_config->flags.srcWHChanged =
        old_config.srcWidth != req_config->config.srcWidth ||
        old_config.srcHeight != req_config->config.srcHeight;

    req_config->flags.dstXYChanged =
        old_config.dstX != req_config->config.dstX ||
        old_config.dstY != req_config->config.dstY;

    req_config->flags.dstWHChanged =
        old_config.dstWidth != req_config->config.dstWidth ||
        old_config.dstHeight != req_config->config.dstHeight;

    return 0;
}

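/*
 * Return true if an async (tearing) page flip was requested for the primary
 * plane on this CRTC.
 */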
static bool __is_async_flip_requested(const struct drm_plane *plane,
                                      const struct drm_crtc_state *crtc_state)
{
    if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
#if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP)
        return crtc_state->async_flip;
#elif defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS)
        return !!(crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC);
#endif
    }

    return false;
}

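/*
 * Update the cursor requested configuration of each affected CRTC in the
 * atomic state: disable the cursor on the CRTC it is leaving and program it
 * on the CRTC it is being placed on.
 */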
static int __nv_drm_cursor_atomic_check(struct drm_plane *plane,
                                        struct drm_plane_state *plane_state)
{
    struct nv_drm_plane *nv_plane = to_nv_plane(plane);
    int i;
    struct drm_crtc *crtc;
    struct drm_crtc_state *crtc_state;

    WARN_ON(nv_plane->layer_idx != NVKMS_KAPI_LAYER_INVALID_IDX);

    nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) {
        struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state);
        struct NvKmsKapiHeadRequestedConfig *head_req_config =
            &nv_crtc_state->req_config;
        struct NvKmsKapiCursorRequestedConfig *cursor_req_config =
            &head_req_config->cursorRequestedConfig;

        if (plane->state->crtc == crtc &&
            plane->state->crtc != plane_state->crtc) {
            cursor_req_config_disable(cursor_req_config);
            continue;
        }

        if (plane_state->crtc == crtc) {
            cursor_plane_req_config_update(plane, plane_state,
                                           cursor_req_config);
        }
    }

    return 0;
}

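/*
 * Plane ->atomic_check() hook: validate the new plane state and fill in the
 * per-head NvKms requested configuration (layer config, CSC, tearing) that
 * will be consumed when the atomic commit is applied.
 */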
#if defined(NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG)
static int nv_drm_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_atomic_state *state)
#else
static int nv_drm_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_plane_state *plane_state)
#endif
{
    struct nv_drm_plane *nv_plane = to_nv_plane(plane);
#if defined(NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG)
    struct drm_plane_state *plane_state =
        drm_atomic_get_new_plane_state(state, plane);
#endif
    int i;
    struct drm_crtc *crtc;
    struct drm_crtc_state *crtc_state;
    int ret;

    if (plane->type == DRM_PLANE_TYPE_CURSOR) {
        return __nv_drm_cursor_atomic_check(plane, plane_state);
    }

    WARN_ON(nv_plane->layer_idx == NVKMS_KAPI_LAYER_INVALID_IDX);

    nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) {
        struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state);
        struct NvKmsKapiHeadRequestedConfig *head_req_config =
            &nv_crtc_state->req_config;
        struct NvKmsKapiLayerRequestedConfig *plane_requested_config =
            &head_req_config->layerRequestedConfig[nv_plane->layer_idx];

        if (plane->state->crtc == crtc &&
            plane->state->crtc != plane_state->crtc) {
            plane_req_config_disable(plane_requested_config);
            continue;
        }

        if (plane_state->crtc == crtc) {
            ret = plane_req_config_update(plane,
                                          plane_state,
                                          plane_requested_config);
            if (ret != 0) {
                return ret;
            }

#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
            if (crtc_state->color_mgmt_changed) {
                /*
                 * According to the comment in the Linux kernel's
                 * drivers/gpu/drm/drm_color_mgmt.c, if this property is NULL,
                 * the CTM needs to be changed to the identity matrix
                 */
                if (crtc_state->ctm) {
                    color_mgmt_config_ctm_to_csc(&plane_requested_config->config.csc,
                                                 (struct drm_color_ctm *)crtc_state->ctm->data);
                } else {
                    plane_requested_config->config.csc = NVKMS_IDENTITY_CSC_MATRIX;
                }
                plane_requested_config->config.cscUseMain = NV_FALSE;
                plane_requested_config->flags.cscChanged = NV_TRUE;
            }
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */

            if (__is_async_flip_requested(plane, crtc_state)) {
                /*
                 * An async flip requests that the flip happen 'as soon as
                 * possible', meaning that it should not be delayed waiting
                 * for vblank. This may cause tearing on the screen.
                 */
                plane_requested_config->config.minPresentInterval = 0;
                plane_requested_config->config.tearing = NV_TRUE;
            } else {
                plane_requested_config->config.minPresentInterval = 1;
                plane_requested_config->config.tearing = NV_FALSE;
            }
        }
    }

    return 0;
}

#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG)
static bool nv_drm_plane_format_mod_supported(struct drm_plane *plane,
                                              uint32_t format,
                                              uint64_t modifier)
{
    /* All supported modifiers are compatible with all supported formats */
    return true;
}
#endif


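/*
 * Plane ->atomic_set_property() hook for the driver-specific properties
 * (out-fence fd pointer, input colorspace, HDR output metadata).
 */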
static int nv_drm_plane_atomic_set_property(
    struct drm_plane *plane,
    struct drm_plane_state *state,
    struct drm_property *property,
    uint64_t val)
{
    struct nv_drm_device *nv_dev = to_nv_device(plane->dev);
    struct nv_drm_plane_state *nv_drm_plane_state =
        to_nv_drm_plane_state(state);

    if (property == nv_dev->nv_out_fence_property) {
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
        nv_drm_plane_state->fd_user_ptr = u64_to_user_ptr(val);
#endif
        return 0;
    } else if (property == nv_dev->nv_input_colorspace_property) {
        nv_drm_plane_state->input_colorspace = val;
        return 0;
    }
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    else if (property == nv_dev->nv_hdr_output_metadata_property) {
        return nv_drm_atomic_replace_property_blob_from_id(
                nv_dev->dev,
                &nv_drm_plane_state->hdr_output_metadata,
                val,
                sizeof(struct hdr_output_metadata));
    }
#endif

    return -EINVAL;
}

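/*
 * Plane ->atomic_get_property() hook, the read-side counterpart of
 * nv_drm_plane_atomic_set_property().
 */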
static int nv_drm_plane_atomic_get_property(
    struct drm_plane *plane,
    const struct drm_plane_state *state,
    struct drm_property *property,
    uint64_t *val)
{
    struct nv_drm_device *nv_dev = to_nv_device(plane->dev);
    const struct nv_drm_plane_state *nv_drm_plane_state =
        to_nv_drm_plane_state_const(state);

    if (property == nv_dev->nv_out_fence_property) {
        return 0;
    } else if (property == nv_dev->nv_input_colorspace_property) {
        *val = nv_drm_plane_state->input_colorspace;
        return 0;
    }
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    else if (property == nv_dev->nv_hdr_output_metadata_property) {
        const struct nv_drm_plane_state *nv_drm_plane_state =
            to_nv_drm_plane_state_const(state);
        *val = nv_drm_plane_state->hdr_output_metadata ?
            nv_drm_plane_state->hdr_output_metadata->base.id : 0;
        return 0;
    }
#endif

    return -EINVAL;
}

/**
 * nv_drm_plane_atomic_reset - plane state reset hook
 * @plane: DRM plane
 *
 * Allocate an empty DRM plane state.
 */
static void nv_drm_plane_atomic_reset(struct drm_plane *plane)
{
    struct nv_drm_plane_state *nv_plane_state =
        nv_drm_calloc(1, sizeof(*nv_plane_state));

    if (!nv_plane_state) {
        return;
    }

    drm_atomic_helper_plane_reset(plane);

    /*
     * The drm atomic helper function allocates a state object that is the
     * wrong size. Copy its contents into the one we allocated above and
     * replace the pointer.
     */
    if (plane->state) {
        nv_plane_state->base = *plane->state;
        kfree(plane->state);
        plane->state = &nv_plane_state->base;
    } else {
        kfree(nv_plane_state);
    }
}


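/*
 * Duplicate the plane state, including the driver-specific fields and a
 * reference to the HDR output metadata blob when present.
 */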
static struct drm_plane_state *
nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
{
    struct nv_drm_plane_state *nv_old_plane_state =
        to_nv_drm_plane_state(plane->state);
    struct nv_drm_plane_state *nv_plane_state =
        nv_drm_calloc(1, sizeof(*nv_plane_state));

    if (nv_plane_state == NULL) {
        return NULL;
    }

    __drm_atomic_helper_plane_duplicate_state(plane, &nv_plane_state->base);

    nv_plane_state->fd_user_ptr = nv_old_plane_state->fd_user_ptr;
    nv_plane_state->input_colorspace = nv_old_plane_state->input_colorspace;

#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    nv_plane_state->hdr_output_metadata = nv_old_plane_state->hdr_output_metadata;
    if (nv_plane_state->hdr_output_metadata) {
        drm_property_blob_get(nv_plane_state->hdr_output_metadata);
    }
#endif

    return &nv_plane_state->base;
}

static inline void __nv_drm_plane_atomic_destroy_state(
    struct drm_plane *plane,
    struct drm_plane_state *state)
{
#if defined(NV_DRM_ATOMIC_HELPER_PLANE_DESTROY_STATE_HAS_PLANE_ARG)
    __drm_atomic_helper_plane_destroy_state(plane, state);
#else
    __drm_atomic_helper_plane_destroy_state(state);
#endif

#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    {
        struct nv_drm_plane_state *nv_drm_plane_state =
            to_nv_drm_plane_state(state);
        drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
    }
#endif
}

static void nv_drm_plane_atomic_destroy_state(
    struct drm_plane *plane,
    struct drm_plane_state *state)
{
    __nv_drm_plane_atomic_destroy_state(plane, state);

    nv_drm_free(to_nv_drm_plane_state(state));
}

static const struct drm_plane_funcs nv_plane_funcs = {
    .update_plane           = drm_atomic_helper_update_plane,
    .disable_plane          = drm_atomic_helper_disable_plane,
    .destroy                = nv_drm_plane_destroy,
    .reset                  = nv_drm_plane_atomic_reset,
    .atomic_get_property    = nv_drm_plane_atomic_get_property,
    .atomic_set_property    = nv_drm_plane_atomic_set_property,
    .atomic_duplicate_state = nv_drm_plane_atomic_duplicate_state,
    .atomic_destroy_state   = nv_drm_plane_atomic_destroy_state,
#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG)
    .format_mod_supported   = nv_drm_plane_format_mod_supported,
#endif
};

static const struct drm_plane_helper_funcs nv_plane_helper_funcs = {
    .atomic_check   = nv_drm_plane_atomic_check,
};

static void nv_drm_crtc_destroy(struct drm_crtc *crtc)
{
    struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);

    drm_crtc_cleanup(crtc);

    nv_drm_free(nv_crtc);
}

static inline void
__nv_drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
                                          struct drm_crtc_state *crtc_state)
{
#if defined(NV_DRM_ATOMIC_HELPER_CRTC_DESTROY_STATE_HAS_CRTC_ARG)
    __drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
#else
    __drm_atomic_helper_crtc_destroy_state(crtc_state);
#endif
}

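/*
 * Copy the modeset and per-layer configuration from the old head requested
 * config into the new one, leaving all of the 'changed' flags cleared.
 */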
static inline void nv_drm_crtc_duplicate_req_head_modeset_config(
    const struct NvKmsKapiHeadRequestedConfig *old,
    struct NvKmsKapiHeadRequestedConfig *new)
{
    uint32_t i;

    /*
     * Do not duplicate fields like the 'modeChanged' flags that express the
     * delta of the new configuration with respect to the previous/old one,
     * because the new configuration has no changes yet with respect to the
     * old one!
     */
    *new = (struct NvKmsKapiHeadRequestedConfig) {
        .modeSetConfig = old->modeSetConfig,
    };

    for (i = 0; i < ARRAY_SIZE(old->layerRequestedConfig); i++) {
        new->layerRequestedConfig[i] = (struct NvKmsKapiLayerRequestedConfig) {
            .config = old->layerRequestedConfig[i].config,
        };
    }
}

static inline struct nv_drm_crtc_state *nv_drm_crtc_state_alloc(void)
{
    struct nv_drm_crtc_state *nv_state = nv_drm_calloc(1, sizeof(*nv_state));
    int i;

    if (nv_state == NULL) {
        return NULL;
    }

    for (i = 0; i < ARRAY_SIZE(nv_state->req_config.layerRequestedConfig); i++) {
        plane_config_clear(&nv_state->req_config.layerRequestedConfig[i].config);
    }
    return nv_state;
}


/**
 * nv_drm_atomic_crtc_reset - crtc state reset hook
 * @crtc: DRM crtc
 *
 * Allocate an empty DRM crtc state.
 */
static void nv_drm_atomic_crtc_reset(struct drm_crtc *crtc)
{
    struct nv_drm_crtc_state *nv_state = nv_drm_crtc_state_alloc();

    if (!nv_state) {
        return;
    }

    drm_atomic_helper_crtc_reset(crtc);

    /*
     * The drm atomic helper function allocates a state object that is the
     * wrong size. Copy its contents into the one we allocated above and
     * replace the pointer.
     */
    if (crtc->state) {
        nv_state->base = *crtc->state;
        kfree(crtc->state);
        crtc->state = &nv_state->base;
    } else {
        kfree(nv_state);
    }
}

/**
 * nv_drm_atomic_crtc_duplicate_state - crtc state duplicate hook
 * @crtc: DRM crtc
 *
 * Allocate and associate flip state with the DRM crtc state; this flip state
 * will be consumed when the atomic update is committed to hardware by
 * nv_drm_atomic_helper_commit_tail().
 */
static struct drm_crtc_state*
nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
    struct nv_drm_crtc_state *nv_state = nv_drm_crtc_state_alloc();

    if (nv_state == NULL) {
        return NULL;
    }

    if ((nv_state->nv_flip =
            nv_drm_calloc(1, sizeof(*(nv_state->nv_flip)))) == NULL) {
        nv_drm_free(nv_state);
        return NULL;
    }

    __drm_atomic_helper_crtc_duplicate_state(crtc, &nv_state->base);

    INIT_LIST_HEAD(&nv_state->nv_flip->list_entry);
    INIT_LIST_HEAD(&nv_state->nv_flip->deferred_flip_list);

    nv_drm_crtc_duplicate_req_head_modeset_config(
        &(to_nv_crtc_state(crtc->state)->req_config),
        &nv_state->req_config);

    nv_state->ilut_ramps = NULL;
    nv_state->olut_ramps = NULL;

    return &nv_state->base;
}

/**
 * nv_drm_atomic_crtc_destroy_state - crtc state destroy hook
 * @crtc: DRM crtc
 * @state: DRM crtc state object to destroy
 *
 * Destroy the flip state associated with the given crtc state if it was not
 * consumed because the atomic commit failed.
 */
static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc,
                                             struct drm_crtc_state *state)
{
    struct nv_drm_crtc_state *nv_state = to_nv_crtc_state(state);

    if (nv_state->nv_flip != NULL) {
        nv_drm_free(nv_state->nv_flip);
        nv_state->nv_flip = NULL;
    }

    __nv_drm_atomic_helper_crtc_destroy_state(crtc, &nv_state->base);

    nv_drm_free(nv_state->ilut_ramps);
    nv_drm_free(nv_state->olut_ramps);

    nv_drm_free(nv_state);
}

static struct drm_crtc_funcs nv_crtc_funcs = {
    .set_config             = drm_atomic_helper_set_config,
    .page_flip              = drm_atomic_helper_page_flip,
    .reset                  = nv_drm_atomic_crtc_reset,
    .destroy                = nv_drm_crtc_destroy,
    .atomic_duplicate_state = nv_drm_atomic_crtc_duplicate_state,
    .atomic_destroy_state   = nv_drm_atomic_crtc_destroy_state,
#if defined(NV_DRM_ATOMIC_HELPER_LEGACY_GAMMA_SET_PRESENT)
    .gamma_set = drm_atomic_helper_legacy_gamma_set,
#endif
};

/*
 * In kernel versions before the addition of
 * drm_crtc_state::connectors_changed, connector changes were
 * reflected in drm_crtc_state::mode_changed.
 */
static inline bool
nv_drm_crtc_state_connectors_changed(struct drm_crtc_state *crtc_state)
{
#if defined(NV_DRM_CRTC_STATE_HAS_CONNECTORS_CHANGED)
    return crtc_state->connectors_changed;
#else
    return crtc_state->mode_changed;
#endif
}

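/*
 * Append the display corresponding to the connector's detected encoder to
 * the head modeset config, failing if there is no encoder or no room left.
 */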
static int head_modeset_config_attach_connector(
    struct nv_drm_connector *nv_connector,
    struct NvKmsKapiHeadModeSetConfig *head_modeset_config)
{
    struct nv_drm_encoder *nv_encoder = nv_connector->nv_detected_encoder;

    if (NV_DRM_WARN(nv_encoder == NULL ||
                    head_modeset_config->numDisplays >=
                        ARRAY_SIZE(head_modeset_config->displays))) {
        return -EINVAL;
    }
    head_modeset_config->displays[head_modeset_config->numDisplays++] =
        nv_encoder->hDisplay;
    return 0;
}

#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
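/*
 * Copy a DRM color LUT into NvKms LUT ramps; the LUT length must match
 * NVKMS_LUT_ARRAY_SIZE.
 */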
static int color_mgmt_config_copy_lut(struct NvKmsLutRamps *nvkms_lut,
                                      struct drm_color_lut *drm_lut,
                                      uint64_t lut_len)
{
    uint64_t i = 0;
    if (lut_len != NVKMS_LUT_ARRAY_SIZE) {
        return -EINVAL;
    }

    /*
     * Both NvKms and drm LUT values are 16-bit linear values. NvKms LUT ramps
     * are in arrays in a single struct while drm LUT ramps are an array of
     * structs.
     */
    for (i = 0; i < lut_len; i++) {
        nvkms_lut->red[i]   = drm_lut[i].red;
        nvkms_lut->green[i] = drm_lut[i].green;
        nvkms_lut->blue[i]  = drm_lut[i].blue;
    }
    return 0;
}

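/*
 * Apply the CRTC's degamma (input) and gamma (output) LUTs to the NvKms head
 * requested configuration, allocating ramp storage in the CRTC state.  A
 * missing LUT is programmed as linear.  On failure, the allocated ramps are
 * freed and all dangling pointers are cleared.
 */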
static int color_mgmt_config_set_luts(struct nv_drm_crtc_state *nv_crtc_state,
                                      struct NvKmsKapiHeadRequestedConfig *req_config)
{
    struct NvKmsKapiHeadModeSetConfig *modeset_config =
        &req_config->modeSetConfig;
    struct drm_crtc_state *crtc_state = &nv_crtc_state->base;
    int ret = 0;

    /*
     * According to the comment in the Linux kernel's
     * drivers/gpu/drm/drm_color_mgmt.c, if either property is NULL, that LUT
     * needs to be changed to a linear LUT
     */

    req_config->flags.lutChanged = NV_TRUE;
    if (crtc_state->degamma_lut) {
        struct drm_color_lut *degamma_lut = NULL;
        uint64_t degamma_len = 0;

        nv_crtc_state->ilut_ramps = nv_drm_calloc(1, sizeof(*nv_crtc_state->ilut_ramps));
        if (!nv_crtc_state->ilut_ramps) {
            ret = -ENOMEM;
            goto fail;
        }

        degamma_lut = (struct drm_color_lut *)crtc_state->degamma_lut->data;
        degamma_len = crtc_state->degamma_lut->length /
                      sizeof(struct drm_color_lut);

        if ((ret = color_mgmt_config_copy_lut(nv_crtc_state->ilut_ramps,
                                              degamma_lut,
                                              degamma_len)) != 0) {
            goto fail;
        }

        modeset_config->lut.input.specified = NV_TRUE;
        modeset_config->lut.input.depth     = 30; /* specify the full LUT */
        modeset_config->lut.input.start     = 0;
        modeset_config->lut.input.end       = degamma_len - 1;
        modeset_config->lut.input.pRamps    = nv_crtc_state->ilut_ramps;
    } else {
        /* setting input.end to 0 is equivalent to disabling the LUT, which
         * should be equivalent to a linear LUT */
        modeset_config->lut.input.specified = NV_TRUE;
        modeset_config->lut.input.depth     = 30; /* specify the full LUT */
        modeset_config->lut.input.start     = 0;
        modeset_config->lut.input.end       = 0;
        modeset_config->lut.input.pRamps    = NULL;

    }

    if (crtc_state->gamma_lut) {
        struct drm_color_lut *gamma_lut = NULL;
        uint64_t gamma_len = 0;

        nv_crtc_state->olut_ramps = nv_drm_calloc(1, sizeof(*nv_crtc_state->olut_ramps));
        if (!nv_crtc_state->olut_ramps) {
            ret = -ENOMEM;
            goto fail;
        }

        gamma_lut = (struct drm_color_lut *)crtc_state->gamma_lut->data;
        gamma_len = crtc_state->gamma_lut->length /
                    sizeof(struct drm_color_lut);

        if ((ret = color_mgmt_config_copy_lut(nv_crtc_state->olut_ramps,
                                              gamma_lut,
                                              gamma_len)) != 0) {
            goto fail;
        }

        modeset_config->lut.output.specified = NV_TRUE;
        modeset_config->lut.output.enabled   = NV_TRUE;
        modeset_config->lut.output.pRamps    = nv_crtc_state->olut_ramps;
    } else {
        /* disabling the output LUT should be equivalent to setting a linear
         * LUT */
        modeset_config->lut.output.specified = NV_TRUE;
        modeset_config->lut.output.enabled   = NV_FALSE;
        modeset_config->lut.output.pRamps    = NULL;
    }

    return 0;

fail:
    /* free allocated state */
    nv_drm_free(nv_crtc_state->ilut_ramps);
    nv_drm_free(nv_crtc_state->olut_ramps);

    /* remove dangling pointers */
    nv_crtc_state->ilut_ramps = NULL;
    nv_crtc_state->olut_ramps = NULL;
    modeset_config->lut.input.pRamps = NULL;
    modeset_config->lut.output.pRamps = NULL;

    /* prevent attempts at reading NULLs */
    modeset_config->lut.input.specified = NV_FALSE;
    modeset_config->lut.output.specified = NV_FALSE;

    return ret;
}
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */

/**
 * nv_drm_crtc_atomic_check() can fail after it has modified
 * the 'nv_drm_crtc_state::req_config', that is fine because 'nv_drm_crtc_state'
 * will be discarded if ->atomic_check() fails.
 */
#if defined(NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG)
static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
                                    struct drm_atomic_state *state)
#else
static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
                                    struct drm_crtc_state *crtc_state)
#endif
{
#if defined(NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG)
    struct drm_crtc_state *crtc_state =
        drm_atomic_get_new_crtc_state(state, crtc);
#endif
    struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state);
    struct NvKmsKapiHeadRequestedConfig *req_config =
        &nv_crtc_state->req_config;
    int ret = 0;
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
    struct nv_drm_device *nv_dev = to_nv_device(crtc_state->crtc->dev);
#endif

    if (crtc_state->mode_changed) {
        drm_mode_to_nvkms_display_mode(&crtc_state->mode,
                                       &req_config->modeSetConfig.mode);
        req_config->flags.modeChanged = NV_TRUE;
    }

    if (nv_drm_crtc_state_connectors_changed(crtc_state)) {
        struct NvKmsKapiHeadModeSetConfig *config = &req_config->modeSetConfig;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int j;

        config->numDisplays = 0;

        memset(config->displays, 0, sizeof(config->displays));

        req_config->flags.displaysChanged = NV_TRUE;

        nv_drm_for_each_connector_in_state(crtc_state->state,
                                           connector, connector_state, j) {
            if (connector_state->crtc != crtc) {
                continue;
            }

            if ((ret = head_modeset_config_attach_connector(
                            to_nv_connector(connector),
                            config)) != 0) {
                return ret;
            }
        }
    }

    if (crtc_state->active_changed) {
        req_config->modeSetConfig.bActive = crtc_state->active;
        req_config->flags.activeChanged = NV_TRUE;
    }

#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED)
    req_config->modeSetConfig.vrrEnabled = crtc_state->vrr_enabled;
#endif

#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
    if (nv_dev->drmMasterChangedSinceLastAtomicCommit &&
        (crtc_state->degamma_lut ||
         crtc_state->ctm ||
         crtc_state->gamma_lut)) {

        crtc_state->color_mgmt_changed = NV_TRUE;
    }
    if (crtc_state->color_mgmt_changed) {
        if ((ret = color_mgmt_config_set_luts(nv_crtc_state, req_config)) != 0) {
            return ret;
        }
    }
#endif

    return ret;
}

static bool
nv_drm_crtc_mode_fixup(struct drm_crtc *crtc,
                       const struct drm_display_mode *mode,
                       struct drm_display_mode *adjusted_mode)
{
    return true;
}

static const struct drm_crtc_helper_funcs nv_crtc_helper_funcs = {
    .atomic_check = nv_drm_crtc_atomic_check,
    .mode_fixup = nv_drm_crtc_mode_fixup,
};

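/*
 * Attach the driver-specific plane properties (out-fence, input colorspace,
 * and, when supported, HDR output metadata) to the plane.
 */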
static void nv_drm_plane_install_properties(
    struct drm_plane *plane,
    NvBool supportsHDR)
{
    struct nv_drm_device *nv_dev = to_nv_device(plane->dev);

    if (nv_dev->nv_out_fence_property) {
        drm_object_attach_property(
            &plane->base, nv_dev->nv_out_fence_property, 0);
    }

    if (nv_dev->nv_input_colorspace_property) {
        drm_object_attach_property(
            &plane->base, nv_dev->nv_input_colorspace_property,
            NVKMS_INPUT_COLORSPACE_NONE);
    }

#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    if (supportsHDR && nv_dev->nv_hdr_output_metadata_property) {
        drm_object_attach_property(
            &plane->base, nv_dev->nv_hdr_output_metadata_property, 0);
    }
#endif
}

static void
__nv_drm_plane_create_alpha_blending_properties(struct drm_plane *plane,
                                                NvU32 validCompModes)
{
#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE)
    if ((validCompModes &
         NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA)) != 0x0 &&
        (validCompModes &
         NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA)) != 0x0) {

        drm_plane_create_alpha_property(plane);
        drm_plane_create_blend_mode_property(plane,
                                             NVBIT(DRM_MODE_BLEND_PREMULTI) |
                                             NVBIT(DRM_MODE_BLEND_COVERAGE));
    } else if ((validCompModes &
                NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) != 0x0 &&
               (validCompModes &
                NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA)) != 0x0) {

        drm_plane_create_blend_mode_property(plane,
                                             NVBIT(DRM_MODE_BLEND_PREMULTI) |
                                             NVBIT(DRM_MODE_BLEND_COVERAGE));
    }
#endif
}

static void
__nv_drm_plane_create_rotation_property(struct drm_plane *plane,
                                        NvU16 validLayerRRTransforms)
{
#if defined(NV_DRM_ROTATION_AVAILABLE)
    enum NvKmsRotation curRotation;
    NvU32 supported_rotations = 0;
    struct NvKmsRRParams rrParams = {
        .rotation = NVKMS_ROTATION_0,
        .reflectionX = true,
        .reflectionY = true,
    };

    if ((NVBIT(NvKmsRRParamsToCapBit(&rrParams)) &
        validLayerRRTransforms) != 0) {
        supported_rotations |= DRM_MODE_REFLECT_X;
        supported_rotations |= DRM_MODE_REFLECT_Y;
    }

    rrParams.reflectionX = false;
    rrParams.reflectionY = false;

    for (curRotation = NVKMS_ROTATION_MIN;
         curRotation <= NVKMS_ROTATION_MAX; curRotation++) {
        rrParams.rotation = curRotation;
        if ((NVBIT(NvKmsRRParamsToCapBit(&rrParams)) &
                    validLayerRRTransforms) == 0) {
            continue;
        }

        switch (curRotation) {
            case NVKMS_ROTATION_0:
                supported_rotations |= DRM_MODE_ROTATE_0;
                break;
            case NVKMS_ROTATION_90:
                supported_rotations |= DRM_MODE_ROTATE_90;
                break;
            case NVKMS_ROTATION_180:
                supported_rotations |= DRM_MODE_ROTATE_180;
                break;
            case NVKMS_ROTATION_270:
                supported_rotations |= DRM_MODE_ROTATE_270;
                break;
            default:
                break;
        }

    }

    if (supported_rotations != 0) {
        drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
                                           supported_rotations);
    }
#endif
}

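/*
 * Create a DRM plane (primary, overlay, or cursor) for the given layer of
 * the given head, using the formats, composition modes, and
 * rotation/reflection capabilities reported in 'pResInfo'.
 */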
static struct drm_plane*
nv_drm_plane_create(struct drm_device *dev,
                    enum drm_plane_type plane_type,
                    uint32_t layer_idx,
                    NvU32 head,
                    const struct NvKmsKapiDeviceResourcesInfo *pResInfo)
{
#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG)
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    const NvU64 linear_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID,
    };
#endif
    enum NvKmsCompositionBlendingMode defaultCompositionMode;
    struct nv_drm_plane *nv_plane = NULL;
    struct nv_drm_plane_state *nv_plane_state = NULL;
    struct drm_plane *plane = NULL;
    int ret = -ENOMEM;
    uint32_t *formats = NULL;
    unsigned int formats_count = 0;
    const NvU32 validCompositionModes =
        (plane_type == DRM_PLANE_TYPE_CURSOR) ?
            pResInfo->caps.validCursorCompositionModes :
            pResInfo->caps.layer[layer_idx].validCompositionModes;
    const long unsigned int nvkms_formats_mask =
        (plane_type == DRM_PLANE_TYPE_CURSOR) ?
            pResInfo->caps.supportedCursorSurfaceMemoryFormats :
            pResInfo->supportedSurfaceMemoryFormats[layer_idx];
    const NvU16 validLayerRRTransforms =
        (plane_type == DRM_PLANE_TYPE_CURSOR) ?
            0x0 : pResInfo->caps.layer[layer_idx].validRRTransforms;

    if ((validCompositionModes &
         NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)) != 0x0) {
        defaultCompositionMode = NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE;
    } else if ((validCompositionModes &
                NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) != 0x0) {
        defaultCompositionMode = NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA;
    } else {
        goto failed;
    }

    formats =
        nv_drm_format_array_alloc(&formats_count,
                                  nvkms_formats_mask);
    if (formats == NULL) {
        goto failed;
    }

    if ((nv_plane = nv_drm_calloc(1, sizeof(*nv_plane))) == NULL) {
        goto failed_plane_alloc;
    }
    plane = &nv_plane->base;

    nv_plane->defaultCompositionMode = defaultCompositionMode;
    nv_plane->layer_idx = layer_idx;

    if ((nv_plane_state =
            nv_drm_calloc(1, sizeof(*nv_plane_state))) == NULL) {
        goto failed_state_alloc;
    }

    plane->state = &nv_plane_state->base;
    plane->state->plane = plane;

    /*
     * possible_crtcs for the primary and cursor planes is left as zero here
     * because drm_crtc_init_with_planes() assigns the planes' possible_crtcs
     * after the crtc is successfully initialized.
     */
    ret = drm_universal_plane_init(
        dev,
        plane,
        (plane_type == DRM_PLANE_TYPE_OVERLAY) ?
        (1 << head) : 0,
        &nv_plane_funcs,
        formats, formats_count,
#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG)
        (plane_type == DRM_PLANE_TYPE_CURSOR) ?
        linear_modifiers : nv_dev->modifiers,
#endif
        plane_type
#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_NAME_ARG)
        , NULL
#endif
        );

    if (ret != 0) {
        goto failed_plane_init;
    }

    drm_plane_helper_add(plane, &nv_plane_helper_funcs);

    if (plane_type != DRM_PLANE_TYPE_CURSOR) {
        nv_drm_plane_install_properties(
                plane,
                pResInfo->supportsHDR[layer_idx]);
    }

    __nv_drm_plane_create_alpha_blending_properties(
            plane,
            validCompositionModes);

    __nv_drm_plane_create_rotation_property(
            plane,
            validLayerRRTransforms);

    nv_drm_free(formats);

    return plane;

failed_plane_init:
    nv_drm_free(nv_plane_state);

failed_state_alloc:
    nv_drm_free(nv_plane);

failed_plane_alloc:
    nv_drm_free(formats);

failed:
    return ERR_PTR(ret);
}

/*
 * Add a DRM CRTC for the given head, with the given primary and cursor
 * planes attached.
 */
static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev,
                                             struct drm_plane *primary_plane,
                                             struct drm_plane *cursor_plane,
                                             unsigned int head)
{
    struct nv_drm_crtc *nv_crtc = NULL;
    struct nv_drm_crtc_state *nv_state = NULL;
    int ret = -ENOMEM;

    if ((nv_crtc = nv_drm_calloc(1, sizeof(*nv_crtc))) == NULL) {
        goto failed;
    }

    nv_state = nv_drm_crtc_state_alloc();
    if (nv_state == NULL) {
        goto failed_state_alloc;
    }

    nv_crtc->base.state = &nv_state->base;
    nv_crtc->base.state->crtc = &nv_crtc->base;

    nv_crtc->head = head;
    INIT_LIST_HEAD(&nv_crtc->flip_list);
    spin_lock_init(&nv_crtc->flip_list_lock);
    nv_crtc->modeset_permission_filep = NULL;

    ret = drm_crtc_init_with_planes(nv_dev->dev,
                                    &nv_crtc->base,
                                    primary_plane, cursor_plane,
                                    &nv_crtc_funcs
#if defined(NV_DRM_CRTC_INIT_WITH_PLANES_HAS_NAME_ARG)
                                    , NULL
#endif
                                    );

    if (ret != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to init crtc for head %u with planes", head);
        goto failed_init_crtc;
    }

    /* Add crtc to drm sub-system */

    drm_crtc_helper_add(&nv_crtc->base, &nv_crtc_helper_funcs);

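    /*
     * Expose DRM color management (degamma LUT, CTM, gamma LUT) sized to the
     * NVKMS LUT, and register a matching legacy gamma table size so the
     * legacy gamma ioctl keeps working.
     */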
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
#if defined(NV_DRM_CRTC_ENABLE_COLOR_MGMT_PRESENT)
    drm_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE, true,
                               NVKMS_LUT_ARRAY_SIZE);
#else
    drm_helper_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE,
                                      NVKMS_LUT_ARRAY_SIZE);
#endif
    ret = drm_mode_crtc_set_gamma_size(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE);
    if (ret != 0) {
        NV_DRM_DEV_LOG_WARN(
            nv_dev,
            "Failed to initialize legacy gamma support for head %u", head);
    }
#endif

    return &nv_crtc->base;

failed_init_crtc:
    nv_drm_free(nv_state);

failed_state_alloc:
    nv_drm_free(nv_crtc);

failed:
    return ERR_PTR(ret);
}
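/*
 * Enumerate the heads described by pResInfo.  For each head that has at
 * least a primary layer, create a primary plane, a cursor plane, a CRTC,
 * and an overlay plane for each remaining layer.  Failures are logged and,
 * where possible, enumeration of the remaining heads continues.
 */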
void nv_drm_enumerate_crtcs_and_planes(
    struct nv_drm_device *nv_dev,
    const struct NvKmsKapiDeviceResourcesInfo *pResInfo)
{
    unsigned int i;

    for (i = 0; i < pResInfo->numHeads; i++) {
        struct drm_plane *primary_plane = NULL, *cursor_plane = NULL;
        NvU32 layer;

        if (pResInfo->numLayers[i] <= NVKMS_KAPI_LAYER_PRIMARY_IDX) {
            continue;
        }

        primary_plane =
            nv_drm_plane_create(nv_dev->dev,
                                DRM_PLANE_TYPE_PRIMARY,
                                NVKMS_KAPI_LAYER_PRIMARY_IDX,
                                i,
                                pResInfo);

        if (IS_ERR(primary_plane)) {
            NV_DRM_DEV_LOG_ERR(
                nv_dev,
                "Failed to create primary plane for head %u, error = %ld",
                i, PTR_ERR(primary_plane));
            continue;
        }

        cursor_plane =
            nv_drm_plane_create(nv_dev->dev,
                                DRM_PLANE_TYPE_CURSOR,
                                NVKMS_KAPI_LAYER_INVALID_IDX,
                                i,
                                pResInfo);
        if (IS_ERR(cursor_plane)) {
            NV_DRM_DEV_LOG_ERR(
                nv_dev,
                "Failed to create cursor plane for head %u, error = %ld",
                i, PTR_ERR(cursor_plane));
            cursor_plane = NULL;
        }

        /* Create crtc with the primary and cursor planes */
        {
            struct drm_crtc *crtc =
                __nv_drm_crtc_create(nv_dev,
                                     primary_plane, cursor_plane,
                                     i);
            if (IS_ERR(crtc)) {
                nv_drm_plane_destroy(primary_plane);

                if (cursor_plane != NULL) {
                    nv_drm_plane_destroy(cursor_plane);
                }

                NV_DRM_DEV_LOG_ERR(
                    nv_dev,
                    "Failed to add DRM CRTC for head %u, error = %ld",
                    i, PTR_ERR(crtc));
                continue;
            }
        }

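        /*
         * Create an overlay plane for each remaining layer; the primary
         * layer already has its plane created above.
         */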
        for (layer = 0; layer < pResInfo->numLayers[i]; layer++) {
            struct drm_plane *overlay_plane = NULL;

            if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX) {
                continue;
            }

            overlay_plane =
                nv_drm_plane_create(nv_dev->dev,
                                    DRM_PLANE_TYPE_OVERLAY,
                                    layer,
                                    i,
                                    pResInfo);

            if (IS_ERR(overlay_plane)) {
                NV_DRM_DEV_LOG_ERR(
                    nv_dev,
                    "Failed to create plane for layer-%u of head %u, error = %ld",
                    layer, i, PTR_ERR(overlay_plane));
            }
        }

    }
}

/*
 * Helper function to convert NvKmsKapiCrcs to drm_nvidia_crtc_crc32_v2_out.
 */
static void NvKmsKapiCrcsToDrm(const struct NvKmsKapiCrcs *crcs,
                               struct drm_nvidia_crtc_crc32_v2_out *drmCrcs)
{
    drmCrcs->outputCrc32.value = crcs->outputCrc32.value;
    drmCrcs->outputCrc32.supported = crcs->outputCrc32.supported;
    drmCrcs->outputCrc32.__pad0 = 0;
    drmCrcs->outputCrc32.__pad1 = 0;
    drmCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value;
    drmCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported;
    drmCrcs->rasterGeneratorCrc32.__pad0 = 0;
    drmCrcs->rasterGeneratorCrc32.__pad1 = 0;
    drmCrcs->compositorCrc32.value = crcs->compositorCrc32.value;
    drmCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported;
    drmCrcs->compositorCrc32.__pad0 = 0;
    drmCrcs->compositorCrc32.__pad1 = 0;
}
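/*
 * Ioctl handler: look up the CRTC named by params->crtc_id, query NVKMS for
 * its CRC32s, and return the output, raster generator, and compositor CRCs
 * to userspace.
 */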
int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,
                                   void *data, struct drm_file *filep)
{
    struct drm_nvidia_get_crtc_crc32_v2_params *params = data;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_crtc *crtc = NULL;
    struct nv_drm_crtc *nv_crtc = NULL;
    struct NvKmsKapiCrcs crc32;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return -ENOENT;
    }

    crtc = nv_drm_crtc_find(dev, filep, params->crtc_id);
    if (!crtc) {
        return -ENOENT;
    }

    nv_crtc = to_nv_crtc(crtc);

    if (!nvKms->getCRC32(nv_dev->pDevice, nv_crtc->head, &crc32)) {
        return -ENODEV;
    }
    NvKmsKapiCrcsToDrm(&crc32, &params->crc32);

    return 0;
}
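/*
 * Older variant of the CRC32 query ioctl: same lookup as the v2 handler
 * above, but it returns only the output CRC32 value.
 */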
int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev,
                                void *data, struct drm_file *filep)
{
    struct drm_nvidia_get_crtc_crc32_params *params = data;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_crtc *crtc = NULL;
    struct nv_drm_crtc *nv_crtc = NULL;
    struct NvKmsKapiCrcs crc32;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return -ENOENT;
    }

    crtc = nv_drm_crtc_find(dev, filep, params->crtc_id);
    if (!crtc) {
        return -ENOENT;
    }

    nv_crtc = to_nv_crtc(crtc);

    if (!nvKms->getCRC32(nv_dev->pDevice, nv_crtc->head, &crc32)) {
        return -ENODEV;
    }
    params->crc32 = crc32.outputCrc32.value;

    return 0;
}

#endif