1 /*
2  * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 
23 #ifndef __NVIDIA_DRM_CRTC_H__
24 #define __NVIDIA_DRM_CRTC_H__
25 
26 #include "nvidia-drm-conftest.h"
27 
28 #if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
29 
30 #include "nvidia-drm-helper.h"
31 
32 #if defined(NV_DRM_DRMP_H_PRESENT)
33 #include <drm/drmP.h>
34 #endif
35 
36 #include <drm/drm_crtc.h>
37 
38 #include "nvtypes.h"
39 #include "nvkms-kapi.h"
40 
struct nv_drm_crtc {
    /**
     * @head:
     *
     * NVKMS head index driven by this CRTC; used as the lookup key in
     * nv_drm_crtc_lookup().
     */
    NvU32 head;

    /**
     * @flip_list:
     *
     * List of flips pending to get processed by __nv_drm_handle_flip_event().
     * Protected by @flip_list_lock.
     */
    struct list_head flip_list;

    /**
     * @flip_list_lock:
     *
     * Spinlock to protect @flip_list.
     */
    spinlock_t flip_list_lock;

    /**
     * @modeset_permission_filep:
     *
     * The filep using this crtc with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
     */
    struct drm_file *modeset_permission_filep;

    /**
     * @base:
     *
     * Base DRM crtc object; to_nv_crtc() recovers the containing
     * &nv_drm_crtc from a &drm_crtc pointer via container_of().
     */
    struct drm_crtc base;
};
68 
/**
 * struct nv_drm_flip - flip state
 *
 * This state is used to consume the DRM completion event associated
 * with each crtc state from an atomic commit.
 *
 * nv_drm_atomic_apply_modeset_config() consumes the DRM completion
 * event, saves it into the flip state associated with the crtc, queues the
 * flip state onto the crtc's flip list, and commits the atomic update to
 * hardware.
 */
struct nv_drm_flip {
    /**
     * @event:
     *
     * Optional pointer to a DRM event to signal upon completion of
     * the state update.
     */
    struct drm_pending_vblank_event *event;

    /**
     * @pending_events:
     *
     * Number of HW events pending to signal completion of the state
     * update. Decremented by nv_drm_crtc_dequeue_flip(); the flip is
     * only unlinked from the list when this reaches zero.
     */
    uint32_t pending_events;

    /**
     * @list_entry:
     *
     * Entry on the per-CRTC &nv_drm_crtc.flip_list. Protected by
     * &nv_drm_crtc.flip_list_lock.
     */
    struct list_head list_entry;

    /**
     * @deferred_flip_list:
     *
     * List of flip objects whose processing is deferred until this flip
     * object is processed. Protected by &nv_drm_crtc.flip_list_lock.
     * nv_drm_atomic_commit() gets the last flip object from
     * &nv_drm_crtc.flip_list and adds deferred flip objects onto
     * @deferred_flip_list; __nv_drm_handle_flip_event() processes
     * @deferred_flip_list.
     */
    struct list_head deferred_flip_list;
};
116 
struct nv_drm_crtc_state {
    /**
     * @base:
     *
     * Base DRM crtc state object for this.
     */
    struct drm_crtc_state base;

    /**
     * @req_config:
     *
     * Requested head's modeset configuration corresponding to this crtc
     * state, handed to NVKMS on commit.
     */
    struct NvKmsKapiHeadRequestedConfig req_config;

    /* Input/output LUT ramps staged for this crtc state.
     * NOTE(review): presumably allocated per-state and referenced from
     * @req_config for gamma programming — confirm against the users of
     * these fields. */
    struct NvKmsLutRamps *ilut_ramps;
    struct NvKmsLutRamps *olut_ramps;

    /**
     * @nv_flip:
     *
     * Flip state associated with this crtc state, gets allocated
     * by nv_drm_atomic_crtc_duplicate_state(), on successful commit it gets
     * consumed and queued into flip list by
     * nv_drm_atomic_apply_modeset_config() and finally gets destroyed
     * by __nv_drm_handle_flip_event() after getting processed.
     *
     * In case of failure of atomic commit, this flip state gets destroyed
     * by nv_drm_atomic_crtc_destroy_state().
     */
    struct nv_drm_flip *nv_flip;
};
149 
to_nv_crtc_state(struct drm_crtc_state * state)150 static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *state)
151 {
152     return container_of(state, struct nv_drm_crtc_state, base);
153 }
154 
struct nv_drm_plane {
    /**
     * @base:
     *
     * Base DRM plane object for this plane; to_nv_plane() recovers the
     * containing &nv_drm_plane via container_of().
     */
    struct drm_plane base;

    /**
     * @defaultCompositionMode:
     *
     * Default composition blending mode of this plane.
     */
    enum NvKmsCompositionBlendingMode defaultCompositionMode;

    /**
     * @layer_idx:
     *
     * Index of this plane in the per head array of layers.
     */
    uint32_t layer_idx;
};
177 
to_nv_plane(struct drm_plane * plane)178 static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane)
179 {
180     if (plane == NULL) {
181         return NULL;
182     }
183     return container_of(plane, struct nv_drm_plane, base);
184 }
185 
struct nv_drm_plane_state {
    /* Base DRM plane state object for this. */
    struct drm_plane_state base;

    /* Userspace pointer to an s32.
     * NOTE(review): presumably receives a sync/fence fd — confirm against
     * the ioctl or atomic property handler that sets it. */
    s32 __user *fd_user_ptr;

    /* Input colorspace requested for this plane's contents. */
    enum NvKmsInputColorSpace input_colorspace;

#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    /* Blob carrying HDR output metadata, only when the kernel's DRM core
     * supports it. */
    struct drm_property_blob *hdr_output_metadata;
#endif
};
194 
to_nv_drm_plane_state(struct drm_plane_state * state)195 static inline struct nv_drm_plane_state *to_nv_drm_plane_state(struct drm_plane_state *state)
196 {
197     return container_of(state, struct nv_drm_plane_state, base);
198 }
199 
to_nv_drm_plane_state_const(const struct drm_plane_state * state)200 static inline const struct nv_drm_plane_state *to_nv_drm_plane_state_const(const struct drm_plane_state *state)
201 {
202     return container_of(state, const struct nv_drm_plane_state, base);
203 }
204 
to_nv_crtc(struct drm_crtc * crtc)205 static inline struct nv_drm_crtc *to_nv_crtc(struct drm_crtc *crtc)
206 {
207     if (crtc == NULL) {
208         return NULL;
209     }
210     return container_of(crtc, struct nv_drm_crtc, base);
211 }
212 
213 /*
214  * CRTCs are static objects, list does not change once after initialization and
215  * before teardown of device. Initialization/teardown paths are single
216  * threaded, so no locking required.
217  */
218 static inline
nv_drm_crtc_lookup(struct nv_drm_device * nv_dev,NvU32 head)219 struct nv_drm_crtc *nv_drm_crtc_lookup(struct nv_drm_device *nv_dev, NvU32 head)
220 {
221     struct drm_crtc *crtc;
222     nv_drm_for_each_crtc(crtc, nv_dev->dev) {
223         struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
224 
225         if (nv_crtc->head == head)  {
226             return nv_crtc;
227         }
228     }
229     return NULL;
230 }
231 
232 /**
233  * nv_drm_crtc_enqueue_flip - Enqueue nv_drm_flip object to flip_list of crtc.
234  */
nv_drm_crtc_enqueue_flip(struct nv_drm_crtc * nv_crtc,struct nv_drm_flip * nv_flip)235 static inline void nv_drm_crtc_enqueue_flip(struct nv_drm_crtc *nv_crtc,
236                                             struct nv_drm_flip *nv_flip)
237 {
238     spin_lock(&nv_crtc->flip_list_lock);
239     list_add(&nv_flip->list_entry, &nv_crtc->flip_list);
240     spin_unlock(&nv_crtc->flip_list_lock);
241 }
242 
243 /**
244  * nv_drm_crtc_dequeue_flip - Dequeue nv_drm_flip object to flip_list of crtc.
245  */
246 static inline
nv_drm_crtc_dequeue_flip(struct nv_drm_crtc * nv_crtc)247 struct nv_drm_flip *nv_drm_crtc_dequeue_flip(struct nv_drm_crtc *nv_crtc)
248 {
249     struct nv_drm_flip *nv_flip = NULL;
250     uint32_t pending_events = 0;
251 
252     spin_lock(&nv_crtc->flip_list_lock);
253     nv_flip = list_first_entry_or_null(&nv_crtc->flip_list,
254                                        struct nv_drm_flip, list_entry);
255     if (likely(nv_flip != NULL)) {
256         /*
257          * Decrement pending_event count and dequeue flip object if
258          * pending_event count becomes 0.
259          */
260         pending_events = --nv_flip->pending_events;
261         if (!pending_events) {
262             list_del(&nv_flip->list_entry);
263         }
264     }
265     spin_unlock(&nv_crtc->flip_list_lock);
266 
267     if (WARN_ON(nv_flip == NULL) || pending_events) {
268         return NULL;
269     }
270 
271     return nv_flip;
272 }
273 
/*
 * Create the DRM CRTC and plane objects for the heads and layers described
 * by @pResInfo. NOTE(review): implementation not visible in this header —
 * confirm exact behavior in nvidia-drm-crtc.c.
 */
void nv_drm_enumerate_crtcs_and_planes(
    struct nv_drm_device *nv_dev,
    const struct NvKmsKapiDeviceResourcesInfo *pResInfo);

/* ioctl handler: CRC32 query for a CRTC (original interface). */
int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev,
                                void *data, struct drm_file *filep);

/* ioctl handler: CRC32 query for a CRTC (v2 interface). */
int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,
                                   void *data, struct drm_file *filep);
283 
284 #endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
285 
286 #endif /* __NVIDIA_DRM_CRTC_H__ */
287