1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /**
25  * DOC: atomic plane helpers
26  *
27  * The functions here are used by the atomic plane helper functions to
28  * implement legacy plane updates (i.e., drm_plane->update_plane() and
29  * drm_plane->disable_plane()).  This allows plane updates to use the
30  * atomic state infrastructure and perform plane updates as separate
31  * prepare/check/commit/cleanup steps.
32  */
33 
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_fourcc.h>
36 #include <drm/drm_plane_helper.h>
37 
38 #include "i915_trace.h"
39 #include "intel_atomic_plane.h"
40 #include "intel_display_types.h"
41 #include "intel_pm.h"
42 #include "intel_sprite.h"
43 
44 struct intel_plane *intel_plane_alloc(void)
45 {
46 	struct intel_plane_state *plane_state;
47 	struct intel_plane *plane;
48 
49 	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
50 	if (!plane)
51 		return ERR_PTR(-ENOMEM);
52 
53 	plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
54 	if (!plane_state) {
55 		kfree(plane);
56 		return ERR_PTR(-ENOMEM);
57 	}
58 
59 	__drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
60 	plane_state->scaler_id = -1;
61 
62 	return plane;
63 }
64 
65 void intel_plane_free(struct intel_plane *plane)
66 {
67 	intel_plane_destroy_state(&plane->base, plane->base.state);
68 	kfree(plane);
69 }
70 
71 /**
72  * intel_plane_duplicate_state - duplicate plane state
73  * @plane: drm plane
74  *
75  * Allocates and returns a copy of the plane state (both common and
76  * Intel-specific) for the specified plane.
77  *
78  * Returns: The newly allocated plane state, or NULL on failure.
79  */
80 struct drm_plane_state *
81 intel_plane_duplicate_state(struct drm_plane *plane)
82 {
83 	struct drm_plane_state *state;
84 	struct intel_plane_state *intel_state;
85 
86 	intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
87 
88 	if (!intel_state)
89 		return NULL;
90 
91 	state = &intel_state->base;
92 
93 	__drm_atomic_helper_plane_duplicate_state(plane, state);
94 
95 	intel_state->vma = NULL;
96 	intel_state->flags = 0;
97 
98 	return state;
99 }
100 
101 /**
102  * intel_plane_destroy_state - destroy plane state
103  * @plane: drm plane
104  * @state: state object to destroy
105  *
106  * Destroys the plane state (both common and Intel-specific) for the
107  * specified plane.
108  */
109 void
110 intel_plane_destroy_state(struct drm_plane *plane,
111 			  struct drm_plane_state *state)
112 {
113 	WARN_ON(to_intel_plane_state(state)->vma);
114 
115 	drm_atomic_helper_plane_destroy_state(plane, state);
116 }
117 
118 unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
119 				   const struct intel_plane_state *plane_state)
120 {
121 	const struct drm_framebuffer *fb = plane_state->base.fb;
122 	unsigned int cpp;
123 
124 	if (!plane_state->base.visible)
125 		return 0;
126 
127 	cpp = fb->format->cpp[0];
128 
129 	/*
130 	 * Based on HSD#:1408715493
131 	 * NV12 cpp == 4, P010 cpp == 8
132 	 *
133 	 * FIXME what is the logic behind this?
134 	 */
135 	if (fb->format->is_yuv && fb->format->num_planes > 1)
136 		cpp *= 4;
137 
138 	return cpp * crtc_state->pixel_rate;
139 }
140 
/**
 * intel_plane_calc_min_cdclk - record a plane's minimum cdclk requirement
 * @state: the atomic state being checked
 * @plane: the plane to evaluate
 *
 * Stores the plane's minimum cdclk (as reported by the plane's
 * ->min_cdclk() hook) into the new crtc state's min_cdclk[] array.
 *
 * Returns: true if the requirement exceeds the currently-programmed
 * logical cdclk (i.e. the cdclk may have to be recomputed), false
 * otherwise or when the plane is invisible / has no ->min_cdclk() hook.
 */
bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
				struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
	struct intel_crtc_state *crtc_state;

	/* Invisible planes, and planes with no hook, impose no requirement. */
	if (!plane_state->base.visible || !plane->min_cdclk)
		return false;

	crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	crtc_state->min_cdclk[plane->id] =
		plane->min_cdclk(crtc_state, plane_state);

	/*
	 * Does the cdclk need to be bumped up?
	 *
	 * Note: we obviously need to be called before the new
	 * cdclk frequency is calculated so state->cdclk.logical
	 * hasn't been populated yet. Hence we look at the old
	 * cdclk state under dev_priv->cdclk.logical. This is
	 * safe as long we hold at least one crtc mutex (which
	 * must be true since we have crtc_state).
	 */
	if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n",
			      plane->base.base.id, plane->base.name,
			      crtc_state->min_cdclk[plane->id],
			      dev_priv->cdclk.logical.cdclk);
		return true;
	}

	return false;
}
178 
/*
 * Check a plane's new state against the crtc states it belongs to, and
 * update the crtc state's per-plane bookkeeping (active/nv12/c8/update
 * bitmasks, data rate, min cdclk) accordingly.
 *
 * Returns 0 on success or a negative error code from the plane's
 * ->check_plane() hook or from intel_plane_atomic_calc_changes().
 */
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state,
					const struct intel_plane_state *old_plane_state,
					struct intel_plane_state *new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
	const struct drm_framebuffer *fb = new_plane_state->base.fb;
	int ret;

	/*
	 * Start from a clean slate for this plane; the bits/values are
	 * re-derived below from the new state.
	 */
	new_crtc_state->active_planes &= ~BIT(plane->id);
	new_crtc_state->nv12_planes &= ~BIT(plane->id);
	new_crtc_state->c8_planes &= ~BIT(plane->id);
	new_crtc_state->data_rate[plane->id] = 0;
	new_crtc_state->min_cdclk[plane->id] = 0;
	new_plane_state->base.visible = false;

	/* Plane was, and remains, unused on any crtc - nothing to check. */
	if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
		return 0;

	/* Hardware-specific validation; may also set ->base.visible. */
	ret = plane->check_plane(new_crtc_state, new_plane_state);
	if (ret)
		return ret;

	/* FIXME pre-g4x don't work like this */
	if (new_plane_state->base.visible)
		new_crtc_state->active_planes |= BIT(plane->id);

	if (new_plane_state->base.visible &&
	    drm_format_info_is_yuv_semiplanar(fb->format))
		new_crtc_state->nv12_planes |= BIT(plane->id);

	if (new_plane_state->base.visible &&
	    fb->format->format == DRM_FORMAT_C8)
		new_crtc_state->c8_planes |= BIT(plane->id);

	/* A plane that was or will be visible needs (re)programming. */
	if (new_plane_state->base.visible || old_plane_state->base.visible)
		new_crtc_state->update_planes |= BIT(plane->id);

	new_crtc_state->data_rate[plane->id] =
		intel_plane_data_rate(new_crtc_state, new_plane_state);

	return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
					       old_plane_state, new_plane_state);
}
223 
224 static struct intel_crtc *
225 get_crtc_from_states(const struct intel_plane_state *old_plane_state,
226 		     const struct intel_plane_state *new_plane_state)
227 {
228 	if (new_plane_state->base.crtc)
229 		return to_intel_crtc(new_plane_state->base.crtc);
230 
231 	if (old_plane_state->base.crtc)
232 		return to_intel_crtc(old_plane_state->base.crtc);
233 
234 	return NULL;
235 }
236 
237 int intel_plane_atomic_check(struct intel_atomic_state *state,
238 			     struct intel_plane *plane)
239 {
240 	struct intel_plane_state *new_plane_state =
241 		intel_atomic_get_new_plane_state(state, plane);
242 	const struct intel_plane_state *old_plane_state =
243 		intel_atomic_get_old_plane_state(state, plane);
244 	struct intel_crtc *crtc =
245 		get_crtc_from_states(old_plane_state, new_plane_state);
246 	const struct intel_crtc_state *old_crtc_state;
247 	struct intel_crtc_state *new_crtc_state;
248 
249 	new_plane_state->base.visible = false;
250 	if (!crtc)
251 		return 0;
252 
253 	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
254 	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
255 
256 	return intel_plane_atomic_check_with_state(old_crtc_state,
257 						   new_crtc_state,
258 						   old_plane_state,
259 						   new_plane_state);
260 }
261 
/*
 * Pick the next plane (from @update_mask) on @crtc that can be committed
 * without its new DDB allocation overlapping any allocation still in use
 * by a not-yet-committed plane.
 *
 * @entries_y/@entries_uv track the currently-active allocations; the
 * chosen plane's entries are written into them and its bit is cleared
 * from @update_mask. Returns NULL when @update_mask is empty.
 */
static struct intel_plane *
skl_next_plane_to_commit(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct skl_ddb_entry entries_y[I915_MAX_PLANES],
			 struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
			 unsigned int *update_mask)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	if (*update_mask == 0)
		return NULL;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		enum plane_id plane_id = plane->id;

		/* Only planes on this crtc that still need committing. */
		if (crtc->pipe != plane->pipe ||
		    !(*update_mask & BIT(plane_id)))
			continue;

		/*
		 * Defer this plane if its new allocation would collide
		 * with one still occupied by an uncommitted plane.
		 */
		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
						entries_y,
						I915_MAX_PLANES, plane_id) ||
		    skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
						entries_uv,
						I915_MAX_PLANES, plane_id))
			continue;

		/* Commit this plane: mark done and record its new DDB use. */
		*update_mask &= ~BIT(plane_id);
		entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
		entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];

		return plane;
	}

	/*
	 * should never happen - a valid DDB allocation always admits at
	 * least one commit order
	 */
	WARN_ON(1);

	return NULL;
}
305 
306 void intel_update_plane(struct intel_plane *plane,
307 			const struct intel_crtc_state *crtc_state,
308 			const struct intel_plane_state *plane_state)
309 {
310 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
311 
312 	trace_intel_update_plane(&plane->base, crtc);
313 	plane->update_plane(plane, crtc_state, plane_state);
314 }
315 
316 void intel_update_slave(struct intel_plane *plane,
317 			const struct intel_crtc_state *crtc_state,
318 			const struct intel_plane_state *plane_state)
319 {
320 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
321 
322 	trace_intel_update_plane(&plane->base, crtc);
323 	plane->update_slave(plane, crtc_state, plane_state);
324 }
325 
326 void intel_disable_plane(struct intel_plane *plane,
327 			 const struct intel_crtc_state *crtc_state)
328 {
329 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
330 
331 	trace_intel_disable_plane(&plane->base, crtc);
332 	plane->disable_plane(plane, crtc_state);
333 }
334 
/*
 * Commit all plane updates on a skl+ crtc, in an order chosen so that no
 * plane is programmed while its new DDB allocation still overlaps one in
 * use by a not-yet-updated plane (see skl_next_plane_to_commit()).
 */
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_ddb_entry entries_y[I915_MAX_PLANES];
	struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane *plane;

	/* Start from the DDB allocations currently in use (old state). */
	memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_y));
	memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_uv));

	while ((plane = skl_next_plane_to_commit(state, crtc,
						 entries_y, entries_uv,
						 &update_mask))) {
		struct intel_plane_state *new_plane_state =
			intel_atomic_get_new_plane_state(state, plane);

		if (new_plane_state->base.visible) {
			intel_update_plane(plane, new_crtc_state, new_plane_state);
		} else if (new_plane_state->planar_slave) {
			struct intel_plane *master =
				new_plane_state->planar_linked_plane;

			/*
			 * We update the slave plane from this function because
			 * programming it from the master plane's update_plane
			 * callback runs into issues when the Y plane is
			 * reassigned, disabled or used by a different plane.
			 *
			 * The slave plane is updated with the master plane's
			 * plane_state.
			 */
			new_plane_state =
				intel_atomic_get_new_plane_state(state, master);

			intel_update_slave(plane, new_crtc_state, new_plane_state);
		} else {
			intel_disable_plane(plane, new_crtc_state);
		}
	}
}
382 
383 void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
384 				struct intel_crtc *crtc)
385 {
386 	struct intel_crtc_state *new_crtc_state =
387 		intel_atomic_get_new_crtc_state(state, crtc);
388 	u32 update_mask = new_crtc_state->update_planes;
389 	struct intel_plane_state *new_plane_state;
390 	struct intel_plane *plane;
391 	int i;
392 
393 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
394 		if (crtc->pipe != plane->pipe ||
395 		    !(update_mask & BIT(plane->id)))
396 			continue;
397 
398 		if (new_plane_state->base.visible)
399 			intel_update_plane(plane, new_crtc_state, new_plane_state);
400 		else
401 			intel_disable_plane(plane, new_crtc_state);
402 	}
403 }
404 
/* Hooks the atomic plane helpers up to i915's fb prepare/cleanup code. */
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
	.prepare_fb = intel_prepare_plane_fb,
	.cleanup_fb = intel_cleanup_plane_fb,
};
409