// SPDX-License-Identifier: GPL-2.0+

#include <linux/crc32.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include <linux/minmax.h>

#include "vkms_drv.h"

static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
{
	u32 new_color;

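	/*
	 * Premultiplied blend in 16-bit fixed point: out = src + dst * (1 - alpha).
	 * For well-formed premultiplied input, src <= alpha, so the sum below
	 * stays within the range of a u32.
	 */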
	new_color = (src * 0xffff + dst * (0xffff - alpha));

	return DIV_ROUND_CLOSEST(new_color, 0xffff);
}

/**
 * pre_mul_alpha_blend - alpha blending equation
 * @frame_info: The source framebuffer's metadata
 * @stage_buffer: The line with the pixels from src_plane
 * @output_buffer: A line buffer that receives all the blends output
 *
 * Using the information from the `frame_info`, this blends only the
 * necessary pixels from the `stage_buffer` to the `output_buffer`
 * using the premultiplied blend formula.
 *
 * The current DRM assumption is that pixel color values have already been
 * pre-multiplied with the alpha channel values. For more information, see
 * drm_plane_create_blend_mode_property(). Also, this formula assumes a
 * completely opaque background.
 */
static void pre_mul_alpha_blend(struct vkms_frame_info *frame_info,
				struct line_buffer *stage_buffer,
				struct line_buffer *output_buffer)
{
	int x_dst = frame_info->dst.x1;
	struct pixel_argb_u16 *out = output_buffer->pixels + x_dst;
	struct pixel_argb_u16 *in = stage_buffer->pixels;
	int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
			    stage_buffer->n_pixels);

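	/*
	 * Blend the plane's row on top of what is already in the output line.
	 * The destination alpha is forced to fully opaque, matching the
	 * opaque-background assumption documented above.
	 */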
	for (int x = 0; x < x_limit; x++) {
		out[x].a = (u16)0xffff;
		out[x].r = pre_mul_blend_channel(in[x].r, out[x].r, in[x].a);
		out[x].g = pre_mul_blend_channel(in[x].g, out[x].g, in[x].a);
		out[x].b = pre_mul_blend_channel(in[x].b, out[x].b, in[x].a);
	}
}

static bool check_y_limit(struct vkms_frame_info *frame_info, int y)
{
	return y >= frame_info->dst.y1 && y < frame_info->dst.y2;
}

static void fill_background(const struct pixel_argb_u16 *background_color,
			    struct line_buffer *output_buffer)
{
	for (size_t i = 0; i < output_buffer->n_pixels; i++)
		output_buffer->pixels[i] = *background_color;
}

/**
 * blend - blend the pixels from all planes and compute the crc32
 * @wb: The writeback job, or NULL when no writeback is requested
 * @crtc_state: The crtc state
 * @crc32: The crc output of the final frame
 * @stage_buffer: The line with the pixels from the plane being blended to the output
 * @output_buffer: A buffer of a row that will receive the result of the blend(s)
 * @row_size: The size, in bytes, of a single row of the output buffer
 *
 * This function blends the pixels (using `pre_mul_alpha_blend`)
 * from all planes, calculates the crc32 of the output from the former step,
 * and, if necessary, converts and stores the output in the writeback buffer.
 */
static void blend(struct vkms_writeback_job *wb,
		  struct vkms_crtc_state *crtc_state,
		  u32 *crc32, struct line_buffer *stage_buffer,
		  struct line_buffer *output_buffer, size_t row_size)
{
	struct vkms_plane_state **plane = crtc_state->active_planes;
	u32 n_active_planes = crtc_state->num_active_planes;

	const struct pixel_argb_u16 background_color = { .a = 0xffff };

	size_t crtc_y_limit = crtc_state->base.crtc->mode.vdisplay;

	for (size_t y = 0; y < crtc_y_limit; y++) {
		fill_background(&background_color, output_buffer);

		/* The active planes are composed associatively in z-order. */
		for (size_t i = 0; i < n_active_planes; i++) {
			if (!check_y_limit(plane[i]->frame_info, y))
				continue;

			plane[i]->plane_read(stage_buffer, plane[i]->frame_info, y);
			pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
					    output_buffer);
		}

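		/*
		 * The CRC is accumulated over the raw bytes of the whole row;
		 * struct pixel_argb_u16 has no padding (see the static_assert
		 * in compose_active_planes()), so this is equivalent to
		 * hashing each channel of each pixel in order.
		 */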
		*crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);

		if (wb)
			wb->wb_write(&wb->wb_frame_info, output_buffer, y);
	}
}

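/*
 * Composition requires a format-conversion handler (plane_read) on every
 * active plane, and a wb_write handler on an active writeback job.
 */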
static int check_format_funcs(struct vkms_crtc_state *crtc_state,
			      struct vkms_writeback_job *active_wb)
{
	struct vkms_plane_state **planes = crtc_state->active_planes;
	u32 n_active_planes = crtc_state->num_active_planes;

	for (size_t i = 0; i < n_active_planes; i++)
		if (!planes[i]->plane_read)
			return -1;

	if (active_wb && !active_wb->wb_write)
		return -1;

	return 0;
}

static int check_iosys_map(struct vkms_crtc_state *crtc_state)
{
	struct vkms_plane_state **plane_state = crtc_state->active_planes;
	u32 n_active_planes = crtc_state->num_active_planes;

	for (size_t i = 0; i < n_active_planes; i++)
		if (iosys_map_is_null(&plane_state[i]->frame_info->map[0]))
			return -1;

	return 0;
}

static int compose_active_planes(struct vkms_writeback_job *active_wb,
				 struct vkms_crtc_state *crtc_state,
				 u32 *crc32)
{
	size_t line_width, pixel_size = sizeof(struct pixel_argb_u16);
	struct line_buffer output_buffer, stage_buffer;
	int ret = 0;

	/*
	 * This check exists so we can call `crc32_le` for the entire line
	 * instead of doing it for each channel of each pixel, in case
	 * `struct pixel_argb_u16` has any gap added by the compiler
	 * between the struct fields.
	 */
	static_assert(sizeof(struct pixel_argb_u16) == 8);

	if (WARN_ON(check_iosys_map(crtc_state)))
		return -EINVAL;

	if (WARN_ON(check_format_funcs(crtc_state, active_wb)))
		return -EINVAL;

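	/*
	 * Two line buffers are used per row: stage_buffer receives the row
	 * read from a single plane, while output_buffer accumulates the
	 * blended result for the whole CRTC row.
	 */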
	line_width = crtc_state->base.crtc->mode.hdisplay;
	stage_buffer.n_pixels = line_width;
	output_buffer.n_pixels = line_width;

	stage_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
	if (!stage_buffer.pixels) {
		DRM_ERROR("Cannot allocate memory for the stage line buffer");
		return -ENOMEM;
	}

	output_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
	if (!output_buffer.pixels) {
		DRM_ERROR("Cannot allocate memory for the output line buffer");
		ret = -ENOMEM;
		goto free_stage_buffer;
	}

	blend(active_wb, crtc_state, crc32, &stage_buffer,
	      &output_buffer, line_width * pixel_size);

	kvfree(output_buffer.pixels);
free_stage_buffer:
	kvfree(stage_buffer.pixels);

	return ret;
}

/**
 * vkms_composer_worker - ordered work_struct to compute CRC
 *
 * @work: work_struct
 *
 * Work handler for composing and computing CRCs. work_struct scheduled in
 * an ordered workqueue that's periodically scheduled to run by
 * vkms_vblank_simulate() and flushed at vkms_atomic_commit_tail().
 */
void vkms_composer_worker(struct work_struct *work)
{
	struct vkms_crtc_state *crtc_state = container_of(work,
						struct vkms_crtc_state,
						composer_work);
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct vkms_writeback_job *active_wb = crtc_state->active_writeback;
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	bool crc_pending, wb_pending;
	u64 frame_start, frame_end;
	u32 crc32 = 0;
	int ret;

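	/*
	 * Snapshot and reset the pending state under the composer lock so the
	 * vblank hrtimer can keep updating it while this worker runs.
	 */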
	spin_lock_irq(&out->composer_lock);
	frame_start = crtc_state->frame_start;
	frame_end = crtc_state->frame_end;
	crc_pending = crtc_state->crc_pending;
	wb_pending = crtc_state->wb_pending;
	crtc_state->frame_start = 0;
	crtc_state->frame_end = 0;
	crtc_state->crc_pending = false;
	spin_unlock_irq(&out->composer_lock);

	/*
	 * We raced with the vblank hrtimer and previous work already computed
	 * the crc, nothing to do.
	 */
	if (!crc_pending)
		return;

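	/* Pass the writeback job to the composition only when one is pending. */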
	if (wb_pending)
		ret = compose_active_planes(active_wb, crtc_state, &crc32);
	else
		ret = compose_active_planes(NULL, crtc_state, &crc32);

	if (ret)
		return;

	if (wb_pending) {
		drm_writeback_signal_completion(&out->wb_connector, 0);
		spin_lock_irq(&out->composer_lock);
		crtc_state->wb_pending = false;
		spin_unlock_irq(&out->composer_lock);
	}

	/*
	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
	 */
	while (frame_start <= frame_end)
		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}

static const char * const pipe_crc_sources[] = {"auto"};

const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
					size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

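/*
 * "auto" is the only CRC source vkms exposes; a NULL source name simply
 * disables CRC generation, and anything else is rejected.
 */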
static int vkms_crc_parse_source(const char *src_name, bool *enabled)
{
	int ret = 0;

	if (!src_name) {
		*enabled = false;
	} else if (strcmp(src_name, "auto") == 0) {
		*enabled = true;
	} else {
		*enabled = false;
		ret = -EINVAL;
	}

	return ret;
}

int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
			   size_t *values_cnt)
{
	bool enabled;

	if (vkms_crc_parse_source(src_name, &enabled) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
		return -EINVAL;
	}

	*values_cnt = 1;

	return 0;
}

void vkms_set_composer(struct vkms_output *out, bool enabled)
{
	bool old_enabled;

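	/*
	 * Keep a vblank reference while the composer is enabled so the vblank
	 * hrtimer keeps running: take a new reference on enable and drop one
	 * if the composer was previously enabled, so at most one reference is
	 * held at a time.
	 */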
	if (enabled)
		drm_crtc_vblank_get(&out->crtc);

	spin_lock_irq(&out->lock);
	old_enabled = out->composer_enabled;
	out->composer_enabled = enabled;
	spin_unlock_irq(&out->lock);

	if (old_enabled)
		drm_crtc_vblank_put(&out->crtc);
}

int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	bool enabled = false;
	int ret = 0;

	ret = vkms_crc_parse_source(src_name, &enabled);

	vkms_set_composer(out, enabled);

	return ret;
}