// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "xen_drm_front.h"
#include "xen_drm_front_conn.h"
#include "xen_drm_front_kms.h"

/*
 * Timeout in ms to wait for the frame done event from the backend:
 * must be a bit longer than the IO time-out
 */
#define FRAME_DONE_TO_MS	(XEN_DRM_FRONT_WAIT_BACK_MS + 100)

static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
{
	return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
}

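/*
 * Notify the backend that the framebuffer is no longer in use before
 * releasing the GEM framebuffer. drm_dev_enter() fails if the device has
 * already been unplugged, in which case only the local cleanup is done.
 */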
static void fb_destroy(struct drm_framebuffer *fb)
{
	struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
	int idx;

	if (drm_dev_enter(fb->dev, &idx)) {
		xen_drm_front_fb_detach(drm_info->front_info,
					xen_drm_front_fb_to_cookie(fb));
		drm_dev_exit(idx);
	}
	drm_gem_fb_destroy(fb);
}

static const struct drm_framebuffer_funcs fb_funcs = {
	.destroy = fb_destroy,
};

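/*
 * Create a GEM-backed framebuffer and attach it to the backend, using the
 * GEM object and framebuffer pointers as cookies that identify the buffer
 * in subsequent mode set, page flip and detach requests.
 */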
static struct drm_framebuffer *
fb_create(struct drm_device *dev, struct drm_file *filp,
	  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int ret;

	fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
	if (IS_ERR(fb))
		return fb;

	gem_obj = fb->obj[0];

	ret = xen_drm_front_fb_attach(drm_info->front_info,
				      xen_drm_front_dbuf_to_cookie(gem_obj),
				      xen_drm_front_fb_to_cookie(fb),
				      fb->width, fb->height,
				      fb->format->format);
	if (ret < 0) {
		DRM_ERROR("Backend failed to attach FB %p: %d\n", fb, ret);
		goto fail;
	}

	return fb;

fail:
	drm_gem_fb_destroy(fb);
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

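/*
 * Deliver the cached page flip/vblank event to user-space, if one is
 * pending. This is called from the frame done handler, the page flip
 * time-out worker and the atomic update path, so the pending event is
 * handled under dev->event_lock.
 */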
static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_crtc *crtc = &pipeline->pipe.crtc;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (pipeline->pending_event)
		drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
	pipeline->pending_event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static void display_enable(struct drm_simple_display_pipe *pipe,
			   struct drm_crtc_state *crtc_state,
			   struct drm_plane_state *plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret, idx;

	if (!drm_dev_enter(pipe->crtc.dev, &idx))
		return;

	ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
				     fb->width, fb->height,
				     fb->format->cpp[0] * 8,
				     xen_drm_front_fb_to_cookie(fb));

	if (ret) {
		DRM_ERROR("Failed to enable display: %d\n", ret);
		pipeline->conn_connected = false;
	}

	drm_dev_exit(idx);
}

static void display_disable(struct drm_simple_display_pipe *pipe)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	int ret = 0, idx;

	if (drm_dev_enter(pipe->crtc.dev, &idx)) {
		ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
					     xen_drm_front_fb_to_cookie(NULL));
		drm_dev_exit(idx);
	}
	if (ret)
		DRM_ERROR("Failed to disable display: %d\n", ret);

	/* Make sure we can restart with an enabled connector next time */
	pipeline->conn_connected = true;

	/* Release the stalled event, if any */
	send_pending_event(pipeline);
}

void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
				     u64 fb_cookie)
{
	/*
	 * This runs in interrupt context, i.e. under
	 * drm_info->front_info->io_lock, so we cannot call the _sync version
	 * to cancel the work.
	 */
	cancel_delayed_work(&pipeline->pflip_to_worker);

	send_pending_event(pipeline);
}

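/*
 * The frame done event has not arrived from the backend within
 * FRAME_DONE_TO_MS: release the pending event anyway so user-space is not
 * left waiting for the page flip to complete.
 */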
static void pflip_to_worker(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct xen_drm_front_drm_pipeline *pipeline =
			container_of(delayed_work,
				     struct xen_drm_front_drm_pipeline,
				     pflip_to_worker);

	DRM_ERROR("Frame done timed-out, releasing\n");
	send_pending_event(pipeline);
}

static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
				   struct drm_plane_state *old_plane_state)
{
	struct drm_plane_state *plane_state =
			drm_atomic_get_new_plane_state(old_plane_state->state,
						       &pipe->plane);

	/*
	 * If old_plane_state->fb is NULL and plane_state->fb is not,
	 * then this is an atomic commit which will enable the display.
	 * If old_plane_state->fb is not NULL and plane_state->fb is NULL,
	 * then this is an atomic commit which will disable the display.
	 * Ignore these and do not send a page flip, as this framebuffer will
	 * be sent to the backend as part of the display_set_config call.
	 */
	if (old_plane_state->fb && plane_state->fb) {
		struct xen_drm_front_drm_pipeline *pipeline =
				to_xen_drm_pipeline(pipe);
		struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
		int ret;

		schedule_delayed_work(&pipeline->pflip_to_worker,
				      msecs_to_jiffies(FRAME_DONE_TO_MS));

		ret = xen_drm_front_page_flip(drm_info->front_info,
					      pipeline->index,
					      xen_drm_front_fb_to_cookie(plane_state->fb));
		if (ret) {
			DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);

			pipeline->conn_connected = false;
			/*
			 * Report the flip as not handled, so the pending
			 * event is sent now, unblocking user-space.
			 */
			return false;
		}
		/*
		 * Signal that the page flip was handled; the pending event
		 * will be sent when the frame done event arrives from the
		 * backend.
		 */
		return true;
	}

	return false;
}

static int display_check(struct drm_simple_display_pipe *pipe,
			 struct drm_plane_state *plane_state,
			 struct drm_crtc_state *crtc_state)
{
	/*
	 * Xen doesn't initialize vblanking via drm_vblank_init(), so
	 * DRM helpers assume that it doesn't handle vblanking and start
	 * sending out fake VBLANK events automatically.
	 *
	 * As xen contains its own logic for sending out VBLANK events
	 * in send_pending_event(), disable no_vblank (i.e., the xen
	 * driver has vblanking support).
	 */
	crtc_state->no_vblank = false;

	return 0;
}

static void display_update(struct drm_simple_display_pipe *pipe,
			   struct drm_plane_state *old_plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_pending_vblank_event *event;
	int idx;

	event = crtc->state->event;
	if (event) {
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		WARN_ON(pipeline->pending_event);

		spin_lock_irqsave(&dev->event_lock, flags);
		crtc->state->event = NULL;

		pipeline->pending_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
		send_pending_event(pipeline);
		return;
	}

	/*
	 * Send the page flip request to the backend *after* the event has
	 * been cached above, so that when the page flip done event arrives
	 * from the backend it can be delivered without racing against this
	 * code.
	 * If this is not a page flip, i.e. no flip done event is expected
	 * from the backend, then send the event now.
	 */
	if (!display_send_page_flip(pipe, old_plane_state))
		send_pending_event(pipeline);

	drm_dev_exit(idx);
}

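/*
 * Only the fixed resolution taken from the connector configuration
 * (pipeline->width x pipeline->height) is accepted; every other mode is
 * rejected.
 */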
static enum drm_mode_status
display_mode_valid(struct drm_simple_display_pipe *pipe,
		   const struct drm_display_mode *mode)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);

	if (mode->hdisplay != pipeline->width)
		return MODE_ERROR;

	if (mode->vdisplay != pipeline->height)
		return MODE_ERROR;

	return MODE_OK;
}

static const struct drm_simple_display_pipe_funcs display_funcs = {
	.mode_valid = display_mode_valid,
	.enable = display_enable,
	.disable = display_disable,
	.check = display_check,
	.update = display_update,
};

static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
			     int index, struct xen_drm_front_cfg_connector *cfg,
			     struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_device *dev = drm_info->drm_dev;
	const u32 *formats;
	int format_count;
	int ret;

	pipeline->drm_info = drm_info;
	pipeline->index = index;
	pipeline->height = cfg->height;
	pipeline->width = cfg->width;

	INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);

	ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
	if (ret)
		return ret;

	formats = xen_drm_front_conn_get_formats(&format_count);

	return drm_simple_display_pipe_init(dev, &pipeline->pipe,
					    &display_funcs, formats,
					    format_count, NULL,
					    &pipeline->conn);
}

int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
{
	struct drm_device *dev = drm_info->drm_dev;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &mode_config_funcs;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_cfg_connector *cfg =
				&drm_info->front_info->cfg.connectors[i];
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		ret = display_pipe_init(drm_info, i, cfg, pipeline);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	drm_mode_config_reset(dev);
	drm_kms_helper_poll_init(dev);
	return 0;
}

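/*
 * Cancel any outstanding page flip time-out workers and deliver stalled
 * events so user-space is not left blocked while the device goes away.
 */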
void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
{
	int i;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		cancel_delayed_work_sync(&pipeline->pflip_to_worker);

		send_pending_event(pipeline);
	}
}