// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/errno.h>
#include <linux/irq.h>

#include <drm/drm_print.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>

#include "xen_drm_front.h"
#include "xen_drm_front_evtchnl.h"

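/*
 * Interrupt handler of the control channel: consume responses published
 * by the backend on the shared ring and complete the waiter blocked on
 * u.req.completion for the matching request id.
 */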
static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_resp *resp;
	RING_IDX i, rp;
	unsigned long flags;

	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

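	/*
	 * Standard Xen shared-ring consumer loop: read rsp_prod once, consume
	 * all responses up to it, then re-check for responses the backend may
	 * have queued in the meantime.
	 */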
again:
	rp = evtchnl->u.req.ring.sring->rsp_prod;
	/* ensure we see queued responses up to rp */
	virt_rmb();

	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
		if (unlikely(resp->id != evtchnl->evt_id))
			continue;

		switch (resp->operation) {
		case XENDISPL_OP_PG_FLIP:
		case XENDISPL_OP_FB_ATTACH:
		case XENDISPL_OP_FB_DETACH:
		case XENDISPL_OP_DBUF_CREATE:
		case XENDISPL_OP_DBUF_DESTROY:
		case XENDISPL_OP_SET_CONFIG:
			evtchnl->u.req.resp_status = resp->status;
			complete(&evtchnl->u.req.completion);
			break;

		default:
			DRM_ERROR("Operation %d is not supported\n",
				  resp->operation);
			break;
		}
	}

	evtchnl->u.req.ring.rsp_cons = i;

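	/*
	 * If requests are still in flight, re-arm notifications and re-check
	 * the ring; otherwise ask the backend to send an event for the very
	 * next response.
	 */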
	if (i != evtchnl->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		evtchnl->u.req.ring.sring->rsp_event = i + 1;
	}

	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}

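/*
 * Interrupt handler of the event channel: deliver backend events,
 * currently only page flip completions, to the frontend driver.
 */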
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_event_page *page = evtchnl->u.evt.page;
	u32 cons, prod;
	unsigned long flags;

	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

	prod = page->in_prod;
	/* ensure we see ring contents up to prod */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	for (cons = page->in_cons; cons != prod; cons++) {
		struct xendispl_evt *event;

		event = &XENDISPL_IN_RING_REF(page, cons);
		if (unlikely(event->id != evtchnl->evt_id++))
			continue;

		switch (event->type) {
		case XENDISPL_EVT_PG_FLIP:
			xen_drm_front_on_frame_done(front_info, evtchnl->index,
						    event->op.pg_flip.fb_cookie);
			break;
		}
	}
	page->in_cons = cons;
	/* ensure ring reads complete before the new in_cons is visible */
	virt_wmb();

out:
	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}

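/*
 * Tear down a channel: wake up all waiters with -EIO, unbind the IRQ
 * handler, free the event channel and end grant access to the shared page.
 */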
static void evtchnl_free(struct xen_drm_front_info *front_info,
			 struct xen_drm_front_evtchnl *evtchnl)
{
	unsigned long page = 0;

	if (evtchnl->type == EVTCHNL_TYPE_REQ)
		page = (unsigned long)evtchnl->u.req.ring.sring;
	else if (evtchnl->type == EVTCHNL_TYPE_EVT)
		page = (unsigned long)evtchnl->u.evt.page;
	if (!page)
		return;

	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

	if (evtchnl->type == EVTCHNL_TYPE_REQ) {
		/* release everyone still waiting for a response, if any */
		evtchnl->u.req.resp_status = -EIO;
		complete_all(&evtchnl->u.req.completion);
	}

	if (evtchnl->irq)
		unbind_from_irqhandler(evtchnl->irq, evtchnl);

	if (evtchnl->port)
		xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);

	/* end access and free the page */
	if (evtchnl->gref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(evtchnl->gref, page);

	memset(evtchnl, 0, sizeof(*evtchnl));
}

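/*
 * Allocate a channel of the given type: a zeroed shared page granted to the
 * backend and an event channel bound to the corresponding interrupt handler.
 */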
static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
			 struct xen_drm_front_evtchnl *evtchnl,
			 enum xen_drm_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	unsigned long page;
	grant_ref_t gref;
	irq_handler_t handler;
	int ret;

	memset(evtchnl, 0, sizeof(*evtchnl));
	evtchnl->type = type;
	evtchnl->index = index;
	evtchnl->front_info = front_info;
	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
	evtchnl->gref = GRANT_INVALID_REF;

	page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_displif_sring *sring;

		init_completion(&evtchnl->u.req.completion);
		mutex_init(&evtchnl->u.req.req_io_lock);
		sring = (struct xen_displif_sring *)page;
		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);

		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
		if (ret < 0) {
			evtchnl->u.req.ring.sring = NULL;
			free_page(page);
			goto fail;
		}

		handler = evtchnl_interrupt_ctrl;
	} else {
		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
						  virt_to_gfn((void *)page), 0);
		if (ret < 0) {
			free_page(page);
			goto fail;
		}

		evtchnl->u.evt.page = (struct xendispl_event_page *)page;
		gref = ret;
		handler = evtchnl_interrupt_evt;
	}
	evtchnl->gref = gref;

	ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irqhandler(evtchnl->port,
					handler, 0, xb_dev->devicetype,
					evtchnl);
	if (ret < 0)
		goto fail;

	evtchnl->irq = ret;
	return 0;

fail:
	DRM_ERROR("Failed to allocate ring: %d\n", ret);
	return ret;
}

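/* Allocate one control/event channel pair per configured connector. */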
int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg;
	int ret, conn;

	cfg = &front_info->cfg;

	front_info->evt_pairs =
			kcalloc(cfg->num_connectors,
				sizeof(struct xen_drm_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs) {
		ret = -ENOMEM;
		goto fail;
	}

	for (conn = 0; conn < cfg->num_connectors; conn++) {
		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].req,
				    EVTCHNL_TYPE_REQ);
		if (ret < 0) {
			DRM_ERROR("Error allocating control channel\n");
			goto fail;
		}

		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].evt,
				    EVTCHNL_TYPE_EVT);
		if (ret < 0) {
			DRM_ERROR("Error allocating in-event channel\n");
			goto fail;
		}
	}
	front_info->num_evt_pairs = cfg->num_connectors;
	return 0;

fail:
	xen_drm_front_evtchnl_free_all(front_info);
	return ret;
}

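/*
 * Publish a channel's grant reference and event channel number under the
 * given XenStore path, so the backend can map the ring and bind the port.
 */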
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_drm_front_evtchnl *evtchnl,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
	int ret;

	/* write the shared page (ring) grant reference */
	ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing ring-ref");
		return ret;
	}

	/* write the event channel number */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing event channel");
		return ret;
	}

	return 0;
}

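/*
 * Publish all channel pairs in a single XenStore transaction, restarting
 * the transaction if it ends with -EAGAIN.
 */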
int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
{
	struct xenbus_transaction xbt;
	struct xen_drm_front_cfg *plat_data;
	int ret, conn;

	plat_data = &front_info->cfg;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (conn = 0; conn < plat_data->num_connectors; conn++) {
		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_REQ_RING_REF,
				      XENDISPL_FIELD_REQ_CHANNEL);
		if (ret < 0)
			goto fail;

		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_EVT_RING_REF,
				      XENDISPL_FIELD_EVT_CHANNEL);
		if (ret < 0)
			goto fail;
	}

	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);

fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
	return ret;
}

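/*
 * Push the privately produced requests to the shared ring and notify the
 * backend if it expects an event.
 */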
void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
{
	int notify;

	evtchnl->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(evtchnl->irq);
}

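/*
 * Set the state of all channels at once, serialized against the interrupt
 * handlers by io_lock.
 */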
void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
				     enum xen_drm_front_evtchnl_state state)
{
	unsigned long flags;
	int i;

	if (!front_info->evt_pairs)
		return;

	spin_lock_irqsave(&front_info->io_lock, flags);
	for (i = 0; i < front_info->num_evt_pairs; i++) {
		front_info->evt_pairs[i].req.state = state;
		front_info->evt_pairs[i].evt.state = state;
	}
	spin_unlock_irqrestore(&front_info->io_lock, flags);
}

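/* Free all channel pairs and the pair array itself. */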
void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}