/*
 * Copyright © 2007 Red Hat, Inc.
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <errno.h>
#include <string.h>	/* strerror() */

#include <xorg-server.h>
#include <X11/Xdefs.h>
#include <list.h>

#include "amdgpu_drv.h"
#include "amdgpu_drm_queue.h"

struct amdgpu_drm_queue_entry {
	struct xorg_list list;
	uint64_t usec;
	uint64_t id;
	uintptr_t seq;
	void *data;
	ClientPtr client;
	xf86CrtcPtr crtc;
	amdgpu_drm_handler_proc handler;
	amdgpu_drm_abort_proc abort;
	Bool is_flip;
	unsigned int frame;
};

static int amdgpu_drm_queue_refcnt;
static struct xorg_list amdgpu_drm_queue;
static struct xorg_list amdgpu_drm_flip_signalled;
static struct xorg_list amdgpu_drm_vblank_signalled;
static struct xorg_list amdgpu_drm_vblank_deferred;
static uintptr_t amdgpu_drm_queue_seq;
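
/*
 * Lifecycle of a queue entry (as implemented below): an entry is allocated
 * onto amdgpu_drm_queue and waits for its kernel event. When the event
 * arrives, amdgpu_drm_queue_handler moves it to amdgpu_drm_flip_signalled
 * or amdgpu_drm_vblank_signalled. Signalled vblank entries may be parked on
 * amdgpu_drm_vblank_deferred while a page flip is pending on their CRTC,
 * and are re-queued once the flip completes. Handling or aborting an entry
 * unlinks and frees it.
 */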

/*
 * Process a signalled DRM event: unlink the entry, call its handler (or its
 * abort callback if the handler has been cleared) and free it.
 */
static void
amdgpu_drm_queue_handle_one(struct amdgpu_drm_queue_entry *e)
{
	xorg_list_del(&e->list);
	if (e->handler)
		e->handler(e->crtc, e->frame, e->usec, e->data);
	else
		e->abort(e->crtc, e->data);
	free(e);
}

/*
 * Abort one queued DRM entry, removing it
 * from the list, calling the abort function and
 * freeing the memory
 */
static void
amdgpu_drm_abort_one(struct amdgpu_drm_queue_entry *e)
{
	xorg_list_del(&e->list);
	e->abort(e->crtc, e->data);
	free(e);
}

/*
 * Common vblank/page-flip event callback (see amdgpu_drm_queue_init): find
 * the queue entry matching the event's sequence number and move it to the
 * corresponding signalled list, or abort it if its handler was cleared.
 */
static void
amdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec,
			 unsigned int usec, void *user_ptr)
{
	uintptr_t seq = (uintptr_t)user_ptr;
	struct amdgpu_drm_queue_entry *e = NULL, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->seq == seq) {
			if (!e->handler) {
				amdgpu_drm_abort_one(e);
				break;
			}

			xorg_list_del(&e->list);
			e->usec = (uint64_t)sec * 1000000 + usec;
			e->frame = frame;
			xorg_list_append(&e->list, e->is_flip ?
					 &amdgpu_drm_flip_signalled :
					 &amdgpu_drm_vblank_signalled);
			break;
		}
	}
}

/*
 * Handle signalled vblank events. If we're waiting for a flip event,
 * put events for that CRTC in the vblank_deferred list.
 */
static void
amdgpu_drm_handle_vblank_signalled(void)
{
	drmmode_crtc_private_ptr drmmode_crtc;
	struct amdgpu_drm_queue_entry *e;

	while (!xorg_list_is_empty(&amdgpu_drm_vblank_signalled)) {
		e = xorg_list_first_entry(&amdgpu_drm_vblank_signalled,
					  struct amdgpu_drm_queue_entry, list);
		drmmode_crtc = e->crtc->driver_private;

		if (drmmode_crtc->wait_flip_nesting_level == 0) {
			amdgpu_drm_queue_handle_one(e);
			continue;
		}

		xorg_list_del(&e->list);
		xorg_list_append(&e->list, &amdgpu_drm_vblank_deferred);
	}
}

/*
 * Handle deferred DRM vblank events
 *
 * This function must be called after amdgpu_drm_wait_pending_flip, once
 * it's safe to attempt queueing a flip again.
 */
void
amdgpu_drm_queue_handle_deferred(xf86CrtcPtr crtc)
{
	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
	struct amdgpu_drm_queue_entry *e, *tmp;

	if (drmmode_crtc->wait_flip_nesting_level == 0 ||
	    --drmmode_crtc->wait_flip_nesting_level > 0)
		return;

	/* Put previously deferred vblank events for this CRTC back in the
	 * signalled queue
	 */
	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_deferred, list) {
		if (e->crtc != crtc)
			continue;

		xorg_list_del(&e->list);
		xorg_list_append(&e->list, &amdgpu_drm_vblank_signalled);
	}

	amdgpu_drm_handle_vblank_signalled();
}

/*
 * Allocate and enqueue an entry for an expected DRM event. When the event
 * arrives, the data stored here is passed to the handler. Returns the
 * sequence number identifying the entry (to be passed to the kernel as the
 * event's user data), or AMDGPU_DRM_QUEUE_ERROR if allocation fails.
 */
uintptr_t
amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
		       uint64_t id, void *data,
		       amdgpu_drm_handler_proc handler,
		       amdgpu_drm_abort_proc abort,
		       Bool is_flip)
{
	struct amdgpu_drm_queue_entry *e;

	e = calloc(1, sizeof(struct amdgpu_drm_queue_entry));
	if (!e)
		return AMDGPU_DRM_QUEUE_ERROR;

	/* Skip the reserved error value if the sequence counter wraps to it */
	if (_X_UNLIKELY(amdgpu_drm_queue_seq == AMDGPU_DRM_QUEUE_ERROR))
		amdgpu_drm_queue_seq++;

	e->seq = amdgpu_drm_queue_seq++;
	e->client = client;
	e->crtc = crtc;
	e->id = id;
	e->data = data;
	e->handler = handler;
	e->abort = abort;
	e->is_flip = is_flip;

	xorg_list_append(&e->list, &amdgpu_drm_queue);

	return e->seq;
}
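
/*
 * Hypothetical usage sketch (not part of this file): a caller allocates an
 * entry, hands the returned sequence number to the kernel as the event
 * cookie so amdgpu_drm_queue_handler() can match it, and aborts the entry
 * if submitting the ioctl fails. The drmModePageFlip() parameters below are
 * illustrative:
 *
 *	uintptr_t seq = amdgpu_drm_queue_alloc(crtc, client, id, data,
 *					       handler, abort, TRUE);
 *	if (seq == AMDGPU_DRM_QUEUE_ERROR)
 *		return FALSE;
 *	if (drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT,
 *			    (void*)seq) != 0)
 *		amdgpu_drm_abort_entry(seq);
 */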

/*
 * Abort drm queue entries for a client
 *
 * NOTE: This keeps the entries in the list until the DRM event arrives,
 * but then it calls the abort functions instead of the handler
 * functions.
 */
void
amdgpu_drm_abort_client(ClientPtr client)
{
	struct amdgpu_drm_queue_entry *e = NULL;

	xorg_list_for_each_entry(e, &amdgpu_drm_queue, list) {
		if (e->client == client)
			e->handler = NULL;
	}
}
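
/*
 * Hypothetical trigger (not part of this file): a driver might call this
 * from a ClientStateCallback when a client disconnects, e.g.:
 *
 *	static void client_state_cb(CallbackListPtr *pcbl, void *closure,
 *				    void *calldata)
 *	{
 *		NewClientInfoRec *info = calldata;
 *
 *		if (info->client->clientState == ClientStateGone)
 *			amdgpu_drm_abort_client(info->client);
 *	}
 */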

/*
 * Abort specific drm queue entry
 */
void
amdgpu_drm_abort_entry(uintptr_t seq)
{
	struct amdgpu_drm_queue_entry *e = NULL, *tmp;

	if (seq == AMDGPU_DRM_QUEUE_ERROR)
		return;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list) {
		if (e->seq == seq) {
			amdgpu_drm_abort_one(e);
			return;
		}
	}

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_deferred, list) {
		if (e->seq == seq) {
			amdgpu_drm_abort_one(e);
			return;
		}
	}

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->seq == seq) {
			amdgpu_drm_abort_one(e);
			break;
		}
	}
}

/*
 * Abort specific drm queue entry by ID
 */
void
amdgpu_drm_abort_id(uint64_t id)
{
	struct amdgpu_drm_queue_entry *e = NULL, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->id == id) {
			amdgpu_drm_abort_one(e);
			break;
		}
	}
}

/*
 * drmHandleEvent wrapper: process pending DRM events, then dispatch any
 * signalled page flip and vblank events.
 */
int
amdgpu_drm_handle_event(int fd, drmEventContext *event_context)
{
	struct amdgpu_drm_queue_entry *e;
	int r;

	/* Retry drmHandleEvent if it was interrupted by a signal in read() */
	do {
		r = drmHandleEvent(fd, event_context);
	} while (r < 0 && (errno == EINTR || errno == EAGAIN));

	if (r < 0) {
		static Bool printed;

		/* Log a persistent failure only once to avoid flooding the log */
		if (!printed) {
			ErrorF("%s: drmHandleEvent returned %d, errno=%d (%s)\n",
			       __func__, r, errno, strerror(errno));
			printed = TRUE;
		}
	}

	/* Signalled flip events are always dispatched here; vblank events may
	 * still be deferred per CRTC
	 */
	while (!xorg_list_is_empty(&amdgpu_drm_flip_signalled)) {
		e = xorg_list_first_entry(&amdgpu_drm_flip_signalled,
					  struct amdgpu_drm_queue_entry, list);
		amdgpu_drm_queue_handle_one(e);
	}

	amdgpu_drm_handle_vblank_signalled();

	return r;
}
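
/*
 * Sketch of how this wrapper might be hooked up to the server's main loop,
 * assuming an X server with the SetNotifyFd API; the drm_notify_fd name is
 * illustrative, not part of this file:
 *
 *	static void drm_notify_fd(int fd, int ready, void *data)
 *	{
 *		drmmode_ptr drmmode = data;
 *
 *		amdgpu_drm_handle_event(fd, &drmmode->event_context);
 *	}
 *
 *	SetNotifyFd(pAMDGPUEnt->fd, drm_notify_fd, X_NOTIFY_READ, drmmode);
 */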

/*
 * Wait for pending page flip on given CRTC to complete
 */
void
amdgpu_drm_wait_pending_flip(xf86CrtcPtr crtc)
{
	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(crtc->scrn);
	struct amdgpu_drm_queue_entry *e;

	drmmode_crtc->wait_flip_nesting_level++;

	while (drmmode_crtc->flip_pending &&
	       !xorg_list_is_empty(&amdgpu_drm_flip_signalled)) {
		e = xorg_list_first_entry(&amdgpu_drm_flip_signalled,
					  struct amdgpu_drm_queue_entry, list);
		amdgpu_drm_queue_handle_one(e);
	}

	while (drmmode_crtc->flip_pending
	       && amdgpu_drm_handle_event(pAMDGPUEnt->fd,
					  &drmmode_crtc->drmmode->event_context) >= 0);
}
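
/*
 * Hypothetical call pattern (derived from the comments above): bracket work
 * that must not race with a pending flip between the two calls, so deferred
 * vblank events are drained once it is safe to queue a flip again:
 *
 *	amdgpu_drm_wait_pending_flip(crtc);
 *	... submit a new flip, or fall back to a blit ...
 *	amdgpu_drm_queue_handle_deferred(crtc);
 */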

/*
 * Initialize the DRM event queue
 */
void
amdgpu_drm_queue_init(ScrnInfoPtr scrn)
{
	AMDGPUInfoPtr info = AMDGPUPTR(scrn);
	drmmode_ptr drmmode = &info->drmmode;

	drmmode->event_context.version = 2;
	drmmode->event_context.vblank_handler = amdgpu_drm_queue_handler;
	drmmode->event_context.page_flip_handler = amdgpu_drm_queue_handler;

	/* The event lists are shared between all screens; only initialize
	 * them for the first one
	 */
	if (amdgpu_drm_queue_refcnt++)
		return;

	xorg_list_init(&amdgpu_drm_queue);
	xorg_list_init(&amdgpu_drm_flip_signalled);
	xorg_list_init(&amdgpu_drm_vblank_signalled);
	xorg_list_init(&amdgpu_drm_vblank_deferred);
}

/*
 * Deinitialize the DRM event queue: abort any entries still pending for
 * this screen and drop the reference count.
 */
void
amdgpu_drm_queue_close(ScrnInfoPtr scrn)
{
	struct amdgpu_drm_queue_entry *e = NULL, *tmp;

	xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
		if (e->crtc->scrn == scrn)
			amdgpu_drm_abort_one(e);
	}

	amdgpu_drm_queue_refcnt--;
}