xref: /dragonfly/sys/dev/drm/drm_irq.c (revision 99dd49c5)
/*-
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <anholt@FreeBSD.org>
 *
 */

/** @file drm_irq.c
 * Support code for handling setup/teardown of interrupt handlers and
 * handing interrupt handlers off to the drivers.
 */

#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"

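/*
 * IRQ_BUSID ioctl: check that the bus/device/function supplied by userland
 * matches this device and report the interrupt line assigned to it.
 */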
int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_irq_busid *irq = data;

	if ((irq->busnum >> 8) != dev->pci_domain ||
	    (irq->busnum & 0xff) != dev->pci_bus ||
	    irq->devnum != dev->pci_slot ||
	    irq->funcnum != dev->pci_func)
		return EINVAL;

	irq->irq = dev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n",
	    irq->busnum, irq->devnum, irq->funcnum, irq->irq);

	return 0;
}

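/*
 * Callout handler armed by drm_irq_install() and drm_vblank_put().  When the
 * timer fires, turn off vblank interrupts on every CRTC that no longer has
 * users, saving the hardware counter so the software count can be brought
 * back in sync when they are re-enabled.
 */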
static void vblank_disable_fn(void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	int i;

	if (callout_pending(&dev->vblank_disable_timer)) {
		/* callout was reset */
		return;
	}
	if (!callout_active(&dev->vblank_disable_timer)) {
		/* callout was stopped */
		return;
	}
	callout_deactivate(&dev->vblank_disable_timer);

	DRM_DEBUG("vblank_disable: %s\n", dev->vblank_disable_allowed ?
		"allowed" : "denied");
	if (!dev->vblank_disable_allowed)
		return;

	for (i = 0; i < dev->num_crtcs; i++) {
		if (atomic_read(&dev->vblank[i].refcount) == 0 &&
		    dev->vblank[i].enabled && !dev->vblank[i].inmodeset) {
			DRM_DEBUG("disabling vblank on crtc %d\n", i);
			dev->vblank[i].last =
			    dev->driver->get_vblank_counter(dev, i);
			dev->driver->disable_vblank(dev, i);
			dev->vblank[i].enabled = 0;
		}
	}
}

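/*
 * Undo drm_vblank_init(): stop the disable timer, run one final disable pass
 * and free the per-CRTC vblank state.
 */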
void drm_vblank_cleanup(struct drm_device *dev)
{
	/* Bail if the driver didn't call drm_vblank_init() */
	if (dev->num_crtcs == 0)
		return;

	DRM_SPINLOCK(&dev->vbl_lock);
	callout_stop(&dev->vblank_disable_timer);
	DRM_SPINUNLOCK(&dev->vbl_lock);

	vblank_disable_fn((void *)dev);

	free(dev->vblank, DRM_MEM_DRIVER);

	dev->num_crtcs = 0;
}

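/*
 * Allocate and zero the per-CRTC vblank bookkeeping (wait queues, counters,
 * reference counts).  Drivers that generate vblank interrupts typically call
 * this at load time, before drm_irq_install().
 */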
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
	int i, ret = ENOMEM;

	callout_init(&dev->vblank_disable_timer);
	atomic_set(&dev->vbl_signal_pending, 0);
	dev->num_crtcs = num_crtcs;

	dev->vblank = malloc(sizeof(struct drm_vblank_info) * num_crtcs,
	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (!dev->vblank)
		goto err;

	DRM_DEBUG("\n");

	/* Zero per-crtc vblank stuff */
	for (i = 0; i < num_crtcs; i++) {
		DRM_INIT_WAITQUEUE(&dev->vblank[i].queue);
		TAILQ_INIT(&dev->vblank[i].sigs);
		atomic_set(&dev->vblank[i].count, 0);
		atomic_set(&dev->vblank[i].refcount, 0);
	}

	dev->vblank_disable_allowed = 0;

	return 0;

err:
	drm_vblank_cleanup(dev);
	return ret;
}

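/*
 * Install the driver's interrupt handler on the device IRQ, running the
 * driver's pre-/post-install hooks around it, then enable vblank interrupts
 * on each CRTC and arm the disable timer.
 */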
int drm_irq_install(struct drm_device *dev)
{
	int crtc, retcode;

	if (dev->irq == 0 || dev->dev_private == NULL)
		return EINVAL;

	DRM_DEBUG("irq=%d\n", dev->irq);

	DRM_LOCK();
	if (dev->irq_enabled) {
		DRM_UNLOCK();
		return EBUSY;
	}
	dev->irq_enabled = 1;

	dev->context_flag = 0;

	/* Before installing handler */
	dev->driver->irq_preinstall(dev);
	DRM_UNLOCK();

	/* Install handler */
	retcode = bus_setup_intr(dev->device, dev->irqr, INTR_MPSAFE,
				 dev->driver->irq_handler, dev, &dev->irqh,
				 &dev->irq_lock);
	if (retcode != 0)
		goto err;

	/* After installing handler */
	DRM_LOCK();
	dev->driver->irq_postinstall(dev);
	DRM_UNLOCK();
	if (dev->driver->enable_vblank) {
		DRM_SPINLOCK(&dev->vbl_lock);
		for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
			if (dev->driver->enable_vblank(dev, crtc) == 0) {
				dev->vblank[crtc].enabled = 1;
			}
		}
		callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
		    (timeout_t *)vblank_disable_fn, (void *)dev);
		DRM_SPINUNLOCK(&dev->vbl_lock);
	}

	return 0;
err:
	DRM_LOCK();
	dev->irq_enabled = 0;
	DRM_UNLOCK();

	return retcode;
}

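/*
 * Disable vblank interrupts, wake any sleeping waiters so they don't hang,
 * and tear down the interrupt handler.  Called with the DRM lock held.
 */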
int drm_irq_uninstall(struct drm_device *dev)
{
	int crtc;

	if (!dev->irq_enabled)
		return EINVAL;

	dev->irq_enabled = 0;

	/*
	 * Wake up any waiters so they don't hang.
	 */
	DRM_SPINLOCK(&dev->vbl_lock);
	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
		if (dev->vblank[crtc].enabled) {
			DRM_WAKEUP(&dev->vblank[crtc].queue);
			dev->vblank[crtc].last =
			    dev->driver->get_vblank_counter(dev, crtc);
			dev->vblank[crtc].enabled = 0;
		}
	}
	DRM_SPINUNLOCK(&dev->vbl_lock);

	DRM_DEBUG("irq=%d\n", dev->irq);

	dev->driver->irq_uninstall(dev);

	DRM_UNLOCK();
	bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
	DRM_LOCK();

	return 0;
}

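/*
 * DRM_CONTROL ioctl: install or uninstall the interrupt handler on behalf of
 * userland, for drivers whose IRQ is managed through the DRM.
 */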
int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_control *ctl = data;
	int err;

	switch (ctl->func) {
	case DRM_INST_HANDLER:
		/* Handle drivers whose DRM used to require IRQ setup but
		 * no longer does.
		 */
		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
			return 0;
		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
		    ctl->irq != dev->irq)
			return EINVAL;
		return drm_irq_install(dev);
	case DRM_UNINST_HANDLER:
		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
			return 0;
		DRM_LOCK();
		err = drm_irq_uninstall(dev);
		DRM_UNLOCK();
		return err;
	default:
		return EINVAL;
	}
}

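/* Return the software vblank counter for the given CRTC. */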
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
	return atomic_read(&dev->vblank[crtc].count);
}

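/*
 * Resynchronize the software vblank counter with the hardware counter after
 * interrupts have been off, adding however many events were missed and
 * accounting for wrap of the (possibly narrow) hardware register.
 */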
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
	u32 cur_vblank, diff;

	/*
	 * Interrupts were disabled prior to this call, so deal with counter
	 * wrap if needed.
	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
	 * here if the register is small or we had vblank interrupts off for
	 * a long time.
	 */
	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
	diff = cur_vblank - dev->vblank[crtc].last;
	if (cur_vblank < dev->vblank[crtc].last) {
		diff += dev->max_vblank_count;

		DRM_DEBUG("vblank[%d].last=0x%x, cur_vblank=0x%x => diff=0x%x\n",
		    crtc, dev->vblank[crtc].last, cur_vblank, diff);
	}

	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
	    crtc, diff);

	atomic_add(diff, &dev->vblank[crtc].count);
}

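/*
 * Take a reference on vblank events for a CRTC; the 0->1 transition enables
 * the interrupt and resynchronizes the counter.  Returns 0 on success or the
 * driver's error code if the interrupt could not be enabled.
 *
 * A minimal usage sketch (hypothetical driver code):
 *
 *	if (drm_vblank_get(dev, crtc) == 0) {
 *		seq = drm_vblank_count(dev, crtc);
 *		... wait for or act on a later vblank ...
 *		drm_vblank_put(dev, crtc);
 *	}
 */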
int drm_vblank_get(struct drm_device *dev, int crtc)
{
	int ret = 0;

	DRM_SPINLOCK(&dev->vbl_lock);
	/* Going from 0->1 means we have to enable interrupts again */
	atomic_add_acq_int(&dev->vblank[crtc].refcount, 1);
	if (dev->vblank[crtc].refcount == 1 &&
	    !dev->vblank[crtc].enabled) {
		ret = dev->driver->enable_vblank(dev, crtc);
		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
		if (ret)
			atomic_dec(&dev->vblank[crtc].refcount);
		else {
			dev->vblank[crtc].enabled = 1;
			drm_update_vblank_count(dev, crtc);
		}
	}
	DRM_SPINUNLOCK(&dev->vbl_lock);

	return ret;
}

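/*
 * Drop a vblank reference; when the last user goes away, arm the disable
 * timer so the interrupt can be turned off again after a grace period.
 */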
void drm_vblank_put(struct drm_device *dev, int crtc)
{
	KASSERT(atomic_read(&dev->vblank[crtc].refcount) > 0,
	    ("invalid refcount"));

	/* Last user schedules interrupt disable */
	atomic_subtract_acq_int(&dev->vblank[crtc].refcount, 1);

	DRM_SPINLOCK(&dev->vbl_lock);
	if (dev->vblank[crtc].refcount == 0)
		callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
		    (timeout_t *)vblank_disable_fn, (void *)dev);
	DRM_SPINUNLOCK(&dev->vbl_lock);
}

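/*
 * MODESET_CTL ioctl: take a vblank reference across a modeset so the
 * interrupt (and hence the counter) stays enabled in the interim, and allow
 * disabling again once the modeset has finished.
 */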
int drm_modeset_ctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_modeset_ctl *modeset = data;
	int crtc, ret = 0;

	DRM_DEBUG("num_crtcs=%d\n", dev->num_crtcs);
	/* If drm_vblank_init() hasn't been called yet, just no-op */
	if (!dev->num_crtcs)
		goto out;

	crtc = modeset->crtc;
	DRM_DEBUG("crtc=%d\n", crtc);
	if (crtc >= dev->num_crtcs) {
		ret = EINVAL;
		goto out;
	}

	/*
	 * To avoid all the problems that might happen if interrupts
	 * were enabled/disabled around or between these calls, we just
	 * have the kernel take a reference on the CRTC (just once though
	 * to avoid corrupting the count if multiple, mismatched calls occur),
	 * so that interrupts remain enabled in the interim.
	 */
	switch (modeset->cmd) {
	case _DRM_PRE_MODESET:
		DRM_DEBUG("pre-modeset\n");
		if (!dev->vblank[crtc].inmodeset) {
			dev->vblank[crtc].inmodeset = 0x1;
			if (drm_vblank_get(dev, crtc) == 0)
				dev->vblank[crtc].inmodeset |= 0x2;
		}
		break;
	case _DRM_POST_MODESET:
		DRM_DEBUG("post-modeset\n");
		if (dev->vblank[crtc].inmodeset) {
			DRM_SPINLOCK(&dev->vbl_lock);
			dev->vblank_disable_allowed = 1;
			DRM_SPINUNLOCK(&dev->vbl_lock);

			if (dev->vblank[crtc].inmodeset & 0x2)
				drm_vblank_put(dev, crtc);

			dev->vblank[crtc].inmodeset = 0;
		}
		break;
	default:
		ret = EINVAL;
		break;
	}

out:
	return ret;
}

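/*
 * WAIT_VBLANK ioctl: wait until the requested vblank sequence (relative or
 * absolute) has passed on the chosen CRTC, then return the current count and
 * a timestamp.  Signal delivery on vblank is not supported here.
 */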
int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	union drm_wait_vblank *vblwait = data;
	unsigned int flags, seq, crtc;
	int ret = 0;

	if (!dev->irq_enabled)
		return EINVAL;

	if (vblwait->request.type &
	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
		    vblwait->request.type,
		    (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
		return EINVAL;
	}

	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;

	if (crtc >= dev->num_crtcs)
		return EINVAL;

	ret = drm_vblank_get(dev, crtc);
	if (ret) {
		DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
		return ret;
	}
	seq = drm_vblank_count(dev, crtc);

	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
	case _DRM_VBLANK_RELATIVE:
		vblwait->request.sequence += seq;
		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		/* FALLTHROUGH */
	case _DRM_VBLANK_ABSOLUTE:
		break;
	default:
		ret = EINVAL;
		goto done;
	}

	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1 << 23)) {
		vblwait->request.sequence = seq + 1;
	}

	if (flags & _DRM_VBLANK_SIGNAL) {
#if 0 /* disabled */
		drm_vbl_sig_t *vbl_sig = malloc(sizeof(drm_vbl_sig_t),
		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
		if (vbl_sig == NULL)
			return ENOMEM;

		vbl_sig->sequence = vblwait->request.sequence;
		vbl_sig->signo = vblwait->request.signal;
		vbl_sig->pid = DRM_CURRENTPID;

		vblwait->reply.sequence = atomic_read(&dev->vbl_received);

		DRM_SPINLOCK(&dev->vbl_lock);
		TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
		DRM_SPINUNLOCK(&dev->vbl_lock);
		ret = 0;
#endif
		ret = EINVAL;
	} else {
		DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
		    vblwait->request.sequence, crtc);
		dev->vblank[crtc].last = vblwait->request.sequence;
		for (ret = 0; !ret && !(((drm_vblank_count(dev, crtc) -
		    vblwait->request.sequence) <= (1 << 23)) ||
		    !dev->irq_enabled);) {
			lwkt_serialize_enter(&dev->irq_lock);
			if (!(((drm_vblank_count(dev, crtc) -
			    vblwait->request.sequence) <= (1 << 23)) ||
			    !dev->irq_enabled))
				ret = serialize_sleep(&dev->vblank[crtc].queue,
				    &dev->irq_lock, PCATCH, "vblwtq",
				    3 * DRM_HZ);
			lwkt_serialize_exit(&dev->irq_lock);
		}

		if (ret != EINTR && ret != ERESTART) {
			struct timeval now;

			microtime(&now);
			vblwait->reply.tval_sec = now.tv_sec;
			vblwait->reply.tval_usec = now.tv_usec;
			vblwait->reply.sequence = drm_vblank_count(dev, crtc);
			DRM_DEBUG("returning %d to client, irq_enabled %d\n",
			    vblwait->reply.sequence, dev->irq_enabled);
		} else {
			DRM_DEBUG("vblank wait interrupted by signal\n");
		}
	}

done:
	drm_vblank_put(dev, crtc);
	return ret;
}

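/*
 * Signal delivery on vblank is not implemented; the old code is kept below
 * under #if 0 for reference.
 */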
void drm_vbl_send_signals(struct drm_device *dev, int crtc)
{
}

#if 0 /* disabled */
void drm_vbl_send_signals(struct drm_device *dev, int crtc)
{
	drm_vbl_sig_t *vbl_sig;
	unsigned int vbl_seq = atomic_read(&dev->vbl_received);
	struct proc *p;

	vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
	while (vbl_sig != NULL) {
		drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);

		if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
			p = pfind(vbl_sig->pid);
			if (p != NULL)
				psignal(p, vbl_sig->signo);

			TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
			DRM_FREE(vbl_sig, sizeof(*vbl_sig));
		}
		vbl_sig = next;
	}
}
#endif

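/*
 * Called from the driver's interrupt handler once per vblank: bump the
 * software counter and wake anyone sleeping in drm_wait_vblank().
 */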
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
	atomic_inc(&dev->vblank[crtc].count);
	DRM_WAKEUP(&dev->vblank[crtc].queue);
	drm_vbl_send_signals(dev, crtc);
}