/* $OpenBSD: drm_drv.c,v 1.96 2011/07/03 18:34:14 oga Exp $ */
/*-
 * Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
 * Copyright © 2008 Intel Corporation
 * Copyright 2003 Eric Anholt
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Daryll Strauss <daryll@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *    Eric Anholt <eric@anholt.net>
 *    Owain Ainsworth <oga@openbsd.org>
 *
 */

/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */

#include <sys/param.h>
#include <sys/limits.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>

#include <sys/ttycom.h> /* for TIOCSPGRP */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = 1;
#else
int drm_debug_flag = 0;
#endif

int	 drm_firstopen(struct drm_device *);
int	 drm_lastclose(struct drm_device *);
void	 drm_attach(struct device *, struct device *, void *);
int	 drm_probe(struct device *, void *, void *);
int	 drm_detach(struct device *, int);
int	 drm_activate(struct device *, int);
int	 drmprint(void *, const char *);
int	 drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
	     struct drm_pending_event **);

int	 drm_getunique(struct drm_device *, void *, struct drm_file *);
int	 drm_version(struct drm_device *, void *, struct drm_file *);
int	 drm_setversion(struct drm_device *, void *, struct drm_file *);
int	 drm_getmagic(struct drm_device *, void *, struct drm_file *);
int	 drm_authmagic(struct drm_device *, void *, struct drm_file *);
int	 drm_file_cmp(struct drm_file *, struct drm_file *);
SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);

/* functions used by the per-open handle code to grab references to objects */
void	 drm_handle_ref(struct drm_obj *);
void	 drm_handle_unref(struct drm_obj *);

int	 drm_handle_cmp(struct drm_handle *, struct drm_handle *);
int	 drm_name_cmp(struct drm_obj *, struct drm_obj *);
int	 drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
	     vm_fault_t, vm_prot_t, int);
boolean_t	 drm_flush(struct uvm_object *, voff_t, voff_t, int);

SPLAY_PROTOTYPE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
SPLAY_PROTOTYPE(drm_name_tree, drm_obj, entry, drm_name_cmp);

/*
 * attach drm to a pci-based driver.
 *
 * This function does all the pci-specific calculations for the
 * drm_attach_args.
 */
struct device *
drm_attach_pci(const struct drm_driver_info *driver, struct pci_attach_args *pa,
    int is_agp, struct device *dev)
{
	struct drm_attach_args arg;

	arg.driver = driver;
	arg.dmat = pa->pa_dmat;
	arg.bst = pa->pa_memt;
	arg.irq = pa->pa_intrline;
	arg.is_agp = is_agp;

	arg.busid_len = 20;
	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
	if (arg.busid == NULL) {
		printf("%s: no memory for drm\n", dev->dv_xname);
		return (NULL);
	}
	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);

	return (config_found(dev, &arg, drmprint));
}
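
/*
 * Example (illustrative sketch, not part of this file's API): a PCI DRM
 * driver's attach routine hands off to drm_attach_pci() above roughly as
 * follows; "exdrm", its softc and its driver-info table are hypothetical
 * names.
 *
 *	void
 *	exdrm_attach(struct device *parent, struct device *self, void *aux)
 *	{
 *		struct exdrm_softc	*sc = (struct exdrm_softc *)self;
 *		struct pci_attach_args	*pa = aux;
 *
 *		// map registers, fill in exdrm_driver_info, check for agp
 *		sc->sc_drm = drm_attach_pci(&exdrm_driver_info, pa, 0, self);
 *		// sc->sc_drm is the drm child device; NULL on failure
 *	}
 */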

int
drmprint(void *aux, const char *pnp)
{
	if (pnp != NULL)
		printf("drm at %s", pnp);
	return (UNCONF);
}

int
drm_pciprobe(struct pci_attach_args *pa, const struct drm_pcidev *idlist)
{
	const struct drm_pcidev *id_entry;

	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), idlist);
	if (id_entry != NULL)
		return 1;

	return 0;
}

int
drm_probe(struct device *parent, void *match, void *aux)
{
	struct drm_attach_args *da = aux;

	return (da->driver != NULL ? 1 : 0);
}

void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_device	*dev = (struct drm_device *)self;
	struct drm_attach_args	*da = aux;

	dev->dev_private = parent;
	dev->driver = da->driver;

	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->irq = da->irq;
	dev->unique = da->busid;
	dev->unique_len = da->busid_len;

	rw_init(&dev->dev_lock, "drmdevlk");
	mtx_init(&dev->lock.spinlock, IPL_NONE);
	mtx_init(&dev->event_lock, IPL_TTY);

	TAILQ_INIT(&dev->maplist);
	SPLAY_INIT(&dev->files);

	if (dev->driver->vblank_pipes != 0 && drm_vblank_init(dev,
	    dev->driver->vblank_pipes)) {
		printf(": failed to allocate vblank data\n");
		goto error;
	}

	/*
	 * the dma buffers api is just weird. offset 1Gb to ensure we don't
	 * conflict with it.
	 */
	dev->handle_ext = extent_create("drmext", 1024*1024*1024, LONG_MAX,
	    M_DRM, NULL, 0, EX_NOWAIT | EX_NOCOALESCE);
	if (dev->handle_ext == NULL) {
		DRM_ERROR("Failed to initialise handle extent\n");
		goto error;
	}

	if (dev->driver->flags & DRIVER_AGP) {
		if (da->is_agp)
			dev->agp = drm_agp_init();
		if (dev->driver->flags & DRIVER_AGP_REQUIRE &&
		    dev->agp == NULL) {
			printf(": couldn't find agp\n");
			goto error;
		}
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	if (drm_ctxbitmap_init(dev) != 0) {
		printf(": couldn't allocate memory for context bitmap.\n");
		goto error;
	}

	if (dev->driver->flags & DRIVER_GEM) {
		mtx_init(&dev->obj_name_lock, IPL_NONE);
		SPLAY_INIT(&dev->name_tree);
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_obj));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, 0, 0,
		    "drmobjpl", &pool_allocator_nointr);
	}

	printf("\n");
	return;

error:
	drm_lastclose(dev);
	dev->dev_private = NULL;
}

int
drm_detach(struct device *self, int flags)
{
	struct drm_device *dev = (struct drm_device *)self;

	drm_lastclose(dev);

	drm_ctxbitmap_cleanup(dev);

	extent_destroy(dev->handle_ext);

	drm_vblank_cleanup(dev);

	if (dev->agp && dev->agp->mtrr) {
		int retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	if (dev->agp != NULL) {
		drm_free(dev->agp);
		dev->agp = NULL;
	}

	return 0;
}

int
drm_activate(struct device *self, int act)
{
	switch (act) {
	case DVACT_DEACTIVATE:
		/* FIXME */
		break;
	}
	return (0);
}

struct cfattach drm_ca = {
	sizeof(struct drm_device), drm_probe, drm_attach,
	drm_detach, drm_activate
};

struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};

const struct drm_pcidev *
drm_find_description(int vendor, int device, const struct drm_pcidev *idlist)
{
	int i = 0;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    (idlist[i].device == device))
			return &idlist[i];
	}
	return NULL;
}

int
drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
{
	return (f1->minor < f2->minor ? -1 : f1->minor > f2->minor);
}

SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);

struct drm_file *
drm_find_file_by_minor(struct drm_device *dev, int minor)
{
	struct drm_file	key;

	key.minor = minor;
	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
}

int
drm_firstopen(struct drm_device *dev)
{
	struct drm_local_map	*map;
	int			 i;

	/* prebuild the SAREA */
	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
	    _DRM_CONTAINS_LOCK, &map);
	if (i != 0)
		return i;

	if (dev->driver->firstopen)
		dev->driver->firstopen(dev);

	if (dev->driver->flags & DRIVER_DMA) {
		if ((i = drm_dma_setup(dev)) != 0)
			return (i);
	}

	dev->magicid = 1;

	dev->irq_enabled = 0;
	dev->if_version = 0;

	dev->buf_pgid = 0;

	DRM_DEBUG("\n");

	return 0;
}

int
drm_lastclose(struct drm_device *dev)
{
	struct drm_local_map	*map, *mapsave;

	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	drm_agp_takedown(dev);
	drm_dma_takedown(dev);

	DRM_LOCK();
	if (dev->sg != NULL) {
		struct drm_sg_mem *sg = dev->sg;
		dev->sg = NULL;

		DRM_UNLOCK();
		drm_sg_cleanup(dev, sg);
		DRM_LOCK();
	}

	for (map = TAILQ_FIRST(&dev->maplist); map != TAILQ_END(&dev->maplist);
	    map = mapsave) {
		mapsave = TAILQ_NEXT(map, link);
		if ((map->flags & _DRM_DRIVER) == 0)
			drm_rmmap_locked(dev, map);
	}

	if (dev->lock.hw_lock != NULL) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		wakeup(&dev->lock); /* there should be nothing sleeping on it */
	}
	DRM_UNLOCK();

	return 0;
}

int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	int			 ret = 0;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	DRM_LOCK();
	if (dev->open_count++ == 0) {
		DRM_UNLOCK();
		if ((ret = drm_firstopen(dev)) != 0)
			goto err;
	} else {
		DRM_UNLOCK();
	}

	/* always allocate at least enough space for our data */
	file_priv = drm_calloc(1, max(dev->driver->file_priv_size,
	    sizeof(*file_priv)));
	if (file_priv == NULL) {
		ret = ENOMEM;
		goto err;
	}

	file_priv->kdev = kdev;
	file_priv->flags = flags;
	file_priv->minor = minor(kdev);
	TAILQ_INIT(&file_priv->evlist);
	file_priv->event_space = 4096; /* 4k for event buffer */
	DRM_DEBUG("minor = %d\n", file_priv->minor);

	/* for compatibility root is always authenticated */
	file_priv->authenticated = DRM_SUSER(p);

	if (dev->driver->flags & DRIVER_GEM) {
		SPLAY_INIT(&file_priv->obj_tree);
		mtx_init(&file_priv->table_lock, IPL_NONE);
	}

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file_priv);
		if (ret != 0) {
			goto free_priv;
		}
	}

	DRM_LOCK();
	/* first opener automatically becomes master if root */
	if (SPLAY_EMPTY(&dev->files) && !DRM_SUSER(p)) {
		DRM_UNLOCK();
		ret = EPERM;
		goto free_priv;
	}

	file_priv->master = SPLAY_EMPTY(&dev->files);

	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	DRM_UNLOCK();

	return (0);

free_priv:
	drm_free(file_priv);
err:
	DRM_LOCK();
	--dev->open_count;
	DRM_UNLOCK();
	return (ret);
}

int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev, *evtmp;
	int				 i, retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK();
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		goto done;
	}
	DRM_UNLOCK();

	if (dev->driver->close != NULL)
		dev->driver->close(dev, file_priv);

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)&dev->device, dev->open_count);

	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
		    DRM_CURRENTPID,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	}
	if (dev->driver->flags & DRIVER_DMA)
		drm_reclaim_buffers(dev, file_priv);

	mtx_enter(&dev->event_lock);
	for (i = 0; i < dev->vblank->vb_num; i++) {
		struct drmevlist *list = &dev->vblank->vb_crtcs[i].vbl_events;
		for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list);
		    ev = evtmp) {
			evtmp = TAILQ_NEXT(ev, link);
			if (ev->file_priv == file_priv) {
				TAILQ_REMOVE(list, ev, link);
				drm_vblank_put(dev, i);
				ev->destroy(ev);
			}
		}
	}
	while ((ev = TAILQ_FIRST(&file_priv->evlist)) != NULL) {
		TAILQ_REMOVE(&file_priv->evlist, ev, link);
		ev->destroy(ev);
	}
	mtx_leave(&dev->event_lock);

	DRM_LOCK();
	if (dev->driver->flags & DRIVER_GEM) {
		struct drm_handle	*han;
		mtx_enter(&file_priv->table_lock);
		while ((han = SPLAY_ROOT(&file_priv->obj_tree)) != NULL) {
			SPLAY_REMOVE(drm_obj_tree, &file_priv->obj_tree, han);
			drm_handle_unref(han->obj);
			drm_free(han);
		}
		mtx_leave(&file_priv->table_lock);
	}

	dev->buf_pgid = 0;

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	drm_free(file_priv);

done:
	if (--dev->open_count == 0) {
		DRM_UNLOCK();
		retcode = drm_lastclose(dev);
	} else
		DRM_UNLOCK();

	return (retcode);
}

/* drmioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int
drmioctl(dev_t kdev, u_long cmd, caddr_t data, int flags,
    struct proc *p)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_file *file_priv;

	if (dev == NULL)
		return ENODEV;

	DRM_LOCK();
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	DRM_UNLOCK();
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return EINVAL;
	}

	++file_priv->ioctl_count;

	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, DRM_IOCTL_NR(cmd), (long)&dev->device,
	    file_priv->authenticated);

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;

	case TIOCSPGRP:
		dev->buf_pgid = *(int *)data;
		return 0;

	case TIOCGPGRP:
		*(int *)data = dev->buf_pgid;
		return 0;
	case DRM_IOCTL_VERSION:
		return (drm_version(dev, data, file_priv));
	case DRM_IOCTL_GET_UNIQUE:
		return (drm_getunique(dev, data, file_priv));
	case DRM_IOCTL_GET_MAGIC:
		return (drm_getmagic(dev, data, file_priv));
	case DRM_IOCTL_WAIT_VBLANK:
		return (drm_wait_vblank(dev, data, file_priv));
	case DRM_IOCTL_MODESET_CTL:
		return (drm_modeset_ctl(dev, data, file_priv));
	case DRM_IOCTL_GEM_CLOSE:
		return (drm_gem_close_ioctl(dev, data, file_priv));

	/* removed */
	case DRM_IOCTL_GET_MAP:
		/* FALLTHROUGH */
	case DRM_IOCTL_GET_CLIENT:
		/* FALLTHROUGH */
	case DRM_IOCTL_GET_STATS:
		return (EINVAL);
	/*
	 * no-op ioctls: we don't check permissions on them because
	 * they do nothing. they'll be removed as soon as userland
	 * definitely no longer calls them.
	 */
	case DRM_IOCTL_SET_SAREA_CTX:
	case DRM_IOCTL_BLOCK:
	case DRM_IOCTL_UNBLOCK:
	case DRM_IOCTL_MOD_CTX:
	case DRM_IOCTL_MARK_BUFS:
	case DRM_IOCTL_FINISH:
	case DRM_IOCTL_INFO_BUFS:
	case DRM_IOCTL_SWITCH_CTX:
	case DRM_IOCTL_NEW_CTX:
	case DRM_IOCTL_GET_SAREA_CTX:
		return (0);
	}

	if (file_priv->authenticated == 1) {
		switch (cmd) {
		case DRM_IOCTL_RM_MAP:
			return (drm_rmmap_ioctl(dev, data, file_priv));
		case DRM_IOCTL_GET_CTX:
			return (drm_getctx(dev, data, file_priv));
		case DRM_IOCTL_RES_CTX:
			return (drm_resctx(dev, data, file_priv));
		case DRM_IOCTL_LOCK:
			return (drm_lock(dev, data, file_priv));
		case DRM_IOCTL_UNLOCK:
			return (drm_unlock(dev, data, file_priv));
		case DRM_IOCTL_MAP_BUFS:
			return (drm_mapbufs(dev, data, file_priv));
		case DRM_IOCTL_FREE_BUFS:
			return (drm_freebufs(dev, data, file_priv));
		case DRM_IOCTL_DMA:
			return (drm_dma(dev, data, file_priv));
		case DRM_IOCTL_AGP_INFO:
			return (drm_agp_info_ioctl(dev, data, file_priv));
		case DRM_IOCTL_GEM_FLINK:
			return (drm_gem_flink_ioctl(dev, data, file_priv));
		case DRM_IOCTL_GEM_OPEN:
			return (drm_gem_open_ioctl(dev, data, file_priv));
		}
	}

	/* master is always root */
	if (file_priv->master == 1) {
		switch (cmd) {
		case DRM_IOCTL_SET_VERSION:
			return (drm_setversion(dev, data, file_priv));
		case DRM_IOCTL_IRQ_BUSID:
			return (drm_irq_by_busid(dev, data, file_priv));
		case DRM_IOCTL_AUTH_MAGIC:
			return (drm_authmagic(dev, data, file_priv));
		case DRM_IOCTL_ADD_MAP:
			return (drm_addmap_ioctl(dev, data, file_priv));
		case DRM_IOCTL_ADD_CTX:
			return (drm_addctx(dev, data, file_priv));
		case DRM_IOCTL_RM_CTX:
			return (drm_rmctx(dev, data, file_priv));
		case DRM_IOCTL_ADD_BUFS:
			return (drm_addbufs(dev, (struct drm_buf_desc *)data));
		case DRM_IOCTL_CONTROL:
			return (drm_control(dev, data, file_priv));
		case DRM_IOCTL_AGP_ACQUIRE:
			return (drm_agp_acquire_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_RELEASE:
			return (drm_agp_release_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_ENABLE:
			return (drm_agp_enable_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_ALLOC:
			return (drm_agp_alloc_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_FREE:
			return (drm_agp_free_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_BIND:
			return (drm_agp_bind_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_UNBIND:
			return (drm_agp_unbind_ioctl(dev, data, file_priv));
		case DRM_IOCTL_SG_ALLOC:
			return (drm_sg_alloc_ioctl(dev, data, file_priv));
		case DRM_IOCTL_SG_FREE:
			return (drm_sg_free(dev, data, file_priv));
		case DRM_IOCTL_ADD_DRAW:
		case DRM_IOCTL_RM_DRAW:
		case DRM_IOCTL_UPDATE_DRAW:
			/*
			 * Support removed from kernel since it's not used.
			 * just return zero until userland stops calling this
			 * ioctl.
			 */
			return (0);
		case DRM_IOCTL_SET_UNIQUE:
			/*
			 * Deprecated in DRM version 1.1, and will return
			 * EBUSY when setversion has requested version 1.1
			 * or greater.
			 */
			return (EBUSY);
		}
	}
	if (dev->driver->ioctl != NULL)
		return (dev->driver->ioctl(dev, cmd, data, file_priv));
	else
		return (EINVAL);
}
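
/*
 * Userland reaches the dispatch above via ioctl(2) on /dev/drm*. A minimal
 * sketch, assuming the drm.h definitions are visible to userland (error
 * handling omitted):
 *
 *	int fd = open("/dev/drm0", O_RDWR);
 *	struct drm_version v;
 *	char name[64];
 *
 *	memset(&v, 0, sizeof(v));
 *	v.name = name;
 *	v.name_len = sizeof(name);
 *	if (ioctl(fd, DRM_IOCTL_VERSION, &v) == 0)
 *		printf("%.*s %d.%d.%d\n", (int)v.name_len, name,
 *		    v.version_major, v.version_minor, v.version_patchlevel);
 */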

int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int				 error = 0;

	if (dev == NULL)
		return (ENXIO);

	DRM_LOCK();
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	DRM_UNLOCK();
	if (file_priv == NULL)
		return (ENXIO);
	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic: if a whole event does not fit in
	 * the read buffer, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && TAILQ_EMPTY(&file_priv->evlist)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep(&file_priv->evlist, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", 0);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		ev->destroy(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}

/*
 * Dequeue an event from the file priv in question, returning 1 if an
 * event was found. We take the resid from the read as a parameter because
 * we will only dequeue an event if the read buffer has space to fit the
 * entire thing.
 *
 * We are called locked, but we will *unlock* the queue on return so that
 * we may sleep to copyout the event.
 */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event	*ev;
	int				 gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);
	if ((ev = TAILQ_FIRST(&file_priv->evlist)) == NULL ||
	    ev->event->length > resid)
		goto out;

	TAILQ_REMOVE(&file_priv->evlist, ev, link);
	file_priv->event_space += ev->event->length;
	*out = ev;
	gotone = 1;

out:
	mtx_leave(&dev->event_lock);

	return (gotone);
}

/* XXX kqfilter ... */
int
drmpoll(dev_t kdev, int events, struct proc *p)
{
	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
	struct drm_file		*file_priv;
	int			 revents = 0;

	if (dev == NULL)
		return (POLLERR);

	DRM_LOCK();
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	DRM_UNLOCK();
	if (file_priv == NULL)
		return (POLLERR);

	mtx_enter(&dev->event_lock);
	if (events & (POLLIN | POLLRDNORM)) {
		if (!TAILQ_EMPTY(&file_priv->evlist))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &file_priv->rsel);
	}
	mtx_leave(&dev->event_lock);

	return (revents);
}
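
/*
 * Together, drmpoll() and drmread() give userland the usual event-loop
 * shape for consuming vblank events. A sketch, assuming the generic
 * struct drm_event header from drm.h (error handling omitted):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[4096], *p;
 *	ssize_t n;
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		n = read(fd, buf, sizeof(buf));	// whole events only
 *		for (p = buf; p < buf + n;) {
 *			struct drm_event *ev = (struct drm_event *)p;
 *			// dispatch on ev->type, e.g. DRM_EVENT_VBLANK
 *			p += ev->length;
 *		}
 *	}
 */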

struct drm_local_map *
drm_getsarea(struct drm_device *dev)
{
	struct drm_local_map	*map;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
			break;
	}
	DRM_UNLOCK();
	return (map);
}

paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
	struct drm_local_map	*map;
	struct drm_file		*file_priv;
	enum drm_map_type	 type;

	if (dev == NULL)
		return (-1);

	DRM_LOCK();
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	DRM_UNLOCK();
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return (-1);
	}

	if (!file_priv->authenticated)
		return (-1);

	if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) {
		struct drm_device_dma *dma = dev->dma;
		paddr_t	phys = -1;

		rw_enter_write(&dma->dma_lock);
		if (dma->pagelist != NULL)
			phys = dma->pagelist[offset >> PAGE_SHIFT];
		rw_exit_write(&dma->dma_lock);

		return (phys);
	}

	/*
	 * A sequential search of a linked list is
	 * fine here because: 1) there will only be
	 * about 5-10 entries in the list and, 2) a
	 * DRI client only has to do this mapping
	 * once, so it doesn't have to be optimized
	 * for performance, even if the list was a
	 * bit longer.
	 */
	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (offset >= map->ext &&
		    offset < map->ext + map->size) {
			offset -= map->ext;
			break;
		}
	}

	if (map == NULL) {
		DRM_UNLOCK();
		DRM_DEBUG("can't find map\n");
		return (-1);
	}
	if ((map->flags & _DRM_RESTRICTED) && file_priv->master == 0) {
		DRM_UNLOCK();
		DRM_DEBUG("restricted map\n");
		return (-1);
	}
	type = map->type;
	DRM_UNLOCK();

	switch (type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
	case _DRM_AGP:
		return (offset + map->offset);
	/* XXX unify all the bus_dmamem_mmap bits */
	case _DRM_SCATTER_GATHER:
		return (bus_dmamem_mmap(dev->dmat, dev->sg->mem->segs,
		    dev->sg->mem->nsegs, map->offset - dev->sg->handle +
		    offset, prot, BUS_DMA_NOWAIT));
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		return (bus_dmamem_mmap(dev->dmat, map->dmamem->segs,
		    map->dmamem->nsegs, offset, prot, BUS_DMA_NOWAIT));
	default:
		DRM_ERROR("bad map type %d\n", type);
		return (-1);	/* This should never happen. */
	}
	/* NOTREACHED */
}

/*
 * Beginning in revision 1.1 of the DRM interface, getunique will return
 * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
 * before setunique has been called.  The format for the bus-specific part of
 * the unique is not defined for any other bus.
 */
int
drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_unique	*u = data;

	if (u->unique_len >= dev->unique_len) {
		if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
			return EFAULT;
	}
	u->unique_len = dev->unique_len;

	return 0;
}

#define DRM_IF_MAJOR	1
#define DRM_IF_MINOR	2

int
drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version	*version = data;
	int			 len;

#define DRM_COPY(name, value)						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( DRM_COPY_TO_USER( name, value, len ) )		\
			return EFAULT;					\
	}

	version->version_major = dev->driver->major;
	version->version_minor = dev->driver->minor;
	version->version_patchlevel = dev->driver->patchlevel;

	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}

int
drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_set_version	ver, *sv = data;
	int			 if_version;

	/* Save the incoming data, and set the response before continuing
	 * any further.
	 */
	ver = *sv;
	sv->drm_di_major = DRM_IF_MAJOR;
	sv->drm_di_minor = DRM_IF_MINOR;
	sv->drm_dd_major = dev->driver->major;
	sv->drm_dd_minor = dev->driver->minor;

	/*
	 * We no longer support interface versions less than 1.1, so error
	 * out if the xserver is too old. 1.1 always ties the drm to a
	 * certain busid, this was done on attach
	 */
	if (ver.drm_di_major != -1) {
		if (ver.drm_di_major != DRM_IF_MAJOR || ver.drm_di_minor < 1 ||
		    ver.drm_di_minor > DRM_IF_MINOR) {
			return EINVAL;
		}
		if_version = DRM_IF_VERSION(ver.drm_di_major, ver.drm_di_minor);
		dev->if_version = imax(if_version, dev->if_version);
	}

	if (ver.drm_dd_major != -1) {
		if (ver.drm_dd_major != dev->driver->major ||
		    ver.drm_dd_minor < 0 ||
		    ver.drm_dd_minor > dev->driver->minor)
			return EINVAL;
	}

	return 0;
}

struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM);

	return (NULL);
}

void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM);
}
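
/*
 * A sketch of how driver code would use the pair above for a small,
 * kernel-mapped DMA buffer (names hypothetical; one segment, 16k):
 *
 *	struct drm_dmamem *ring;
 *
 *	ring = drm_dmamem_alloc(dev->dmat, 16 * 1024, PAGE_SIZE, 1,
 *	    16 * 1024, 0, 0);
 *	if (ring == NULL)
 *		return (ENOMEM);
 *	// ring->kva is the kernel mapping; ring->map->dm_segs[0].ds_addr
 *	// is the bus address to program into the hardware.
 *	...
 *	drm_dmamem_free(dev->dmat, ring);
 */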

/**
 * Called by the client, this returns a unique magic number to be authorized
 * by the master.
 *
 * The master may use its own knowledge of the client (such as the X
 * connection that the magic is passed over) to determine if the magic number
 * should be authenticated.
 */
int
drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_auth		*auth = data;

	if (dev->magicid == 0)
		dev->magicid = 1;

	/* Find unique magic */
	if (file_priv->magic) {
		auth->magic = file_priv->magic;
	} else {
		DRM_LOCK();
		file_priv->magic = auth->magic = dev->magicid++;
		DRM_UNLOCK();
		DRM_DEBUG("%u\n", auth->magic);
	}

	DRM_DEBUG("%u\n", auth->magic);
	return (0);
}

/**
 * Marks the client associated with the given magic number as authenticated.
 */
int
drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_file	*p;
	struct drm_auth	*auth = data;
	int		 ret = EINVAL;

	DRM_DEBUG("%u\n", auth->magic);

	if (auth->magic == 0)
		return (ret);

	DRM_LOCK();
	SPLAY_FOREACH(p, drm_file_tree, &dev->files) {
		if (p->magic == auth->magic) {
			p->authenticated = 1;
			p->magic = 0;
			ret = 0;
			break;
		}
	}
	DRM_UNLOCK();

	return (ret);
}
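
/*
 * getmagic/authmagic implement the classic DRI authentication handshake:
 * a client fetches a magic on its own fd and passes it out of band
 * (historically over the X connection) to the master, which then marks
 * that client authenticated. Sketched from userland (send_to_master is a
 * hypothetical transport; error handling omitted):
 *
 *	struct drm_auth auth;
 *
 *	// client, on its own fd:
 *	ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth);
 *	send_to_master(auth.magic);
 *
 *	// master, on the master fd, with the magic it was sent:
 *	ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
 */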

struct uvm_pagerops drm_pgops = {
	NULL,
	drm_ref,
	drm_unref,
	drm_fault,
	drm_flush,
};

void
drm_hold_object_locked(struct drm_obj *obj)
{
	while (obj->do_flags & DRM_BUSY) {
		atomic_setbits_int(&obj->do_flags, DRM_WANTED);
		simple_unlock(&obj->uobj.vmobjlock);
#ifdef DRMLOCKDEBUG
		{
		int ret = 0;
		ret = tsleep(obj, PVM, "drm_hold", 3 * hz); /* XXX msleep */
		if (ret)
			printf("still waiting for obj %p, owned by %p\n",
			    obj, obj->holding_proc);
		}
#else
		tsleep(obj, PVM, "drm_hold", 0); /* XXX msleep */
#endif
		simple_lock(&obj->uobj.vmobjlock);
	}
#ifdef DRMLOCKDEBUG
	obj->holding_proc = curproc;
#endif
	atomic_setbits_int(&obj->do_flags, DRM_BUSY);
}

void
drm_hold_object(struct drm_obj *obj)
{
	simple_lock(&obj->uobj.vmobjlock);
	drm_hold_object_locked(obj);
	simple_unlock(&obj->uobj.vmobjlock);
}

int
drm_try_hold_object(struct drm_obj *obj)
{
	simple_lock(&obj->uobj.vmobjlock);
	/* if the object is free, grab it */
	if (obj->do_flags & (DRM_BUSY | DRM_WANTED)) {
		simple_unlock(&obj->uobj.vmobjlock);
		return (0);
	}
	atomic_setbits_int(&obj->do_flags, DRM_BUSY);
#ifdef DRMLOCKDEBUG
	obj->holding_proc = curproc;
#endif
	simple_unlock(&obj->uobj.vmobjlock);
	return (1);
}

void
drm_unhold_object_locked(struct drm_obj *obj)
{
	if (obj->do_flags & DRM_WANTED)
		wakeup(obj);
#ifdef DRMLOCKDEBUG
	obj->holding_proc = NULL;
#endif
	atomic_clearbits_int(&obj->do_flags, DRM_WANTED | DRM_BUSY);
}

void
drm_unhold_object(struct drm_obj *obj)
{
	simple_lock(&obj->uobj.vmobjlock);
	drm_unhold_object_locked(obj);
	simple_unlock(&obj->uobj.vmobjlock);
}

void
drm_ref_locked(struct uvm_object *uobj)
{
	uobj->uo_refs++;
}

void
drm_ref(struct uvm_object *uobj)
{
	simple_lock(&uobj->vmobjlock);
	drm_ref_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

void
drm_unref(struct uvm_object *uobj)
{
	simple_lock(&uobj->vmobjlock);
	drm_unref_locked(uobj);
}

void
drm_unref_locked(struct uvm_object *uobj)
{
	struct drm_obj		*obj = (struct drm_obj *)uobj;
	struct drm_device	*dev = obj->dev;

again:
	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	/* inlined version of drm_hold because we want to trylock then sleep */
	if (obj->do_flags & DRM_BUSY) {
		atomic_setbits_int(&obj->do_flags, DRM_WANTED);
		simple_unlock(&uobj->vmobjlock);
		tsleep(obj, PVM, "drm_unref", 0); /* XXX msleep */
		simple_lock(&uobj->vmobjlock);
		goto again;
	}
#ifdef DRMLOCKDEBUG
	obj->holding_proc = curproc;
#endif
	atomic_setbits_int(&obj->do_flags, DRM_BUSY);
	simple_unlock(&obj->uobj.vmobjlock);
	/* We own this thing now. It is on no queues, though it may still
	 * be bound to the aperture (and on the inactive list, in which case
	 * idling the buffer is what triggered the free). Since we know no one
	 * else can grab it now, we can nuke it with impunity.
	 */
	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);

	uao_detach(obj->uao);

	atomic_dec(&dev->obj_count);
	atomic_sub(obj->size, &dev->obj_memory);
	if (obj->do_flags & DRM_WANTED) /* should never happen, not on lists */
		wakeup(obj);
	pool_put(&dev->objpl, obj);
}

/*
 * convenience function to unreference and unhold an object.
 */
void
drm_unhold_and_unref(struct drm_obj *obj)
{
	drm_lock_obj(obj);
	drm_unhold_object_locked(obj);
	drm_unref_locked(&obj->uobj);
}

boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}

int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_obj *obj = (struct drm_obj *)uobj;
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return (VM_PAGER_ERROR);
	}

	/* Call down into driver to do the magic */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);
	return (ret);
}

/*
 * Code to support memory managers based on the GEM (Graphics
 * Execution Manager) api.
 */
struct drm_obj *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_obj	*obj;

	KASSERT((size & (PAGE_SIZE - 1)) == 0);

	if ((obj = pool_get(&dev->objpl, PR_WAITOK | PR_ZERO)) == NULL)
		return (NULL);

	obj->dev = dev;

	/* uao create can't fail in the 0 case, it just sleeps */
	obj->uao = uao_create(size, 0);
	obj->size = size;
	uvm_objinit(&obj->uobj, &drm_pgops, 1);

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		uao_detach(obj->uao);
		pool_put(&dev->objpl, obj);
		return (NULL);
	}
	atomic_inc(&dev->obj_count);
	atomic_add(obj->size, &dev->obj_memory);
	return (obj);
}

int
drm_handle_create(struct drm_file *file_priv, struct drm_obj *obj,
    int *handlep)
{
	struct drm_handle	*han;

	if ((han = drm_calloc(1, sizeof(*han))) == NULL)
		return (ENOMEM);

	han->obj = obj;
	mtx_enter(&file_priv->table_lock);
again:
	*handlep = han->handle = ++file_priv->obj_id;
	/*
	 * Make sure we have no duplicates. This will hurt once the counter
	 * wraps, since we probe linearly from there; handle 0 is reserved.
	 */
	if (han->handle == 0 || SPLAY_INSERT(drm_obj_tree,
	    &file_priv->obj_tree, han))
		goto again;
	mtx_leave(&file_priv->table_lock);

	drm_handle_ref(obj);
	return (0);
}

struct drm_obj *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *file_priv,
    int handle)
{
	struct drm_obj		*obj;
	struct drm_handle	*han, search;

	search.handle = handle;

	mtx_enter(&file_priv->table_lock);
	han = SPLAY_FIND(drm_obj_tree, &file_priv->obj_tree, &search);
	if (han == NULL) {
		mtx_leave(&file_priv->table_lock);
		return (NULL);
	}

	obj = han->obj;
	drm_ref(&obj->uobj);
	mtx_leave(&file_priv->table_lock);

	return (obj);
}
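
/*
 * Callers of the lookup above follow a reference-then-release pattern:
 * the returned object carries a reference that must be dropped when the
 * caller is done, as drm_gem_flink_ioctl() below does:
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return (EBADF);
 *	// ... operate on obj ...
 *	drm_unref(&obj->uobj);
 */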

int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_close	*args = data;
	struct drm_handle	*han, find;
	struct drm_obj		*obj;

	if ((dev->driver->flags & DRIVER_GEM) == 0)
		return (ENODEV);

	find.handle = args->handle;
	mtx_enter(&file_priv->table_lock);
	han = SPLAY_FIND(drm_obj_tree, &file_priv->obj_tree, &find);
	if (han == NULL) {
		mtx_leave(&file_priv->table_lock);
		return (EINVAL);
	}

	obj = han->obj;
	SPLAY_REMOVE(drm_obj_tree, &file_priv->obj_tree, han);
	mtx_leave(&file_priv->table_lock);

	drm_free(han);

	DRM_LOCK();
	drm_handle_unref(obj);
	DRM_UNLOCK();

	return (0);
}

int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_flink	*args = data;
	struct drm_obj		*obj;

	if (!(dev->driver->flags & DRIVER_GEM))
		return (ENODEV);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return (EBADF);

	mtx_enter(&dev->obj_name_lock);
	if (!obj->name) {
again:
		obj->name = ++dev->obj_name;
		/* 0 is reserved, make sure we don't clash. */
		if (obj->name == 0 || SPLAY_INSERT(drm_name_tree,
		    &dev->name_tree, obj))
			goto again;
		/* name holds a reference to the object */
		drm_ref(&obj->uobj);
	}
	mtx_leave(&dev->obj_name_lock);

	args->name = (uint64_t)obj->name;

	drm_unref(&obj->uobj);

	return (0);
}

int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_open	*args = data;
	struct drm_obj		*obj, search;
	int			 ret, handle;

	if (!(dev->driver->flags & DRIVER_GEM))
		return (ENODEV);

	search.name = args->name;
	mtx_enter(&dev->obj_name_lock);
	obj = SPLAY_FIND(drm_name_tree, &dev->name_tree, &search);
	if (obj != NULL)
		drm_ref(&obj->uobj);
	mtx_leave(&dev->obj_name_lock);
	if (obj == NULL)
		return (ENOENT);

	/* this gives our reference to the handle */
	ret = drm_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_unref(&obj->uobj);
		return (ret);
	}

	args->handle = handle;
	args->size = obj->size;

	return (0);
}

/*
 * grab a reference for a per-open handle.
 * The object contains a handlecount too because if all handles disappear we
 * need to also remove the global name (names are initially per-open unless
 * the flink ioctl is called).
 */
void
drm_handle_ref(struct drm_obj *obj)
{
	/* we are given the reference from the caller, so just
	 * crank handlecount.
	 */
	obj->handlecount++;
}

/*
 * Remove the reference owned by a per-open handle. If we're the last one,
 * remove the reference from flink, too.
 */
void
drm_handle_unref(struct drm_obj *obj)
{
	/* do this first in case this is the last reference */
	if (--obj->handlecount == 0) {
		struct drm_device	*dev = obj->dev;

		mtx_enter(&dev->obj_name_lock);
		if (obj->name) {
			SPLAY_REMOVE(drm_name_tree, &dev->name_tree, obj);
			obj->name = 0;
			mtx_leave(&dev->obj_name_lock);
			/* name held a reference to object */
			drm_unref(&obj->uobj);
		} else {
			mtx_leave(&dev->obj_name_lock);
		}
	}
	drm_unref(&obj->uobj);
}

/*
 * Helper function to load a uvm anonymous object into a dmamap, to be used
 * for binding to a translation-table style sg mechanism (e.g. agp, or intel
 * gtt).
 *
 * For now we ignore maxsegsz.
 */
int
drm_gem_load_uao(bus_dma_tag_t dmat, bus_dmamap_t map, struct uvm_object *uao,
    bus_size_t size, int flags, bus_dma_segment_t **segp)
{
	bus_dma_segment_t	*segs;
	struct vm_page		*pg;
	struct pglist		 plist;
	u_long			 npages = size >> PAGE_SHIFT, i = 0;
	int			 ret;

	TAILQ_INIT(&plist);

	/*
	 * This is really quite ugly, but nothing else would need
	 * bus_dmamap_load_uao() yet.
	 */
	segs = malloc(npages * sizeof(*segs), M_DRM,
	    M_WAITOK | M_CANFAIL | M_ZERO);
	if (segs == NULL)
		return (ENOMEM);

	/* This may sleep, no choice in the matter */
	if (uvm_objwire(uao, 0, size, &plist) != 0) {
		ret = ENOMEM;
		goto free;
	}

	TAILQ_FOREACH(pg, &plist, pageq) {
		paddr_t pa = VM_PAGE_TO_PHYS(pg);

		if (i > 0 && pa == (segs[i - 1].ds_addr +
		    segs[i - 1].ds_len)) {
			/* contiguous, yay */
			segs[i - 1].ds_len += PAGE_SIZE;
			continue;
		}
		segs[i].ds_addr = pa;
		segs[i].ds_len = PAGE_SIZE;
		if (i++ > npages)
			break;
	}
	/* this should be impossible */
	if (pg != TAILQ_END(&plist)) {
		ret = EINVAL;
		goto unwire;
	}

	if ((ret = bus_dmamap_load_raw(dmat, map, segs, i, size, flags)) != 0)
		goto unwire;

	*segp = segs;

	return (0);

unwire:
	uvm_objunwire(uao, 0, size);
free:
	free(segs, M_DRM);
	return (ret);
}
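
/*
 * The matching teardown for a map loaded this way mirrors the error path
 * above: unload the dmamap, unwire the pages, and free the segment array
 * that was handed back through segp (a sketch):
 *
 *	bus_dmamap_unload(dmat, map);
 *	uvm_objunwire(uao, 0, size);
 *	free(segs, M_DRM);
 */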

int
drm_handle_cmp(struct drm_handle *a, struct drm_handle *b)
{
	return (a->handle < b->handle ? -1 : a->handle > b->handle);
}

int
drm_name_cmp(struct drm_obj *a, struct drm_obj *b)
{
	return (a->name < b->name ? -1 : a->name > b->name);
}

SPLAY_GENERATE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);

SPLAY_GENERATE(drm_name_tree, drm_obj, entry, drm_name_cmp);