/* $OpenBSD: drm_bufs.c,v 1.49 2012/03/09 13:01:28 ariane Exp $ */
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

36 #include "sys/types.h"
37 #include "dev/pci/pcireg.h"
38 
39 #include "drmP.h"
40 
int	drm_addbufs_pci(struct drm_device *, struct drm_buf_desc *);
int	drm_addbufs_sg(struct drm_device *, struct drm_buf_desc *);
int	drm_addbufs_agp(struct drm_device *, struct drm_buf_desc *);

/*
 * Compute the order (log2, rounded up) of a size.  Could be made faster.
 */
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	if (size & ~(1 << order))
		++order;

	return order;
}
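
/*
 * Example values: drm_order(PAGE_SIZE) == PAGE_SHIFT, drm_order(1) == 0,
 * and drm_order(PAGE_SIZE + 1) == PAGE_SHIFT + 1, since sizes that are
 * not a power of two round up to the next order.
 */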
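/*
 * Find the map whose extent handle matches `offset' (the handle that
 * drm_addmap_ioctl() hands back to userland).  Returns NULL if no map
 * matches.
 */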
struct drm_local_map *
drm_core_findmap(struct drm_device *dev, unsigned long offset)
{
	struct drm_local_map	*map;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (offset == map->ext)
			break;
	}
	DRM_UNLOCK();
	return (map);
}

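/*
 * Create a new mapping of the requested type, or return an existing
 * kernel map that covers the same offset.  A new map gets a unique
 * extent handle and is linked onto dev->maplist; on success a pointer
 * to the map is stored in *map_ptr.
 */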
int
drm_addmap(struct drm_device *dev, unsigned long offset, unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags,
    struct drm_local_map **map_ptr)
{
	struct drm_local_map	*map;
	int			 align, ret = 0;
#if 0 /* disabled for now */
	struct drm_agp_mem	*entry;
	int			 valid;
#endif

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/*
	 * Check if this is just another version of a kernel-allocated map,
	 * and just hand that back if so.
	 */
	DRM_LOCK();
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = drm_calloc(1, sizeof(*map));
	if (map == NULL)
		return ENOMEM;

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	DRM_LOCK();
	ret = extent_alloc(dev->handle_ext, map->size, PAGE_SIZE, 0,
	    0, EX_NOWAIT, &map->ext);
	if (ret) {
		DRM_ERROR("can't find free offset\n");
		DRM_UNLOCK();
		drm_free(map);
		return (ret);
	}
	DRM_UNLOCK();

	switch (map->type) {
	case _DRM_REGISTERS:
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
#if 0 /* disabled for now */
		/*
		 * If AGP is under userspace control (as with some Intel
		 * drivers), this validation loop should be skipped.
		 */
		DRM_LOCK();
		TAILQ_FOREACH(entry, &dev->agp->memory, link) {
			DRM_DEBUG("bound = %p, pages = %p, %p\n",
			    entry->bound, entry->pages,
			    entry->pages * PAGE_SIZE);
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!TAILQ_EMPTY(&dev->agp->memory) && !valid) {
			DRM_UNLOCK();
			drm_free(map);
			DRM_ERROR("invalid agp map requested\n");
			return (EACCES);
		}
		DRM_UNLOCK();
#endif
		break;
	case _DRM_SCATTER_GATHER:
		if (dev->sg == NULL) {
			drm_free(map);
			return (EINVAL);
		}
		map->offset += dev->sg->handle;
		break;
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		/*
		 * Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess: try to align the bus
		 * address of the map to its size if possible, otherwise just
		 * assume PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmamem = drm_dmamem_alloc(dev->dmat, map->size, align,
		    1, map->size, 0, 0);
		if (map->dmamem == NULL) {
			drm_free(map);
			return (ENOMEM);
		}
		map->handle = map->dmamem->kva;
		map->offset = map->dmamem->map->dm_segs[0].ds_addr;
		if (map->type == _DRM_SHM && map->flags & _DRM_CONTAINS_LOCK) {
			DRM_LOCK();
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				drm_dmamem_free(dev->dmat, map->dmamem);
				drm_free(map);
				return (EBUSY);
			}
			dev->lock.hw_lock = map->handle;
			DRM_UNLOCK();
		}
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		drm_free(map);
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);
done:
	DRM_UNLOCK();

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}

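/*
 * ioctl wrapper around drm_addmap(); copies the resulting map details
 * back to userland, using the extent handle as the map handle.
 */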
int
drm_addmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_map		*request = data;
	struct drm_local_map	*map;
	int			 err;

	if (!(file_priv->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = (void *)map->ext;

	return 0;
}

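/*
 * Remove a map: wrapper around drm_rmmap_locked() that takes the
 * DRM lock.
 */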
void
drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	DRM_LOCK();
	drm_rmmap_locked(dev, map);
	DRM_UNLOCK();
}

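/*
 * Unlink a map from dev->maplist and release its type-specific
 * resources, its extent handle and the map itself.  Must be called
 * with the DRM lock held.
 */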
void
drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_AGP:
		/* FALLTHROUGH */
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_SHM:
		/* FALLTHROUGH */
	case _DRM_CONSISTENT:
		drm_dmamem_free(dev->dmat, map->dmamem);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	/* NOCOALESCE set, can't fail */
	extent_free(dev->handle_ext, map->ext, map->size, EX_NOWAIT);

	drm_free(map);
}

/* Remove a map from the map list and deallocate its resources if the
 * mapping isn't in use.
 */
int
drm_rmmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_local_map	*map;
	struct drm_map		*request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return (EINVAL);
	}

	drm_rmmap_locked(dev, map);

	DRM_UNLOCK();

	return 0;
}

/*
 * DMA buffers API.
 *
 * The implementation used to be significantly more complicated, but the
 * complexity has been moved into the drivers as different buffer management
 * schemes evolved.
 *
 * This API is going to die eventually.
 */

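/*
 * Allocate the DMA bookkeeping structure and initialize the lock
 * protecting it.
 */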
int
drm_dma_setup(struct drm_device *dev)
{
	dev->dma = drm_calloc(1, sizeof(*dev->dma));
	if (dev->dma == NULL)
		return (ENOMEM);

	rw_init(&dev->dma->dma_lock, "drmdma");

	return (0);
}

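/*
 * Free all DMA segments and buffer private data belonging to one
 * buffer entry (one allocation order).
 */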
void
drm_cleanup_buf(struct drm_device *dev, struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++)
			drm_dmamem_free(dev->dmat, entry->seglist[i]);
		drm_free(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++)
			drm_free(entry->buflist[i].dev_private);
		drm_free(entry->buflist);

		entry->buf_count = 0;
	}
}

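/*
 * Tear down everything set up by drm_dma_setup() and the drm_addbufs_*
 * functions.
 */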
void
drm_dma_takedown(struct drm_device *dev)
{
	struct drm_device_dma	*dma = dev->dma;
	int			 i;

	if (dma == NULL)
		return;

	/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++)
		drm_cleanup_buf(dev, &dma->bufs[i]);

	drm_free(dma->buflist);
	drm_free(dma->pagelist);
	drm_free(dev->dma);
	dev->dma = NULL;
}

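/*
 * Mark a buffer as idle and ownerless so that it can be handed out
 * again.
 */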
void
drm_free_buffer(struct drm_device *dev, struct drm_buf *buf)
{
	if (buf == NULL)
		return;

	buf->pending = 0;
	buf->file_priv = NULL;
	buf->used = 0;
}

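/*
 * Release all buffers still owned by file_priv, typically when the
 * file is closed.
 */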
void
drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_device_dma	*dma = dev->dma;
	int			 i;

	if (dma == NULL)
		return;
	for (i = 0; i < dma->buf_count; i++) {
		if (dma->buflist[i]->file_priv == file_priv)
			drm_free_buffer(dev, dma->buflist[i]);
	}
}

/* Call into the driver-specific DMA handler */
int
drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_dma		*d = data;
	int			 ret = 0;

	if (dev->driver->dma_ioctl == NULL) {
		DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
		return (EINVAL);
	}

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers. */
	if (d->send_count != 0) {
		DRM_ERROR("process trying to send %d buffers via drmDMA\n",
		    d->send_count);
		return (EINVAL);
	}

	/* We'll send you buffers. */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process trying to get %d buffers (of %d max)\n",
		    d->request_count, dma->buf_count);
		return (EINVAL);
	}
	d->granted_count = 0;

	if (d->request_count)
		ret = dev->driver->dma_ioctl(dev, d, file_priv);
	return (ret);
}

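/*
 * Allocate DMA buffers backed by the AGP aperture, starting at
 * request->agp_start bytes into the aperture.
 */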
int
drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_buf_entry	*entry;
	struct drm_buf		*buf, **temp_buflist;
	unsigned long		 agp_offset, offset;
	int			 alignment, count, order, page_order, size;
	int			 total, byte_count, i;
#if 0 /* disabled for now */
	struct drm_agp_mem	*agp_entry;
	int			 valid;
#endif

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	/* Make sure buffers are located in AGP memory that we own */

	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
#if 0 /* disabled for now */
	valid = 0;
	DRM_LOCK();
	TAILQ_FOREACH(agp_entry, &dev->agp->memory, link) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!TAILQ_EMPTY(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		DRM_UNLOCK();
		return (EINVAL);
	}
	DRM_UNLOCK();
#endif

	entry = &dma->bufs[order];

	entry->buflist = drm_calloc(count, sizeof(*entry->buflist));
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_private = drm_calloc(1, dev->driver->buf_priv_size);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	/* OpenBSD lacks realloc in the kernel; drm_realloc() allocates
	 * a new array and copies.
	 */
	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist));
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++)
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

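/*
 * Allocate DMA buffers from ordinary system memory.  Each segment is
 * allocated with drm_dmamem_alloc() and may hold several buffers; the
 * page list is only committed once every allocation has succeeded.
 */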
int
drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_buf		*buf, **temp_buflist;
	struct drm_buf_entry	*entry;
	int			 alignment, byte_count, count, i, order;
	int			 page_count, page_order, size, total;
	unsigned long		 offset, *temp_pagelist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = drm_calloc(count, sizeof(*entry->buflist));
	entry->seglist = drm_calloc(count, sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = drm_calloc((dma->page_count + (count << page_order)),
	    sizeof(*dma->pagelist));

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		drm_free(temp_pagelist);
		drm_free(entry->seglist);
		drm_free(entry->buflist);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		struct drm_dmamem *mem = drm_dmamem_alloc(dev->dmat, size,
		    alignment, 1, size, 0, 0);
		if (mem == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf(dev, entry);
			drm_free(temp_pagelist);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = mem;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n", dma->page_count +
			    page_count, mem->kva + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)mem->kva + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = mem->kva + offset;
			buf->bus_address = mem->map->dm_segs[0].ds_addr +
			    offset;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_private = drm_calloc(1,
			    dev->driver->buf_priv_size);
			if (buf->dev_private == NULL) {
				/* Set count so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf(dev, entry);
				drm_free(temp_pagelist);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d\n", entry->buf_count);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist));
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf(dev, entry);
		drm_free(temp_pagelist);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++)
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	drm_free(dma->pagelist);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

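/*
 * Allocate DMA buffers inside an existing scatter-gather area; only the
 * bookkeeping is set up here, the backing memory is the scatter-gather
 * region established beforehand.
 */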
int
drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_buf_entry	*entry;
	struct drm_buf		*buf, **temp_buflist;
	unsigned long		 agp_offset, offset;
	int			 alignment, byte_count, count, i, order;
	int			 page_order, size, total;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = drm_calloc(count, sizeof(*entry->buflist));
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_private = drm_calloc(1, dev->driver->buf_priv_size);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d\n", entry->buf_count);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist));
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++)
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

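/*
 * Common entry point for buffer allocation: validate the request and
 * dispatch to the AGP, SG or PCI allocator.  Allocation is refused once
 * the first buffer-using ioctl has been issued, and only one allocation
 * per order is allowed.
 */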
int
drm_addbufs(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma	*dma = dev->dma;
	int			 order, ret;

	if (request->count < 0 || request->count > 4096)
		return (EINVAL);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return (EINVAL);

	rw_enter_write(&dma->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dma->buf_use != 0) {
		rw_exit_write(&dma->dma_lock);
		return (EBUSY);
	}
	/* No more than one allocation per order */
	if (dma->bufs[order].buf_count != 0) {
		rw_exit_write(&dma->dma_lock);
		return (ENOMEM);
	}

	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	rw_exit_write(&dma->dma_lock);

	return (ret);
}

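/*
 * ioctl handler: release the buffers listed by userland, provided each
 * index is valid and the buffer is owned by the caller.
 */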
int
drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_buf_free	*request = data;
	struct drm_buf		*buf;
	int			 i, idx, retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	rw_enter_write(&dma->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n", idx,
			    dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	rw_exit_write(&dma->dma_lock);

	return retcode;
}

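/*
 * ioctl handler: map the DMA buffer pool into the calling process and
 * copy out the index, size and address of every buffer.  Marks the DMA
 * setup as in use, which prevents further allocations.
 */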
int
drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_buf_map	*request = data;
	struct vnode		*vn;
	vaddr_t			 address, vaddr;
	voff_t			 foff;
	vsize_t			 size;
	const int		 zero = 0;
	int			 i, retcode = 0;

	if (!vfinddev(file_priv->kdev, VCHR, &vn))
		return EINVAL;

	rw_enter_write(&dma->dma_lock);
	dev->dma->buf_use++;	/* Can't allocate more after this call */
	rw_exit_write(&dma->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((dev->driver->flags & DRIVER_AGP &&
	    (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (dev->driver->flags & DRIVER_SG &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		struct drm_local_map *map = dev->agp_buffer_map;

		if (map == NULL) {
			DRM_DEBUG("couldn't find agp buffer map\n");
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->ext;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = 0;
	retcode = uvm_mmap(&curproc->p_vmspace->vm_map, &vaddr, size,
	    UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
	    (caddr_t)vn, foff, curproc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur,
	    curproc);
	if (retcode) {
		DRM_DEBUG("uvm_mmap failed\n");
		goto done;
	}

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset;
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
1004