xref: /dragonfly/sys/dev/drm/drm_bufs.c (revision 9ddb8543)
1 /*-
2  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
3  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23  * OTHER DEALINGS IN THE SOFTWARE.
24  *
25  * Authors:
26  *    Rickard E. (Rik) Faith <faith@valinux.com>
27  *    Gareth Hughes <gareth@valinux.com>
28  *
29  */
30 
31 /** @file drm_bufs.c
32  * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
33  */
34 
35 #include "bus/pci/pcireg.h"
36 
37 #include "dev/drm/drmP.h"
38 
39 /* Allocation of PCI memory resources (framebuffer, registers, etc.) for
40  * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
41  * address for accessing them.  Cleaned up at unload.
42  */
43 static int drm_alloc_resource(struct drm_device *dev, int resource)
44 {
45 	if (resource >= DRM_MAX_PCI_RESOURCE) {
46 		DRM_ERROR("Resource %d too large\n", resource);
47 		return 1;
48 	}
49 
50 	DRM_UNLOCK();
51 	if (dev->pcir[resource] != NULL) {
52 		DRM_LOCK();
53 		return 0;
54 	}
55 
56 	dev->pcirid[resource] = PCIR_BAR(resource);
57 	dev->pcir[resource] = bus_alloc_resource_any(dev->device,
58 	    SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
59 	DRM_LOCK();
60 
61 	if (dev->pcir[resource] == NULL) {
62 		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
63 		return 1;
64 	}
65 
66 	return 0;
67 }
68 
69 unsigned long drm_get_resource_start(struct drm_device *dev,
70 				     unsigned int resource)
71 {
72 	if (drm_alloc_resource(dev, resource) != 0)
73 		return 0;
74 
75 	return rman_get_start(dev->pcir[resource]);
76 }
77 
78 unsigned long drm_get_resource_len(struct drm_device *dev,
79 				   unsigned int resource)
80 {
81 	if (drm_alloc_resource(dev, resource) != 0)
82 		return 0;
83 
84 	return rman_get_size(dev->pcir[resource]);
85 }
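/* Example (hypothetical values): for a card whose register window lives in
 * BAR 1 at physical address 0xfe000000 with a 512KB size,
 * drm_get_resource_start(dev, 1) returns 0xfe000000 and
 * drm_get_resource_len(dev, 1) returns 0x80000.
 */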
86 
87 int drm_addmap(struct drm_device * dev, unsigned long offset,
88 	       unsigned long size,
89     enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
90 {
91 	drm_local_map_t *map;
92 	int align;
93 	/*drm_agp_mem_t *entry;
94 	int valid;*/
95 
96 	/* Only allow shared memory to be removable since we only keep enough
97  * bookkeeping information about shared memory to allow for removal
98 	 * when processes fork.
99 	 */
100 	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
101 		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
102 		return EINVAL;
103 	}
104 	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
105 		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
106 		    offset, size);
107 		return EINVAL;
108 	}
109 	if (offset + size < offset) {
110 		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
111 		    offset, size);
112 		return EINVAL;
113 	}
114 
115 	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
116 	    size, type);
117 
118 	/* Check whether this is just another version of a kernel-allocated map,
119 	 * and hand that back if so.
120 	 */
121 	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
122 	    type == _DRM_SHM) {
123 		TAILQ_FOREACH(map, &dev->maplist, link) {
124 			if (map->type == type && (map->offset == offset ||
125 			    (map->type == _DRM_SHM &&
126 			    map->flags == _DRM_CONTAINS_LOCK))) {
127 				map->size = size;
128 				DRM_DEBUG("Found kernel map %d\n", type);
129 				goto done;
130 			}
131 		}
132 	}
133 	DRM_UNLOCK();
134 
135 	/* Allocate a new map structure, fill it in, and do any type-specific
136 	 * initialization necessary.
137 	 */
138 	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
139 	if (!map) {
140 		DRM_LOCK();
141 		return ENOMEM;
142 	}
143 
144 	map->offset = offset;
145 	map->size = size;
146 	map->type = type;
147 	map->flags = flags;
148 
149 	switch (map->type) {
150 	case _DRM_REGISTERS:
151 		map->handle = drm_ioremap(dev, map);
152 		if (!(map->flags & _DRM_WRITE_COMBINING))
153 			break;
154 		/* FALLTHROUGH */
155 	case _DRM_FRAME_BUFFER:
156 		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
157 			map->mtrr = 1;
158 		break;
159 	case _DRM_SHM:
160 		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
161 		DRM_DEBUG("%lu %d %p\n",
162 		    map->size, drm_order(map->size), map->handle);
163 		if (!map->handle) {
164 			free(map, DRM_MEM_MAPS);
165 			DRM_LOCK();
166 			return ENOMEM;
167 		}
168 		map->offset = (unsigned long)map->handle;
169 		if (map->flags & _DRM_CONTAINS_LOCK) {
170 			/* Prevent a 2nd X Server from creating a 2nd lock */
171 			DRM_LOCK();
172 			if (dev->lock.hw_lock != NULL) {
173 				DRM_UNLOCK();
174 				free(map->handle, DRM_MEM_MAPS);
175 				free(map, DRM_MEM_MAPS);
176 				return EBUSY;
177 			}
178 			dev->lock.hw_lock = map->handle; /* Pointer to lock */
179 			DRM_UNLOCK();
180 		}
181 		break;
182 	case _DRM_AGP:
183 		/*valid = 0;*/
184 		/* In some cases (i810 driver), user space may have already
185 		 * added the AGP base itself, because dev->agp->base previously
186 		 * only got set during AGP enable.  So, only add the base
187 		 * address if the map's offset isn't already within the
188 		 * aperture.
189 		 */
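		/* For instance (hypothetical numbers, assuming the aperture is
		 * large enough): with dev->agp->base at 0xd0000000, an offset
		 * of 0x100000 is rebased to 0xd0100000, while an offset that
		 * is already 0xd0100000 is left untouched.
		 */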
190 		if (map->offset < dev->agp->base ||
191 		    map->offset > dev->agp->base +
192 		    dev->agp->info.ai_aperture_size - 1) {
193 			map->offset += dev->agp->base;
194 		}
195 		map->mtrr   = dev->agp->mtrr; /* for getmap */
196 		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
197 			if ((map->offset >= entry->bound) &&
198 			    (map->offset + map->size <=
199 			    entry->bound + entry->pages * PAGE_SIZE)) {
200 				valid = 1;
201 				break;
202 			}
203 		}
204 		if (!valid) {
205 			free(map, DRM_MEM_MAPS);
206 			DRM_LOCK();
207 			return EACCES;
208 		}*/
209 		break;
210 	case _DRM_SCATTER_GATHER:
211 		if (!dev->sg) {
212 			free(map, DRM_MEM_MAPS);
213 			DRM_LOCK();
214 			return EINVAL;
215 		}
216 		map->offset += dev->sg->handle;
217 		break;
218 	case _DRM_CONSISTENT:
219 		/* Unfortunately, we don't get any alignment specification from
220 		 * the caller, so we have to guess.  drm_pci_alloc requires
221 		 * a power-of-two alignment, so try to align the bus address of
222 	 * the map to its size if possible, otherwise just assume
223 		 * PAGE_SIZE alignment.
224 		 */
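		/* For example: a 64KB map keeps a 64KB alignment (a power of
		 * two), while a 96KB map falls back to PAGE_SIZE alignment.
		 */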
225 		align = map->size;
226 		if ((align & (align - 1)) != 0)
227 			align = PAGE_SIZE;
228 		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
229 		if (map->dmah == NULL) {
230 			free(map, DRM_MEM_MAPS);
231 			DRM_LOCK();
232 			return ENOMEM;
233 		}
234 		map->handle = map->dmah->vaddr;
235 		map->offset = map->dmah->busaddr;
236 		break;
237 	default:
238 		DRM_ERROR("Bad map type %d\n", map->type);
239 		free(map, DRM_MEM_MAPS);
240 		DRM_LOCK();
241 		return EINVAL;
242 	}
243 
244 	DRM_LOCK();
245 	TAILQ_INSERT_TAIL(&dev->maplist, map, link);
246 
247 done:
248 	/* Jumped to, with lock held, when a kernel map is found. */
249 
250 	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
251 	    map->size);
252 
253 	*map_ptr = map;
254 
255 	return 0;
256 }
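/* Example (hypothetical sketch, not taken from any particular driver): a
 * driver's attach path, holding the DRM lock just as drm_addmap_ioctl()
 * below does, might register its MMIO BAR roughly like this:
 *
 *	drm_local_map_t *regs;
 *	int ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *	    drm_get_resource_len(dev, 0), _DRM_REGISTERS, 0, &regs);
 *
 * Here BAR 0 is only an assumption; a real driver picks the BAR that holds
 * its registers.
 */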
257 
258 int drm_addmap_ioctl(struct drm_device *dev, void *data,
259 		     struct drm_file *file_priv)
260 {
261 	struct drm_map *request = data;
262 	drm_local_map_t *map;
263 	int err;
264 
265 	if (!(dev->flags & (FREAD|FWRITE)))
266 		return EACCES; /* Require read/write */
267 
268 	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
269 		return EACCES;
270 
271 	DRM_LOCK();
272 	err = drm_addmap(dev, request->offset, request->size, request->type,
273 	    request->flags, &map);
274 	DRM_UNLOCK();
275 	if (err != 0)
276 		return err;
277 
278 	request->offset = map->offset;
279 	request->size = map->size;
280 	request->type = map->type;
281 	request->flags = map->flags;
282 	request->mtrr   = map->mtrr;
283 	request->handle = map->handle;
284 
285 	if (request->type != _DRM_SHM) {
286 		request->handle = (void *)request->offset;
287 	}
288 
289 	return 0;
290 }
291 
292 void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
293 {
294 	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
295 
296 	if (map == NULL)
297 		return;
298 
299 	TAILQ_REMOVE(&dev->maplist, map, link);
300 
301 	switch (map->type) {
302 	case _DRM_REGISTERS:
303 		if (map->bsr == NULL)
304 			drm_ioremapfree(map);
305 		/* FALLTHROUGH */
306 	case _DRM_FRAME_BUFFER:
307 		if (map->mtrr) {
308 			int __unused retcode;
309 
310 			retcode = drm_mtrr_del(0, map->offset, map->size,
311 			    DRM_MTRR_WC);
312 			DRM_DEBUG("mtrr_del = %d\n", retcode);
313 		}
314 		break;
315 	case _DRM_SHM:
316 		free(map->handle, DRM_MEM_MAPS);
317 		break;
318 	case _DRM_AGP:
319 	case _DRM_SCATTER_GATHER:
320 		break;
321 	case _DRM_CONSISTENT:
322 		drm_pci_free(dev, map->dmah);
323 		break;
324 	default:
325 		DRM_ERROR("Bad map type %d\n", map->type);
326 		break;
327 	}
328 
329 	if (map->bsr != NULL) {
330 		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
331 		    map->bsr);
332 	}
333 
334 	free(map, DRM_MEM_MAPS);
335 }
336 
337 /* Remove a map from the maplist and deallocate its resources if the
338  * mapping isn't in use.
339  */
340 
341 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
342 		    struct drm_file *file_priv)
343 {
344 	drm_local_map_t *map;
345 	struct drm_map *request = data;
346 
347 	DRM_LOCK();
348 	TAILQ_FOREACH(map, &dev->maplist, link) {
349 		if (map->handle == request->handle &&
350 		    map->flags & _DRM_REMOVABLE)
351 			break;
352 	}
353 
354 	/* No match found. */
355 	if (map == NULL) {
356 		DRM_UNLOCK();
357 		return EINVAL;
358 	}
359 
360 	drm_rmmap(dev, map);
361 
362 	DRM_UNLOCK();
363 
364 	return 0;
365 }
366 
367 
368 static void drm_cleanup_buf_error(struct drm_device *dev,
369 				  drm_buf_entry_t *entry)
370 {
371 	int i;
372 
373 	if (entry->seg_count) {
374 		for (i = 0; i < entry->seg_count; i++) {
375 			drm_pci_free(dev, entry->seglist[i]);
376 		}
377 		free(entry->seglist, DRM_MEM_SEGS);
378 
379 		entry->seg_count = 0;
380 	}
381 
382 	if (entry->buf_count) {
383 		for (i = 0; i < entry->buf_count; i++) {
384 			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
385 		}
386 		free(entry->buflist, DRM_MEM_BUFS);
387 
388 		entry->buf_count = 0;
389 	}
390 }
391 
392 static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
393 {
394 	drm_device_dma_t *dma = dev->dma;
395 	drm_buf_entry_t *entry;
396 	/*drm_agp_mem_t *agp_entry;
397 	int valid*/
398 	drm_buf_t *buf;
399 	unsigned long offset;
400 	unsigned long agp_offset;
401 	int count;
402 	int order;
403 	int size;
404 	int alignment;
405 	int page_order;
406 	int total;
407 	int byte_count;
408 	int i;
409 	drm_buf_t **temp_buflist;
410 
411 	count = request->count;
412 	order = drm_order(request->size);
413 	size = 1 << order;
414 
415 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
416 	    ? round_page(size) : size;
417 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
418 	total = PAGE_SIZE << page_order;
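	/* Worked example (assuming PAGE_SIZE == 4096 and PAGE_SHIFT == 12):
	 * a 16KB request gives order 14, page_order 2 and total 16384.
	 */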
419 
420 	byte_count = 0;
421 	agp_offset = dev->agp->base + request->agp_start;
422 
423 	DRM_DEBUG("count:      %d\n",  count);
424 	DRM_DEBUG("order:      %d\n",  order);
425 	DRM_DEBUG("size:       %d\n",  size);
426 	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
427 	DRM_DEBUG("alignment:  %d\n",  alignment);
428 	DRM_DEBUG("page_order: %d\n",  page_order);
429 	DRM_DEBUG("total:      %d\n",  total);
430 
431 	/* Make sure buffers are located in AGP memory that we own */
432 	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
433 	 * memory.  Safe to ignore for now because these ioctls are still
434 	 * root-only.
435 	 */
436 	/*valid = 0;
437 	for (agp_entry = dev->agp->memory; agp_entry;
438 	    agp_entry = agp_entry->next) {
439 		if ((agp_offset >= agp_entry->bound) &&
440 		    (agp_offset + total * count <=
441 		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
442 			valid = 1;
443 			break;
444 		}
445 	}
446 	if (!valid) {
447 		DRM_DEBUG("zone invalid\n");
448 		return EINVAL;
449 	}*/
450 
451 	entry = &dma->bufs[order];
452 
453 	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
454 	    M_NOWAIT | M_ZERO);
455 	if (!entry->buflist) {
456 		return ENOMEM;
457 	}
458 
459 	entry->buf_size = size;
460 	entry->page_order = page_order;
461 
462 	offset = 0;
463 
464 	while (entry->buf_count < count) {
465 		buf          = &entry->buflist[entry->buf_count];
466 		buf->idx     = dma->buf_count + entry->buf_count;
467 		buf->total   = alignment;
468 		buf->order   = order;
469 		buf->used    = 0;
470 
471 		buf->offset  = (dma->byte_count + offset);
472 		buf->bus_address = agp_offset + offset;
473 		buf->address = (void *)(agp_offset + offset);
474 		buf->next    = NULL;
475 		buf->pending = 0;
476 		buf->file_priv = NULL;
477 
478 		buf->dev_priv_size = dev->driver->buf_priv_size;
479 		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
480 		    M_NOWAIT | M_ZERO);
481 		if (buf->dev_private == NULL) {
482 			/* Set count correctly so we free the proper amount. */
483 			entry->buf_count = count;
484 			drm_cleanup_buf_error(dev, entry);
485 			return ENOMEM;
486 		}
487 
488 		offset += alignment;
489 		entry->buf_count++;
490 		byte_count += PAGE_SIZE << page_order;
491 	}
492 
493 	DRM_DEBUG("byte_count: %d\n", byte_count);
494 
495 	temp_buflist = realloc(dma->buflist,
496 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
497 	    DRM_MEM_BUFS, M_NOWAIT);
498 	if (temp_buflist == NULL) {
499 		/* Free the entry because it isn't valid */
500 		drm_cleanup_buf_error(dev, entry);
501 		return ENOMEM;
502 	}
503 	dma->buflist = temp_buflist;
504 
505 	for (i = 0; i < entry->buf_count; i++) {
506 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
507 	}
508 
509 	dma->buf_count += entry->buf_count;
510 	dma->byte_count += byte_count;
511 
512 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
513 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
514 
515 	request->count = entry->buf_count;
516 	request->size = size;
517 
518 	dma->flags = _DRM_DMA_USE_AGP;
519 
520 	return 0;
521 }
522 
523 static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
524 {
525 	drm_device_dma_t *dma = dev->dma;
526 	int count;
527 	int order;
528 	int size;
529 	int total;
530 	int page_order;
531 	drm_buf_entry_t *entry;
532 	drm_buf_t *buf;
533 	int alignment;
534 	unsigned long offset;
535 	int i;
536 	int byte_count;
537 	int page_count;
538 	unsigned long *temp_pagelist;
539 	drm_buf_t **temp_buflist;
540 
541 	count = request->count;
542 	order = drm_order(request->size);
543 	size = 1 << order;
544 
545 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
546 	    request->count, request->size, size, order);
547 
548 	alignment = (request->flags & _DRM_PAGE_ALIGN)
549 	    ? round_page(size) : size;
550 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
551 	total = PAGE_SIZE << page_order;
552 
553 	entry = &dma->bufs[order];
554 
555 	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
556 	    M_NOWAIT | M_ZERO);
557 	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
558 	    M_NOWAIT | M_ZERO);
559 
560 	/* Keep the original pagelist until we know all the allocations
561 	 * have succeeded
562 	 */
563 	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
564 	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);
565 
566 	if (entry->buflist == NULL || entry->seglist == NULL ||
567 	    temp_pagelist == NULL) {
568 		free(temp_pagelist, DRM_MEM_PAGES);
569 		free(entry->seglist, DRM_MEM_SEGS);
570 		free(entry->buflist, DRM_MEM_BUFS);
571 		return ENOMEM;
572 	}
573 
574 	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
575 	    sizeof(*dma->pagelist));
576 
577 	DRM_DEBUG("pagelist: %d entries\n",
578 	    dma->page_count + (count << page_order));
579 
580 	entry->buf_size	= size;
581 	entry->page_order = page_order;
582 	byte_count = 0;
583 	page_count = 0;
584 
585 	while (entry->buf_count < count) {
586 		DRM_SPINUNLOCK(&dev->dma_lock);
587 		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
588 		    0xfffffffful);
589 		DRM_SPINLOCK(&dev->dma_lock);
590 		if (dmah == NULL) {
591 			/* Set count correctly so we free the proper amount. */
592 			entry->buf_count = count;
593 			entry->seg_count = count;
594 			drm_cleanup_buf_error(dev, entry);
595 			free(temp_pagelist, DRM_MEM_PAGES);
596 			return ENOMEM;
597 		}
598 
599 		entry->seglist[entry->seg_count++] = dmah;
600 		for (i = 0; i < (1 << page_order); i++) {
601 			DRM_DEBUG("page %d @ %p\n",
602 			    dma->page_count + page_count,
603 			    (char *)dmah->vaddr + PAGE_SIZE * i);
604 			temp_pagelist[dma->page_count + page_count++] =
605 			    (long)dmah->vaddr + PAGE_SIZE * i;
606 		}
607 		for (offset = 0;
608 		    offset + size <= total && entry->buf_count < count;
609 		    offset += alignment, ++entry->buf_count) {
610 			buf	     = &entry->buflist[entry->buf_count];
611 			buf->idx     = dma->buf_count + entry->buf_count;
612 			buf->total   = alignment;
613 			buf->order   = order;
614 			buf->used    = 0;
615 			buf->offset  = (dma->byte_count + byte_count + offset);
616 			buf->address = ((char *)dmah->vaddr + offset);
617 			buf->bus_address = dmah->busaddr + offset;
618 			buf->next    = NULL;
619 			buf->pending = 0;
620 			buf->file_priv = NULL;
621 
622 			buf->dev_priv_size = dev->driver->buf_priv_size;
623 			buf->dev_private = malloc(buf->dev_priv_size,
624 			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
625 			if (buf->dev_private == NULL) {
626 				/* Set count correctly so we free the proper amount. */
627 				entry->buf_count = count;
628 				entry->seg_count = count;
629 				drm_cleanup_buf_error(dev, entry);
630 				free(temp_pagelist, DRM_MEM_PAGES);
631 				return ENOMEM;
632 			}
633 
634 			DRM_DEBUG("buffer %d @ %p\n",
635 			    entry->buf_count, buf->address);
636 		}
637 		byte_count += PAGE_SIZE << page_order;
638 	}
639 
640 	temp_buflist = realloc(dma->buflist,
641 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
642 	    DRM_MEM_BUFS, M_NOWAIT);
643 	if (temp_buflist == NULL) {
644 		/* Free the entry because it isn't valid */
645 		drm_cleanup_buf_error(dev, entry);
646 		free(temp_pagelist, DRM_MEM_PAGES);
647 		return ENOMEM;
648 	}
649 	dma->buflist = temp_buflist;
650 
651 	for (i = 0; i < entry->buf_count; i++) {
652 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
653 	}
654 
655 	/* No allocations failed, so now we can replace the original pagelist
656 	 * with the new one.
657 	 */
658 	free(dma->pagelist, DRM_MEM_PAGES);
659 	dma->pagelist = temp_pagelist;
660 
661 	dma->buf_count += entry->buf_count;
662 	dma->seg_count += entry->seg_count;
663 	dma->page_count += entry->seg_count << page_order;
664 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
665 
666 	request->count = entry->buf_count;
667 	request->size = size;
668 
669 	return 0;
670 
671 }
672 
673 static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
674 {
675 	drm_device_dma_t *dma = dev->dma;
676 	drm_buf_entry_t *entry;
677 	drm_buf_t *buf;
678 	unsigned long offset;
679 	unsigned long agp_offset;
680 	int count;
681 	int order;
682 	int size;
683 	int alignment;
684 	int page_order;
685 	int total;
686 	int byte_count;
687 	int i;
688 	drm_buf_t **temp_buflist;
689 
690 	count = request->count;
691 	order = drm_order(request->size);
692 	size = 1 << order;
693 
694 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
695 	    ? round_page(size) : size;
696 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
697 	total = PAGE_SIZE << page_order;
698 
699 	byte_count = 0;
700 	agp_offset = request->agp_start;
701 
702 	DRM_DEBUG("count:      %d\n",  count);
703 	DRM_DEBUG("order:      %d\n",  order);
704 	DRM_DEBUG("size:       %d\n",  size);
705 	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
706 	DRM_DEBUG("alignment:  %d\n",  alignment);
707 	DRM_DEBUG("page_order: %d\n",  page_order);
708 	DRM_DEBUG("total:      %d\n",  total);
709 
710 	entry = &dma->bufs[order];
711 
712 	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
713 	    M_NOWAIT | M_ZERO);
714 	if (entry->buflist == NULL)
715 		return ENOMEM;
716 
717 	entry->buf_size = size;
718 	entry->page_order = page_order;
719 
720 	offset = 0;
721 
722 	while (entry->buf_count < count) {
723 		buf          = &entry->buflist[entry->buf_count];
724 		buf->idx     = dma->buf_count + entry->buf_count;
725 		buf->total   = alignment;
726 		buf->order   = order;
727 		buf->used    = 0;
728 
729 		buf->offset  = (dma->byte_count + offset);
730 		buf->bus_address = agp_offset + offset;
731 		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
732 		buf->next    = NULL;
733 		buf->pending = 0;
734 		buf->file_priv = NULL;
735 
736 		buf->dev_priv_size = dev->driver->buf_priv_size;
737 		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
738 		    M_NOWAIT | M_ZERO);
739 		if (buf->dev_private == NULL) {
740 			/* Set count correctly so we free the proper amount. */
741 			entry->buf_count = count;
742 			drm_cleanup_buf_error(dev, entry);
743 			return ENOMEM;
744 		}
745 
746 		DRM_DEBUG("buffer %d @ %p\n",
747 		    entry->buf_count, buf->address);
748 
749 		offset += alignment;
750 		entry->buf_count++;
751 		byte_count += PAGE_SIZE << page_order;
752 	}
753 
754 	DRM_DEBUG("byte_count: %d\n", byte_count);
755 
756 	temp_buflist = realloc(dma->buflist,
757 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
758 	    DRM_MEM_BUFS, M_NOWAIT);
759 	if (temp_buflist == NULL) {
760 		/* Free the entry because it isn't valid */
761 		drm_cleanup_buf_error(dev, entry);
762 		return ENOMEM;
763 	}
764 	dma->buflist = temp_buflist;
765 
766 	for (i = 0; i < entry->buf_count; i++) {
767 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
768 	}
769 
770 	dma->buf_count += entry->buf_count;
771 	dma->byte_count += byte_count;
772 
773 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
774 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
775 
776 	request->count = entry->buf_count;
777 	request->size = size;
778 
779 	dma->flags = _DRM_DMA_USE_SG;
780 
781 	return 0;
782 }
783 
784 int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
785 {
786 	int order, ret;
787 
788 	if (request->count < 0 || request->count > 4096)
789 		return EINVAL;
790 
791 	order = drm_order(request->size);
792 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
793 		return EINVAL;
794 
795 	DRM_SPINLOCK(&dev->dma_lock);
796 
797 	/* No more allocations after first buffer-using ioctl. */
798 	if (dev->buf_use != 0) {
799 		DRM_SPINUNLOCK(&dev->dma_lock);
800 		return EBUSY;
801 	}
802 	/* No more than one allocation per order */
803 	if (dev->dma->bufs[order].buf_count != 0) {
804 		DRM_SPINUNLOCK(&dev->dma_lock);
805 		return ENOMEM;
806 	}
807 
808 	ret = drm_do_addbufs_agp(dev, request);
809 
810 	DRM_SPINUNLOCK(&dev->dma_lock);
811 
812 	return ret;
813 }
814 
815 int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
816 {
817 	int order, ret;
818 
819 	if (!DRM_SUSER(DRM_CURPROC))
820 		return EACCES;
821 
822 	if (request->count < 0 || request->count > 4096)
823 		return EINVAL;
824 
825 	order = drm_order(request->size);
826 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
827 		return EINVAL;
828 
829 	DRM_SPINLOCK(&dev->dma_lock);
830 
831 	/* No more allocations after first buffer-using ioctl. */
832 	if (dev->buf_use != 0) {
833 		DRM_SPINUNLOCK(&dev->dma_lock);
834 		return EBUSY;
835 	}
836 	/* No more than one allocation per order */
837 	if (dev->dma->bufs[order].buf_count != 0) {
838 		DRM_SPINUNLOCK(&dev->dma_lock);
839 		return ENOMEM;
840 	}
841 
842 	ret = drm_do_addbufs_sg(dev, request);
843 
844 	DRM_SPINUNLOCK(&dev->dma_lock);
845 
846 	return ret;
847 }
848 
849 int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
850 {
851 	int order, ret;
852 
853 	if (!DRM_SUSER(DRM_CURPROC))
854 		return EACCES;
855 
856 	if (request->count < 0 || request->count > 4096)
857 		return EINVAL;
858 
859 	order = drm_order(request->size);
860 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
861 		return EINVAL;
862 
863 	DRM_SPINLOCK(&dev->dma_lock);
864 
865 	/* No more allocations after first buffer-using ioctl. */
866 	if (dev->buf_use != 0) {
867 		DRM_SPINUNLOCK(&dev->dma_lock);
868 		return EBUSY;
869 	}
870 	/* No more than one allocation per order */
871 	if (dev->dma->bufs[order].buf_count != 0) {
872 		DRM_SPINUNLOCK(&dev->dma_lock);
873 		return ENOMEM;
874 	}
875 
876 	ret = drm_do_addbufs_pci(dev, request);
877 
878 	DRM_SPINUNLOCK(&dev->dma_lock);
879 
880 	return ret;
881 }
882 
883 int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
884 {
885 	struct drm_buf_desc *request = data;
886 	int err;
887 
888 	if (request->flags & _DRM_AGP_BUFFER)
889 		err = drm_addbufs_agp(dev, request);
890 	else if (request->flags & _DRM_SG_BUFFER)
891 		err = drm_addbufs_sg(dev, request);
892 	else
893 		err = drm_addbufs_pci(dev, request);
894 
895 	return err;
896 }
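/* Example (hypothetical user-space caller, error handling omitted): the
 * ADD_BUFS ioctl round-trips a struct drm_buf_desc:
 *
 *	struct drm_buf_desc desc = { 0 };
 *	desc.count = 32;
 *	desc.size  = 65536;
 *	desc.flags = _DRM_AGP_BUFFER;
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *
 * On return, desc.count and desc.size hold what was actually allocated.
 */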
897 
898 int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
899 {
900 	drm_device_dma_t *dma = dev->dma;
901 	struct drm_buf_info *request = data;
902 	int i;
903 	int count;
904 	int retcode = 0;
905 
906 	DRM_SPINLOCK(&dev->dma_lock);
907 	++dev->buf_use;		/* Can't allocate more after this call */
908 	DRM_SPINUNLOCK(&dev->dma_lock);
909 
910 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
911 		if (dma->bufs[i].buf_count)
912 			++count;
913 	}
914 
915 	DRM_DEBUG("count = %d\n", count);
916 
917 	if (request->count >= count) {
918 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
919 			if (dma->bufs[i].buf_count) {
920 				struct drm_buf_desc from;
921 
922 				from.count = dma->bufs[i].buf_count;
923 				from.size = dma->bufs[i].buf_size;
924 				from.low_mark = dma->bufs[i].freelist.low_mark;
925 				from.high_mark = dma->bufs[i].freelist.high_mark;
926 
927 				if (DRM_COPY_TO_USER(&request->list[count], &from,
928 				    sizeof(struct drm_buf_desc)) != 0) {
929 					retcode = EFAULT;
930 					break;
931 				}
932 
933 				DRM_DEBUG("%d %d %d %d %d\n",
934 				    i, dma->bufs[i].buf_count,
935 				    dma->bufs[i].buf_size,
936 				    dma->bufs[i].freelist.low_mark,
937 				    dma->bufs[i].freelist.high_mark);
938 				++count;
939 			}
940 		}
941 	}
942 	request->count = count;
943 
944 	return retcode;
945 }
946 
947 int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
948 {
949 	drm_device_dma_t *dma = dev->dma;
950 	struct drm_buf_desc *request = data;
951 	int order;
952 
953 	DRM_DEBUG("%d, %d, %d\n",
954 		  request->size, request->low_mark, request->high_mark);
955 
956 
957 	order = drm_order(request->size);
958 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
959 	    request->low_mark < 0 || request->high_mark < 0) {
960 		return EINVAL;
961 	}
962 
963 	DRM_SPINLOCK(&dev->dma_lock);
964 	if (request->low_mark > dma->bufs[order].buf_count ||
965 	    request->high_mark > dma->bufs[order].buf_count) {
966 		DRM_SPINUNLOCK(&dev->dma_lock);
967 		return EINVAL;
968 	}
969 
970 	dma->bufs[order].freelist.low_mark  = request->low_mark;
971 	dma->bufs[order].freelist.high_mark = request->high_mark;
972 	DRM_SPINUNLOCK(&dev->dma_lock);
973 
974 	return 0;
975 }
976 
977 int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
978 {
979 	drm_device_dma_t *dma = dev->dma;
980 	struct drm_buf_free *request = data;
981 	int i;
982 	int idx;
983 	drm_buf_t *buf;
984 	int retcode = 0;
985 
986 	DRM_DEBUG("%d\n", request->count);
987 
988 	DRM_SPINLOCK(&dev->dma_lock);
989 	for (i = 0; i < request->count; i++) {
990 		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
991 			retcode = EFAULT;
992 			break;
993 		}
994 		if (idx < 0 || idx >= dma->buf_count) {
995 			DRM_ERROR("Index %d (of %d max)\n",
996 			    idx, dma->buf_count - 1);
997 			retcode = EINVAL;
998 			break;
999 		}
1000 		buf = dma->buflist[idx];
1001 		if (buf->file_priv != file_priv) {
1002 			DRM_ERROR("Process %d freeing buffer not owned\n",
1003 			    DRM_CURRENTPID);
1004 			retcode = EINVAL;
1005 			break;
1006 		}
1007 		drm_free_buffer(dev, buf);
1008 	}
1009 	DRM_SPINUNLOCK(&dev->dma_lock);
1010 
1011 	return retcode;
1012 }
1013 
1014 int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
1015 {
1016 	drm_device_dma_t *dma = dev->dma;
1017 	int retcode = 0;
1018 	const int zero = 0;
1019 	vm_offset_t address;
1020 	struct vmspace *vms;
1021 	vm_ooffset_t foff;
1022 	vm_size_t size;
1023 	vm_offset_t vaddr;
1024 	struct drm_buf_map *request = data;
1025 	int i;
1026 
1027 	vms = DRM_CURPROC->td_proc->p_vmspace;
1028 
1029 	DRM_SPINLOCK(&dev->dma_lock);
1030 	dev->buf_use++;		/* Can't allocate more after this call */
1031 	DRM_SPINUNLOCK(&dev->dma_lock);
1032 
1033 	if (request->count < dma->buf_count)
1034 		goto done;
1035 
1036 	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
1037 	    (drm_core_check_feature(dev, DRIVER_SG) &&
1038 	    (dma->flags & _DRM_DMA_USE_SG))) {
1039 		drm_local_map_t *map = dev->agp_buffer_map;
1040 
1041 		if (map == NULL) {
1042 			retcode = EINVAL;
1043 			goto done;
1044 		}
1045 		size = round_page(map->size);
1046 		foff = map->offset;
1047 	} else {
1048 		size = round_page(dma->byte_count);
1049 		foff = 0;
1050 	}
1051 
1052 	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1053 #if __FreeBSD_version >= 600023
1054 	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1055 	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
1056 	    dev->devnode, foff);
1057 #else
1058 	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1059 	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
1060 	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
1061 #endif
1062 	if (retcode)
1063 		goto done;
1064 
1065 	request->virtual = (void *)vaddr;
1066 
1067 	for (i = 0; i < dma->buf_count; i++) {
1068 		if (DRM_COPY_TO_USER(&request->list[i].idx,
1069 		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
1070 			retcode = EFAULT;
1071 			goto done;
1072 		}
1073 		if (DRM_COPY_TO_USER(&request->list[i].total,
1074 		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
1075 			retcode = EFAULT;
1076 			goto done;
1077 		}
1078 		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
1079 		    sizeof(zero))) {
1080 			retcode = EFAULT;
1081 			goto done;
1082 		}
1083 		address = vaddr + dma->buflist[i]->offset; /* *** */
1084 		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
1085 		    sizeof(address))) {
1086 			retcode = EFAULT;
1087 			goto done;
1088 		}
1089 	}
1090 
1091  done:
1092 	request->count = dma->buf_count;
1093 
1094 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1095 
1096 	return retcode;
1097 }
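/* Example (hypothetical user-space caller): after adding buffers, a client
 * typically maps them all with a single MAP_BUFS call:
 *
 *	struct drm_buf_pub list[32];
 *	struct drm_buf_map bmap = { 0 };
 *	bmap.count = 32;
 *	bmap.list  = list;
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &bmap);
 *
 * bmap.virtual then points at the shared mapping and each list[i].address
 * gives that buffer's address within it; count must cover dma->buf_count
 * or the kernel only reports the total without mapping anything.
 */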
1098 
1099 /*
1100  * Compute the order of an allocation size (log2, rounded up).  Can be made faster.
1101  */
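/* For example, drm_order(4096) == 12, drm_order(4097) == 13, and
 * drm_order(0) == 0.
 */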
1102 int drm_order(unsigned long size)
1103 {
1104 	int order;
1105 
1106 	if (size == 0)
1107 		return 0;
1108 
1109 	order = flsl(size) - 1;
1110 	if (size & ~(1ul << order))
1111 		++order;
1112 
1113 	return order;
1114 }
1115