/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: src/sys/dev/drm2/drm_bufs.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

#include <sys/conf.h>
#include <bus/pci/pcireg.h>
#include <linux/types.h>
#include <linux/export.h>
#include <drm/drmP.h>

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	struct resource *res;
	int rid;

	DRM_LOCK_ASSERT(dev);

	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	if (dev->pcir[resource] != NULL) {
		return 0;
	}

	DRM_UNLOCK(dev);
	rid = PCIR_BAR(resource);
	res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
	    RF_SHAREABLE);
	DRM_LOCK(dev);
	if (res == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	if (dev->pcir[resource] == NULL) {
		dev->pcirid[resource] = rid;
		dev->pcir[resource] = res;
	}

	return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}

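/*
 * Usage sketch (illustrative only, not taken from a specific driver): an
 * attach path would typically look up a BAR with these helpers before
 * mapping it; BAR index 0 is an assumption here.
 *
 *	base = drm_get_resource_start(dev, 0);
 *	size = drm_get_resource_len(dev, 0);
 *	if (size == 0)
 *		return ENXIO;
 *
 * Both helpers return 0 when drm_alloc_resource() fails, so a zero length is
 * the only error indication the caller gets.
 */
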
int drm_addmap(struct drm_device * dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
	struct drm_local_map *map;
	struct drm_map_list *entry = NULL;
	int align;

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = kmalloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (!map) {
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;
	map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
	    DRM_MAP_HANDLE_SHIFT);

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		drm_free(map, DRM_MEM_MAPS);
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
		    (uintmax_t)offset, size);
		drm_free(map, DRM_MEM_MAPS);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
		    (uintmax_t)offset, size);
		drm_free(map, DRM_MEM_MAPS);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		list_for_each_entry(entry, &dev->maplist, head) {
			if (entry->map->type == type && (entry->map->offset == offset ||
			    (entry->map->type == _DRM_SHM &&
			    entry->map->flags == _DRM_CONTAINS_LOCK))) {
				entry->map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		map->virtual = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->virtual = kmalloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->virtual);
		if (!map->virtual) {
			drm_free(map, DRM_MEM_MAPS);
			return ENOMEM;
		}
		map->offset = (unsigned long)map->virtual;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK(dev);
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK(dev);
				drm_free(map->virtual, DRM_MEM_MAPS);
				drm_free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->virtual; /* Pointer to lock */
			DRM_UNLOCK(dev);
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			drm_free(map, DRM_MEM_MAPS);
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, DRM_MEM_MAPS);
			return EINVAL;
		}
		map->virtual = (void *)(uintptr_t)(dev->sg->vaddr + offset);
		map->offset = dev->sg->vaddr + offset;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			drm_free(map, DRM_MEM_MAPS);
			return ENOMEM;
		}
		map->virtual = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		drm_free(map, DRM_MEM_MAPS);
		return EINVAL;
	}

	list_add(&entry->head, &dev->maplist);

done:
	/* Jumped to, with lock held, when a kernel map is found. */

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}

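/*
 * Illustrative call (values and flags are assumptions, not taken from a
 * particular driver): mapping a register BAR found with the helpers above
 * would look roughly like
 *
 *	ret = drm_addmap(dev, base, size, _DRM_REGISTERS, 0, &map);
 *
 * Note that this function reports failure as a positive errno value.
 */
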
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK(dev);
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK(dev);
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr   = map->mtrr;
	request->handle = (void *)map->handle;

	return 0;
}

void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	int found = 0;

	DRM_LOCK_ASSERT(dev);

	if (map == NULL)
		return;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			list_del(&r_list->head);
			drm_free(r_list, DRM_MEM_DRIVER);
			found = 1;
			break;
		}
	}

	if (!found)
		return;

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		drm_free(map->virtual, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->dev, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	DRM_UNLOCK(dev);
	if (map->handle)
		free_unr(dev->map_unrhdr, (unsigned long)map->handle >>
		    DRM_MAP_HANDLE_SHIFT);
	DRM_LOCK(dev);

	drm_free(map, DRM_MEM_MAPS);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;

	DRM_LOCK(dev);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		DRM_UNLOCK(dev);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		DRM_UNLOCK(dev);
		return 0;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK(dev);

	return 0;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		drm_free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			drm_free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		drm_free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
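	/*
	 * Worked example (assuming PAGE_SIZE is 4096 and PAGE_SHIFT is 12):
	 * request->size = 65536 gives order = 16 and size = 65536; with
	 * _DRM_PAGE_ALIGN set, alignment = 65536, page_order = 4 and
	 * total = 4096 << 4 = 65536, i.e. one 16-page allocation per buffer.
	 */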

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = kmalloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		drm_free(temp_pagelist, DRM_MEM_PAGES);
		drm_free(entry->seglist, DRM_MEM_SEGS);
		drm_free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size	= size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		spin_unlock(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		spin_lock(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf          = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next    = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = kmalloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	drm_free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}

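/*
 * Userspace view (sketch; the field values are illustrative): a
 * DRM_IOCTL_ADD_BUFS caller fills in a struct drm_buf_desc such as
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,
 *	};
 *
 * and gets the number and size of the buffers actually created back in
 * desc.count and desc.size.
 */
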
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	spin_lock(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}

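/*
 * Typical two-pass usage from userland (a sketch, not a verbatim libdrm
 * quote): call once with request->count = 0 to learn how many drm_buf_desc
 * entries are needed, allocate request->list accordingly, then call again to
 * have the descriptors copied out.
 */
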
/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	spin_lock(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		spin_unlock(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark  = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	spin_unlock(&dev->dma_lock);

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	spin_lock(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	spin_unlock(&dev->dma_lock);

	return retcode;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
	        struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	spin_lock(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = (unsigned long)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
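
/*
 * Examples: drm_order(1) == 0, drm_order(3) == 2, drm_order(4096) == 12 and
 * drm_order(4097) == 13.
 */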