1 /**
2  * \file drm_bufs.c
3  * Generic buffer template
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8 
9 /*
10  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11  *
12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  *
35  * $FreeBSD: src/sys/dev/drm2/drm_bufs.c,v 1.1 2012/05/22 11:07:44 kib Exp $
36  */
37 
38 #include <sys/conf.h>
39 #include <bus/pci/pcireg.h>
40 #include <linux/types.h>
41 #include <linux/export.h>
42 #include <drm/drmP.h>
43 
44 /* Allocation of PCI memory resources (framebuffer, registers, etc.) for
45  * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
46  * address for accessing them.  Cleaned up at unload.
47  */
48 static int drm_alloc_resource(struct drm_device *dev, int resource)
49 {
50 	struct resource *res;
51 	int rid;
52 
53 	DRM_LOCK_ASSERT(dev);
54 
55 	if (resource >= DRM_MAX_PCI_RESOURCE) {
56 		DRM_ERROR("Resource %d too large\n", resource);
57 		return 1;
58 	}
59 
60 	if (dev->pcir[resource] != NULL) {
61 		return 0;
62 	}
63 
64 	DRM_UNLOCK(dev);
65 	rid = PCIR_BAR(resource);
66 	res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
67 	    RF_SHAREABLE);
68 	DRM_LOCK(dev);
69 	if (res == NULL) {
70 		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
71 		return 1;
72 	}
73 
74 	if (dev->pcir[resource] == NULL) {
75 		dev->pcirid[resource] = rid;
76 		dev->pcir[resource] = res;
77 	}
78 
79 	return 0;
80 }
81 
82 unsigned long drm_get_resource_start(struct drm_device *dev,
83 				     unsigned int resource)
84 {
85 	if (drm_alloc_resource(dev, resource) != 0)
86 		return 0;
87 
88 	return rman_get_start(dev->pcir[resource]);
89 }
90 
91 unsigned long drm_get_resource_len(struct drm_device *dev,
92 				   unsigned int resource)
93 {
94 	if (drm_alloc_resource(dev, resource) != 0)
95 		return 0;
96 
97 	return rman_get_size(dev->pcir[resource]);
98 }
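
/*
 * Editorial sketch (not part of the original source): how a hypothetical
 * attach path might use the two helpers above to locate a register BAR.
 * The BAR index and the example_ function name are assumptions made for
 * illustration only.
 */
#if 0	/* illustrative only */
static int example_locate_registers(struct drm_device *dev)
{
	unsigned long base, size;

	DRM_LOCK(dev);
	base = drm_get_resource_start(dev, 0);	/* BAR 0 */
	size = drm_get_resource_len(dev, 0);
	DRM_UNLOCK(dev);

	if (base == 0 || size == 0)
		return ENXIO;	/* BAR missing or allocation failed */

	DRM_DEBUG("registers at 0x%lx, %lu bytes\n", base, size);
	return 0;
}
#endif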
99 
100 int drm_addmap(struct drm_device * dev, resource_size_t offset,
101 	       unsigned int size, enum drm_map_type type,
102 	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
103 {
104 	struct drm_local_map *map;
105 	struct drm_map_list *entry = NULL;
106 	drm_dma_handle_t *dmah;
107 	int align;
108 
109 	/* Allocate a new map structure, fill it in, and do any type-specific
110 	 * initialization necessary.
111 	 */
112 	map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_WAITOK | M_NULLOK);
113 	if (!map) {
114 		return ENOMEM;
115 	}
116 
117 	map->offset = offset;
118 	map->size = size;
119 	map->type = type;
120 	map->flags = flags;
121 
122 	/* Only allow shared memory to be removable since we only keep enough
123  * bookkeeping information about shared memory to allow for removal
124 	 * when processes fork.
125 	 */
126 	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
127 		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
128 		drm_free(map, M_DRM);
129 		return EINVAL;
130 	}
131 	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
132 		DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
133 		    (uintmax_t)offset, size);
134 		drm_free(map, M_DRM);
135 		return EINVAL;
136 	}
137 	if (offset + size < offset) {
138 		DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
139 		    (uintmax_t)offset, size);
140 		drm_free(map, M_DRM);
141 		return EINVAL;
142 	}
143 
144 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
145 		  (unsigned long long)map->offset, map->size, map->type);
146 
147 	/* Check if this is just another version of a kernel-allocated map, and
148 	 * just hand that back if so.
149 	 */
150 	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
151 	    type == _DRM_SHM) {
152 		list_for_each_entry(entry, &dev->maplist, head) {
153 			if (entry->map->type == type && (entry->map->offset == offset ||
154 			    (entry->map->type == _DRM_SHM &&
155 			    entry->map->flags == _DRM_CONTAINS_LOCK))) {
156 				entry->map->size = size;
157 				DRM_DEBUG("Found kernel map %d\n", type);
158 				goto done;
159 			}
160 		}
161 	}
162 
163 	switch (map->type) {
164 	case _DRM_REGISTERS:
165 		map->handle = drm_ioremap(dev, map);
166 		if (!(map->flags & _DRM_WRITE_COMBINING))
167 			break;
168 		/* FALLTHROUGH */
169 	case _DRM_FRAME_BUFFER:
170 		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
171 			map->mtrr = 1;
172 		break;
173 	case _DRM_SHM:
174 		map->handle = kmalloc(map->size, M_DRM, M_WAITOK | M_NULLOK);
175 		DRM_DEBUG("%lu %d %p\n",
176 		    map->size, drm_order(map->size), map->handle);
177 		if (!map->handle) {
178 			drm_free(map, M_DRM);
179 			return ENOMEM;
180 		}
181 		map->offset = (unsigned long)map->handle;
182 		if (map->flags & _DRM_CONTAINS_LOCK) {
183 			/* Prevent a 2nd X Server from creating a 2nd lock */
184 			DRM_LOCK(dev);
185 			if (dev->lock.hw_lock != NULL) {
186 				DRM_UNLOCK(dev);
187 				drm_free(map->handle, M_DRM);
188 				drm_free(map, M_DRM);
189 				return EBUSY;
190 			}
191 			dev->lock.hw_lock = map->handle; /* Pointer to lock */
192 			DRM_UNLOCK(dev);
193 		}
194 		break;
195 	case _DRM_AGP:
196 		/*valid = 0;*/
197 		/* In some cases (i810 driver), user space may have already
198 		 * added the AGP base itself, because dev->agp->base previously
199 		 * only got set during AGP enable.  So, only add the base
200 		 * address if the map's offset isn't already within the
201 		 * aperture.
202 		 */
203 		if (map->offset < dev->agp->base ||
204 		    map->offset > dev->agp->base +
205 		    dev->agp->agp_info.ai_aperture_size - 1) {
206 			map->offset += dev->agp->base;
207 		}
208 		map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
209 		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
210 			if ((map->offset >= entry->bound) &&
211 			    (map->offset + map->size <=
212 			    entry->bound + entry->pages * PAGE_SIZE)) {
213 				valid = 1;
214 				break;
215 			}
216 		}
217 		if (!valid) {
218 			drm_free(map, M_DRM);
219 			return EACCES;
220 		}*/
221 		break;
222 	case _DRM_SCATTER_GATHER:
223 		if (!dev->sg) {
224 			drm_free(map, M_DRM);
225 			return EINVAL;
226 		}
227 		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
228 		map->offset = dev->sg->vaddr + offset;
229 		break;
230 	case _DRM_CONSISTENT:
231 		/* Unfortunately, we don't get any alignment specification from
232 		 * the caller, so we have to guess.  drm_pci_alloc requires
233 		 * a power-of-two alignment, so try to align the bus address of
234 		 * the map to its size if possible, otherwise just assume
235 		 * PAGE_SIZE alignment.
236 		 */
237 		align = map->size;
238 		if ((align & (align - 1)) != 0)
239 			align = PAGE_SIZE;
240 		dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
241 		if (!dmah) {
242 			drm_free(map, M_DRM);
243 			return ENOMEM;
244 		}
245 		map->handle = dmah->vaddr;
246 		map->offset = (unsigned long)dmah->busaddr;
247 		break;
248 	default:
249 		DRM_ERROR("Bad map type %d\n", map->type);
250 		drm_free(map, M_DRM);
251 		return EINVAL;
252 	}
253 
254 	list_add(&entry->head, &dev->maplist);
255 
256 done:
257 	/* Jumped to, with lock held, when a kernel map is found. */
258 
259 	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
260 	    map->size);
261 
262 	*map_ptr = map;
263 
264 	return 0;
265 }
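
/*
 * Editorial sketch (assumption, not from the original driver code): a
 * typical in-kernel caller publishing a register BAR through drm_addmap().
 * The surrounding function and its arguments are hypothetical; note that
 * both offset and size must be page aligned or drm_addmap() fails with
 * EINVAL.
 */
#if 0	/* illustrative only */
static int example_add_register_map(struct drm_device *dev,
				    unsigned long base, unsigned long size)
{
	struct drm_local_map *map;
	int ret;

	DRM_LOCK(dev);
	ret = drm_addmap(dev, base, size, _DRM_REGISTERS, 0, &map);
	DRM_UNLOCK(dev);
	return ret;
}
#endif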
266 
267 /**
268  * Ioctl to specify a range of memory that is available for mapping by a
269  * non-root process.
270  *
271  * \param inode device inode.
272  * \param file_priv DRM file private.
273  * \param cmd command.
274  * \param arg pointer to a drm_map structure.
275  * \return zero on success or a negative value on error.
276  *
277  */
278 int drm_addmap_ioctl(struct drm_device *dev, void *data,
279 		     struct drm_file *file_priv)
280 {
281 	struct drm_map *request = data;
282 	drm_local_map_t *map;
283 	int err;
284 
285 	if (!(dev->flags & (FREAD|FWRITE)))
286 		return EACCES; /* Require read/write */
287 
288 	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
289 		return EACCES;
290 
291 	DRM_LOCK(dev);
292 	err = drm_addmap(dev, request->offset, request->size, request->type,
293 	    request->flags, &map);
294 	DRM_UNLOCK(dev);
295 	if (err != 0)
296 		return err;
297 
298 	request->offset = map->offset;
299 	request->size = map->size;
300 	request->type = map->type;
301 	request->flags = map->flags;
302 	request->mtrr   = map->mtrr;
303 	request->handle = (void *)map->handle;
304 
305 	return 0;
306 }
307 
308 void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
309 {
310 	struct drm_map_list *r_list = NULL, *list_t;
311 	drm_dma_handle_t dmah;
312 	int found = 0;
313 
314 	DRM_LOCK_ASSERT(dev);
315 
316 	if (map == NULL)
317 		return;
318 
319 	/* Find the list entry for the map and remove it */
320 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
321 		if (r_list->map == map) {
322 			list_del(&r_list->head);
323 			drm_free(r_list, M_DRM);
324 			found = 1;
325 			break;
326 		}
327 	}
328 
329 	if (!found)
330 		return;
331 
332 	switch (map->type) {
333 	case _DRM_REGISTERS:
334 		drm_ioremapfree(map);
335 		/* FALLTHROUGH */
336 	case _DRM_FRAME_BUFFER:
337 		if (map->mtrr) {
338 			int __unused retcode;
339 
340 			retcode = drm_mtrr_del(0, map->offset, map->size,
341 			    DRM_MTRR_WC);
342 			DRM_DEBUG("mtrr_del = %d\n", retcode);
343 		}
344 		break;
345 	case _DRM_SHM:
346 		drm_free(map->handle, M_DRM);
347 		break;
348 	case _DRM_AGP:
349 	case _DRM_SCATTER_GATHER:
350 		break;
351 	case _DRM_CONSISTENT:
352 		dmah.vaddr = map->handle;
353 		dmah.busaddr = map->offset;
354 		drm_pci_free(dev, &dmah);
355 		break;
356 	default:
357 		DRM_ERROR("Bad map type %d\n", map->type);
358 		break;
359 	}
360 
361 	drm_free(map, M_DRM);
362 }
363 
364 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
365  * the last close of the device, and this is necessary for cleanup when things
366  * exit uncleanly.  Therefore, having userland manually remove mappings seems
367  * like a pointless exercise since they're going away anyway.
368  *
369  * One use case might be after addmap is allowed for normal users for SHM and
370  * gets used by drivers that the server doesn't need to care about.  This seems
371  * unlikely.
372  *
373  * \param inode device inode.
374  * \param file_priv DRM file private.
375  * \param cmd command.
376  * \param arg pointer to a struct drm_map structure.
377  * \return zero on success or a negative value on error.
378  */
379 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
380 		    struct drm_file *file_priv)
381 {
382 	struct drm_map *request = data;
383 	struct drm_local_map *map = NULL;
384 	struct drm_map_list *r_list;
385 
386 	DRM_LOCK(dev);
387 	list_for_each_entry(r_list, &dev->maplist, head) {
388 		if (r_list->map &&
389 		    r_list->user_token == (unsigned long)request->handle &&
390 		    r_list->map->flags & _DRM_REMOVABLE) {
391 			map = r_list->map;
392 			break;
393 		}
394 	}
395 
396 	/* The list has wrapped around to the head pointer, or it's empty and we
397 	 * didn't find anything.
398 	 */
399 	if (list_empty(&dev->maplist) || !map) {
400 		DRM_UNLOCK(dev);
401 		return -EINVAL;
402 	}
403 
404 	/* Register and framebuffer maps are permanent */
405 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
406 		DRM_UNLOCK(dev);
407 		return 0;
408 	}
409 
410 	drm_rmmap(dev, map);
411 
412 	DRM_UNLOCK(dev);
413 
414 	return 0;
415 }
416 
417 /**
418  * Cleanup after an error on one of the addbufs() functions.
419  *
420  * \param dev DRM device.
421  * \param entry buffer entry where the error occurred.
422  *
423  * Frees any pages and buffers associated with the given entry.
424  */
425 static void drm_cleanup_buf_error(struct drm_device * dev,
426 				  struct drm_buf_entry * entry)
427 {
428 	int i;
429 
430 	if (entry->seg_count) {
431 		for (i = 0; i < entry->seg_count; i++) {
432 			drm_pci_free(dev, entry->seglist[i]);
433 		}
434 		drm_free(entry->seglist, M_DRM);
435 
436 		entry->seg_count = 0;
437 	}
438 
439 	if (entry->buf_count) {
440 		for (i = 0; i < entry->buf_count; i++) {
441 			drm_free(entry->buflist[i].dev_private, M_DRM);
442 		}
443 		drm_free(entry->buflist, M_DRM);
444 
445 		entry->buf_count = 0;
446 	}
447 }
448 
449 static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
450 {
451 	drm_device_dma_t *dma = dev->dma;
452 	drm_buf_entry_t *entry;
453 	/*drm_agp_mem_t *agp_entry;
454 	int valid*/
455 	drm_buf_t *buf;
456 	unsigned long offset;
457 	unsigned long agp_offset;
458 	int count;
459 	int order;
460 	int size;
461 	int alignment;
462 	int page_order;
463 	int total;
464 	int byte_count;
465 	int i;
466 	drm_buf_t **temp_buflist;
467 
468 	count = request->count;
469 	order = drm_order(request->size);
470 	size = 1 << order;
471 
472 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
473 	    ? round_page(size) : size;
474 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
475 	total = PAGE_SIZE << page_order;
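
	/*
	 * Worked example (editorial note, assuming 4 KiB pages): a request
	 * size of 12288 bytes gives order = 14 because 1 << 14 = 16384 is
	 * the smallest power of two that fits, so size = 16384,
	 * page_order = 14 - PAGE_SHIFT = 2, and total = PAGE_SIZE << 2 =
	 * 16384, i.e. each buffer spans four pages.
	 */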
476 
477 	byte_count = 0;
478 	agp_offset = dev->agp->base + request->agp_start;
479 
480 	DRM_DEBUG("count:      %d\n",  count);
481 	DRM_DEBUG("order:      %d\n",  order);
482 	DRM_DEBUG("size:       %d\n",  size);
483 	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
484 	DRM_DEBUG("alignment:  %d\n",  alignment);
485 	DRM_DEBUG("page_order: %d\n",  page_order);
486 	DRM_DEBUG("total:      %d\n",  total);
487 
488 	/* Make sure buffers are located in AGP memory that we own */
489 	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
490 	 * memory.  Safe to ignore for now because these ioctls are still
491 	 * root-only.
492 	 */
493 	/*valid = 0;
494 	for (agp_entry = dev->agp->memory; agp_entry;
495 	    agp_entry = agp_entry->next) {
496 		if ((agp_offset >= agp_entry->bound) &&
497 		    (agp_offset + total * count <=
498 		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
499 			valid = 1;
500 			break;
501 		}
502 	}
503 	if (!valid) {
504 		DRM_DEBUG("zone invalid\n");
505 		return EINVAL;
506 	}*/
507 
508 	entry = &dma->bufs[order];
509 
510 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
511 				 M_WAITOK | M_NULLOK | M_ZERO);
512 	if (!entry->buflist) {
513 		return ENOMEM;
514 	}
515 
516 	entry->buf_size = size;
517 	entry->page_order = page_order;
518 
519 	offset = 0;
520 
521 	while (entry->buf_count < count) {
522 		buf          = &entry->buflist[entry->buf_count];
523 		buf->idx     = dma->buf_count + entry->buf_count;
524 		buf->total   = alignment;
525 		buf->order   = order;
526 		buf->used    = 0;
527 
528 		buf->offset  = (dma->byte_count + offset);
529 		buf->bus_address = agp_offset + offset;
530 		buf->address = (void *)(agp_offset + offset);
531 		buf->next    = NULL;
532 		buf->pending = 0;
533 		buf->file_priv = NULL;
534 
535 		buf->dev_priv_size = dev->driver->buf_priv_size;
536 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
537 					   M_WAITOK | M_NULLOK | M_ZERO);
538 		if (buf->dev_private == NULL) {
539 			/* Set count correctly so we free the proper amount. */
540 			entry->buf_count = count;
541 			drm_cleanup_buf_error(dev, entry);
542 			return ENOMEM;
543 		}
544 
545 		offset += alignment;
546 		entry->buf_count++;
547 		byte_count += PAGE_SIZE << page_order;
548 	}
549 
550 	DRM_DEBUG("byte_count: %d\n", byte_count);
551 
552 	temp_buflist = krealloc(dma->buflist,
553 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
554 	    M_DRM, M_WAITOK | M_NULLOK);
555 	if (temp_buflist == NULL) {
556 		/* Free the entry because it isn't valid */
557 		drm_cleanup_buf_error(dev, entry);
558 		return ENOMEM;
559 	}
560 	dma->buflist = temp_buflist;
561 
562 	for (i = 0; i < entry->buf_count; i++) {
563 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
564 	}
565 
566 	dma->buf_count += entry->buf_count;
567 	dma->byte_count += byte_count;
568 
569 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
570 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
571 
572 	request->count = entry->buf_count;
573 	request->size = size;
574 
575 	dma->flags = _DRM_DMA_USE_AGP;
576 
577 	return 0;
578 }
579 
580 static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
581 {
582 	drm_device_dma_t *dma = dev->dma;
583 	int count;
584 	int order;
585 	int size;
586 	int total;
587 	int page_order;
588 	drm_buf_entry_t *entry;
589 	drm_buf_t *buf;
590 	int alignment;
591 	unsigned long offset;
592 	int i;
593 	int byte_count;
594 	int page_count;
595 	unsigned long *temp_pagelist;
596 	drm_buf_t **temp_buflist;
597 
598 	count = request->count;
599 	order = drm_order(request->size);
600 	size = 1 << order;
601 
602 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
603 	    request->count, request->size, size, order);
604 
605 	alignment = (request->flags & _DRM_PAGE_ALIGN)
606 	    ? round_page(size) : size;
607 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
608 	total = PAGE_SIZE << page_order;
609 
610 	entry = &dma->bufs[order];
611 
612 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
613 				 M_WAITOK | M_NULLOK | M_ZERO);
614 	entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
615 				 M_WAITOK | M_NULLOK | M_ZERO);
616 
617 	/* Keep the original pagelist until we know all the allocations
618 	 * have succeeded
619 	 */
620 	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
621 				sizeof(*dma->pagelist),
622 				M_DRM, M_WAITOK | M_NULLOK);
623 
624 	if (entry->buflist == NULL || entry->seglist == NULL ||
625 	    temp_pagelist == NULL) {
626 		drm_free(temp_pagelist, M_DRM);
627 		drm_free(entry->seglist, M_DRM);
628 		drm_free(entry->buflist, M_DRM);
629 		return ENOMEM;
630 	}
631 
632 	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
633 	    sizeof(*dma->pagelist));
634 
635 	DRM_DEBUG("pagelist: %d entries\n",
636 	    dma->page_count + (count << page_order));
637 
638 	entry->buf_size	= size;
639 	entry->page_order = page_order;
640 	byte_count = 0;
641 	page_count = 0;
642 
643 	while (entry->buf_count < count) {
644 		spin_unlock(&dev->dma_lock);
645 		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
646 		    0xfffffffful);
647 		spin_lock(&dev->dma_lock);
648 		if (dmah == NULL) {
649 			/* Set count correctly so we free the proper amount. */
650 			entry->buf_count = count;
651 			entry->seg_count = count;
652 			drm_cleanup_buf_error(dev, entry);
653 			drm_free(temp_pagelist, M_DRM);
654 			return ENOMEM;
655 		}
656 
657 		entry->seglist[entry->seg_count++] = dmah;
658 		for (i = 0; i < (1 << page_order); i++) {
659 			DRM_DEBUG("page %d @ %p\n",
660 			    dma->page_count + page_count,
661 			    (char *)dmah->vaddr + PAGE_SIZE * i);
662 			temp_pagelist[dma->page_count + page_count++] =
663 			    (long)dmah->vaddr + PAGE_SIZE * i;
664 		}
665 		for (offset = 0;
666 		    offset + size <= total && entry->buf_count < count;
667 		    offset += alignment, ++entry->buf_count) {
668 			buf	     = &entry->buflist[entry->buf_count];
669 			buf->idx     = dma->buf_count + entry->buf_count;
670 			buf->total   = alignment;
671 			buf->order   = order;
672 			buf->used    = 0;
673 			buf->offset  = (dma->byte_count + byte_count + offset);
674 			buf->address = ((char *)dmah->vaddr + offset);
675 			buf->bus_address = dmah->busaddr + offset;
676 			buf->next    = NULL;
677 			buf->pending = 0;
678 			buf->file_priv = NULL;
679 
680 			buf->dev_priv_size = dev->driver->buf_priv_size;
681 			buf->dev_private = kmalloc(buf->dev_priv_size,
682 						   M_DRM,
683 						   M_WAITOK | M_NULLOK |
684 						    M_ZERO);
685 			if (buf->dev_private == NULL) {
686 				/* Set count correctly so we free the proper amount. */
687 				entry->buf_count = count;
688 				entry->seg_count = count;
689 				drm_cleanup_buf_error(dev, entry);
690 				drm_free(temp_pagelist, M_DRM);
691 				return ENOMEM;
692 			}
693 
694 			DRM_DEBUG("buffer %d @ %p\n",
695 			    entry->buf_count, buf->address);
696 		}
697 		byte_count += PAGE_SIZE << page_order;
698 	}
699 
700 	temp_buflist = krealloc(dma->buflist,
701 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
702 	    M_DRM, M_WAITOK | M_NULLOK);
703 	if (temp_buflist == NULL) {
704 		/* Free the entry because it isn't valid */
705 		drm_cleanup_buf_error(dev, entry);
706 		drm_free(temp_pagelist, M_DRM);
707 		return ENOMEM;
708 	}
709 	dma->buflist = temp_buflist;
710 
711 	for (i = 0; i < entry->buf_count; i++) {
712 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
713 	}
714 
715 	/* No allocations failed, so now we can replace the original pagelist
716 	 * with the new one.
717 	 */
718 	drm_free(dma->pagelist, M_DRM);
719 	dma->pagelist = temp_pagelist;
720 
721 	dma->buf_count += entry->buf_count;
722 	dma->seg_count += entry->seg_count;
723 	dma->page_count += entry->seg_count << page_order;
724 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
725 
726 	request->count = entry->buf_count;
727 	request->size = size;
728 
729 	return 0;
730 
731 }
732 
733 static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
734 {
735 	drm_device_dma_t *dma = dev->dma;
736 	drm_buf_entry_t *entry;
737 	drm_buf_t *buf;
738 	unsigned long offset;
739 	unsigned long agp_offset;
740 	int count;
741 	int order;
742 	int size;
743 	int alignment;
744 	int page_order;
745 	int total;
746 	int byte_count;
747 	int i;
748 	drm_buf_t **temp_buflist;
749 
750 	count = request->count;
751 	order = drm_order(request->size);
752 	size = 1 << order;
753 
754 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
755 	    ? round_page(size) : size;
756 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
757 	total = PAGE_SIZE << page_order;
758 
759 	byte_count = 0;
760 	agp_offset = request->agp_start;
761 
762 	DRM_DEBUG("count:      %d\n",  count);
763 	DRM_DEBUG("order:      %d\n",  order);
764 	DRM_DEBUG("size:       %d\n",  size);
765 	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
766 	DRM_DEBUG("alignment:  %d\n",  alignment);
767 	DRM_DEBUG("page_order: %d\n",  page_order);
768 	DRM_DEBUG("total:      %d\n",  total);
769 
770 	entry = &dma->bufs[order];
771 
772 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
773 				 M_WAITOK | M_NULLOK | M_ZERO);
774 	if (entry->buflist == NULL)
775 		return ENOMEM;
776 
777 	entry->buf_size = size;
778 	entry->page_order = page_order;
779 
780 	offset = 0;
781 
782 	while (entry->buf_count < count) {
783 		buf          = &entry->buflist[entry->buf_count];
784 		buf->idx     = dma->buf_count + entry->buf_count;
785 		buf->total   = alignment;
786 		buf->order   = order;
787 		buf->used    = 0;
788 
789 		buf->offset  = (dma->byte_count + offset);
790 		buf->bus_address = agp_offset + offset;
791 		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
792 		buf->next    = NULL;
793 		buf->pending = 0;
794 		buf->file_priv = NULL;
795 
796 		buf->dev_priv_size = dev->driver->buf_priv_size;
797 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
798 					   M_WAITOK | M_NULLOK | M_ZERO);
799 		if (buf->dev_private == NULL) {
800 			/* Set count correctly so we free the proper amount. */
801 			entry->buf_count = count;
802 			drm_cleanup_buf_error(dev, entry);
803 			return ENOMEM;
804 		}
805 
806 		DRM_DEBUG("buffer %d @ %p\n",
807 		    entry->buf_count, buf->address);
808 
809 		offset += alignment;
810 		entry->buf_count++;
811 		byte_count += PAGE_SIZE << page_order;
812 	}
813 
814 	DRM_DEBUG("byte_count: %d\n", byte_count);
815 
816 	temp_buflist = krealloc(dma->buflist,
817 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
818 	    M_DRM, M_WAITOK | M_NULLOK);
819 	if (temp_buflist == NULL) {
820 		/* Free the entry because it isn't valid */
821 		drm_cleanup_buf_error(dev, entry);
822 		return ENOMEM;
823 	}
824 	dma->buflist = temp_buflist;
825 
826 	for (i = 0; i < entry->buf_count; i++) {
827 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
828 	}
829 
830 	dma->buf_count += entry->buf_count;
831 	dma->byte_count += byte_count;
832 
833 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
834 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
835 
836 	request->count = entry->buf_count;
837 	request->size = size;
838 
839 	dma->flags = _DRM_DMA_USE_SG;
840 
841 	return 0;
842 }
843 
844 /**
845  * Add AGP buffers for DMA transfers.
846  *
847  * \param dev struct drm_device to which the buffers are to be added.
848  * \param request pointer to a struct drm_buf_desc describing the request.
849  * \return zero on success or a negative number on failure.
850  *
851  * After some sanity checks, creates a drm_buf structure for each buffer and
852  * reallocates the buffer list of the same size order to accommodate the new
853  * buffers.
854  */
855 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
856 {
857 	int order, ret;
858 
859 	if (request->count < 0 || request->count > 4096)
860 		return EINVAL;
861 
862 	order = drm_order(request->size);
863 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
864 		return EINVAL;
865 
866 	spin_lock(&dev->dma_lock);
867 
868 	/* No more allocations after first buffer-using ioctl. */
869 	if (dev->buf_use != 0) {
870 		spin_unlock(&dev->dma_lock);
871 		return EBUSY;
872 	}
873 	/* No more than one allocation per order */
874 	if (dev->dma->bufs[order].buf_count != 0) {
875 		spin_unlock(&dev->dma_lock);
876 		return ENOMEM;
877 	}
878 
879 	ret = drm_do_addbufs_agp(dev, request);
880 
881 	spin_unlock(&dev->dma_lock);
882 
883 	return ret;
884 }
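
/*
 * Editorial sketch (assumption, not original code): filling a struct
 * drm_buf_desc for the AGP path checked above.  The example_ function,
 * the counts and the sizes are illustrative values only.
 */
#if 0	/* illustrative only */
static int example_request_agp_bufs(struct drm_device *dev)
{
	struct drm_buf_desc req;

	memset(&req, 0, sizeof(req));
	req.count = 64;			/* at most 4096 buffers per call */
	req.size = 65536;		/* rounded up to a power of two */
	req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
	req.agp_start = 0;		/* offset into the AGP aperture */

	/* On success, req.count and req.size report what was allocated. */
	return drm_addbufs_agp(dev, &req);
}
#endif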
885 
886 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
887 {
888 	int order, ret;
889 
890 	if (!DRM_SUSER(DRM_CURPROC))
891 		return EACCES;
892 
893 	if (request->count < 0 || request->count > 4096)
894 		return EINVAL;
895 
896 	order = drm_order(request->size);
897 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
898 		return EINVAL;
899 
900 	spin_lock(&dev->dma_lock);
901 
902 	/* No more allocations after first buffer-using ioctl. */
903 	if (dev->buf_use != 0) {
904 		spin_unlock(&dev->dma_lock);
905 		return EBUSY;
906 	}
907 	/* No more than one allocation per order */
908 	if (dev->dma->bufs[order].buf_count != 0) {
909 		spin_unlock(&dev->dma_lock);
910 		return ENOMEM;
911 	}
912 
913 	ret = drm_do_addbufs_sg(dev, request);
914 
915 	spin_unlock(&dev->dma_lock);
916 
917 	return ret;
918 }
919 
920 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
921 {
922 	int order, ret;
923 
924 	if (!DRM_SUSER(DRM_CURPROC))
925 		return EACCES;
926 
927 	if (request->count < 0 || request->count > 4096)
928 		return EINVAL;
929 
930 	order = drm_order(request->size);
931 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
932 		return EINVAL;
933 
934 	spin_lock(&dev->dma_lock);
935 
936 	/* No more allocations after first buffer-using ioctl. */
937 	if (dev->buf_use != 0) {
938 		spin_unlock(&dev->dma_lock);
939 		return EBUSY;
940 	}
941 	/* No more than one allocation per order */
942 	if (dev->dma->bufs[order].buf_count != 0) {
943 		spin_unlock(&dev->dma_lock);
944 		return ENOMEM;
945 	}
946 
947 	ret = drm_do_addbufs_pci(dev, request);
948 
949 	spin_unlock(&dev->dma_lock);
950 
951 	return ret;
952 }
953 
954 /**
955  * Add buffers for DMA transfers (ioctl).
956  *
957  * \param inode device inode.
958  * \param file_priv DRM file private.
959  * \param cmd command.
960  * \param arg pointer to a struct drm_buf_desc request.
961  * \return zero on success or a negative number on failure.
962  *
963  * According to the memory type specified in drm_buf_desc::flags and the
964  * build options, it dispatches the call either to addbufs_agp(),
965  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
966  * PCI memory respectively.
967  */
968 int drm_addbufs(struct drm_device *dev, void *data,
969 		struct drm_file *file_priv)
970 {
971 	struct drm_buf_desc *request = data;
972 	int err;
973 
974 	if (request->flags & _DRM_AGP_BUFFER)
975 		err = drm_addbufs_agp(dev, request);
976 	else if (request->flags & _DRM_SG_BUFFER)
977 		err = drm_addbufs_sg(dev, request);
978 	else
979 		err = drm_addbufs_pci(dev, request);
980 
981 	return err;
982 }
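
/*
 * Editorial sketch of the user-space side: the dispatch above is reached
 * through the DRM_IOCTL_ADD_BUFS ioctl.  The device path, counts and
 * sizes below are assumptions for illustration.
 */
#if 0	/* illustrative only, user-space perspective */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <drm.h>

static int example_userspace_addbufs(void)
{
	struct drm_buf_desc desc = { 0 };
	int fd = open("/dev/dri/card0", O_RDWR);	/* path is an assumption */

	if (fd < 0)
		return -1;
	desc.count = 32;
	desc.size = 16384;
	desc.flags = 0;		/* no AGP/SG flag: consistent PCI memory */
	return ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
}
#endif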
983 
984 /**
985  * Get information about the buffer mappings.
986  *
987  * This was originally meant for debugging purposes, or for use by a
988  * sophisticated client library to determine how best to use the available
989  * buffers (e.g., large buffers can be used for image transfer).
990  *
991  * \param inode device inode.
992  * \param file_priv DRM file private.
993  * \param cmd command.
994  * \param arg pointer to a drm_buf_info structure.
995  * \return zero on success or a negative number on failure.
996  *
997  * Increments drm_device::buf_use while holding the drm_device::count_lock
998  * lock, preventing allocation of more buffers after this call. Information
999  * about each requested buffer is then copied into user space.
1000  */
1001 int drm_infobufs(struct drm_device *dev, void *data,
1002 		 struct drm_file *file_priv)
1003 {
1004 	drm_device_dma_t *dma = dev->dma;
1005 	struct drm_buf_info *request = data;
1006 	int i;
1007 	int count;
1008 	int retcode = 0;
1009 
1010 	spin_lock(&dev->dma_lock);
1011 	++dev->buf_use;		/* Can't allocate more after this call */
1012 	spin_unlock(&dev->dma_lock);
1013 
1014 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1015 		if (dma->bufs[i].buf_count)
1016 			++count;
1017 	}
1018 
1019 	DRM_DEBUG("count = %d\n", count);
1020 
1021 	if (request->count >= count) {
1022 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1023 			if (dma->bufs[i].buf_count) {
1024 				struct drm_buf_desc from;
1025 
1026 				from.count = dma->bufs[i].buf_count;
1027 				from.size = dma->bufs[i].buf_size;
1028 				from.low_mark = dma->bufs[i].freelist.low_mark;
1029 				from.high_mark = dma->bufs[i].freelist.high_mark;
1030 
1031 				if (DRM_COPY_TO_USER(&request->list[count], &from,
1032 				    sizeof(struct drm_buf_desc)) != 0) {
1033 					retcode = EFAULT;
1034 					break;
1035 				}
1036 
1037 				DRM_DEBUG("%d %d %d %d %d\n",
1038 				    i, dma->bufs[i].buf_count,
1039 				    dma->bufs[i].buf_size,
1040 				    dma->bufs[i].freelist.low_mark,
1041 				    dma->bufs[i].freelist.high_mark);
1042 				++count;
1043 			}
1044 		}
1045 	}
1046 	request->count = count;
1047 
1048 	return retcode;
1049 }
1050 
1051 /**
1052  * Specifies a low and high water mark for buffer allocation.
1053  *
1054  * \param inode device inode.
1055  * \param file_priv DRM file private.
1056  * \param cmd command.
1057  * \param arg a pointer to a drm_buf_desc structure.
1058  * \return zero on success or a negative number on failure.
1059  *
1060  * Verifies that the size order is bounded between the admissible orders and
1061  * updates the respective drm_device_dma::bufs entry low and high water mark.
1062  *
1063  * \note This ioctl is deprecated and essentially never used.
1064  */
1065 int drm_markbufs(struct drm_device *dev, void *data,
1066 		 struct drm_file *file_priv)
1067 {
1068 	drm_device_dma_t *dma = dev->dma;
1069 	struct drm_buf_desc *request = data;
1070 	int order;
1071 
1072 	DRM_DEBUG("%d, %d, %d\n",
1073 		  request->size, request->low_mark, request->high_mark);
1074 
1075 
1076 	order = drm_order(request->size);
1077 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
1078 	    request->low_mark < 0 || request->high_mark < 0) {
1079 		return EINVAL;
1080 	}
1081 
1082 	spin_lock(&dev->dma_lock);
1083 	if (request->low_mark > dma->bufs[order].buf_count ||
1084 	    request->high_mark > dma->bufs[order].buf_count) {
1085 		spin_unlock(&dev->dma_lock);
1086 		return EINVAL;
1087 	}
1088 
1089 	dma->bufs[order].freelist.low_mark  = request->low_mark;
1090 	dma->bufs[order].freelist.high_mark = request->high_mark;
1091 	spin_unlock(&dev->dma_lock);
1092 
1093 	return 0;
1094 }
1095 
1096 /**
1097  * Unreserve the buffers in list, previously reserved using drmDMA.
1098  *
1099  * \param inode device inode.
1100  * \param file_priv DRM file private.
1101  * \param cmd command.
1102  * \param arg pointer to a drm_buf_free structure.
1103  * \return zero on success or a negative number on failure.
1104  *
1105  * Calls free_buffer() for each used buffer.
1106  * This function is primarily used for debugging.
1107  */
1108 int drm_freebufs(struct drm_device *dev, void *data,
1109 		 struct drm_file *file_priv)
1110 {
1111 	drm_device_dma_t *dma = dev->dma;
1112 	struct drm_buf_free *request = data;
1113 	int i;
1114 	int idx;
1115 	drm_buf_t *buf;
1116 	int retcode = 0;
1117 
1118 	DRM_DEBUG("%d\n", request->count);
1119 
1120 	spin_lock(&dev->dma_lock);
1121 	for (i = 0; i < request->count; i++) {
1122 		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
1123 			retcode = EFAULT;
1124 			break;
1125 		}
1126 		if (idx < 0 || idx >= dma->buf_count) {
1127 			DRM_ERROR("Index %d (of %d max)\n",
1128 			    idx, dma->buf_count - 1);
1129 			retcode = EINVAL;
1130 			break;
1131 		}
1132 		buf = dma->buflist[idx];
1133 		if (buf->file_priv != file_priv) {
1134 			DRM_ERROR("Process %d freeing buffer not owned\n",
1135 			    DRM_CURRENTPID);
1136 			retcode = EINVAL;
1137 			break;
1138 		}
1139 		drm_free_buffer(dev, buf);
1140 	}
1141 	spin_unlock(&dev->dma_lock);
1142 
1143 	return retcode;
1144 }
1145 
1146 /**
1147  * Maps all of the DMA buffers into client-virtual space (ioctl).
1148  *
1149  * \param inode device inode.
1150  * \param file_priv DRM file private.
1151  * \param cmd command.
1152  * \param arg pointer to a drm_buf_map structure.
1153  * \return zero on success or a negative number on failure.
1154  *
1155  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1156  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1157  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1158  * drm_mmap_dma().
1159  */
1160 int drm_mapbufs(struct drm_device *dev, void *data,
1161 	        struct drm_file *file_priv)
1162 {
1163 	drm_device_dma_t *dma = dev->dma;
1164 	int retcode = 0;
1165 	const int zero = 0;
1166 	vm_offset_t address;
1167 	struct vmspace *vms;
1168 	vm_ooffset_t foff;
1169 	vm_size_t size;
1170 	vm_offset_t vaddr;
1171 	struct drm_buf_map *request = data;
1172 	int i;
1173 
1174 	vms = DRM_CURPROC->td_proc->p_vmspace;
1175 
1176 	spin_lock(&dev->dma_lock);
1177 	dev->buf_use++;		/* Can't allocate more after this call */
1178 	spin_unlock(&dev->dma_lock);
1179 
1180 	if (request->count < dma->buf_count)
1181 		goto done;
1182 
1183 	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
1184 	    (drm_core_check_feature(dev, DRIVER_SG) &&
1185 	    (dma->flags & _DRM_DMA_USE_SG))) {
1186 		drm_local_map_t *map = dev->agp_buffer_map;
1187 
1188 		if (map == NULL) {
1189 			retcode = EINVAL;
1190 			goto done;
1191 		}
1192 		size = round_page(map->size);
1193 		foff = (unsigned long)map->handle;
1194 	} else {
1195 		size = round_page(dma->byte_count);
1196 		foff = 0;
1197 	}
1198 
1199 	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1200 	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1201 	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
1202 	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
1203 	if (retcode)
1204 		goto done;
1205 
1206 	request->virtual = (void *)vaddr;
1207 
1208 	for (i = 0; i < dma->buf_count; i++) {
1209 		if (DRM_COPY_TO_USER(&request->list[i].idx,
1210 		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
1211 			retcode = EFAULT;
1212 			goto done;
1213 		}
1214 		if (DRM_COPY_TO_USER(&request->list[i].total,
1215 		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
1216 			retcode = EFAULT;
1217 			goto done;
1218 		}
1219 		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
1220 		    sizeof(zero))) {
1221 			retcode = EFAULT;
1222 			goto done;
1223 		}
1224 		address = vaddr + dma->buflist[i]->offset; /* *** */
1225 		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
1226 		    sizeof(address))) {
1227 			retcode = EFAULT;
1228 			goto done;
1229 		}
1230 	}
1231 
1232  done:
1233 	request->count = dma->buf_count;
1234 
1235 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1236 
1237 	return retcode;
1238 }
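
/*
 * Editorial sketch of the user-space side of the mapping above, using the
 * DRM_IOCTL_MAP_BUFS ioctl.  The helper name and the caller-supplied
 * buffer count are assumptions; the requested count must be at least the
 * kernel's dma->buf_count for the mapping to be performed.
 */
#if 0	/* illustrative only, user-space perspective */
static int example_userspace_mapbufs(int fd, int buf_count)
{
	struct drm_buf_pub list[buf_count];	/* VLA kept short for brevity */
	struct drm_buf_map bm;

	bm.count = buf_count;
	bm.virtual = NULL;
	bm.list = list;
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) != 0)
		return -1;
	/* bm.virtual is the start of the mapped region; list[i].address
	 * holds the client-virtual address of each buffer. */
	return 0;
}
#endif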
1239 
1240 /**
1241  * Compute size order.  Returns the exponent of the smallest power of two
1242  * which is greater than or equal to the given number.
1243  *
1244  * \param size size.
1245  * \return order.
1246  *
1247  * \todo Can be made faster.
1248  */
1249 int drm_order(unsigned long size)
1250 {
1251 	int order;
1252 	unsigned long tmp;
1253 
1254 	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1255 
1256 	if (size & (size - 1))
1257 		++order;
1258 
1259 	return order;
1260 }
1261 EXPORT_SYMBOL(drm_order);
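
/*
 * Editorial note on the \todo above: a sketch of a branch-light variant
 * built on the kernel's flsl() bit-scan helper.  Shown as an assumption /
 * illustration, not as a drop-in replacement.
 */
#if 0	/* illustrative only */
static int example_drm_order_fls(unsigned long size)
{
	if (size == 0)
		return 0;
	/* flsl() gives the 1-based index of the most significant set bit,
	 * so flsl(size) - 1 is the exponent of the largest power of two
	 * not exceeding size; add one when size is not already a power
	 * of two. */
	return flsl(size) - 1 + ((size & (size - 1)) != 0);
}
#endif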
1262