xref: /dragonfly/sys/dev/drm/drm_bufs.c (revision ef3ac1d1)
1 /**
2  * \file drm_bufs.c
3  * Generic buffer template
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8 
9 /*
10  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11  *
12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  *
35  * $FreeBSD: src/sys/dev/drm2/drm_bufs.c,v 1.1 2012/05/22 11:07:44 kib Exp $
36  */
37 
38 #include <sys/conf.h>
39 #include <bus/pci/pcireg.h>
40 #include <linux/types.h>
41 #include <linux/export.h>
42 #include <drm/drmP.h>
43 
44 /* Allocation of PCI memory resources (framebuffer, registers, etc.) for
45  * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
46  * address for accessing them.  Cleaned up at unload.
47  */
48 static int drm_alloc_resource(struct drm_device *dev, int resource)
49 {
50 	struct resource *res;
51 	int rid;
52 
53 	DRM_LOCK_ASSERT(dev);
54 
55 	if (resource >= DRM_MAX_PCI_RESOURCE) {
56 		DRM_ERROR("Resource %d too large\n", resource);
57 		return 1;
58 	}
59 
60 	if (dev->pcir[resource] != NULL) {
61 		return 0;
62 	}
63 
64 	DRM_UNLOCK(dev);
65 	rid = PCIR_BAR(resource);
66 	res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
67 	    RF_SHAREABLE);
68 	DRM_LOCK(dev);
69 	if (res == NULL) {
70 		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
71 		return 1;
72 	}
73 
74 	if (dev->pcir[resource] == NULL) {
75 		dev->pcirid[resource] = rid;
76 		dev->pcir[resource] = res;
77 	}
78 
79 	return 0;
80 }
81 
82 unsigned long drm_get_resource_start(struct drm_device *dev,
83 				     unsigned int resource)
84 {
85 	if (drm_alloc_resource(dev, resource) != 0)
86 		return 0;
87 
88 	return rman_get_start(dev->pcir[resource]);
89 }
90 
91 unsigned long drm_get_resource_len(struct drm_device *dev,
92 				   unsigned int resource)
93 {
94 	if (drm_alloc_resource(dev, resource) != 0)
95 		return 0;
96 
97 	return rman_get_size(dev->pcir[resource]);
98 }
99 
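/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * needs the physical location of a PCI BAR would typically call the helpers
 * above with the DRM lock held (drm_alloc_resource() asserts it), e.g.
 *
 *	unsigned long regs_base, regs_len;
 *
 *	DRM_LOCK(dev);
 *	regs_base = drm_get_resource_start(dev, 0);
 *	regs_len  = drm_get_resource_len(dev, 0);
 *	DRM_UNLOCK(dev);
 *	if (regs_base == 0 || regs_len == 0)
 *		return ENXIO;	/* BAR 0 is only an example index */
 *
 * Both helpers return 0 on failure; the underlying resource stays cached in
 * dev->pcir[] until the device is unloaded.
 */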
100 int drm_addmap(struct drm_device * dev, resource_size_t offset,
101 	       unsigned int size, enum drm_map_type type,
102 	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
103 {
104 	struct drm_local_map *map;
105 	struct drm_map_list *entry = NULL;
106 	drm_dma_handle_t *dmah;
107 	int align;
108 
109 	/* Allocate a new map structure, fill it in, and do any type-specific
110 	 * initialization necessary.
111 	 */
112 	map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
113 	if (!map) {
114 		return ENOMEM;
115 	}
116 
117 	map->offset = offset;
118 	map->size = size;
119 	map->type = type;
120 	map->flags = flags;
121 
122 	/* Only allow shared memory to be removable since we only keep enough
123 	 * bookkeeping information about shared memory to allow for removal
124 	 * when processes fork.
125 	 */
126 	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
127 		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
128 		drm_free(map, M_DRM);
129 		return EINVAL;
130 	}
131 	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
132 		DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
133 		    (uintmax_t)offset, size);
134 		drm_free(map, M_DRM);
135 		return EINVAL;
136 	}
137 	if (offset + size < offset) {
138 		DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
139 		    (uintmax_t)offset, size);
140 		drm_free(map, M_DRM);
141 		return EINVAL;
142 	}
143 
144 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
145 		  (unsigned long long)map->offset, map->size, map->type);
146 
147 	/* Check if this is just another version of a kernel-allocated map, and
148 	 * just hand that back if so.
149 	 */
150 	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
151 	    type == _DRM_SHM) {
152 		list_for_each_entry(entry, &dev->maplist, head) {
153 			if (entry->map->type == type && (entry->map->offset == offset ||
154 			    (entry->map->type == _DRM_SHM &&
155 			    entry->map->flags == _DRM_CONTAINS_LOCK))) {
156 				entry->map->size = size;
157 				DRM_DEBUG("Found kernel map %d\n", type);
158 				goto done;
159 			}
160 		}
161 	}
162 
163 	switch (map->type) {
164 	case _DRM_REGISTERS:
165 		map->handle = drm_ioremap(dev, map);
166 		if (!(map->flags & _DRM_WRITE_COMBINING))
167 			break;
168 		/* FALLTHROUGH */
169 	case _DRM_FRAME_BUFFER:
170 		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
171 			map->mtrr = 1;
172 		break;
173 	case _DRM_SHM:
174 		map->handle = kmalloc(map->size, M_DRM, M_NOWAIT);
175 		DRM_DEBUG("%lu %d %p\n",
176 		    map->size, drm_order(map->size), map->handle);
177 		if (!map->handle) {
178 			drm_free(map, M_DRM);
179 			return ENOMEM;
180 		}
181 		map->offset = (unsigned long)map->handle;
182 		if (map->flags & _DRM_CONTAINS_LOCK) {
183 			/* Prevent a 2nd X Server from creating a 2nd lock */
184 			DRM_LOCK(dev);
185 			if (dev->lock.hw_lock != NULL) {
186 				DRM_UNLOCK(dev);
187 				drm_free(map->handle, M_DRM);
188 				drm_free(map, M_DRM);
189 				return EBUSY;
190 			}
191 			dev->lock.hw_lock = map->handle; /* Pointer to lock */
192 			DRM_UNLOCK(dev);
193 		}
194 		break;
195 	case _DRM_AGP:
196 		/*valid = 0;*/
197 		/* In some cases (i810 driver), user space may have already
198 		 * added the AGP base itself, because dev->agp->base previously
199 		 * only got set during AGP enable.  So, only add the base
200 		 * address if the map's offset isn't already within the
201 		 * aperture.
202 		 */
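		/*
		 * Illustrative example: with an AGP base of 0xd0000000 and a
		 * 64MB aperture, a map offset of 0x10000 from user space is
		 * rebased to 0xd0010000, while an offset that already falls
		 * inside [base, base + aperture) is left unchanged.
		 */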
203 		if (map->offset < dev->agp->base ||
204 		    map->offset > dev->agp->base +
205 		    dev->agp->agp_info.ai_aperture_size - 1) {
206 			map->offset += dev->agp->base;
207 		}
208 		map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
209 		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
210 			if ((map->offset >= entry->bound) &&
211 			    (map->offset + map->size <=
212 			    entry->bound + entry->pages * PAGE_SIZE)) {
213 				valid = 1;
214 				break;
215 			}
216 		}
217 		if (!valid) {
218 			drm_free(map, M_DRM);
219 			return EACCES;
220 		}*/
221 		break;
222 	case _DRM_SCATTER_GATHER:
223 		if (!dev->sg) {
224 			drm_free(map, M_DRM);
225 			return EINVAL;
226 		}
227 		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
228 		map->offset = dev->sg->vaddr + offset;
229 		break;
230 	case _DRM_CONSISTENT:
231 		/* Unfortunately, we don't get any alignment specification from
232 		 * the caller, so we have to guess.  drm_pci_alloc requires
233 		 * a power-of-two alignment, so try to align the bus address of
234 	 * the map to its size if possible, otherwise just assume
235 		 * PAGE_SIZE alignment.
236 		 */
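		/*
		 * Note: the test below relies on the fact that a non-zero x is
		 * a power of two exactly when (x & (x - 1)) == 0; e.g.
		 * 0x1000 & 0x0fff == 0, but 0x3000 & 0x2fff == 0x2000, so
		 * non-power-of-two sizes fall back to PAGE_SIZE alignment.
		 */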
237 		align = map->size;
238 		if ((align & (align - 1)) != 0)
239 			align = PAGE_SIZE;
240 		dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
241 		if (!dmah) {
242 			drm_free(map, M_DRM);
243 			return ENOMEM;
244 		}
245 		map->handle = dmah->vaddr;
246 		map->offset = (unsigned long)dmah->busaddr;
247 		break;
248 	default:
249 		DRM_ERROR("Bad map type %d\n", map->type);
250 		drm_free(map, M_DRM);
251 		return EINVAL;
252 	}
253 
254 	list_add(&entry->head, &dev->maplist);
255 
256 done:
257 	/* Jumped to, with lock held, when a kernel map is found. */
258 
259 	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
260 	    map->size);
261 
262 	*map_ptr = map;
263 
264 	return 0;
265 }
266 
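/*
 * Usage sketch (illustrative only, mirroring what drm_addmap_ioctl() below
 * does): a driver registering its register BAR from the kernel would look
 * roughly like this, with positive errno values returned on failure:
 *
 *	struct drm_local_map *map;
 *	int ret;
 *
 *	DRM_LOCK(dev);
 *	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *	    drm_get_resource_len(dev, 0), _DRM_REGISTERS, 0, &map);
 *	DRM_UNLOCK(dev);
 *	if (ret != 0)
 *		return ret;
 *
 * If a kernel map of the same type and offset already exists, drm_addmap()
 * hands that map back instead of creating a duplicate.
 */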
267 /**
268  * Ioctl to specify a range of memory that is available for mapping by a
269  * non-root process.
270  *
271  * \param inode device inode.
272  * \param file_priv DRM file private.
273  * \param cmd command.
274  * \param arg pointer to a drm_map structure.
275  * \return zero on success or a negative value on error.
276  *
277  */
278 int drm_addmap_ioctl(struct drm_device *dev, void *data,
279 		     struct drm_file *file_priv)
280 {
281 	struct drm_map *request = data;
282 	drm_local_map_t *map;
283 	int err;
284 
285 	if (!(dev->flags & (FREAD|FWRITE)))
286 		return EACCES; /* Require read/write */
287 
288 	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
289 		return EACCES;
290 
291 	DRM_LOCK(dev);
292 	err = drm_addmap(dev, request->offset, request->size, request->type,
293 	    request->flags, &map);
294 	DRM_UNLOCK(dev);
295 	if (err != 0)
296 		return err;
297 
298 	request->offset = map->offset;
299 	request->size = map->size;
300 	request->type = map->type;
301 	request->flags = map->flags;
302 	request->mtrr   = map->mtrr;
303 	request->handle = (void *)map->handle;
304 
305 	return 0;
306 }
307 
308 void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
309 {
310 	struct drm_map_list *r_list = NULL, *list_t;
311 	drm_dma_handle_t dmah;
312 	int found = 0;
313 
314 	DRM_LOCK_ASSERT(dev);
315 
316 	if (map == NULL)
317 		return;
318 
319 	/* Find the list entry for the map and remove it */
320 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
321 		if (r_list->map == map) {
322 			list_del(&r_list->head);
323 			drm_free(r_list, M_DRM);
324 			found = 1;
325 			break;
326 		}
327 	}
328 
329 	if (!found)
330 		return;
331 
332 	switch (map->type) {
333 	case _DRM_REGISTERS:
334 		drm_ioremapfree(map);
335 		/* FALLTHROUGH */
336 	case _DRM_FRAME_BUFFER:
337 		if (map->mtrr) {
338 			int __unused retcode;
339 
340 			retcode = drm_mtrr_del(0, map->offset, map->size,
341 			    DRM_MTRR_WC);
342 			DRM_DEBUG("mtrr_del = %d\n", retcode);
343 		}
344 		break;
345 	case _DRM_SHM:
346 		drm_free(map->handle, M_DRM);
347 		break;
348 	case _DRM_AGP:
349 	case _DRM_SCATTER_GATHER:
350 		break;
351 	case _DRM_CONSISTENT:
352 		dmah.vaddr = map->handle;
353 		dmah.busaddr = map->offset;
354 		drm_pci_free(dev, &dmah);
355 		break;
356 	default:
357 		DRM_ERROR("Bad map type %d\n", map->type);
358 		break;
359 	}
360 
361 	drm_free(map, M_DRM);
362 }
363 
364 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
365  * the last close of the device, and this is necessary for cleanup when things
366  * exit uncleanly.  Therefore, having userland manually remove mappings seems
367  * like a pointless exercise since they're going away anyway.
368  *
369  * One use case might be after addmap is allowed for normal users for SHM and
370  * gets used by drivers that the server doesn't need to care about.  This seems
371  * unlikely.
372  *
373  * \param inode device inode.
374  * \param file_priv DRM file private.
375  * \param cmd command.
376  * \param arg pointer to a struct drm_map structure.
377  * \return zero on success or a negative value on error.
378  */
379 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
380 		    struct drm_file *file_priv)
381 {
382 	struct drm_map *request = data;
383 	struct drm_local_map *map = NULL;
384 	struct drm_map_list *r_list;
385 
386 	DRM_LOCK(dev);
387 	list_for_each_entry(r_list, &dev->maplist, head) {
388 		if (r_list->map &&
389 		    r_list->user_token == (unsigned long)request->handle &&
390 		    r_list->map->flags & _DRM_REMOVABLE) {
391 			map = r_list->map;
392 			break;
393 		}
394 	}
395 
396 	/* List has wrapped around to the head pointer, or it's empty and we
397 	 * didn't find anything.
398 	 */
399 	if (list_empty(&dev->maplist) || !map) {
400 		DRM_UNLOCK(dev);
401 		return -EINVAL;
402 	}
403 
404 	/* Register and framebuffer maps are permanent */
405 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
406 		DRM_UNLOCK(dev);
407 		return 0;
408 	}
409 
410 	drm_rmmap(dev, map);
411 
412 	DRM_UNLOCK(dev);
413 
414 	return 0;
415 }
416 
417 /**
418  * Cleanup after an error on one of the addbufs() functions.
419  *
420  * \param dev DRM device.
421  * \param entry buffer entry where the error occurred.
422  *
423  * Frees any pages and buffers associated with the given entry.
424  */
425 static void drm_cleanup_buf_error(struct drm_device * dev,
426 				  struct drm_buf_entry * entry)
427 {
428 	int i;
429 
430 	if (entry->seg_count) {
431 		for (i = 0; i < entry->seg_count; i++) {
432 			drm_pci_free(dev, entry->seglist[i]);
433 		}
434 		drm_free(entry->seglist, M_DRM);
435 
436 		entry->seg_count = 0;
437 	}
438 
439 	if (entry->buf_count) {
440 		for (i = 0; i < entry->buf_count; i++) {
441 			drm_free(entry->buflist[i].dev_private, M_DRM);
442 		}
443 		drm_free(entry->buflist, M_DRM);
444 
445 		entry->buf_count = 0;
446 	}
447 }
448 
449 static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
450 {
451 	drm_device_dma_t *dma = dev->dma;
452 	drm_buf_entry_t *entry;
453 	/*drm_agp_mem_t *agp_entry;
454 	int valid*/
455 	drm_buf_t *buf;
456 	unsigned long offset;
457 	unsigned long agp_offset;
458 	int count;
459 	int order;
460 	int size;
461 	int alignment;
462 	int page_order;
463 	int total;
464 	int byte_count;
465 	int i;
466 	drm_buf_t **temp_buflist;
467 
468 	count = request->count;
469 	order = drm_order(request->size);
470 	size = 1 << order;
471 
472 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
473 	    ? round_page(size) : size;
474 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
475 	total = PAGE_SIZE << page_order;
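	/*
	 * Worked example: with PAGE_SIZE 4096 (PAGE_SHIFT 12) and
	 * request->size 16384, drm_order() gives order 14, so page_order = 2
	 * and total = PAGE_SIZE << 2 = 16384; any request->size of PAGE_SIZE
	 * or less gives page_order 0 and total = PAGE_SIZE.
	 */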
476 
477 	byte_count = 0;
478 	agp_offset = dev->agp->base + request->agp_start;
479 
480 	DRM_DEBUG("count:      %d\n",  count);
481 	DRM_DEBUG("order:      %d\n",  order);
482 	DRM_DEBUG("size:       %d\n",  size);
483 	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
484 	DRM_DEBUG("alignment:  %d\n",  alignment);
485 	DRM_DEBUG("page_order: %d\n",  page_order);
486 	DRM_DEBUG("total:      %d\n",  total);
487 
488 	/* Make sure buffers are located in AGP memory that we own */
489 	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
490 	 * memory.  Safe to ignore for now because these ioctls are still
491 	 * root-only.
492 	 */
493 	/*valid = 0;
494 	for (agp_entry = dev->agp->memory; agp_entry;
495 	    agp_entry = agp_entry->next) {
496 		if ((agp_offset >= agp_entry->bound) &&
497 		    (agp_offset + total * count <=
498 		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
499 			valid = 1;
500 			break;
501 		}
502 	}
503 	if (!valid) {
504 		DRM_DEBUG("zone invalid\n");
505 		return EINVAL;
506 	}*/
507 
508 	entry = &dma->bufs[order];
509 
510 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
511 	    M_NOWAIT | M_ZERO);
512 	if (!entry->buflist) {
513 		return ENOMEM;
514 	}
515 
516 	entry->buf_size = size;
517 	entry->page_order = page_order;
518 
519 	offset = 0;
520 
521 	while (entry->buf_count < count) {
522 		buf          = &entry->buflist[entry->buf_count];
523 		buf->idx     = dma->buf_count + entry->buf_count;
524 		buf->total   = alignment;
525 		buf->order   = order;
526 		buf->used    = 0;
527 
528 		buf->offset  = (dma->byte_count + offset);
529 		buf->bus_address = agp_offset + offset;
530 		buf->address = (void *)(agp_offset + offset);
531 		buf->next    = NULL;
532 		buf->pending = 0;
533 		buf->file_priv = NULL;
534 
535 		buf->dev_priv_size = dev->driver->buf_priv_size;
536 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
537 		    M_NOWAIT | M_ZERO);
538 		if (buf->dev_private == NULL) {
539 			/* Set count correctly so we free the proper amount. */
540 			entry->buf_count = count;
541 			drm_cleanup_buf_error(dev, entry);
542 			return ENOMEM;
543 		}
544 
545 		offset += alignment;
546 		entry->buf_count++;
547 		byte_count += PAGE_SIZE << page_order;
548 	}
549 
550 	DRM_DEBUG("byte_count: %d\n", byte_count);
551 
552 	temp_buflist = krealloc(dma->buflist,
553 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
554 	    M_DRM, M_NOWAIT);
555 	if (temp_buflist == NULL) {
556 		/* Free the entry because it isn't valid */
557 		drm_cleanup_buf_error(dev, entry);
558 		return ENOMEM;
559 	}
560 	dma->buflist = temp_buflist;
561 
562 	for (i = 0; i < entry->buf_count; i++) {
563 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
564 	}
565 
566 	dma->buf_count += entry->buf_count;
567 	dma->byte_count += byte_count;
568 
569 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
570 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
571 
572 	request->count = entry->buf_count;
573 	request->size = size;
574 
575 	dma->flags = _DRM_DMA_USE_AGP;
576 
577 	return 0;
578 }
579 
580 static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
581 {
582 	drm_device_dma_t *dma = dev->dma;
583 	int count;
584 	int order;
585 	int size;
586 	int total;
587 	int page_order;
588 	drm_buf_entry_t *entry;
589 	drm_buf_t *buf;
590 	int alignment;
591 	unsigned long offset;
592 	int i;
593 	int byte_count;
594 	int page_count;
595 	unsigned long *temp_pagelist;
596 	drm_buf_t **temp_buflist;
597 
598 	count = request->count;
599 	order = drm_order(request->size);
600 	size = 1 << order;
601 
602 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
603 	    request->count, request->size, size, order);
604 
605 	alignment = (request->flags & _DRM_PAGE_ALIGN)
606 	    ? round_page(size) : size;
607 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
608 	total = PAGE_SIZE << page_order;
609 
610 	entry = &dma->bufs[order];
611 
612 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
613 	    M_NOWAIT | M_ZERO);
614 	entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
615 	    M_NOWAIT | M_ZERO);
616 
617 	/* Keep the original pagelist until we know all the allocations
618 	 * have succeeded
619 	 */
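	/*
	 * That is, dma->pagelist is only replaced (and the old array freed)
	 * once every drm_pci_alloc() and per-buffer allocation below has
	 * succeeded; a mid-way ENOMEM frees temp_pagelist and leaves the
	 * device's existing pagelist untouched.
	 */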
620 	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
621 	    sizeof(*dma->pagelist), M_DRM, M_NOWAIT);
622 
623 	if (entry->buflist == NULL || entry->seglist == NULL ||
624 	    temp_pagelist == NULL) {
625 		drm_free(temp_pagelist, M_DRM);
626 		drm_free(entry->seglist, M_DRM);
627 		drm_free(entry->buflist, M_DRM);
628 		return ENOMEM;
629 	}
630 
631 	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
632 	    sizeof(*dma->pagelist));
633 
634 	DRM_DEBUG("pagelist: %d entries\n",
635 	    dma->page_count + (count << page_order));
636 
637 	entry->buf_size	= size;
638 	entry->page_order = page_order;
639 	byte_count = 0;
640 	page_count = 0;
641 
642 	while (entry->buf_count < count) {
643 		spin_unlock(&dev->dma_lock);
644 		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
645 		    0xfffffffful);
646 		spin_lock(&dev->dma_lock);
647 		if (dmah == NULL) {
648 			/* Set count correctly so we free the proper amount. */
649 			entry->buf_count = count;
650 			entry->seg_count = count;
651 			drm_cleanup_buf_error(dev, entry);
652 			drm_free(temp_pagelist, M_DRM);
653 			return ENOMEM;
654 		}
655 
656 		entry->seglist[entry->seg_count++] = dmah;
657 		for (i = 0; i < (1 << page_order); i++) {
658 			DRM_DEBUG("page %d @ %p\n",
659 			    dma->page_count + page_count,
660 			    (char *)dmah->vaddr + PAGE_SIZE * i);
661 			temp_pagelist[dma->page_count + page_count++] =
662 			    (long)dmah->vaddr + PAGE_SIZE * i;
663 		}
664 		for (offset = 0;
665 		    offset + size <= total && entry->buf_count < count;
666 		    offset += alignment, ++entry->buf_count) {
667 			buf	     = &entry->buflist[entry->buf_count];
668 			buf->idx     = dma->buf_count + entry->buf_count;
669 			buf->total   = alignment;
670 			buf->order   = order;
671 			buf->used    = 0;
672 			buf->offset  = (dma->byte_count + byte_count + offset);
673 			buf->address = ((char *)dmah->vaddr + offset);
674 			buf->bus_address = dmah->busaddr + offset;
675 			buf->next    = NULL;
676 			buf->pending = 0;
677 			buf->file_priv = NULL;
678 
679 			buf->dev_priv_size = dev->driver->buf_priv_size;
680 			buf->dev_private = kmalloc(buf->dev_priv_size,
681 			    M_DRM, M_NOWAIT | M_ZERO);
682 			if (buf->dev_private == NULL) {
683 				/* Set count correctly so we free the proper amount. */
684 				entry->buf_count = count;
685 				entry->seg_count = count;
686 				drm_cleanup_buf_error(dev, entry);
687 				drm_free(temp_pagelist, M_DRM);
688 				return ENOMEM;
689 			}
690 
691 			DRM_DEBUG("buffer %d @ %p\n",
692 			    entry->buf_count, buf->address);
693 		}
694 		byte_count += PAGE_SIZE << page_order;
695 	}
696 
697 	temp_buflist = krealloc(dma->buflist,
698 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
699 	    M_DRM, M_NOWAIT);
700 	if (temp_buflist == NULL) {
701 		/* Free the entry because it isn't valid */
702 		drm_cleanup_buf_error(dev, entry);
703 		drm_free(temp_pagelist, M_DRM);
704 		return ENOMEM;
705 	}
706 	dma->buflist = temp_buflist;
707 
708 	for (i = 0; i < entry->buf_count; i++) {
709 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
710 	}
711 
712 	/* No allocations failed, so now we can replace the orginal pagelist
713 	 * with the new one.
714 	 */
715 	drm_free(dma->pagelist, M_DRM);
716 	dma->pagelist = temp_pagelist;
717 
718 	dma->buf_count += entry->buf_count;
719 	dma->seg_count += entry->seg_count;
720 	dma->page_count += entry->seg_count << page_order;
721 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
722 
723 	request->count = entry->buf_count;
724 	request->size = size;
725 
726 	return 0;
727 
728 }
729 
730 static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
731 {
732 	drm_device_dma_t *dma = dev->dma;
733 	drm_buf_entry_t *entry;
734 	drm_buf_t *buf;
735 	unsigned long offset;
736 	unsigned long agp_offset;
737 	int count;
738 	int order;
739 	int size;
740 	int alignment;
741 	int page_order;
742 	int total;
743 	int byte_count;
744 	int i;
745 	drm_buf_t **temp_buflist;
746 
747 	count = request->count;
748 	order = drm_order(request->size);
749 	size = 1 << order;
750 
751 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
752 	    ? round_page(size) : size;
753 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
754 	total = PAGE_SIZE << page_order;
755 
756 	byte_count = 0;
757 	agp_offset = request->agp_start;
758 
759 	DRM_DEBUG("count:      %d\n",  count);
760 	DRM_DEBUG("order:      %d\n",  order);
761 	DRM_DEBUG("size:       %d\n",  size);
762 	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
763 	DRM_DEBUG("alignment:  %d\n",  alignment);
764 	DRM_DEBUG("page_order: %d\n",  page_order);
765 	DRM_DEBUG("total:      %d\n",  total);
766 
767 	entry = &dma->bufs[order];
768 
769 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
770 	    M_NOWAIT | M_ZERO);
771 	if (entry->buflist == NULL)
772 		return ENOMEM;
773 
774 	entry->buf_size = size;
775 	entry->page_order = page_order;
776 
777 	offset = 0;
778 
779 	while (entry->buf_count < count) {
780 		buf          = &entry->buflist[entry->buf_count];
781 		buf->idx     = dma->buf_count + entry->buf_count;
782 		buf->total   = alignment;
783 		buf->order   = order;
784 		buf->used    = 0;
785 
786 		buf->offset  = (dma->byte_count + offset);
787 		buf->bus_address = agp_offset + offset;
788 		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
789 		buf->next    = NULL;
790 		buf->pending = 0;
791 		buf->file_priv = NULL;
792 
793 		buf->dev_priv_size = dev->driver->buf_priv_size;
794 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
795 		    M_NOWAIT | M_ZERO);
796 		if (buf->dev_private == NULL) {
797 			/* Set count correctly so we free the proper amount. */
798 			entry->buf_count = count;
799 			drm_cleanup_buf_error(dev, entry);
800 			return ENOMEM;
801 		}
802 
803 		DRM_DEBUG("buffer %d @ %p\n",
804 		    entry->buf_count, buf->address);
805 
806 		offset += alignment;
807 		entry->buf_count++;
808 		byte_count += PAGE_SIZE << page_order;
809 	}
810 
811 	DRM_DEBUG("byte_count: %d\n", byte_count);
812 
813 	temp_buflist = krealloc(dma->buflist,
814 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
815 	    M_DRM, M_NOWAIT);
816 	if (temp_buflist == NULL) {
817 		/* Free the entry because it isn't valid */
818 		drm_cleanup_buf_error(dev, entry);
819 		return ENOMEM;
820 	}
821 	dma->buflist = temp_buflist;
822 
823 	for (i = 0; i < entry->buf_count; i++) {
824 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
825 	}
826 
827 	dma->buf_count += entry->buf_count;
828 	dma->byte_count += byte_count;
829 
830 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
831 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
832 
833 	request->count = entry->buf_count;
834 	request->size = size;
835 
836 	dma->flags = _DRM_DMA_USE_SG;
837 
838 	return 0;
839 }
840 
841 /**
842  * Add AGP buffers for DMA transfers.
843  *
844  * \param dev struct drm_device to which the buffers are to be added.
845  * \param request pointer to a struct drm_buf_desc describing the request.
846  * \return zero on success or a negative number on failure.
847  *
848  * After some sanity checks, creates a drm_buf structure for each buffer and
849  * reallocates the buffer list of the same size order to accommodate the new
850  * buffers.
851  */
852 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
853 {
854 	int order, ret;
855 
856 	if (request->count < 0 || request->count > 4096)
857 		return EINVAL;
858 
859 	order = drm_order(request->size);
860 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
861 		return EINVAL;
862 
863 	spin_lock(&dev->dma_lock);
864 
865 	/* No more allocations after first buffer-using ioctl. */
866 	if (dev->buf_use != 0) {
867 		spin_unlock(&dev->dma_lock);
868 		return EBUSY;
869 	}
870 	/* No more than one allocation per order */
871 	if (dev->dma->bufs[order].buf_count != 0) {
872 		spin_unlock(&dev->dma_lock);
873 		return ENOMEM;
874 	}
875 
876 	ret = drm_do_addbufs_agp(dev, request);
877 
878 	spin_unlock(&dev->dma_lock);
879 
880 	return ret;
881 }
882 
883 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
884 {
885 	int order, ret;
886 
887 	if (!DRM_SUSER(DRM_CURPROC))
888 		return EACCES;
889 
890 	if (request->count < 0 || request->count > 4096)
891 		return EINVAL;
892 
893 	order = drm_order(request->size);
894 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
895 		return EINVAL;
896 
897 	spin_lock(&dev->dma_lock);
898 
899 	/* No more allocations after first buffer-using ioctl. */
900 	if (dev->buf_use != 0) {
901 		spin_unlock(&dev->dma_lock);
902 		return EBUSY;
903 	}
904 	/* No more than one allocation per order */
905 	if (dev->dma->bufs[order].buf_count != 0) {
906 		spin_unlock(&dev->dma_lock);
907 		return ENOMEM;
908 	}
909 
910 	ret = drm_do_addbufs_sg(dev, request);
911 
912 	spin_unlock(&dev->dma_lock);
913 
914 	return ret;
915 }
916 
917 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
918 {
919 	int order, ret;
920 
921 	if (!DRM_SUSER(DRM_CURPROC))
922 		return EACCES;
923 
924 	if (request->count < 0 || request->count > 4096)
925 		return EINVAL;
926 
927 	order = drm_order(request->size);
928 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
929 		return EINVAL;
930 
931 	spin_lock(&dev->dma_lock);
932 
933 	/* No more allocations after first buffer-using ioctl. */
934 	if (dev->buf_use != 0) {
935 		spin_unlock(&dev->dma_lock);
936 		return EBUSY;
937 	}
938 	/* No more than one allocation per order */
939 	if (dev->dma->bufs[order].buf_count != 0) {
940 		spin_unlock(&dev->dma_lock);
941 		return ENOMEM;
942 	}
943 
944 	ret = drm_do_addbufs_pci(dev, request);
945 
946 	spin_unlock(&dev->dma_lock);
947 
948 	return ret;
949 }
950 
951 /**
952  * Add buffers for DMA transfers (ioctl).
953  *
954  * \param inode device inode.
955  * \param file_priv DRM file private.
956  * \param cmd command.
957  * \param arg pointer to a struct drm_buf_desc request.
958  * \return zero on success or a negative number on failure.
959  *
960  * According to the memory type specified in drm_buf_desc::flags and the
961  * build options, it dispatches the call either to addbufs_agp(),
962  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
963  * PCI memory respectively.
964  */
965 int drm_addbufs(struct drm_device *dev, void *data,
966 		struct drm_file *file_priv)
967 {
968 	struct drm_buf_desc *request = data;
969 	int err;
970 
971 	if (request->flags & _DRM_AGP_BUFFER)
972 		err = drm_addbufs_agp(dev, request);
973 	else if (request->flags & _DRM_SG_BUFFER)
974 		err = drm_addbufs_sg(dev, request);
975 	else
976 		err = drm_addbufs_pci(dev, request);
977 
978 	return err;
979 }
980 
981 /**
982  * Get information about the buffer mappings.
983  *
984  * This was originally meant for debugging purposes, or by a sophisticated
985  * client library to determine how best to use the available buffers (e.g.,
986  * large buffers can be used for image transfer).
987  *
988  * \param inode device inode.
989  * \param file_priv DRM file private.
990  * \param cmd command.
991  * \param arg pointer to a drm_buf_info structure.
992  * \return zero on success or a negative number on failure.
993  *
994  * Increments drm_device::buf_use while holding drm_device::dma_lock,
995  * preventing allocation of more buffers after this call. Information
996  * about each requested buffer is then copied into user space.
997  */
998 int drm_infobufs(struct drm_device *dev, void *data,
999 		 struct drm_file *file_priv)
1000 {
1001 	drm_device_dma_t *dma = dev->dma;
1002 	struct drm_buf_info *request = data;
1003 	int i;
1004 	int count;
1005 	int retcode = 0;
1006 
1007 	spin_lock(&dev->dma_lock);
1008 	++dev->buf_use;		/* Can't allocate more after this call */
1009 	spin_unlock(&dev->dma_lock);
1010 
1011 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1012 		if (dma->bufs[i].buf_count)
1013 			++count;
1014 	}
1015 
1016 	DRM_DEBUG("count = %d\n", count);
1017 
1018 	if (request->count >= count) {
1019 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1020 			if (dma->bufs[i].buf_count) {
1021 				struct drm_buf_desc from;
1022 
1023 				from.count = dma->bufs[i].buf_count;
1024 				from.size = dma->bufs[i].buf_size;
1025 				from.low_mark = dma->bufs[i].freelist.low_mark;
1026 				from.high_mark = dma->bufs[i].freelist.high_mark;
1027 
1028 				if (DRM_COPY_TO_USER(&request->list[count], &from,
1029 				    sizeof(struct drm_buf_desc)) != 0) {
1030 					retcode = EFAULT;
1031 					break;
1032 				}
1033 
1034 				DRM_DEBUG("%d %d %d %d %d\n",
1035 				    i, dma->bufs[i].buf_count,
1036 				    dma->bufs[i].buf_size,
1037 				    dma->bufs[i].freelist.low_mark,
1038 				    dma->bufs[i].freelist.high_mark);
1039 				++count;
1040 			}
1041 		}
1042 	}
1043 	request->count = count;
1044 
1045 	return retcode;
1046 }
1047 
1048 /**
1049  * Specifies a low and high water mark for buffer allocation.
1050  *
1051  * \param inode device inode.
1052  * \param file_priv DRM file private.
1053  * \param cmd command.
1054  * \param arg a pointer to a drm_buf_desc structure.
1055  * \return zero on success or a negative number on failure.
1056  *
1057  * Verifies that the size order is bounded between the admissible orders and
1058  * updates the respective drm_device_dma::bufs entry low and high water mark.
1059  *
1060  * \note This ioctl is deprecated and mostly never used.
1061  */
1062 int drm_markbufs(struct drm_device *dev, void *data,
1063 		 struct drm_file *file_priv)
1064 {
1065 	drm_device_dma_t *dma = dev->dma;
1066 	struct drm_buf_desc *request = data;
1067 	int order;
1068 
1069 	DRM_DEBUG("%d, %d, %d\n",
1070 		  request->size, request->low_mark, request->high_mark);
1071 
1072 
1073 	order = drm_order(request->size);
1074 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
1075 	    request->low_mark < 0 || request->high_mark < 0) {
1076 		return EINVAL;
1077 	}
1078 
1079 	spin_lock(&dev->dma_lock);
1080 	if (request->low_mark > dma->bufs[order].buf_count ||
1081 	    request->high_mark > dma->bufs[order].buf_count) {
1082 		spin_unlock(&dev->dma_lock);
1083 		return EINVAL;
1084 	}
1085 
1086 	dma->bufs[order].freelist.low_mark  = request->low_mark;
1087 	dma->bufs[order].freelist.high_mark = request->high_mark;
1088 	spin_unlock(&dev->dma_lock);
1089 
1090 	return 0;
1091 }
1092 
1093 /**
1094  * Unreserve the buffers in list, previously reserved using drmDMA.
1095  *
1096  * \param inode device inode.
1097  * \param file_priv DRM file private.
1098  * \param cmd command.
1099  * \param arg pointer to a drm_buf_free structure.
1100  * \return zero on success or a negative number on failure.
1101  *
1102  * Calls free_buffer() for each used buffer.
1103  * This function is primarily used for debugging.
1104  */
1105 int drm_freebufs(struct drm_device *dev, void *data,
1106 		 struct drm_file *file_priv)
1107 {
1108 	drm_device_dma_t *dma = dev->dma;
1109 	struct drm_buf_free *request = data;
1110 	int i;
1111 	int idx;
1112 	drm_buf_t *buf;
1113 	int retcode = 0;
1114 
1115 	DRM_DEBUG("%d\n", request->count);
1116 
1117 	spin_lock(&dev->dma_lock);
1118 	for (i = 0; i < request->count; i++) {
1119 		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
1120 			retcode = EFAULT;
1121 			break;
1122 		}
1123 		if (idx < 0 || idx >= dma->buf_count) {
1124 			DRM_ERROR("Index %d (of %d max)\n",
1125 			    idx, dma->buf_count - 1);
1126 			retcode = EINVAL;
1127 			break;
1128 		}
1129 		buf = dma->buflist[idx];
1130 		if (buf->file_priv != file_priv) {
1131 			DRM_ERROR("Process %d freeing buffer not owned\n",
1132 			    DRM_CURRENTPID);
1133 			retcode = EINVAL;
1134 			break;
1135 		}
1136 		drm_free_buffer(dev, buf);
1137 	}
1138 	spin_unlock(&dev->dma_lock);
1139 
1140 	return retcode;
1141 }
1142 
1143 /**
1144  * Maps all of the DMA buffers into client-virtual space (ioctl).
1145  *
1146  * \param inode device inode.
1147  * \param file_priv DRM file private.
1148  * \param cmd command.
1149  * \param arg pointer to a drm_buf_map structure.
1150  * \return zero on success or a negative number on failure.
1151  *
1152  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1153  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1154  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1155  * drm_mmap_dma().
1156  */
1157 int drm_mapbufs(struct drm_device *dev, void *data,
1158 	        struct drm_file *file_priv)
1159 {
1160 	drm_device_dma_t *dma = dev->dma;
1161 	int retcode = 0;
1162 	const int zero = 0;
1163 	vm_offset_t address;
1164 	struct vmspace *vms;
1165 	vm_ooffset_t foff;
1166 	vm_size_t size;
1167 	vm_offset_t vaddr;
1168 	struct drm_buf_map *request = data;
1169 	int i;
1170 
1171 	vms = DRM_CURPROC->td_proc->p_vmspace;
1172 
1173 	spin_lock(&dev->dma_lock);
1174 	dev->buf_use++;		/* Can't allocate more after this call */
1175 	spin_unlock(&dev->dma_lock);
1176 
1177 	if (request->count < dma->buf_count)
1178 		goto done;
1179 
1180 	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
1181 	    (drm_core_check_feature(dev, DRIVER_SG) &&
1182 	    (dma->flags & _DRM_DMA_USE_SG))) {
1183 		drm_local_map_t *map = dev->agp_buffer_map;
1184 
1185 		if (map == NULL) {
1186 			retcode = EINVAL;
1187 			goto done;
1188 		}
1189 		size = round_page(map->size);
1190 		foff = (unsigned long)map->handle;
1191 	} else {
1192 		size = round_page(dma->byte_count);
1193 		foff = 0;
1194 	}
1195 
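	/*
	 * The hint below places the mapping above the process data segment:
	 * vm_daddr is the start of the data segment and MAXDSIZ its maximum
	 * size, so the chosen region cannot collide with heap growth.
	 * vm_mmap() treats this address only as a hint (no MAP_FIXED here).
	 */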
1196 	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1197 	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1198 	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
1199 	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
1200 	if (retcode)
1201 		goto done;
1202 
1203 	request->virtual = (void *)vaddr;
1204 
1205 	for (i = 0; i < dma->buf_count; i++) {
1206 		if (DRM_COPY_TO_USER(&request->list[i].idx,
1207 		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
1208 			retcode = EFAULT;
1209 			goto done;
1210 		}
1211 		if (DRM_COPY_TO_USER(&request->list[i].total,
1212 		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
1213 			retcode = EFAULT;
1214 			goto done;
1215 		}
1216 		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
1217 		    sizeof(zero))) {
1218 			retcode = EFAULT;
1219 			goto done;
1220 		}
1221 		address = vaddr + dma->buflist[i]->offset; /* *** */
1222 		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
1223 		    sizeof(address))) {
1224 			retcode = EFAULT;
1225 			goto done;
1226 		}
1227 	}
1228 
1229  done:
1230 	request->count = dma->buf_count;
1231 
1232 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1233 
1234 	return retcode;
1235 }
1236 
1237 /**
1238  * Compute size order.  Returns the exponent of the smallest power of two
1239  * that is greater than or equal to the given number.
1240  *
1241  * \param size size.
1242  * \return order.
1243  *
1244  * \todo Can be made faster.
1245  */
1246 int drm_order(unsigned long size)
1247 {
1248 	int order;
1249 	unsigned long tmp;
1250 
1251 	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1252 
1253 	if (size & (size - 1))
1254 		++order;
1255 
1256 	return order;
1257 }
1258 EXPORT_SYMBOL(drm_order);
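/*
 * Example values (illustrative): drm_order(1) == 0, drm_order(4096) == 12
 * and drm_order(4097) == 13; in general drm_order(s) is the smallest n with
 * (1UL << n) >= s.  A shorter sketch, assuming an fls()-style find-last-set
 * helper is available, would be
 *
 *	order = fls(size - 1);
 *
 * for size >= 1, but the loop above is kept for clarity.
 */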
1259