xref: /dragonfly/sys/dev/drm/drm_bufs.c (revision 0ca59c34)
1 /*
2  * Legacy: Generic DRM Buffer Management
3  *
4  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6  * All Rights Reserved.
7  *
8  * Author: Rickard E. (Rik) Faith <faith@valinux.com>
9  * Author: Gareth Hughes <gareth@valinux.com>
10  *
11  * Permission is hereby granted, free of charge, to any person obtaining a
12  * copy of this software and associated documentation files (the "Software"),
13  * to deal in the Software without restriction, including without limitation
14  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15  * and/or sell copies of the Software, and to permit persons to whom the
16  * Software is furnished to do so, subject to the following conditions:
17  *
18  * The above copyright notice and this permission notice (including the next
19  * paragraph) shall be included in all copies or substantial portions of the
20  * Software.
21  *
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
25  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28  * OTHER DEALINGS IN THE SOFTWARE.
29  */
30 
31 #include <sys/conf.h>
32 #include <bus/pci/pcireg.h>
33 #include <linux/types.h>
34 #include <linux/export.h>
35 #include <drm/drmP.h>
36 #include "drm_legacy.h"
37 
38 int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
39 		      unsigned int size, enum drm_map_type type,
40 		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
41 {
42 	struct drm_local_map *map;
43 	struct drm_map_list *entry = NULL;
44 	drm_dma_handle_t *dmah;
45 
46 	/* Allocate a new map structure, fill it in, and do any type-specific
47 	 * initialization necessary.
48 	 */
49 	map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_WAITOK | M_NULLOK);
50 	if (!map) {
51 		return -ENOMEM;
52 	}
53 
54 	map->offset = offset;
55 	map->size = size;
56 	map->type = type;
57 	map->flags = flags;
58 
59 	/* Only allow shared memory to be removable since we only keep enough
60 	 * bookkeeping information about shared memory to allow for removal
61 	 * when processes fork.
62 	 */
63 	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
64 		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
65 		drm_free(map, M_DRM);
66 		return -EINVAL;
67 	}
68 	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
69 		DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
70 		    (uintmax_t)offset, size);
71 		drm_free(map, M_DRM);
72 		return -EINVAL;
73 	}
74 	if (offset + size < offset) {
75 		DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
76 		    (uintmax_t)offset, size);
77 		drm_free(map, M_DRM);
78 		return -EINVAL;
79 	}
80 
81 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
82 		  (unsigned long long)map->offset, map->size, map->type);
83 
84 	/* Check if this is just another version of a kernel-allocated map, and
85 	 * just hand that back if so.
86 	 */
87 	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
88 	    type == _DRM_SHM) {
89 		list_for_each_entry(entry, &dev->maplist, head) {
90 			if (entry->map->type == type && (entry->map->offset == offset ||
91 			    (entry->map->type == _DRM_SHM &&
92 			    entry->map->flags == _DRM_CONTAINS_LOCK))) {
93 				entry->map->size = size;
94 				DRM_DEBUG("Found kernel map %d\n", type);
95 				goto done;
96 			}
97 		}
98 	}
99 
100 	switch (map->type) {
101 	case _DRM_REGISTERS:
102 		map->handle = drm_ioremap(dev, map);
103 		if (!(map->flags & _DRM_WRITE_COMBINING))
104 			break;
105 		/* FALLTHROUGH */
106 	case _DRM_FRAME_BUFFER:
107 		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
108 			map->mtrr = 1;
109 		break;
110 	case _DRM_SHM:
111 		map->handle = kmalloc(map->size, M_DRM, M_WAITOK | M_NULLOK);
112 		DRM_DEBUG("%lu %d %p\n",
113 			  map->size, order_base_2(map->size), map->handle);
114 		if (!map->handle) {
115 			drm_free(map, M_DRM);
116 			return -ENOMEM;
117 		}
118 		map->offset = (unsigned long)map->handle;
119 		if (map->flags & _DRM_CONTAINS_LOCK) {
120 			/* Prevent a 2nd X Server from creating a 2nd lock */
121 			DRM_LOCK(dev);
122 			if (dev->lock.hw_lock != NULL) {
123 				DRM_UNLOCK(dev);
124 				drm_free(map->handle, M_DRM);
125 				drm_free(map, M_DRM);
126 				return -EBUSY;
127 			}
128 			dev->lock.hw_lock = map->handle; /* Pointer to lock */
129 			DRM_UNLOCK(dev);
130 		}
131 		break;
132 	case _DRM_AGP:
133 		/*valid = 0;*/
134 		/* In some cases (i810 driver), user space may have already
135 		 * added the AGP base itself, because dev->agp->base previously
136 		 * only got set during AGP enable.  So, only add the base
137 		 * address if the map's offset isn't already within the
138 		 * aperture.
139 		 */
140 		if (map->offset < dev->agp->base ||
141 		    map->offset > dev->agp->base +
142 		    dev->agp->agp_info.ai_aperture_size - 1) {
143 			map->offset += dev->agp->base;
144 		}
145 		map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
146 		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
147 			if ((map->offset >= entry->bound) &&
148 			    (map->offset + map->size <=
149 			    entry->bound + entry->pages * PAGE_SIZE)) {
150 				valid = 1;
151 				break;
152 			}
153 		}
154 		if (!valid) {
155 			drm_free(map, M_DRM);
156 			return -EACCES;
157 		}*/
158 		break;
159 	case _DRM_SCATTER_GATHER:
160 		if (!dev->sg) {
161 			drm_free(map, M_DRM);
162 			return -EINVAL;
163 		}
164 		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
165 		map->offset = dev->sg->vaddr + offset;
166 		break;
167 	case _DRM_CONSISTENT:
168 	/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
169 		 * As we're limiting the address to 2^32-1 (or less),
170 		 * casting it down to 32 bits is no problem, but we
171 		 * need to point to a 64bit variable first. */
172 		dmah = drm_pci_alloc(dev, map->size, map->size);
173 		if (!dmah) {
174 			kfree(map);
175 			return -ENOMEM;
176 		}
177 		map->handle = dmah->vaddr;
178 		map->offset = dmah->busaddr;
179 		break;
180 	default:
181 		DRM_ERROR("Bad map type %d\n", map->type);
182 		drm_free(map, M_DRM);
183 		return -EINVAL;
184 	}
185 
186 	entry = kmalloc(sizeof(*entry), M_DRM, M_WAITOK | M_ZERO);
	entry->map = map;
	list_add(&entry->head, &dev->maplist);
187 
188 done:
189 	/* Jumped to, with lock held, when a kernel map is found. */
190 
191 	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
192 	    map->size);
193 
194 	*map_ptr = map;
195 
196 	return 0;
197 }
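/*
 * Illustrative only, not part of this file: a minimal sketch of how a
 * hypothetical legacy (UMS) driver might call drm_legacy_addmap() from its
 * load hook to publish a register BAR and a lock-bearing SAREA.  The foo_*
 * names and the base/size values are placeholders, not a real device layout.
 *
 *	static int foo_driver_load(struct drm_device *dev, unsigned long flags)
 *	{
 *		struct drm_local_map *regs, *sarea;
 *		int ret;
 *
 *		ret = drm_legacy_addmap(dev, foo_mmio_base, foo_mmio_size,
 *					_DRM_REGISTERS, 0, &regs);
 *		if (ret != 0)
 *			return ret;
 *
 *		return drm_legacy_addmap(dev, 0, PAGE_SIZE, _DRM_SHM,
 *					 _DRM_CONTAINS_LOCK, &sarea);
 *	}
 */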
198 
199 /**
200  * Ioctl to specify a range of memory that is available for mapping by a
201  * non-root process.
202  *
203  * \param inode device inode.
204  * \param file_priv DRM file private.
205  * \param cmd command.
206  * \param arg pointer to a drm_map structure.
207  * \return zero on success or a negative value on error.
208  *
209  */
210 int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
211 			    struct drm_file *file_priv)
212 {
213 	struct drm_map *request = data;
214 	drm_local_map_t *map;
215 	int err;
216 
217 	if (!(dev->flags & (FREAD|FWRITE)))
218 		return -EACCES; /* Require read/write */
219 
220 	if (!capable(CAP_SYS_ADMIN) && request->type != _DRM_AGP)
221 		return -EACCES;
222 
223 	DRM_LOCK(dev);
224 	err = drm_legacy_addmap(dev, request->offset, request->size, request->type,
225 	    request->flags, &map);
226 	DRM_UNLOCK(dev);
227 	if (err != 0)
228 		return err;
229 
230 	request->offset = map->offset;
231 	request->size = map->size;
232 	request->type = map->type;
233 	request->flags = map->flags;
234 	request->mtrr   = map->mtrr;
235 	request->handle = (void *)map->handle;
236 
237 	return 0;
238 }
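/*
 * Illustrative only: a rough userspace sketch of driving the ioctl above via
 * the definitions in libdrm's drm.h.  "fd" is assumed to be a DRM device
 * node opened read/write by a CAP_SYS_ADMIN process, and the offset/size are
 * placeholder values that must be page aligned.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm.h>
 *
 *	struct drm_map req = {
 *		.offset = 0xd0000000,		// hypothetical MMIO base
 *		.size   = 0x4000,
 *		.type   = _DRM_REGISTERS,
 *		.flags  = 0,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &req) == 0)
 *		;	// req.handle now identifies the new mapping
 */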
239 
240 /**
241  * Remove a map private from the list and deallocate resources if the mapping
242  * isn't in use.
243  *
244  * Searches for the map on drm_device::maplist, removes it from the list,
245  * checks whether it is still in use, and frees any associated resources
246  * (such as MTRRs) if it is not.
247  *
248  * \sa drm_legacy_addmap
249  */
250 int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
251 {
252 	struct drm_map_list *r_list = NULL, *list_t;
253 	drm_dma_handle_t dmah;
254 	int found = 0;
255 
256 	/* Find the list entry for the map and remove it */
257 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
258 		if (r_list->map == map) {
259 			list_del(&r_list->head);
260 			kfree(r_list);
261 			found = 1;
262 			break;
263 		}
264 	}
265 
266 	if (!found)
267 		return -EINVAL;
268 
269 	switch (map->type) {
270 	case _DRM_REGISTERS:
271 		drm_ioremapfree(map);
272 		/* FALLTHROUGH */
273 	case _DRM_FRAME_BUFFER:
274 		if (map->mtrr) {
275 			int __unused retcode;
276 
277 			retcode = drm_mtrr_del(0, map->offset, map->size,
278 			    DRM_MTRR_WC);
279 			DRM_DEBUG("mtrr_del = %d\n", retcode);
280 		}
281 		break;
282 	case _DRM_SHM:
283 		drm_free(map->handle, M_DRM);
284 		break;
285 	case _DRM_AGP:
286 	case _DRM_SCATTER_GATHER:
287 		break;
288 	case _DRM_CONSISTENT:
289 		dmah.vaddr = map->handle;
290 		dmah.busaddr = map->offset;
291 		dmah.size = map->size;
292 		__drm_legacy_pci_free(dev, &dmah);
293 		break;
294 	}
295 	kfree(map);
296 
297 	return 0;
298 }
299 
300 int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
301 {
302 	int ret;
303 
304 	mutex_lock(&dev->struct_mutex);
305 	ret = drm_legacy_rmmap_locked(dev, map);
306 	mutex_unlock(&dev->struct_mutex);
307 
308 	return ret;
309 }
310 EXPORT_SYMBOL(drm_legacy_rmmap);
311 
312 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
313  * the last close of the device, and this is necessary for cleanup when things
314  * exit uncleanly.  Therefore, having userland manually remove mappings seems
315  * like a pointless exercise since they're going away anyway.
316  *
317  * One use case might be after addmap is allowed for normal users for SHM and
318  * gets used by drivers that the server doesn't need to care about.  This seems
319  * unlikely.
320  *
321  * \param inode device inode.
322  * \param file_priv DRM file private.
323  * \param cmd command.
324  * \param arg pointer to a struct drm_map structure.
325  * \return zero on success or a negative value on error.
326  */
327 int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
328 			   struct drm_file *file_priv)
329 {
330 	struct drm_map *request = data;
331 	struct drm_local_map *map = NULL;
332 	struct drm_map_list *r_list;
333 
334 	DRM_LOCK(dev);
335 	list_for_each_entry(r_list, &dev->maplist, head) {
336 		if (r_list->map &&
337 		    r_list->user_token == (unsigned long)request->handle &&
338 		    r_list->map->flags & _DRM_REMOVABLE) {
339 			map = r_list->map;
340 			break;
341 		}
342 	}
343 
344 	/* List has wrapped around to the head pointer, or it's empty and we
345 	 * didn't find anything.
346 	 */
347 	if (list_empty(&dev->maplist) || !map) {
348 		DRM_UNLOCK(dev);
349 		return -EINVAL;
350 	}
351 
352 	/* Register and framebuffer maps are permanent */
353 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
354 		DRM_UNLOCK(dev);
355 		return 0;
356 	}
357 
358 	drm_legacy_rmmap(dev, map);
359 
360 	DRM_UNLOCK(dev);
361 
362 	return 0;
363 }
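/*
 * Illustrative only: removing a removable map from userspace, reusing the
 * includes from the earlier sketch.  "handle" is assumed to be the map handle
 * previously returned by DRM_IOCTL_ADD_MAP; register and framebuffer maps are
 * silently left in place, as noted above.
 *
 *	struct drm_map req = { .handle = handle };
 *	(void)ioctl(fd, DRM_IOCTL_RM_MAP, &req);
 */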
364 
365 /**
366  * Cleanup after an error on one of the addbufs() functions.
367  *
368  * \param dev DRM device.
369  * \param entry buffer entry where the error occurred.
370  *
371  * Frees any pages and buffers associated with the given entry.
372  */
373 static void drm_cleanup_buf_error(struct drm_device * dev,
374 				  struct drm_buf_entry * entry)
375 {
376 	int i;
377 
378 	if (entry->seg_count) {
379 		for (i = 0; i < entry->seg_count; i++) {
380 			drm_pci_free(dev, entry->seglist[i]);
381 		}
382 		drm_free(entry->seglist, M_DRM);
383 
384 		entry->seg_count = 0;
385 	}
386 
387 	if (entry->buf_count) {
388 		for (i = 0; i < entry->buf_count; i++) {
389 			drm_free(entry->buflist[i].dev_private, M_DRM);
390 		}
391 		drm_free(entry->buflist, M_DRM);
392 
393 		entry->buf_count = 0;
394 	}
395 }
396 
397 static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
398 {
399 	drm_device_dma_t *dma = dev->dma;
400 	drm_buf_entry_t *entry;
401 	/*drm_agp_mem_t *agp_entry;
402 	int valid*/
403 	drm_buf_t *buf;
404 	unsigned long offset;
405 	unsigned long agp_offset;
406 	int count;
407 	int order;
408 	int size;
409 	int alignment;
410 	int page_order;
411 	int total;
412 	int byte_count;
413 	int i;
414 	drm_buf_t **temp_buflist;
415 
416 	count = request->count;
417 	order = order_base_2(request->size);
418 	size = 1 << order;
419 
420 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
421 	    ? round_page(size) : size;
422 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
423 	total = PAGE_SIZE << page_order;
424 
425 	byte_count = 0;
426 	agp_offset = dev->agp->base + request->agp_start;
427 
428 	DRM_DEBUG("count:      %d\n",  count);
429 	DRM_DEBUG("order:      %d\n",  order);
430 	DRM_DEBUG("size:       %d\n",  size);
431 	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
432 	DRM_DEBUG("alignment:  %d\n",  alignment);
433 	DRM_DEBUG("page_order: %d\n",  page_order);
434 	DRM_DEBUG("total:      %d\n",  total);
435 
436 	/* Make sure buffers are located in AGP memory that we own */
437 	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
438 	 * memory.  Safe to ignore for now because these ioctls are still
439 	 * root-only.
440 	 */
441 	/*valid = 0;
442 	for (agp_entry = dev->agp->memory; agp_entry;
443 	    agp_entry = agp_entry->next) {
444 		if ((agp_offset >= agp_entry->bound) &&
445 		    (agp_offset + total * count <=
446 		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
447 			valid = 1;
448 			break;
449 		}
450 	}
451 	if (!valid) {
452 		DRM_DEBUG("zone invalid\n");
453 		return -EINVAL;
454 	}*/
455 
456 	entry = &dma->bufs[order];
457 
458 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
459 				 M_WAITOK | M_NULLOK | M_ZERO);
460 	if (!entry->buflist) {
461 		return -ENOMEM;
462 	}
463 
464 	entry->buf_size = size;
465 	entry->page_order = page_order;
466 
467 	offset = 0;
468 
469 	while (entry->buf_count < count) {
470 		buf          = &entry->buflist[entry->buf_count];
471 		buf->idx     = dma->buf_count + entry->buf_count;
472 		buf->total   = alignment;
473 		buf->order   = order;
474 		buf->used    = 0;
475 
476 		buf->offset  = (dma->byte_count + offset);
477 		buf->bus_address = agp_offset + offset;
478 		buf->address = (void *)(agp_offset + offset);
479 		buf->next    = NULL;
480 		buf->pending = 0;
481 		buf->file_priv = NULL;
482 
483 		buf->dev_priv_size = dev->driver->dev_priv_size;
484 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
485 					   M_WAITOK | M_NULLOK | M_ZERO);
486 		if (buf->dev_private == NULL) {
487 			/* Set count correctly so we free the proper amount. */
488 			entry->buf_count = count;
489 			drm_cleanup_buf_error(dev, entry);
490 			return -ENOMEM;
491 		}
492 
493 		offset += alignment;
494 		entry->buf_count++;
495 		byte_count += PAGE_SIZE << page_order;
496 	}
497 
498 	DRM_DEBUG("byte_count: %d\n", byte_count);
499 
500 	temp_buflist = krealloc(dma->buflist,
501 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
502 	    M_DRM, M_WAITOK | M_NULLOK);
503 	if (temp_buflist == NULL) {
504 		/* Free the entry because it isn't valid */
505 		drm_cleanup_buf_error(dev, entry);
506 		return -ENOMEM;
507 	}
508 	dma->buflist = temp_buflist;
509 
510 	for (i = 0; i < entry->buf_count; i++) {
511 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
512 	}
513 
514 	dma->buf_count += entry->buf_count;
515 	dma->byte_count += byte_count;
516 
517 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
518 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
519 
520 	request->count = entry->buf_count;
521 	request->size = size;
522 
523 	dma->flags = _DRM_DMA_USE_AGP;
524 
525 	return 0;
526 }
527 
528 static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
529 {
530 	drm_device_dma_t *dma = dev->dma;
531 	int count;
532 	int order;
533 	int size;
534 	int total;
535 	int page_order;
536 	drm_buf_entry_t *entry;
537 	drm_dma_handle_t *dmah;
538 	drm_buf_t *buf;
539 	int alignment;
540 	unsigned long offset;
541 	int i;
542 	int byte_count;
543 	int page_count;
544 	unsigned long *temp_pagelist;
545 	drm_buf_t **temp_buflist;
546 
547 	count = request->count;
548 	order = order_base_2(request->size);
549 	size = 1 << order;
550 
551 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
552 	    request->count, request->size, size, order);
553 
554 	alignment = (request->flags & _DRM_PAGE_ALIGN)
555 	    ? round_page(size) : size;
556 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
557 	total = PAGE_SIZE << page_order;
558 
559 	entry = &dma->bufs[order];
560 
561 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
562 				 M_WAITOK | M_NULLOK | M_ZERO);
563 	entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
564 				 M_WAITOK | M_NULLOK | M_ZERO);
565 
566 	/* Keep the original pagelist until we know all the allocations
567 	 * have succeeded
568 	 */
569 	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
570 				sizeof(*dma->pagelist),
571 				M_DRM, M_WAITOK | M_NULLOK);
572 
573 	if (entry->buflist == NULL || entry->seglist == NULL ||
574 	    temp_pagelist == NULL) {
575 		drm_free(temp_pagelist, M_DRM);
576 		drm_free(entry->seglist, M_DRM);
577 		drm_free(entry->buflist, M_DRM);
578 		return -ENOMEM;
579 	}
580 
581 	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
582 	    sizeof(*dma->pagelist));
583 
584 	DRM_DEBUG("pagelist: %d entries\n",
585 	    dma->page_count + (count << page_order));
586 
587 	entry->buf_size	= size;
588 	entry->page_order = page_order;
589 	byte_count = 0;
590 	page_count = 0;
591 
592 	while (entry->buf_count < count) {
593 		spin_unlock(&dev->dma_lock);
594 		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
595 		spin_lock(&dev->dma_lock);
596 
597 		if (!dmah) {
598 			/* Set count correctly so we free the proper amount. */
599 			entry->buf_count = count;
600 			entry->seg_count = count;
601 			drm_cleanup_buf_error(dev, entry);
602 			drm_free(temp_pagelist, M_DRM);
603 			return -ENOMEM;
604 		}
605 
606 		entry->seglist[entry->seg_count++] = dmah;
607 		for (i = 0; i < (1 << page_order); i++) {
608 			DRM_DEBUG("page %d @ 0x%08lx\n",
609 				  dma->page_count + page_count,
610 				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
611 			temp_pagelist[dma->page_count + page_count++]
612 				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
613 		}
614 		for (offset = 0;
615 		    offset + size <= total && entry->buf_count < count;
616 		    offset += alignment, ++entry->buf_count) {
617 			buf	     = &entry->buflist[entry->buf_count];
618 			buf->idx     = dma->buf_count + entry->buf_count;
619 			buf->total   = alignment;
620 			buf->order   = order;
621 			buf->used    = 0;
622 			buf->offset  = (dma->byte_count + byte_count + offset);
623 			buf->address = ((char *)dmah->vaddr + offset);
624 			buf->bus_address = dmah->busaddr + offset;
625 			buf->next    = NULL;
626 			buf->pending = 0;
627 			buf->file_priv = NULL;
628 
629 			buf->dev_priv_size = dev->driver->dev_priv_size;
630 			buf->dev_private = kmalloc(buf->dev_priv_size,
631 						   M_DRM,
632 						   M_WAITOK | M_NULLOK |
633 						    M_ZERO);
634 			if (buf->dev_private == NULL) {
635 				/* Set count correctly so we free the proper amount. */
636 				entry->buf_count = count;
637 				entry->seg_count = count;
638 				drm_cleanup_buf_error(dev, entry);
639 				drm_free(temp_pagelist, M_DRM);
640 				return -ENOMEM;
641 			}
642 
643 			DRM_DEBUG("buffer %d @ %p\n",
644 			    entry->buf_count, buf->address);
645 		}
646 		byte_count += PAGE_SIZE << page_order;
647 	}
648 
649 	temp_buflist = krealloc(dma->buflist,
650 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
651 	    M_DRM, M_WAITOK | M_NULLOK);
652 	if (temp_buflist == NULL) {
653 		/* Free the entry because it isn't valid */
654 		drm_cleanup_buf_error(dev, entry);
655 		drm_free(temp_pagelist, M_DRM);
656 		return -ENOMEM;
657 	}
658 	dma->buflist = temp_buflist;
659 
660 	for (i = 0; i < entry->buf_count; i++) {
661 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
662 	}
663 
664 	/* No allocations failed, so now we can replace the original pagelist
665 	 * with the new one.
666 	 */
667 	drm_free(dma->pagelist, M_DRM);
668 	dma->pagelist = temp_pagelist;
669 
670 	dma->buf_count += entry->buf_count;
671 	dma->seg_count += entry->seg_count;
672 	dma->page_count += entry->seg_count << page_order;
673 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
674 
675 	request->count = entry->buf_count;
676 	request->size = size;
677 
678 	return 0;
679 
680 }
681 
682 static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
683 {
684 	drm_device_dma_t *dma = dev->dma;
685 	drm_buf_entry_t *entry;
686 	drm_buf_t *buf;
687 	unsigned long offset;
688 	unsigned long agp_offset;
689 	int count;
690 	int order;
691 	int size;
692 	int alignment;
693 	int page_order;
694 	int total;
695 	int byte_count;
696 	int i;
697 	drm_buf_t **temp_buflist;
698 
699 	count = request->count;
700 	order = order_base_2(request->size);
701 	size = 1 << order;
702 
703 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
704 	    ? round_page(size) : size;
705 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
706 	total = PAGE_SIZE << page_order;
707 
708 	byte_count = 0;
709 	agp_offset = request->agp_start;
710 
711 	DRM_DEBUG("count:      %d\n",  count);
712 	DRM_DEBUG("order:      %d\n",  order);
713 	DRM_DEBUG("size:       %d\n",  size);
714 	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
715 	DRM_DEBUG("alignment:  %d\n",  alignment);
716 	DRM_DEBUG("page_order: %d\n",  page_order);
717 	DRM_DEBUG("total:      %d\n",  total);
718 
719 	entry = &dma->bufs[order];
720 
721 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
722 				 M_WAITOK | M_NULLOK | M_ZERO);
723 	if (entry->buflist == NULL)
724 		return -ENOMEM;
725 
726 	entry->buf_size = size;
727 	entry->page_order = page_order;
728 
729 	offset = 0;
730 
731 	while (entry->buf_count < count) {
732 		buf          = &entry->buflist[entry->buf_count];
733 		buf->idx     = dma->buf_count + entry->buf_count;
734 		buf->total   = alignment;
735 		buf->order   = order;
736 		buf->used    = 0;
737 
738 		buf->offset  = (dma->byte_count + offset);
739 		buf->bus_address = agp_offset + offset;
740 		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
741 		buf->next    = NULL;
742 		buf->pending = 0;
743 		buf->file_priv = NULL;
744 
745 		buf->dev_priv_size = dev->driver->dev_priv_size;
746 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
747 					   M_WAITOK | M_NULLOK | M_ZERO);
748 		if (buf->dev_private == NULL) {
749 			/* Set count correctly so we free the proper amount. */
750 			entry->buf_count = count;
751 			drm_cleanup_buf_error(dev, entry);
752 			return -ENOMEM;
753 		}
754 
755 		DRM_DEBUG("buffer %d @ %p\n",
756 		    entry->buf_count, buf->address);
757 
758 		offset += alignment;
759 		entry->buf_count++;
760 		byte_count += PAGE_SIZE << page_order;
761 	}
762 
763 	DRM_DEBUG("byte_count: %d\n", byte_count);
764 
765 	temp_buflist = krealloc(dma->buflist,
766 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
767 	    M_DRM, M_WAITOK | M_NULLOK);
768 	if (temp_buflist == NULL) {
769 		/* Free the entry because it isn't valid */
770 		drm_cleanup_buf_error(dev, entry);
771 		return -ENOMEM;
772 	}
773 	dma->buflist = temp_buflist;
774 
775 	for (i = 0; i < entry->buf_count; i++) {
776 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
777 	}
778 
779 	dma->buf_count += entry->buf_count;
780 	dma->byte_count += byte_count;
781 
782 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
783 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
784 
785 	request->count = entry->buf_count;
786 	request->size = size;
787 
788 	dma->flags = _DRM_DMA_USE_SG;
789 
790 	return 0;
791 }
792 
793 /**
794  * Add AGP buffers for DMA transfers.
795  *
796  * \param dev struct drm_device to which the buffers are to be added.
797  * \param request pointer to a struct drm_buf_desc describing the request.
798  * \return zero on success or a negative number on failure.
799  *
800  * After some sanity checks, creates a drm_buf structure for each buffer and
801  * reallocates the buffer list of the same size order to accommodate the new
802  * buffers.
803  */
804 int drm_legacy_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
805 {
806 	int order, ret;
807 
808 	if (request->count < 0 || request->count > 4096)
809 		return -EINVAL;
810 
811 	order = order_base_2(request->size);
812 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
813 		return -EINVAL;
814 
815 	spin_lock(&dev->dma_lock);
816 
817 	/* No more allocations after first buffer-using ioctl. */
818 	if (dev->buf_use != 0) {
819 		spin_unlock(&dev->dma_lock);
820 		return -EBUSY;
821 	}
822 	/* No more than one allocation per order */
823 	if (dev->dma->bufs[order].buf_count != 0) {
824 		spin_unlock(&dev->dma_lock);
825 		return -ENOMEM;
826 	}
827 
828 	ret = drm_do_addbufs_agp(dev, request);
829 
830 	spin_unlock(&dev->dma_lock);
831 
832 	return ret;
833 }
834 
835 static int drm_legacy_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
836 {
837 	int order, ret;
838 
839 	if (!capable(CAP_SYS_ADMIN))
840 		return -EACCES;
841 
842 	if (request->count < 0 || request->count > 4096)
843 		return -EINVAL;
844 
845 	order = order_base_2(request->size);
846 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
847 		return -EINVAL;
848 
849 	spin_lock(&dev->dma_lock);
850 
851 	/* No more allocations after first buffer-using ioctl. */
852 	if (dev->buf_use != 0) {
853 		spin_unlock(&dev->dma_lock);
854 		return -EBUSY;
855 	}
856 	/* No more than one allocation per order */
857 	if (dev->dma->bufs[order].buf_count != 0) {
858 		spin_unlock(&dev->dma_lock);
859 		return -ENOMEM;
860 	}
861 
862 	ret = drm_do_addbufs_sg(dev, request);
863 
864 	spin_unlock(&dev->dma_lock);
865 
866 	return ret;
867 }
868 
869 int drm_legacy_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
870 {
871 	int order, ret;
872 
873 	if (!capable(CAP_SYS_ADMIN))
874 		return -EACCES;
875 
876 	if (request->count < 0 || request->count > 4096)
877 		return -EINVAL;
878 
879 	order = order_base_2(request->size);
880 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
881 		return -EINVAL;
882 
883 	spin_lock(&dev->dma_lock);
884 
885 	/* No more allocations after first buffer-using ioctl. */
886 	if (dev->buf_use != 0) {
887 		spin_unlock(&dev->dma_lock);
888 		return -EBUSY;
889 	}
890 	/* No more than one allocation per order */
891 	if (dev->dma->bufs[order].buf_count != 0) {
892 		spin_unlock(&dev->dma_lock);
893 		return -ENOMEM;
894 	}
895 
896 	ret = drm_do_addbufs_pci(dev, request);
897 
898 	spin_unlock(&dev->dma_lock);
899 
900 	return ret;
901 }
902 
903 /**
904  * Add buffers for DMA transfers (ioctl).
905  *
906  * \param inode device inode.
907  * \param file_priv DRM file private.
908  * \param cmd command.
909  * \param arg pointer to a struct drm_buf_desc request.
910  * \return zero on success or a negative number on failure.
911  *
912  * According to the memory type specified in drm_buf_desc::flags and the
913  * build options, it dispatches the call either to addbufs_agp(),
914  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
915  * PCI memory respectively.
916  */
917 int drm_legacy_addbufs(struct drm_device *dev, void *data,
918 		       struct drm_file *file_priv)
919 {
920 	struct drm_buf_desc *request = data;
921 	int err;
922 
923 	if (request->flags & _DRM_AGP_BUFFER)
924 		err = drm_legacy_addbufs_agp(dev, request);
925 	else if (request->flags & _DRM_SG_BUFFER)
926 		err = drm_legacy_addbufs_sg(dev, request);
927 	else
928 		err = drm_legacy_addbufs_pci(dev, request);
929 
930 	return err;
931 }
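/*
 * Illustrative only: a sketch of a legacy client requesting 32 page-aligned
 * 64 KiB PCI (consistent) DMA buffers through the dispatcher above, using the
 * same userspace setup as the earlier sketches.  The size is rounded up to a
 * power of two, and the count/size actually granted are written back into the
 * same structure.
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = _DRM_PAGE_ALIGN,	// neither AGP nor SG: PCI path
 *	};
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *		;	// desc.count and desc.size reflect the real allocation
 */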
932 
933 /**
934  * Get information about the buffer mappings.
935  *
936  * This was originally meant for debugging purposes, or for use by a
937  * sophisticated client library to determine how best to use the available
938  * buffers (e.g., large buffers can be used for image transfer).
939  *
940  * \param inode device inode.
941  * \param file_priv DRM file private.
942  * \param cmd command.
943  * \param arg pointer to a drm_buf_info structure.
944  * \return zero on success or a negative number on failure.
945  *
946  * Increments drm_device::buf_use while holding the drm_device::buf_lock
947  * lock, preventing allocation of more buffers after this call. Information
948  * about each requested buffer is then copied into user space.
949  */
950 int drm_legacy_infobufs(struct drm_device *dev, void *data,
951 			struct drm_file *file_priv)
952 {
953 	struct drm_device_dma *dma = dev->dma;
954 	struct drm_buf_info *request = data;
955 	int i;
956 	int count;
957 
958 	if (drm_core_check_feature(dev, DRIVER_MODESET))
959 		return -EINVAL;
960 
961 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
962 		return -EINVAL;
963 
964 	if (!dma)
965 		return -EINVAL;
966 
967 	spin_lock(&dev->buf_lock);
968 	if (atomic_read(&dev->buf_alloc)) {
969 		spin_unlock(&dev->buf_lock);
970 		return -EBUSY;
971 	}
972 	++dev->buf_use;		/* Can't allocate more after this call */
973 	spin_unlock(&dev->buf_lock);
974 
975 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
976 		if (dma->bufs[i].buf_count)
977 			++count;
978 	}
979 
980 	DRM_DEBUG("count = %d\n", count);
981 
982 	if (request->count >= count) {
983 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
984 			if (dma->bufs[i].buf_count) {
985 				struct drm_buf_desc __user *to =
986 				    &request->list[count];
987 				struct drm_buf_entry *from = &dma->bufs[i];
988 				if (copy_to_user(&to->count,
989 						 &from->buf_count,
990 						 sizeof(from->buf_count)) ||
991 				    copy_to_user(&to->size,
992 						 &from->buf_size,
993 						 sizeof(from->buf_size)) ||
994 				    copy_to_user(&to->low_mark,
995 						 &from->low_mark,
996 						 sizeof(from->low_mark)) ||
997 				    copy_to_user(&to->high_mark,
998 						 &from->high_mark,
999 						 sizeof(from->high_mark)))
1000 					return -EFAULT;
1001 
1002 				DRM_DEBUG("%d %d %d %d %d\n",
1003 					  i,
1004 					  dma->bufs[i].buf_count,
1005 					  dma->bufs[i].buf_size,
1006 					  dma->bufs[i].low_mark,
1007 					  dma->bufs[i].high_mark);
1008 				++count;
1009 			}
1010 		}
1011 	}
1012 	request->count = count;
1013 
1014 	return 0;
1015 }
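/*
 * Illustrative only: the usual two-pass pattern for the ioctl above -- one
 * call with count = 0 to learn how many buffer pools exist, then a second
 * call with room for one drm_buf_desc per pool.  Same userspace setup as the
 * earlier sketches.
 *
 *	struct drm_buf_info info = { .count = 0, .list = NULL };
 *	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) != 0)
 *		return -1;
 *	info.list = calloc(info.count, sizeof(*info.list));
 *	if (info.list == NULL || ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) != 0)
 *		return -1;
 */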
1016 
1017 /**
1018  * Specifies a low and high water mark for buffer allocation
1019  *
1020  * \param inode device inode.
1021  * \param file_priv DRM file private.
1022  * \param cmd command.
1023  * \param arg a pointer to a drm_buf_desc structure.
1024  * \return zero on success or a negative number on failure.
1025  *
1026  * Verifies that the size order is bounded between the admissible orders and
1027  * updates the respective drm_device_dma::bufs entry low and high water mark.
1028  *
1029  * \note This ioctl is deprecated and rarely, if ever, used.
1030  */
1031 int drm_legacy_markbufs(struct drm_device *dev, void *data,
1032 			struct drm_file *file_priv)
1033 {
1034 	struct drm_device_dma *dma = dev->dma;
1035 	struct drm_buf_desc *request = data;
1036 	int order;
1037 	struct drm_buf_entry *entry;
1038 
1039 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1040 		return -EINVAL;
1041 
1042 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1043 		return -EINVAL;
1044 
1045 	if (!dma)
1046 		return -EINVAL;
1047 
1048 	DRM_DEBUG("%d, %d, %d\n",
1049 		  request->size, request->low_mark, request->high_mark);
1050 	order = order_base_2(request->size);
1051 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1052 		return -EINVAL;
1053 	entry = &dma->bufs[order];
1054 
1055 	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1056 		return -EINVAL;
1057 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1058 		return -EINVAL;
1059 
1060 	entry->low_mark = request->low_mark;
1061 	entry->high_mark = request->high_mark;
1062 
1063 	return 0;
1064 }
1065 
1066 /**
1067  * Unreserve the buffers in the list, previously reserved using drmDMA.
1068  *
1069  * \param inode device inode.
1070  * \param file_priv DRM file private.
1071  * \param cmd command.
1072  * \param arg pointer to a drm_buf_free structure.
1073  * \return zero on success or a negative number on failure.
1074  *
1075  * Calls free_buffer() for each used buffer.
1076  * This function is primarily used for debugging.
1077  */
1078 int drm_legacy_freebufs(struct drm_device *dev, void *data,
1079 			struct drm_file *file_priv)
1080 {
1081 	drm_device_dma_t *dma = dev->dma;
1082 	struct drm_buf_free *request = data;
1083 	int i;
1084 	int idx;
1085 	drm_buf_t *buf;
1086 	int retcode = 0;
1087 
1088 	DRM_DEBUG("%d\n", request->count);
1089 
1090 	spin_lock(&dev->dma_lock);
1091 	for (i = 0; i < request->count; i++) {
1092 		if (copy_from_user(&idx, &request->list[i], sizeof(idx))) {
1093 			retcode = -EFAULT;
1094 			break;
1095 		}
1096 		if (idx < 0 || idx >= dma->buf_count) {
1097 			DRM_ERROR("Index %d (of %d max)\n",
1098 			    idx, dma->buf_count - 1);
1099 			retcode = -EINVAL;
1100 			break;
1101 		}
1102 		buf = dma->buflist[idx];
1103 		if (buf->file_priv != file_priv) {
1104 			DRM_ERROR("Process %d freeing buffer not owned\n",
1105 			    DRM_CURRENTPID);
1106 			retcode = -EINVAL;
1107 			break;
1108 		}
1109 		drm_legacy_free_buffer(dev, buf);
1110 	}
1111 	spin_unlock(&dev->dma_lock);
1112 
1113 	return retcode;
1114 }
1115 
1116 /**
1117  * Maps all of the DMA buffers into client-virtual space (ioctl).
1118  *
1119  * \param inode device inode.
1120  * \param file_priv DRM file private.
1121  * \param cmd command.
1122  * \param arg pointer to a drm_buf_map structure.
1123  * \return zero on success or a negative number on failure.
1124  *
1125  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1126  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1127  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1128  * drm_mmap_dma().
1129  */
1130 int drm_legacy_mapbufs(struct drm_device *dev, void *data,
1131 		       struct drm_file *file_priv)
1132 {
1133 	drm_device_dma_t *dma = dev->dma;
1134 	int retcode = 0;
1135 	const int zero = 0;
1136 	vm_offset_t address;
1137 	struct vmspace *vms;
1138 	vm_ooffset_t foff;
1139 	vm_size_t size;
1140 	vm_offset_t vaddr;
1141 	struct drm_buf_map *request = data;
1142 	int i;
1143 
1144 	vms = DRM_CURPROC->td_proc->p_vmspace;
1145 
1146 	spin_lock(&dev->dma_lock);
1147 	dev->buf_use++;		/* Can't allocate more after this call */
1148 	spin_unlock(&dev->dma_lock);
1149 
1150 	if (request->count < dma->buf_count)
1151 		goto done;
1152 
1153 	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
1154 	    (drm_core_check_feature(dev, DRIVER_SG) &&
1155 	    (dma->flags & _DRM_DMA_USE_SG))) {
1156 		drm_local_map_t *map = dev->agp_buffer_map;
1157 
1158 		if (map == NULL) {
1159 			retcode = -EINVAL;
1160 			goto done;
1161 		}
1162 		size = round_page(map->size);
1163 		foff = (unsigned long)map->handle;
1164 	} else {
1165 		size = round_page(dma->byte_count);
1166 		foff = 0;
1167 	}
1168 
1169 	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1170 	retcode = -vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1171 	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
1172 	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
1173 	if (retcode)
1174 		goto done;
1175 
1176 	request->virtual = (void *)vaddr;
1177 
1178 	for (i = 0; i < dma->buf_count; i++) {
1179 		if (copy_to_user(&request->list[i].idx,
1180 		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
1181 			retcode = -EFAULT;
1182 			goto done;
1183 		}
1184 		if (copy_to_user(&request->list[i].total,
1185 		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
1186 			retcode = -EFAULT;
1187 			goto done;
1188 		}
1189 		if (copy_to_user(&request->list[i].used, &zero,
1190 		    sizeof(zero))) {
1191 			retcode = -EFAULT;
1192 			goto done;
1193 		}
1194 		address = vaddr + dma->buflist[i]->offset; /* *** */
1195 		if (copy_to_user(&request->list[i].address, &address,
1196 		    sizeof(address))) {
1197 			retcode = -EFAULT;
1198 			goto done;
1199 		}
1200 	}
1201       done:
1202 	request->count = dma->buf_count;
1203 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1204 
1205 	return retcode;
1206 }
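/*
 * Illustrative only: mapping every DMA buffer into the client, matching the
 * contract of the ioctl above and reusing the earlier userspace setup.  The
 * caller must pass count >= the total number of buffers (assumed here to be
 * known from the addbufs/infobufs calls); on success "virtual" holds the base
 * of the new mapping and each list entry describes one buffer inside it.
 *
 *	struct drm_buf_pub *list = calloc(nbufs, sizeof(*list));
 *	struct drm_buf_map bmap = {
 *		.count = nbufs,
 *		.virtual = NULL,
 *		.list = list,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bmap) == 0)
 *		;	// list[i].address points at buffer i in the mapping
 */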
1207 
1208 int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
1209 		  struct drm_file *file_priv)
1210 {
1211 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1212 		return -EINVAL;
1213 
1214 	if (dev->driver->dma_ioctl)
1215 		return dev->driver->dma_ioctl(dev, data, file_priv);
1216 	else
1217 		return -EINVAL;
1218 }
1219 
1220 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
1221 {
1222 	struct drm_map_list *entry;
1223 
1224 	list_for_each_entry(entry, &dev->maplist, head) {
1225 		if (entry->map && entry->map->type == _DRM_SHM &&
1226 		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1227 			return entry->map;
1228 		}
1229 	}
1230 	return NULL;
1231 }
1232 EXPORT_SYMBOL(drm_legacy_getsarea);
1233