xref: /dragonfly/sys/dev/drm/drm_bufs.c (revision 5cef369f)
1 /*
2  * Legacy: Generic DRM Buffer Management
3  *
4  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6  * All Rights Reserved.
7  *
8  * Author: Rickard E. (Rik) Faith <faith@valinux.com>
9  * Author: Gareth Hughes <gareth@valinux.com>
10  *
11  * Permission is hereby granted, free of charge, to any person obtaining a
12  * copy of this software and associated documentation files (the "Software"),
13  * to deal in the Software without restriction, including without limitation
14  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15  * and/or sell copies of the Software, and to permit persons to whom the
16  * Software is furnished to do so, subject to the following conditions:
17  *
18  * The above copyright notice and this permission notice (including the next
19  * paragraph) shall be included in all copies or substantial portions of the
20  * Software.
21  *
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
25  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28  * OTHER DEALINGS IN THE SOFTWARE.
29  */
30 
31 #include <sys/conf.h>
32 #include <bus/pci/pcireg.h>
33 #include <linux/types.h>
34 #include <linux/export.h>
35 #include <drm/drmP.h>
36 #include "drm_legacy.h"
37 
38 int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
39 		      unsigned int size, enum drm_map_type type,
40 		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
41 {
42 	struct drm_local_map *map;
43 	struct drm_map_list *entry = NULL;
44 	drm_dma_handle_t *dmah;
45 
46 	/* Allocate a new map structure, fill it in, and do any type-specific
47 	 * initialization necessary.
48 	 */
49 	map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_WAITOK | M_NULLOK);
50 	if (!map) {
51 		return -ENOMEM;
52 	}
53 
54 	map->offset = offset;
55 	map->size = size;
56 	map->type = type;
57 	map->flags = flags;
58 
59 	/* Only allow shared memory to be removable since we only keep enough
60 	 * bookkeeping information about shared memory to allow for removal
61 	 * when processes fork.
62 	 */
63 	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
64 		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
65 		drm_free(map, M_DRM);
66 		return -EINVAL;
67 	}
68 	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
69 		DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
70 		    (uintmax_t)offset, size);
71 		drm_free(map, M_DRM);
72 		return -EINVAL;
73 	}
74 	if (offset + size < offset) {
75 		DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
76 		    (uintmax_t)offset, size);
77 		drm_free(map, M_DRM);
78 		return -EINVAL;
79 	}
80 
81 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
82 		  (unsigned long long)map->offset, map->size, map->type);
83 
84 	/* Check if this is just another version of a kernel-allocated map, and
85 	 * just hand that back if so.
86 	 */
87 	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
88 	    type == _DRM_SHM) {
89 		list_for_each_entry(entry, &dev->maplist, head) {
90 			if (entry->map->type == type && (entry->map->offset == offset ||
91 			    (entry->map->type == _DRM_SHM &&
92 			    entry->map->flags == _DRM_CONTAINS_LOCK))) {
93 				entry->map->size = size;
94 				DRM_DEBUG("Found kernel map %d\n", type);
95 				goto done;
96 			}
97 		}
98 	}
99 
100 	switch (map->type) {
101 	case _DRM_REGISTERS:
102 	case _DRM_FRAME_BUFFER:
103 
104 		if (map->type == _DRM_FRAME_BUFFER ||
105 		    (map->flags & _DRM_WRITE_COMBINING)) {
106 			map->mtrr =
107 				arch_phys_wc_add(map->offset, map->size);
108 		}
109 		if (map->type == _DRM_REGISTERS) {
110 			if (map->flags & _DRM_WRITE_COMBINING)
111 				map->handle = ioremap_wc(map->offset,
112 							 map->size);
113 			else
114 				map->handle = ioremap(map->offset, map->size);
115 			if (!map->handle) {
116 				kfree(map);
117 				return -ENOMEM;
118 			}
119 		}
120 
121 		break;
122 	case _DRM_SHM:
123 		map->handle = kmalloc(map->size, M_DRM, M_WAITOK | M_NULLOK);
124 		DRM_DEBUG("%lu %d %p\n",
125 			  map->size, order_base_2(map->size), map->handle);
126 		if (!map->handle) {
127 			drm_free(map, M_DRM);
128 			return -ENOMEM;
129 		}
130 		map->offset = (unsigned long)map->handle;
131 		if (map->flags & _DRM_CONTAINS_LOCK) {
132 			/* Prevent a 2nd X Server from creating a 2nd lock */
133 			DRM_LOCK(dev);
134 			if (dev->lock.hw_lock != NULL) {
135 				DRM_UNLOCK(dev);
136 				drm_free(map->handle, M_DRM);
137 				drm_free(map, M_DRM);
138 				return -EBUSY;
139 			}
140 			dev->lock.hw_lock = map->handle; /* Pointer to lock */
141 			DRM_UNLOCK(dev);
142 		}
143 		break;
144 	case _DRM_AGP:
145 		/*valid = 0;*/
146 		/* In some cases (i810 driver), user space may have already
147 		 * added the AGP base itself, because dev->agp->base previously
148 		 * only got set during AGP enable.  So, only add the base
149 		 * address if the map's offset isn't already within the
150 		 * aperture.
151 		 */
152 		if (map->offset < dev->agp->base ||
153 		    map->offset > dev->agp->base +
154 		    dev->agp->agp_info.ai_aperture_size - 1) {
155 			map->offset += dev->agp->base;
156 		}
157 		map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
158 		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
159 			if ((map->offset >= entry->bound) &&
160 			    (map->offset + map->size <=
161 			    entry->bound + entry->pages * PAGE_SIZE)) {
162 				valid = 1;
163 				break;
164 			}
165 		}
166 		if (!valid) {
167 			drm_free(map, M_DRM);
168 			return -EACCES;
169 		}*/
170 		break;
171 	case _DRM_SCATTER_GATHER:
172 		if (!dev->sg) {
173 			drm_free(map, M_DRM);
174 			return -EINVAL;
175 		}
176 		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
177 		map->offset = dev->sg->vaddr + offset;
178 		break;
179 	case _DRM_CONSISTENT:
180 		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
181 		 * As we're limiting the address to 2^32-1 (or less),
182 		 * casting it down to 32 bits is no problem, but we
183 		 * need to point to a 64bit variable first. */
184 		dmah = drm_pci_alloc(dev, map->size, map->size);
185 		if (!dmah) {
186 			kfree(map);
187 			return -ENOMEM;
188 		}
189 		map->handle = dmah->vaddr;
190 		map->offset = dmah->busaddr;
191 		break;
192 	default:
193 		DRM_ERROR("Bad map type %d\n", map->type);
194 		drm_free(map, M_DRM);
195 		return -EINVAL;
196 	}
197 
198 	list_add(&entry->head, &dev->maplist);
199 
200 done:
201 	/* Jumped to, with lock held, when a kernel map is found. */
202 
203 	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
204 	    map->size);
205 
206 	*map_ptr = map;
207 
208 	return 0;
209 }
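/*
 * Usage sketch (illustrative only, not taken from an in-tree driver): a
 * legacy driver mapping its register BAR at load time.  The names 'base'
 * and 'bar_size' are hypothetical; both must be page aligned, as enforced
 * above.
 *
 *	struct drm_local_map *regs;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, base, bar_size, _DRM_REGISTERS, 0, &regs);
 *	if (ret != 0)
 *		return ret;
 *	regs->handle now holds the ioremap()ed kernel virtual address.
 */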
210 
211 /**
212  * Ioctl to specify a range of memory that is available for mapping by a
213  * non-root process.
214  *
215  * \param inode device inode.
216  * \param file_priv DRM file private.
217  * \param cmd command.
218  * \param arg pointer to a drm_map structure.
219  * \return zero on success or a negative value on error.
220  *
221  */
222 int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
223 			    struct drm_file *file_priv)
224 {
225 	struct drm_map *request = data;
226 	drm_local_map_t *map;
227 	int err;
228 
229 	if (!(dev->flags & (FREAD|FWRITE)))
230 		return -EACCES; /* Require read/write */
231 
232 	if (!capable(CAP_SYS_ADMIN) && request->type != _DRM_AGP)
233 		return -EACCES;
234 
235 	DRM_LOCK(dev);
236 	err = drm_legacy_addmap(dev, request->offset, request->size, request->type,
237 	    request->flags, &map);
238 	DRM_UNLOCK(dev);
239 	if (err != 0)
240 		return err;
241 
242 	request->offset = map->offset;
243 	request->size = map->size;
244 	request->type = map->type;
245 	request->flags = map->flags;
246 	request->mtrr   = map->mtrr;
247 	request->handle = (void *)map->handle;
248 
249 	return 0;
250 }
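/*
 * Userspace view (sketch): the same operation driven through the ioctl, for
 * example by a DDX driver.  CAP_SYS_ADMIN is required unless the map type is
 * _DRM_AGP, as checked above; 'fd' and 'base' are hypothetical.
 *
 *	struct drm_map req = { 0 };
 *	req.offset = base;
 *	req.size   = bar_size;
 *	req.type   = _DRM_REGISTERS;
 *	req.flags  = 0;
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &req) != 0)
 *		return -errno;
 *	req.handle and req.mtrr are now filled in by the kernel (see above).
 */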
251 
252 /**
253  * Remove a map private from the list and deallocate resources if the mapping
254  * isn't in use.
255  *
256  * Searches for the map on drm_device::maplist, removes it from the list, checks
257  * whether it is still in use, and frees any associated resources (such as
258  * MTRRs) if it is not.
259  *
260  * \sa drm_legacy_addmap
261  */
262 int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
263 {
264 	struct drm_map_list *r_list = NULL, *list_t;
265 	drm_dma_handle_t dmah;
266 	int found = 0;
267 
268 	/* Find the list entry for the map and remove it */
269 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
270 		if (r_list->map == map) {
271 			list_del(&r_list->head);
272 			kfree(r_list);
273 			found = 1;
274 			break;
275 		}
276 	}
277 
278 	if (!found)
279 		return -EINVAL;
280 
281 	switch (map->type) {
282 	case _DRM_REGISTERS:
283 		drm_legacy_ioremapfree(map, dev);
284 		/* FALLTHROUGH */
285 	case _DRM_FRAME_BUFFER:
286 		arch_phys_wc_del(map->mtrr);
287 		break;
288 	case _DRM_SHM:
289 		drm_free(map->handle, M_DRM);
290 		break;
291 	case _DRM_AGP:
292 	case _DRM_SCATTER_GATHER:
293 		break;
294 	case _DRM_CONSISTENT:
295 		dmah.vaddr = map->handle;
296 		dmah.busaddr = map->offset;
297 		dmah.size = map->size;
298 		__drm_legacy_pci_free(dev, &dmah);
299 		break;
300 	}
301 	kfree(map);
302 
303 	return 0;
304 }
305 
306 int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
307 {
308 	int ret;
309 
310 	mutex_lock(&dev->struct_mutex);
311 	ret = drm_legacy_rmmap_locked(dev, map);
312 	mutex_unlock(&dev->struct_mutex);
313 
314 	return ret;
315 }
316 EXPORT_SYMBOL(drm_legacy_rmmap);
317 
318 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
319  * the last close of the device, and this is necessary for cleanup when things
320  * exit uncleanly.  Therefore, having userland manually remove mappings seems
321  * like a pointless exercise since they're going away anyway.
322  *
323  * One use case might be after addmap is allowed for normal users for SHM and
324  * gets used by drivers that the server doesn't need to care about.  This seems
325  * unlikely.
326  *
327  * \param inode device inode.
328  * \param file_priv DRM file private.
329  * \param cmd command.
330  * \param arg pointer to a struct drm_map structure.
331  * \return zero on success or a negative value on error.
332  */
333 int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
334 			   struct drm_file *file_priv)
335 {
336 	struct drm_map *request = data;
337 	struct drm_local_map *map = NULL;
338 	struct drm_map_list *r_list;
339 
340 	DRM_LOCK(dev);
341 	list_for_each_entry(r_list, &dev->maplist, head) {
342 		if (r_list->map &&
343 		    r_list->user_token == (unsigned long)request->handle &&
344 		    r_list->map->flags & _DRM_REMOVABLE) {
345 			map = r_list->map;
346 			break;
347 		}
348 	}
349 
350 	/* List has wrapped around to the head pointer, or it's empty and we
351 	 * didn't find anything.
352 	 */
353 	if (list_empty(&dev->maplist) || !map) {
354 		DRM_UNLOCK(dev);
355 		return -EINVAL;
356 	}
357 
358 	/* Register and framebuffer maps are permanent */
359 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
360 		DRM_UNLOCK(dev);
361 		return 0;
362 	}
363 
364 	drm_legacy_rmmap(dev, map);
365 
366 	DRM_UNLOCK(dev);
367 
368 	return 0;
369 }
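/*
 * Userspace view (sketch, hypothetical 'fd' and 'handle'): removing a map
 * that was created with _DRM_REMOVABLE, e.g. a _DRM_SHM area.  Register and
 * framebuffer maps are silently kept, as noted above.
 *
 *	struct drm_map req = { 0 };
 *	req.handle = handle;	the handle returned earlier by DRM_IOCTL_ADD_MAP
 *	ioctl(fd, DRM_IOCTL_RM_MAP, &req);
 */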
370 
371 /**
372  * Cleanup after an error on one of the addbufs() functions.
373  *
374  * \param dev DRM device.
375  * \param entry buffer entry where the error occurred.
376  *
377  * Frees any pages and buffers associated with the given entry.
378  */
379 static void drm_cleanup_buf_error(struct drm_device * dev,
380 				  struct drm_buf_entry * entry)
381 {
382 	int i;
383 
384 	if (entry->seg_count) {
385 		for (i = 0; i < entry->seg_count; i++) {
386 			drm_pci_free(dev, entry->seglist[i]);
387 		}
388 		drm_free(entry->seglist, M_DRM);
389 
390 		entry->seg_count = 0;
391 	}
392 
393 	if (entry->buf_count) {
394 		for (i = 0; i < entry->buf_count; i++) {
395 			drm_free(entry->buflist[i].dev_private, M_DRM);
396 		}
397 		drm_free(entry->buflist, M_DRM);
398 
399 		entry->buf_count = 0;
400 	}
401 }
402 
403 static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
404 {
405 	drm_device_dma_t *dma = dev->dma;
406 	drm_buf_entry_t *entry;
407 	/*drm_agp_mem_t *agp_entry;
408 	int valid*/
409 	drm_buf_t *buf;
410 	unsigned long offset;
411 	unsigned long agp_offset;
412 	int count;
413 	int order;
414 	int size;
415 	int alignment;
416 	int page_order;
417 	int total;
418 	int byte_count;
419 	int i;
420 	drm_buf_t **temp_buflist;
421 
422 	count = request->count;
423 	order = order_base_2(request->size);
424 	size = 1 << order;
425 
426 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
427 	    ? round_page(size) : size;
428 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
429 	total = PAGE_SIZE << page_order;
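	/*
	 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): for
	 * request->size == 2048, order is 11 and size is 2048; alignment is
	 * rounded up to 4096 when _DRM_PAGE_ALIGN is set, otherwise it stays
	 * 2048; page_order is 0 and total is one 4 KiB page.  For
	 * request->size == 65536, order is 16, page_order is 4 and total is
	 * 64 KiB.
	 */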
430 
431 	byte_count = 0;
432 	agp_offset = dev->agp->base + request->agp_start;
433 
434 	DRM_DEBUG("count:      %d\n",  count);
435 	DRM_DEBUG("order:      %d\n",  order);
436 	DRM_DEBUG("size:       %d\n",  size);
437 	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
438 	DRM_DEBUG("alignment:  %d\n",  alignment);
439 	DRM_DEBUG("page_order: %d\n",  page_order);
440 	DRM_DEBUG("total:      %d\n",  total);
441 
442 	/* Make sure buffers are located in AGP memory that we own */
443 	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
444 	 * memory.  Safe to ignore for now because these ioctls are still
445 	 * root-only.
446 	 */
447 	/*valid = 0;
448 	for (agp_entry = dev->agp->memory; agp_entry;
449 	    agp_entry = agp_entry->next) {
450 		if ((agp_offset >= agp_entry->bound) &&
451 		    (agp_offset + total * count <=
452 		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
453 			valid = 1;
454 			break;
455 		}
456 	}
457 	if (!valid) {
458 		DRM_DEBUG("zone invalid\n");
459 		return -EINVAL;
460 	}*/
461 
462 	entry = &dma->bufs[order];
463 
464 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
465 				 M_WAITOK | M_NULLOK | M_ZERO);
466 	if (!entry->buflist) {
467 		return -ENOMEM;
468 	}
469 
470 	entry->buf_size = size;
471 	entry->page_order = page_order;
472 
473 	offset = 0;
474 
475 	while (entry->buf_count < count) {
476 		buf          = &entry->buflist[entry->buf_count];
477 		buf->idx     = dma->buf_count + entry->buf_count;
478 		buf->total   = alignment;
479 		buf->order   = order;
480 		buf->used    = 0;
481 
482 		buf->offset  = (dma->byte_count + offset);
483 		buf->bus_address = agp_offset + offset;
484 		buf->address = (void *)(agp_offset + offset);
485 		buf->next    = NULL;
486 		buf->pending = 0;
487 		buf->file_priv = NULL;
488 
489 		buf->dev_priv_size = dev->driver->dev_priv_size;
490 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
491 					   M_WAITOK | M_NULLOK | M_ZERO);
492 		if (buf->dev_private == NULL) {
493 			/* Set count correctly so we free the proper amount. */
494 			entry->buf_count = count;
495 			drm_cleanup_buf_error(dev, entry);
496 			return -ENOMEM;
497 		}
498 
499 		offset += alignment;
500 		entry->buf_count++;
501 		byte_count += PAGE_SIZE << page_order;
502 	}
503 
504 	DRM_DEBUG("byte_count: %d\n", byte_count);
505 
506 	temp_buflist = krealloc(dma->buflist,
507 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
508 	    M_DRM, M_WAITOK | M_NULLOK);
509 	if (temp_buflist == NULL) {
510 		/* Free the entry because it isn't valid */
511 		drm_cleanup_buf_error(dev, entry);
512 		return -ENOMEM;
513 	}
514 	dma->buflist = temp_buflist;
515 
516 	for (i = 0; i < entry->buf_count; i++) {
517 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
518 	}
519 
520 	dma->buf_count += entry->buf_count;
521 	dma->byte_count += byte_count;
522 
523 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
524 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
525 
526 	request->count = entry->buf_count;
527 	request->size = size;
528 
529 	dma->flags = _DRM_DMA_USE_AGP;
530 
531 	return 0;
532 }
533 
534 static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
535 {
536 	drm_device_dma_t *dma = dev->dma;
537 	int count;
538 	int order;
539 	int size;
540 	int total;
541 	int page_order;
542 	drm_buf_entry_t *entry;
543 	drm_dma_handle_t *dmah;
544 	drm_buf_t *buf;
545 	int alignment;
546 	unsigned long offset;
547 	int i;
548 	int byte_count;
549 	int page_count;
550 	unsigned long *temp_pagelist;
551 	drm_buf_t **temp_buflist;
552 
553 	count = request->count;
554 	order = order_base_2(request->size);
555 	size = 1 << order;
556 
557 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
558 	    request->count, request->size, size, order);
559 
560 	alignment = (request->flags & _DRM_PAGE_ALIGN)
561 	    ? round_page(size) : size;
562 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
563 	total = PAGE_SIZE << page_order;
564 
565 	entry = &dma->bufs[order];
566 
567 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
568 				 M_WAITOK | M_NULLOK | M_ZERO);
569 	entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
570 				 M_WAITOK | M_NULLOK | M_ZERO);
571 
572 	/* Keep the original pagelist until we know all the allocations
573 	 * have succeeded
574 	 */
575 	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
576 				sizeof(*dma->pagelist),
577 				M_DRM, M_WAITOK | M_NULLOK);
578 
579 	if (entry->buflist == NULL || entry->seglist == NULL ||
580 	    temp_pagelist == NULL) {
581 		drm_free(temp_pagelist, M_DRM);
582 		drm_free(entry->seglist, M_DRM);
583 		drm_free(entry->buflist, M_DRM);
584 		return -ENOMEM;
585 	}
586 
587 	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
588 	    sizeof(*dma->pagelist));
589 
590 	DRM_DEBUG("pagelist: %d entries\n",
591 	    dma->page_count + (count << page_order));
592 
593 	entry->buf_size	= size;
594 	entry->page_order = page_order;
595 	byte_count = 0;
596 	page_count = 0;
597 
598 	while (entry->buf_count < count) {
599 		spin_unlock(&dev->dma_lock);
600 		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
601 		spin_lock(&dev->dma_lock);
602 
603 		if (!dmah) {
604 			/* Set count correctly so we free the proper amount. */
605 			entry->buf_count = count;
606 			entry->seg_count = count;
607 			drm_cleanup_buf_error(dev, entry);
608 			drm_free(temp_pagelist, M_DRM);
609 			return -ENOMEM;
610 		}
611 
612 		entry->seglist[entry->seg_count++] = dmah;
613 		for (i = 0; i < (1 << page_order); i++) {
614 			DRM_DEBUG("page %d @ 0x%08lx\n",
615 				  dma->page_count + page_count,
616 				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
617 			temp_pagelist[dma->page_count + page_count++]
618 				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
619 		}
620 		for (offset = 0;
621 		    offset + size <= total && entry->buf_count < count;
622 		    offset += alignment, ++entry->buf_count) {
623 			buf	     = &entry->buflist[entry->buf_count];
624 			buf->idx     = dma->buf_count + entry->buf_count;
625 			buf->total   = alignment;
626 			buf->order   = order;
627 			buf->used    = 0;
628 			buf->offset  = (dma->byte_count + byte_count + offset);
629 			buf->address = ((char *)dmah->vaddr + offset);
630 			buf->bus_address = dmah->busaddr + offset;
631 			buf->next    = NULL;
632 			buf->pending = 0;
633 			buf->file_priv = NULL;
634 
635 			buf->dev_priv_size = dev->driver->dev_priv_size;
636 			buf->dev_private = kmalloc(buf->dev_priv_size,
637 						   M_DRM,
638 						   M_WAITOK | M_NULLOK |
639 						    M_ZERO);
640 			if (buf->dev_private == NULL) {
641 				/* Set count correctly so we free the proper amount. */
642 				entry->buf_count = count;
643 				entry->seg_count = count;
644 				drm_cleanup_buf_error(dev, entry);
645 				drm_free(temp_pagelist, M_DRM);
646 				return -ENOMEM;
647 			}
648 
649 			DRM_DEBUG("buffer %d @ %p\n",
650 			    entry->buf_count, buf->address);
651 		}
652 		byte_count += PAGE_SIZE << page_order;
653 	}
654 
655 	temp_buflist = krealloc(dma->buflist,
656 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
657 	    M_DRM, M_WAITOK | M_NULLOK);
658 	if (temp_buflist == NULL) {
659 		/* Free the entry because it isn't valid */
660 		drm_cleanup_buf_error(dev, entry);
661 		drm_free(temp_pagelist, M_DRM);
662 		return -ENOMEM;
663 	}
664 	dma->buflist = temp_buflist;
665 
666 	for (i = 0; i < entry->buf_count; i++) {
667 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
668 	}
669 
670 	/* No allocations failed, so now we can replace the original pagelist
671 	 * with the new one.
672 	 */
673 	drm_free(dma->pagelist, M_DRM);
674 	dma->pagelist = temp_pagelist;
675 
676 	dma->buf_count += entry->buf_count;
677 	dma->seg_count += entry->seg_count;
678 	dma->page_count += entry->seg_count << page_order;
679 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
680 
681 	request->count = entry->buf_count;
682 	request->size = size;
683 
684 	return 0;
685 
686 }
687 
688 static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
689 {
690 	drm_device_dma_t *dma = dev->dma;
691 	drm_buf_entry_t *entry;
692 	drm_buf_t *buf;
693 	unsigned long offset;
694 	unsigned long agp_offset;
695 	int count;
696 	int order;
697 	int size;
698 	int alignment;
699 	int page_order;
700 	int total;
701 	int byte_count;
702 	int i;
703 	drm_buf_t **temp_buflist;
704 
705 	count = request->count;
706 	order = order_base_2(request->size);
707 	size = 1 << order;
708 
709 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
710 	    ? round_page(size) : size;
711 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
712 	total = PAGE_SIZE << page_order;
713 
714 	byte_count = 0;
715 	agp_offset = request->agp_start;
716 
717 	DRM_DEBUG("count:      %d\n",  count);
718 	DRM_DEBUG("order:      %d\n",  order);
719 	DRM_DEBUG("size:       %d\n",  size);
720 	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
721 	DRM_DEBUG("alignment:  %d\n",  alignment);
722 	DRM_DEBUG("page_order: %d\n",  page_order);
723 	DRM_DEBUG("total:      %d\n",  total);
724 
725 	entry = &dma->bufs[order];
726 
727 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
728 				 M_WAITOK | M_NULLOK | M_ZERO);
729 	if (entry->buflist == NULL)
730 		return -ENOMEM;
731 
732 	entry->buf_size = size;
733 	entry->page_order = page_order;
734 
735 	offset = 0;
736 
737 	while (entry->buf_count < count) {
738 		buf          = &entry->buflist[entry->buf_count];
739 		buf->idx     = dma->buf_count + entry->buf_count;
740 		buf->total   = alignment;
741 		buf->order   = order;
742 		buf->used    = 0;
743 
744 		buf->offset  = (dma->byte_count + offset);
745 		buf->bus_address = agp_offset + offset;
746 		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
747 		buf->next    = NULL;
748 		buf->pending = 0;
749 		buf->file_priv = NULL;
750 
751 		buf->dev_priv_size = dev->driver->dev_priv_size;
752 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
753 					   M_WAITOK | M_NULLOK | M_ZERO);
754 		if (buf->dev_private == NULL) {
755 			/* Set count correctly so we free the proper amount. */
756 			entry->buf_count = count;
757 			drm_cleanup_buf_error(dev, entry);
758 			return -ENOMEM;
759 		}
760 
761 		DRM_DEBUG("buffer %d @ %p\n",
762 		    entry->buf_count, buf->address);
763 
764 		offset += alignment;
765 		entry->buf_count++;
766 		byte_count += PAGE_SIZE << page_order;
767 	}
768 
769 	DRM_DEBUG("byte_count: %d\n", byte_count);
770 
771 	temp_buflist = krealloc(dma->buflist,
772 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
773 	    M_DRM, M_WAITOK | M_NULLOK);
774 	if (temp_buflist == NULL) {
775 		/* Free the entry because it isn't valid */
776 		drm_cleanup_buf_error(dev, entry);
777 		return -ENOMEM;
778 	}
779 	dma->buflist = temp_buflist;
780 
781 	for (i = 0; i < entry->buf_count; i++) {
782 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
783 	}
784 
785 	dma->buf_count += entry->buf_count;
786 	dma->byte_count += byte_count;
787 
788 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
789 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
790 
791 	request->count = entry->buf_count;
792 	request->size = size;
793 
794 	dma->flags = _DRM_DMA_USE_SG;
795 
796 	return 0;
797 }
798 
799 /**
800  * Add AGP buffers for DMA transfers.
801  *
802  * \param dev struct drm_device to which the buffers are to be added.
803  * \param request pointer to a struct drm_buf_desc describing the request.
804  * \return zero on success or a negative number on failure.
805  *
806  * After some sanity checks, creates a drm_buf structure for each buffer and
807  * reallocates the buffer list of the same size order to accommodate the new
808  * buffers.
809  */
810 int drm_legacy_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
811 {
812 	int order, ret;
813 
814 	if (request->count < 0 || request->count > 4096)
815 		return -EINVAL;
816 
817 	order = order_base_2(request->size);
818 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
819 		return -EINVAL;
820 
821 	spin_lock(&dev->dma_lock);
822 
823 	/* No more allocations after first buffer-using ioctl. */
824 	if (dev->buf_use != 0) {
825 		spin_unlock(&dev->dma_lock);
826 		return -EBUSY;
827 	}
828 	/* No more than one allocation per order */
829 	if (dev->dma->bufs[order].buf_count != 0) {
830 		spin_unlock(&dev->dma_lock);
831 		return -ENOMEM;
832 	}
833 
834 	ret = drm_do_addbufs_agp(dev, request);
835 
836 	spin_unlock(&dev->dma_lock);
837 
838 	return ret;
839 }
840 
841 static int drm_legacy_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
842 {
843 	int order, ret;
844 
845 	if (!capable(CAP_SYS_ADMIN))
846 		return -EACCES;
847 
848 	if (request->count < 0 || request->count > 4096)
849 		return -EINVAL;
850 
851 	order = order_base_2(request->size);
852 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
853 		return -EINVAL;
854 
855 	spin_lock(&dev->dma_lock);
856 
857 	/* No more allocations after first buffer-using ioctl. */
858 	if (dev->buf_use != 0) {
859 		spin_unlock(&dev->dma_lock);
860 		return -EBUSY;
861 	}
862 	/* No more than one allocation per order */
863 	if (dev->dma->bufs[order].buf_count != 0) {
864 		spin_unlock(&dev->dma_lock);
865 		return -ENOMEM;
866 	}
867 
868 	ret = drm_do_addbufs_sg(dev, request);
869 
870 	spin_unlock(&dev->dma_lock);
871 
872 	return ret;
873 }
874 
875 int drm_legacy_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
876 {
877 	int order, ret;
878 
879 	if (!capable(CAP_SYS_ADMIN))
880 		return -EACCES;
881 
882 	if (request->count < 0 || request->count > 4096)
883 		return -EINVAL;
884 
885 	order = order_base_2(request->size);
886 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
887 		return -EINVAL;
888 
889 	spin_lock(&dev->dma_lock);
890 
891 	/* No more allocations after first buffer-using ioctl. */
892 	if (dev->buf_use != 0) {
893 		spin_unlock(&dev->dma_lock);
894 		return -EBUSY;
895 	}
896 	/* No more than one allocation per order */
897 	if (dev->dma->bufs[order].buf_count != 0) {
898 		spin_unlock(&dev->dma_lock);
899 		return -ENOMEM;
900 	}
901 
902 	ret = drm_do_addbufs_pci(dev, request);
903 
904 	spin_unlock(&dev->dma_lock);
905 
906 	return ret;
907 }
908 
909 /**
910  * Add buffers for DMA transfers (ioctl).
911  *
912  * \param inode device inode.
913  * \param file_priv DRM file private.
914  * \param cmd command.
915  * \param arg pointer to a struct drm_buf_desc request.
916  * \return zero on success or a negative number on failure.
917  *
918  * According to the memory type specified in drm_buf_desc::flags and the
919  * build options, it dispatches the call either to addbufs_agp(),
920  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
921  * PCI memory respectively.
922  */
923 int drm_legacy_addbufs(struct drm_device *dev, void *data,
924 		       struct drm_file *file_priv)
925 {
926 	struct drm_buf_desc *request = data;
927 	int err;
928 
929 	if (request->flags & _DRM_AGP_BUFFER)
930 		err = drm_legacy_addbufs_agp(dev, request);
931 	else if (request->flags & _DRM_SG_BUFFER)
932 		err = drm_legacy_addbufs_sg(dev, request);
933 	else
934 		err = drm_legacy_addbufs_pci(dev, request);
935 
936 	return err;
937 }
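/*
 * Userspace view (sketch, hypothetical sizes): request 32 page-aligned
 * 64 KiB buffers from consistent PCI memory (neither _DRM_AGP_BUFFER nor
 * _DRM_SG_BUFFER is set, so the PCI path above is taken, which requires
 * CAP_SYS_ADMIN).  On return, count and size report what was allocated.
 *
 *	struct drm_buf_desc desc = { 0 };
 *	desc.count = 32;
 *	desc.size  = 65536;
 *	desc.flags = _DRM_PAGE_ALIGN;
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *		printf("%d buffers of %d bytes\n", desc.count, desc.size);
 */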
938 
939 /**
940  * Get information about the buffer mappings.
941  *
942  * This was originally meant for debugging purposes, or for use by a sophisticated
943  * client library to determine how best to use the available buffers (e.g.,
944  * large buffers can be used for image transfer).
945  *
946  * \param inode device inode.
947  * \param file_priv DRM file private.
948  * \param cmd command.
949  * \param arg pointer to a drm_buf_info structure.
950  * \return zero on success or a negative number on failure.
951  *
952  * Increments drm_device::buf_use while holding the drm_device::buf_lock
953  * lock, preventing allocation of more buffers after this call. Information
954  * about each requested buffer is then copied into user space.
955  */
956 int drm_legacy_infobufs(struct drm_device *dev, void *data,
957 			struct drm_file *file_priv)
958 {
959 	struct drm_device_dma *dma = dev->dma;
960 	struct drm_buf_info *request = data;
961 	int i;
962 	int count;
963 
964 	if (drm_core_check_feature(dev, DRIVER_MODESET))
965 		return -EINVAL;
966 
967 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
968 		return -EINVAL;
969 
970 	if (!dma)
971 		return -EINVAL;
972 
973 	spin_lock(&dev->buf_lock);
974 	if (atomic_read(&dev->buf_alloc)) {
975 		spin_unlock(&dev->buf_lock);
976 		return -EBUSY;
977 	}
978 	++dev->buf_use;		/* Can't allocate more after this call */
979 	spin_unlock(&dev->buf_lock);
980 
981 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
982 		if (dma->bufs[i].buf_count)
983 			++count;
984 	}
985 
986 	DRM_DEBUG("count = %d\n", count);
987 
988 	if (request->count >= count) {
989 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
990 			if (dma->bufs[i].buf_count) {
991 				struct drm_buf_desc __user *to =
992 				    &request->list[count];
993 				struct drm_buf_entry *from = &dma->bufs[i];
994 				if (copy_to_user(&to->count,
995 						 &from->buf_count,
996 						 sizeof(from->buf_count)) ||
997 				    copy_to_user(&to->size,
998 						 &from->buf_size,
999 						 sizeof(from->buf_size)) ||
1000 				    copy_to_user(&to->low_mark,
1001 						 &from->low_mark,
1002 						 sizeof(from->low_mark)) ||
1003 				    copy_to_user(&to->high_mark,
1004 						 &from->high_mark,
1005 						 sizeof(from->high_mark)))
1006 					return -EFAULT;
1007 
1008 				DRM_DEBUG("%d %d %d %d %d\n",
1009 					  i,
1010 					  dma->bufs[i].buf_count,
1011 					  dma->bufs[i].buf_size,
1012 					  dma->bufs[i].low_mark,
1013 					  dma->bufs[i].high_mark);
1014 				++count;
1015 			}
1016 		}
1017 	}
1018 	request->count = count;
1019 
1020 	return 0;
1021 }
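/*
 * Userspace view (sketch): the usual two-pass pattern.  The first call, with
 * count zero, only reports how many size orders are populated; the second
 * call, with a large enough list, fills in one drm_buf_desc per order.
 *
 *	struct drm_buf_info info = { 0 };
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *	info.list = calloc(info.count, sizeof(*info.list));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 */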
1022 
1023 /**
1024  * Specifies a low and high water mark for buffer allocation.
1025  *
1026  * \param inode device inode.
1027  * \param file_priv DRM file private.
1028  * \param cmd command.
1029  * \param arg a pointer to a drm_buf_desc structure.
1030  * \return zero on success or a negative number on failure.
1031  *
1032  * Verifies that the size order is bounded between the admissible orders and
1033  * updates the respective drm_device_dma::bufs entry low and high water mark.
1034  *
1035  * \note This ioctl is deprecated and mostly never used.
1036  */
1037 int drm_legacy_markbufs(struct drm_device *dev, void *data,
1038 			struct drm_file *file_priv)
1039 {
1040 	struct drm_device_dma *dma = dev->dma;
1041 	struct drm_buf_desc *request = data;
1042 	int order;
1043 	struct drm_buf_entry *entry;
1044 
1045 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1046 		return -EINVAL;
1047 
1048 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1049 		return -EINVAL;
1050 
1051 	if (!dma)
1052 		return -EINVAL;
1053 
1054 	DRM_DEBUG("%d, %d, %d\n",
1055 		  request->size, request->low_mark, request->high_mark);
1056 	order = order_base_2(request->size);
1057 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1058 		return -EINVAL;
1059 	entry = &dma->bufs[order];
1060 
1061 	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1062 		return -EINVAL;
1063 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1064 		return -EINVAL;
1065 
1066 	entry->low_mark = request->low_mark;
1067 	entry->high_mark = request->high_mark;
1068 
1069 	return 0;
1070 }
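/*
 * Userspace view (sketch, hypothetical thresholds): set the water marks for
 * the 64 KiB size order.
 *
 *	struct drm_buf_desc mark = { 0 };
 *	mark.size      = 65536;
 *	mark.low_mark  = 4;
 *	mark.high_mark = 24;
 *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &mark);
 */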
1071 
1072 /**
1073  * Unreserve the buffers in the list, previously reserved using drmDMA.
1074  *
1075  * \param inode device inode.
1076  * \param file_priv DRM file private.
1077  * \param cmd command.
1078  * \param arg pointer to a drm_buf_free structure.
1079  * \return zero on success or a negative number on failure.
1080  *
1081  * Calls free_buffer() for each used buffer.
1082  * This function is primarily used for debugging.
1083  */
1084 int drm_legacy_freebufs(struct drm_device *dev, void *data,
1085 			struct drm_file *file_priv)
1086 {
1087 	drm_device_dma_t *dma = dev->dma;
1088 	struct drm_buf_free *request = data;
1089 	int i;
1090 	int idx;
1091 	drm_buf_t *buf;
1092 	int retcode = 0;
1093 
1094 	DRM_DEBUG("%d\n", request->count);
1095 
1096 	spin_lock(&dev->dma_lock);
1097 	for (i = 0; i < request->count; i++) {
1098 		if (copy_from_user(&idx, &request->list[i], sizeof(idx))) {
1099 			retcode = -EFAULT;
1100 			break;
1101 		}
1102 		if (idx < 0 || idx >= dma->buf_count) {
1103 			DRM_ERROR("Index %d (of %d max)\n",
1104 			    idx, dma->buf_count - 1);
1105 			retcode = -EINVAL;
1106 			break;
1107 		}
1108 		buf = dma->buflist[idx];
1109 		if (buf->file_priv != file_priv) {
1110 			DRM_ERROR("Process %d freeing buffer not owned\n",
1111 			    DRM_CURRENTPID);
1112 			retcode = -EINVAL;
1113 			break;
1114 		}
1115 		drm_legacy_free_buffer(dev, buf);
1116 	}
1117 	spin_unlock(&dev->dma_lock);
1118 
1119 	return retcode;
1120 }
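/*
 * Userspace view (sketch, hypothetical indices): hand two buffers back after
 * use.  The indices must belong to buffers reserved by this file descriptor
 * (typically via the DMA ioctl), otherwise the ownership check above fails.
 *
 *	int idx[2] = { 5, 6 };
 *	struct drm_buf_free req = { .count = 2, .list = idx };
 *	ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 */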
1121 
1122 /**
1123  * Maps all of the DMA buffers into client-virtual space (ioctl).
1124  *
1125  * \param inode device inode.
1126  * \param file_priv DRM file private.
1127  * \param cmd command.
1128  * \param arg pointer to a drm_buf_map structure.
1129  * \return zero on success or a negative number on failure.
1130  *
1131  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1132  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1133  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1134  * drm_mmap_dma().
1135  */
1136 int drm_legacy_mapbufs(struct drm_device *dev, void *data,
1137 		       struct drm_file *file_priv)
1138 {
1139 	drm_device_dma_t *dma = dev->dma;
1140 	int retcode = 0;
1141 	const int zero = 0;
1142 	vm_offset_t address;
1143 	struct vmspace *vms;
1144 	vm_ooffset_t foff;
1145 	vm_size_t size;
1146 	vm_offset_t vaddr;
1147 	struct drm_buf_map *request = data;
1148 	int i;
1149 
1150 	vms = DRM_CURPROC->td_proc->p_vmspace;
1151 
1152 	spin_lock(&dev->dma_lock);
1153 	dev->buf_use++;		/* Can't allocate more after this call */
1154 	spin_unlock(&dev->dma_lock);
1155 
1156 	if (request->count < dma->buf_count)
1157 		goto done;
1158 
1159 	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
1160 	    (drm_core_check_feature(dev, DRIVER_SG) &&
1161 	    (dma->flags & _DRM_DMA_USE_SG))) {
1162 		drm_local_map_t *map = dev->agp_buffer_map;
1163 
1164 		if (map == NULL) {
1165 			retcode = -EINVAL;
1166 			goto done;
1167 		}
1168 		size = round_page(map->size);
1169 		foff = (unsigned long)map->handle;
1170 	} else {
1171 		size = round_page(dma->byte_count);
1172 		foff = 0;
1173 	}
1174 
1175 	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1176 	retcode = -vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1177 	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
1178 	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
1179 	if (retcode)
1180 		goto done;
1181 
1182 	request->virtual = (void *)vaddr;
1183 
1184 	for (i = 0; i < dma->buf_count; i++) {
1185 		if (copy_to_user(&request->list[i].idx,
1186 		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
1187 			retcode = -EFAULT;
1188 			goto done;
1189 		}
1190 		if (copy_to_user(&request->list[i].total,
1191 		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
1192 			retcode = -EFAULT;
1193 			goto done;
1194 		}
1195 		if (copy_to_user(&request->list[i].used, &zero,
1196 		    sizeof(zero))) {
1197 			retcode = -EFAULT;
1198 			goto done;
1199 		}
1200 		address = vaddr + dma->buflist[i]->offset; /* *** */
1201 		if (copy_to_user(&request->list[i].address, &address,
1202 		    sizeof(address))) {
1203 			retcode = -EFAULT;
1204 			goto done;
1205 		}
1206 	}
1207       done:
1208 	request->count = dma->buf_count;
1209 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1210 
1211 	return retcode;
1212 }
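/*
 * Userspace view (sketch): map every DMA buffer into the client.  A first
 * call with count zero just returns the total buffer count (via the
 * early-out path above), and the second call, with a large enough list,
 * performs the mapping; each returned entry's address is a client virtual
 * address.
 *
 *	struct drm_buf_map req = { 0 };
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &req);
 *	req.list = calloc(req.count, sizeof(*req.list));
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &req);
 */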
1213 
1214 int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
1215 		  struct drm_file *file_priv)
1216 {
1217 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1218 		return -EINVAL;
1219 
1220 	if (dev->driver->dma_ioctl)
1221 		return dev->driver->dma_ioctl(dev, data, file_priv);
1222 	else
1223 		return -EINVAL;
1224 }
1225 
1226 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
1227 {
1228 	struct drm_map_list *entry;
1229 
1230 	list_for_each_entry(entry, &dev->maplist, head) {
1231 		if (entry->map && entry->map->type == _DRM_SHM &&
1232 		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1233 			return entry->map;
1234 		}
1235 	}
1236 	return NULL;
1237 }
1238 EXPORT_SYMBOL(drm_legacy_getsarea);
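/*
 * Usage sketch (illustrative; 'dev_priv' and 'sarea_priv_offset' are
 * hypothetical driver-private names): a legacy driver locating the SAREA
 * that the X server created via addmap with _DRM_SHM and _DRM_CONTAINS_LOCK.
 *
 *	struct drm_local_map *sarea = drm_legacy_getsarea(dev);
 *
 *	if (sarea == NULL)
 *		return -EINVAL;
 *	dev_priv->sarea_priv = (void *)((char *)sarea->handle +
 *					sarea_priv_offset);
 */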
1239