xref: /dragonfly/sys/dev/drm/drm_bufs.c (revision 7b1120e5)
1 /*
2  * Legacy: Generic DRM Buffer Management
3  *
4  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6  * All Rights Reserved.
7  *
8  * Author: Rickard E. (Rik) Faith <faith@valinux.com>
9  * Author: Gareth Hughes <gareth@valinux.com>
10  *
11  * Permission is hereby granted, free of charge, to any person obtaining a
12  * copy of this software and associated documentation files (the "Software"),
13  * to deal in the Software without restriction, including without limitation
14  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15  * and/or sell copies of the Software, and to permit persons to whom the
16  * Software is furnished to do so, subject to the following conditions:
17  *
18  * The above copyright notice and this permission notice (including the next
19  * paragraph) shall be included in all copies or substantial portions of the
20  * Software.
21  *
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
25  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28  * OTHER DEALINGS IN THE SOFTWARE.
29  */
30 
31 #include <linux/vmalloc.h>
32 #include <linux/log2.h>
33 #include <linux/export.h>
34 #include <asm/shmparam.h>
35 #include <drm/drmP.h>
36 #include "drm_legacy.h"
37 
38 #include <sys/mman.h>
39 #include <vm/vm_map.h>
40 
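/*
 * Look for an existing map on dev->maplist that is compatible with the
 * requested one (same type and master, matching offset), so that addmap
 * can reuse it instead of creating a duplicate entry.
 */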
41 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
42 						  struct drm_local_map *map)
43 {
44 	struct drm_map_list *entry;
45 	list_for_each_entry(entry, &dev->maplist, head) {
46 		/*
47 		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
48 		 * while PCI resources may live above that, we only compare the
49 		 * lower 32 bits of the map offset for maps of type
50 		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
51 		 * It is assumed that if a driver has more than one resource
52 		 * of each type, the lower 32 bits are different.
53 		 */
54 		if (!entry->map ||
55 		    map->type != entry->map->type ||
56 		    entry->master != dev->primary->master)
57 			continue;
58 		switch (map->type) {
59 		case _DRM_SHM:
60 			if (map->flags != _DRM_CONTAINS_LOCK)
61 				break;
62 			return entry;
63 		case _DRM_REGISTERS:
64 		case _DRM_FRAME_BUFFER:
65 			if ((entry->map->offset & 0xffffffff) ==
66 			    (map->offset & 0xffffffff))
67 				return entry;
68 		default: /* Make gcc happy */
69 			;
70 		}
71 		if (entry->map->offset == map->offset)
72 			return entry;
73 	}
74 
75 	return NULL;
76 }
77 
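/*
 * Generate the hash key under which the map is published to userspace
 * (the "user token" used as the mmap() offset).  Low addresses are keyed
 * directly by their page frame; otherwise a handle above
 * DRM_MAP_HASH_OFFSET is generated, keeping the SHMLBA-relevant bits for
 * SHM maps to avoid cache aliasing on mmap().
 */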
78 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
79 			  unsigned long user_token, int hashed_handle, int shm)
80 {
81 	int use_hashed_handle, shift;
82 	unsigned long add;
83 
84 #if (BITS_PER_LONG == 64)
85 	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
86 #elif (BITS_PER_LONG == 32)
87 	use_hashed_handle = hashed_handle;
88 #else
89 #error Unsupported long size. Neither 64 nor 32 bits.
90 #endif
91 
92 	if (!use_hashed_handle) {
93 		int ret;
94 		hash->key = user_token >> PAGE_SHIFT;
95 		ret = drm_ht_insert_item(&dev->map_hash, hash);
96 		if (ret != -EINVAL)
97 			return ret;
98 	}
99 
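	/* Fall back to a hashed handle located above DRM_MAP_HASH_OFFSET. */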
100 	shift = 0;
101 	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
102 	if (shm && (SHMLBA > PAGE_SIZE)) {
103 		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
104 
105 		/* For shared memory, we have to preserve the SHMLBA
106 		 * bits of the eventual vma->vm_pgoff value during
107 		 * mmap().  Otherwise we run into cache aliasing problems
108 		 * on some platforms.  On these platforms, the pgoff of
109 		 * a mmap() request is used to pick a suitable virtual
110 		 * address for the mmap() region such that it will not
111 		 * cause cache aliasing problems.
112 		 *
113 		 * Therefore, make sure the SHMLBA relevant bits of the
114 		 * hash value we use are equal to those in the original
115 		 * kernel virtual address.
116 		 */
117 		shift = bits;
118 		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
119 	}
120 
121 	return drm_ht_just_insert_please(&dev->map_hash, hash,
122 					 user_token, 32 - PAGE_SHIFT - 3,
123 					 shift, add);
124 }
125 
126 /**
127  * Core function to create a range of memory available for mapping by a
128  * non-root process.
129  *
130  * Adjusts the memory offset to its absolute value according to the mapping
131  * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
132  * applicable and if supported by the kernel.
133  */
134 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
135 			   unsigned int size, enum drm_map_type type,
136 			   enum drm_map_flags flags,
137 			   struct drm_map_list ** maplist)
138 {
139 	struct drm_local_map *map;
140 	struct drm_map_list *list;
141 	drm_dma_handle_t *dmah;
142 	unsigned long user_token;
143 	int ret;
144 
145 	map = kmalloc(sizeof(*map), M_DRM, M_WAITOK);
146 	if (!map)
147 		return -ENOMEM;
148 
149 	map->offset = offset;
150 	map->size = size;
151 	map->flags = flags;
152 	map->type = type;
153 
154 	/* Only allow shared memory to be removable since we only keep enough
155 	 * bookkeeping information about shared memory to allow for removal
156 	 * when processes fork.
157 	 */
158 	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
159 		kfree(map);
160 		return -EINVAL;
161 	}
162 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
163 		  (unsigned long long)map->offset, map->size, map->type);
164 
165 	/* Page-align _DRM_SHM maps.  They are allocated here, so no security hole
166 	 * is created by that, and it also works around various broken drivers that
167 	 * use a non-aligned quantity to map the SAREA. --BenH
168 	 */
169 	if (map->type == _DRM_SHM)
170 		map->size = PAGE_ALIGN(map->size);
171 
172 	if ((map->offset & (~(resource_size_t)LINUX_PAGE_MASK)) || (map->size & (~LINUX_PAGE_MASK))) {
173 		kfree(map);
174 		return -EINVAL;
175 	}
176 	map->mtrr = -1;
177 	map->handle = NULL;
178 
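	/* Type-specific setup: resolve the final offset and, where needed,
	 * create a kernel mapping or backing storage for the region. */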
179 	switch (map->type) {
180 	case _DRM_REGISTERS:
181 	case _DRM_FRAME_BUFFER:
182 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
183 		if (map->offset + (map->size-1) < map->offset ||
184 		    map->offset < virt_to_phys(high_memory)) {
185 			kfree(map);
186 			return -EINVAL;
187 		}
188 #endif
189 		/* Some drivers preinitialize some maps, without the X Server
190 		 * needing to be aware of it.  Therefore, we just return success
191 		 * when the server tries to create a duplicate map.
192 		 */
193 		list = drm_find_matching_map(dev, map);
194 		if (list != NULL) {
195 			if (list->map->size != map->size) {
196 				DRM_DEBUG("Matching maps of type %d with "
197 					  "mismatched sizes, (%ld vs %ld)\n",
198 					  map->type, map->size,
199 					  list->map->size);
200 				list->map->size = map->size;
201 			}
202 
203 			kfree(map);
204 			*maplist = list;
205 			return 0;
206 		}
207 
208 		if (map->type == _DRM_FRAME_BUFFER ||
209 		    (map->flags & _DRM_WRITE_COMBINING)) {
210 			map->mtrr =
211 				arch_phys_wc_add(map->offset, map->size);
212 		}
213 		if (map->type == _DRM_REGISTERS) {
214 			if (map->flags & _DRM_WRITE_COMBINING)
215 				map->handle = ioremap_wc(map->offset,
216 							 map->size);
217 			else
218 				map->handle = ioremap(map->offset, map->size);
219 			if (!map->handle) {
220 				kfree(map);
221 				return -ENOMEM;
222 			}
223 		}
224 
225 		break;
226 	case _DRM_SHM:
227 		list = drm_find_matching_map(dev, map);
228 		if (list != NULL) {
229 			if (list->map->size != map->size) {
230 				DRM_DEBUG("Matching maps of type %d with "
231 					  "mismatched sizes, (%ld vs %ld)\n",
232 					  map->type, map->size, list->map->size);
233 				list->map->size = map->size;
234 			}
235 
236 			kfree(map);
237 			*maplist = list;
238 			return 0;
239 		}
240 		map->handle = vmalloc_user(map->size);
241 		DRM_DEBUG("%lu %d %p\n",
242 			  map->size, order_base_2(map->size), map->handle);
243 		if (!map->handle) {
244 			kfree(map);
245 			return -ENOMEM;
246 		}
247 		map->offset = (unsigned long)map->handle;
248 		if (map->flags & _DRM_CONTAINS_LOCK) {
249 			/* Prevent a 2nd X Server from creating a 2nd lock */
250 			if (dev->primary->master->lock.hw_lock != NULL) {
251 				vfree(map->handle);
252 				kfree(map);
253 				return -EBUSY;
254 			}
255 			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
256 		}
257 		break;
258 	case _DRM_AGP: {
259 #if 0
260 		struct drm_agp_mem *entry;
261 		int valid = 0;
262 #endif
263 
264 		if (!dev->agp) {
265 			kfree(map);
266 			return -EINVAL;
267 		}
268 #ifdef __alpha__
269 		map->offset += dev->hose->mem_space->start;
270 #endif
271 		/* In some cases (i810 driver), user space may have already
272 		 * added the AGP base itself, because dev->agp->base previously
273 		 * only got set during AGP enable.  So, only add the base
274 		 * address if the map's offset isn't already within the
275 		 * aperture.
276 		 */
277 		if (map->offset < dev->agp->base ||
278 		    map->offset > dev->agp->base +
279 		    dev->agp->agp_info.ai_aperture_size - 1) {
280 			map->offset += dev->agp->base;
281 		}
282 		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
283 
284 		/* This assumes the DRM is in total control of AGP space.
285 		 * It's not always the case as AGP can be in the control
286 		 * of user space (e.g. the i810 driver), so this loop will get
287 		 * skipped and we double-check that dev->agp->memory is
288 		 * actually set, as well as being invalid, before returning -EPERM.
289 		 */
290 #if 0
291 		list_for_each_entry(entry, &dev->agp->memory, head) {
292 			if ((map->offset >= entry->bound) &&
293 			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
294 				valid = 1;
295 				break;
296 			}
297 		}
298 		if (!list_empty(&dev->agp->memory) && !valid) {
299 			kfree(map);
300 			return -EPERM;
301 		}
302 #endif
303 		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
304 			  (unsigned long long)map->offset, map->size);
305 
306 		break;
307 	}
308 	case _DRM_SCATTER_GATHER:
309 		if (!dev->sg) {
310 			kfree(map);
311 			return -EINVAL;
312 		}
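		/* Scatter-gather offsets are relative to the start of the SG
		 * area set up by the driver. */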
313 		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
314 		map->offset = dev->sg->vaddr + offset;
315 		break;
316 	case _DRM_CONSISTENT:
317 		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
318 		 * As we're limiting the address to 2^32-1 (or less),
319 		 * casting it down to 32 bits is no problem, but we
320 		 * need to point to a 64-bit variable first. */
321 		dmah = drm_pci_alloc(dev, map->size, map->size);
322 		if (!dmah) {
323 			kfree(map);
324 			return -ENOMEM;
325 		}
326 		map->handle = dmah->vaddr;
327 		map->offset = (unsigned long)dmah->busaddr;
328 		kfree(dmah);
329 		break;
330 	default:
331 		kfree(map);
332 		return -EINVAL;
333 	}
334 
335 	list = kzalloc(sizeof(*list), GFP_KERNEL);
336 	if (!list) {
337 		if (map->type == _DRM_REGISTERS)
338 			iounmap(map->handle);
339 		kfree(map);
340 		return -EINVAL;
341 	}
342 	list->map = map;
343 
344 	mutex_lock(&dev->struct_mutex);
345 	list_add(&list->head, &dev->maplist);
346 
347 	/* Assign a 32-bit handle.  We do it here so that dev->struct_mutex
348 	 * protects the increment. */
349 	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
350 		map->offset;
351 	ret = drm_map_handle(dev, &list->hash, user_token, 0,
352 			     (map->type == _DRM_SHM));
353 	if (ret) {
354 		if (map->type == _DRM_REGISTERS)
355 			iounmap(map->handle);
356 		kfree(map);
357 		kfree(list);
358 		mutex_unlock(&dev->struct_mutex);
359 		return ret;
360 	}
361 
362 	list->user_token = list->hash.key << PAGE_SHIFT;
363 	mutex_unlock(&dev->struct_mutex);
364 
365 	if (!(map->flags & _DRM_DRIVER))
366 		list->master = dev->primary->master;
367 	*maplist = list;
368 	return 0;
369 }
370 
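/**
 * Kernel-internal wrapper around drm_addmap_core() that hands back the new
 * drm_local_map itself rather than its drm_map_list entry.
 */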
371 int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
372 		      unsigned int size, enum drm_map_type type,
373 		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
374 {
375 	struct drm_map_list *list;
376 	int rc;
377 
378 	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
379 	if (!rc)
380 		*map_ptr = list->map;
381 	return rc;
382 }
383 EXPORT_SYMBOL(drm_legacy_addmap);
384 
385 /**
386  * Ioctl to specify a range of memory that is available for mapping by a
387  * non-root process.
388  *
389  * \param inode device inode.
390  * \param file_priv DRM file private.
391  * \param cmd command.
392  * \param arg pointer to a drm_map structure.
393  * \return zero on success or a negative value on error.
394  *
395  */
396 int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
397 			    struct drm_file *file_priv)
398 {
399 	struct drm_map *map = data;
400 	struct drm_map_list *maplist;
401 	int err;
402 
403 	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
404 		return -EPERM;
405 
406 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
407 	    drm_core_check_feature(dev, DRIVER_MODESET))
408 		return -EINVAL;
409 
410 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
411 			      map->flags, &maplist);
412 
413 	if (err)
414 		return err;
415 
416 	/* Avoid a warning on 64-bit: this cast isn't very nice, but the API is set, so it's too late to change. */
417 	map->handle = (void *)(unsigned long)maplist->user_token;
418 
419 	/*
420 	 * It appears that there are no users of this value whatsoever --
421 	 * drmAddMap just discards it.  Let's not encourage its use.
422 	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
423 	 *  it's not a real mtrr index anymore.)
424 	 */
425 	map->mtrr = -1;
426 
427 	return 0;
428 }
429 
430 /*
431  * Get mapping information.
432  *
433  * \param inode device inode.
434  * \param file_priv DRM file private.
435  * \param cmd command.
436  * \param arg user argument, pointing to a drm_map structure.
437  *
438  * \return zero on success or a negative number on failure.
439  *
440  * Searches for the mapping with the specified offset and copies its information
441  * into userspace
442  */
443 int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
444 			    struct drm_file *file_priv)
445 {
446 	struct drm_map *map = data;
447 	struct drm_map_list *r_list = NULL;
448 	struct list_head *list;
449 	int idx;
450 	int i;
451 
452 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
453 	    drm_core_check_feature(dev, DRIVER_MODESET))
454 		return -EINVAL;
455 
456 	idx = map->offset;
457 	if (idx < 0)
458 		return -EINVAL;
459 
460 	i = 0;
461 	mutex_lock(&dev->struct_mutex);
462 	list_for_each(list, &dev->maplist) {
463 		if (i == idx) {
464 			r_list = list_entry(list, struct drm_map_list, head);
465 			break;
466 		}
467 		i++;
468 	}
469 	if (!r_list || !r_list->map) {
470 		mutex_unlock(&dev->struct_mutex);
471 		return -EINVAL;
472 	}
473 
474 	map->offset = r_list->map->offset;
475 	map->size = r_list->map->size;
476 	map->type = r_list->map->type;
477 	map->flags = r_list->map->flags;
478 	map->handle = (void *)(unsigned long) r_list->user_token;
479 	map->mtrr = r_list->map->mtrr;
480 
481 	mutex_unlock(&dev->struct_mutex);
482 
483 	return 0;
484 }
485 
486 /**
487  * Remove a map from the map list and deallocate its resources if the
488  * mapping isn't in use.
489  *
490  * Searches for the map on drm_device::maplist, removes it from the list,
491  * checks whether it is still being used, and frees any associated resources
492  * (such as MTRRs) if it is not.
493  *
494  * \sa drm_legacy_addmap
495  */
496 int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
497 {
498 	struct drm_map_list *r_list = NULL, *list_t;
499 	drm_dma_handle_t dmah;
500 	int found = 0;
501 	struct drm_master *master;
502 
503 	/* Find the list entry for the map and remove it */
504 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
505 		if (r_list->map == map) {
506 			master = r_list->master;
507 			list_del(&r_list->head);
508 			drm_ht_remove_key(&dev->map_hash,
509 					  r_list->user_token >> PAGE_SHIFT);
510 			kfree(r_list);
511 			found = 1;
512 			break;
513 		}
514 	}
515 
516 	if (!found)
517 		return -EINVAL;
518 
519 	switch (map->type) {
520 	case _DRM_REGISTERS:
521 		iounmap(map->handle);
522 		/* FALLTHROUGH */
523 	case _DRM_FRAME_BUFFER:
524 		arch_phys_wc_del(map->mtrr);
525 		break;
526 	case _DRM_SHM:
527 		vfree(map->handle);
528 		if (master) {
529 			if (dev->sigdata.lock == master->lock.hw_lock)
530 				dev->sigdata.lock = NULL;
531 			master->lock.hw_lock = NULL;   /* SHM removed */
532 			master->lock.file_priv = NULL;
533 			wake_up_interruptible_all(&master->lock.lock_queue);
534 		}
535 		break;
536 	case _DRM_AGP:
537 	case _DRM_SCATTER_GATHER:
538 		break;
539 	case _DRM_CONSISTENT:
540 		dmah.vaddr = map->handle;
541 		dmah.busaddr = map->offset;
542 		dmah.size = map->size;
543 		__drm_legacy_pci_free(dev, &dmah);
544 		break;
545 	}
546 	kfree(map);
547 
548 	return 0;
549 }
550 EXPORT_SYMBOL(drm_legacy_rmmap_locked);
551 
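/**
 * Locked wrapper around drm_legacy_rmmap_locked(): takes dev->struct_mutex
 * around the removal.  Does nothing on modesetting drivers without legacy
 * context support.
 */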
552 void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
553 {
554 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
555 	    drm_core_check_feature(dev, DRIVER_MODESET))
556 		return;
557 
558 	mutex_lock(&dev->struct_mutex);
559 	drm_legacy_rmmap_locked(dev, map);
560 	mutex_unlock(&dev->struct_mutex);
561 }
562 EXPORT_SYMBOL(drm_legacy_rmmap);
563 
564 #if 0
565 void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
566 {
567 	struct drm_map_list *r_list, *list_temp;
568 
569 	if (drm_core_check_feature(dev, DRIVER_MODESET))
570 		return;
571 
572 	mutex_lock(&dev->struct_mutex);
573 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
574 		if (r_list->master == master) {
575 			drm_legacy_rmmap_locked(dev, r_list->map);
576 			r_list = NULL;
577 		}
578 	}
579 	mutex_unlock(&dev->struct_mutex);
580 }
581 #endif
582 
583 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
584  * the last close of the device, and this is necessary for cleanup when things
585  * exit uncleanly.  Therefore, having userland manually remove mappings seems
586  * like a pointless exercise since they're going away anyway.
587  *
588  * One use case might be after addmap is allowed for normal users for SHM and
589  * gets used by drivers that the server doesn't need to care about.  This seems
590  * unlikely.
591  *
592  * \param inode device inode.
593  * \param file_priv DRM file private.
594  * \param cmd command.
595  * \param arg pointer to a struct drm_map structure.
596  * \return zero on success or a negative value on error.
597  */
598 int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
599 			   struct drm_file *file_priv)
600 {
601 	struct drm_map *request = data;
602 	struct drm_local_map *map = NULL;
603 	struct drm_map_list *r_list;
604 	int ret;
605 
606 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
607 	    drm_core_check_feature(dev, DRIVER_MODESET))
608 		return -EINVAL;
609 
610 	mutex_lock(&dev->struct_mutex);
611 	list_for_each_entry(r_list, &dev->maplist, head) {
612 		if (r_list->map &&
613 		    r_list->user_token == (unsigned long)request->handle &&
614 		    r_list->map->flags & _DRM_REMOVABLE) {
615 			map = r_list->map;
616 			break;
617 		}
618 	}
619 
620 	/* List has wrapped around to the head pointer, or it's empty and we
621 	 * didn't find anything.
622 	 */
623 	if (list_empty(&dev->maplist) || !map) {
624 		mutex_unlock(&dev->struct_mutex);
625 		return -EINVAL;
626 	}
627 
628 	/* Register and framebuffer maps are permanent */
629 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
630 		mutex_unlock(&dev->struct_mutex);
631 		return 0;
632 	}
633 
634 	ret = drm_legacy_rmmap_locked(dev, map);
635 
636 	mutex_unlock(&dev->struct_mutex);
637 
638 	return ret;
639 }
640 
641 /**
642  * Cleanup after an error on one of the addbufs() functions.
643  *
644  * \param dev DRM device.
645  * \param entry buffer entry where the error occurred.
646  *
647  * Frees any pages and buffers associated with the given entry.
648  */
649 static void drm_cleanup_buf_error(struct drm_device * dev,
650 				  struct drm_buf_entry * entry)
651 {
652 	int i;
653 
654 	if (entry->seg_count) {
655 		for (i = 0; i < entry->seg_count; i++) {
656 			if (entry->seglist[i]) {
657 				drm_pci_free(dev, entry->seglist[i]);
658 			}
659 		}
660 		kfree(entry->seglist);
661 
662 		entry->seg_count = 0;
663 	}
664 
665 	if (entry->buf_count) {
666 		for (i = 0; i < entry->buf_count; i++) {
667 			kfree(entry->buflist[i].dev_private);
668 		}
669 		kfree(entry->buflist);
670 
671 		entry->buf_count = 0;
672 	}
673 }
674 
675 #if IS_ENABLED(CONFIG_AGP)
676 /**
677  * Add AGP buffers for DMA transfers.
678  *
679  * \param dev struct drm_device to which the buffers are to be added.
680  * \param request pointer to a struct drm_buf_desc describing the request.
681  * \return zero on success or a negative number on failure.
682  *
683  * After some sanity checks, creates a drm_buf structure for each buffer and
684  * reallocates the buffer list of the same size order to accommodate the new
685  * buffers.
686  */
687 int drm_legacy_addbufs_agp(struct drm_device *dev,
688 			   struct drm_buf_desc *request)
689 {
690 	struct drm_device_dma *dma = dev->dma;
691 	struct drm_buf_entry *entry;
692 	/* struct drm_agp_mem *agp_entry; */
693 	struct drm_buf *buf;
694 	unsigned long offset;
695 	unsigned long agp_offset;
696 	int count;
697 	int order;
698 	int size;
699 	int alignment;
700 	int page_order;
701 	int total;
702 	int byte_count;
703 #if 0
704 	int i, valid;
705 #else
706 	int i;
707 #endif
708 
709 	struct drm_buf **temp_buflist;
710 
711 	if (!dma)
712 		return -EINVAL;
713 
714 	count = request->count;
715 	order = order_base_2(request->size);
716 	size = 1 << order;
717 
718 	alignment = (request->flags & _DRM_PAGE_ALIGN)
719 	    ? PAGE_ALIGN(size) : size;
720 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
721 	total = PAGE_SIZE << page_order;
722 
723 	byte_count = 0;
724 	agp_offset = dev->agp->base + request->agp_start;
725 
726 	DRM_DEBUG("count:      %d\n", count);
727 	DRM_DEBUG("order:      %d\n", order);
728 	DRM_DEBUG("size:       %d\n", size);
729 	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
730 	DRM_DEBUG("alignment:  %d\n", alignment);
731 	DRM_DEBUG("page_order: %d\n", page_order);
732 	DRM_DEBUG("total:      %d\n", total);
733 
734 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
735 		return -EINVAL;
736 
737 	/* Make sure buffers are located in AGP memory that we own */
738 #if 0
739 	valid = 0;
740 	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
741 		if ((agp_offset >= agp_entry->bound) &&
742 		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
743 			valid = 1;
744 			break;
745 		}
746 	}
747 	if (!list_empty(&dev->agp->memory) && !valid) {
748 		DRM_DEBUG("zone invalid\n");
749 		return -EINVAL;
750 	}
751 #endif
752 	spin_lock(&dev->buf_lock);
753 	if (dev->buf_use) {
754 		spin_unlock(&dev->buf_lock);
755 		return -EBUSY;
756 	}
757 	atomic_inc(&dev->buf_alloc);
758 	spin_unlock(&dev->buf_lock);
759 
760 	mutex_lock(&dev->struct_mutex);
761 	entry = &dma->bufs[order];
762 	if (entry->buf_count) {
763 		mutex_unlock(&dev->struct_mutex);
764 		atomic_dec(&dev->buf_alloc);
765 		return -ENOMEM;	/* May only be called once for each order */
766 	}
767 
768 	if (count < 0 || count > 4096) {
769 		mutex_unlock(&dev->struct_mutex);
770 		atomic_dec(&dev->buf_alloc);
771 		return -EINVAL;
772 	}
773 
774 	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
775 	if (!entry->buflist) {
776 		mutex_unlock(&dev->struct_mutex);
777 		atomic_dec(&dev->buf_alloc);
778 		return -ENOMEM;
779 	}
780 
781 	entry->buf_size = size;
782 	entry->page_order = page_order;
783 
784 	offset = 0;
785 
786 	while (entry->buf_count < count) {
787 		buf = &entry->buflist[entry->buf_count];
788 		buf->idx = dma->buf_count + entry->buf_count;
789 		buf->total = alignment;
790 		buf->order = order;
791 		buf->used = 0;
792 
793 		buf->offset = (dma->byte_count + offset);
794 		buf->bus_address = agp_offset + offset;
795 		buf->address = (void *)(agp_offset + offset);
796 		buf->next = NULL;
797 		buf->waiting = 0;
798 		buf->pending = 0;
799 		buf->file_priv = NULL;
800 
801 		buf->dev_priv_size = dev->driver->dev_priv_size;
802 		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
803 		if (!buf->dev_private) {
804 			/* Set count correctly so we free the proper amount. */
805 			entry->buf_count = count;
806 			drm_cleanup_buf_error(dev, entry);
807 			mutex_unlock(&dev->struct_mutex);
808 			atomic_dec(&dev->buf_alloc);
809 			return -ENOMEM;
810 		}
811 
812 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
813 
814 		offset += alignment;
815 		entry->buf_count++;
816 		byte_count += PAGE_SIZE << page_order;
817 	}
818 
819 	DRM_DEBUG("byte_count: %d\n", byte_count);
820 
821 	temp_buflist = krealloc(dma->buflist,
822 				(dma->buf_count + entry->buf_count) *
823 				sizeof(*dma->buflist), M_DRM, M_WAITOK);
824 	if (!temp_buflist) {
825 		/* Free the entry because it isn't valid */
826 		drm_cleanup_buf_error(dev, entry);
827 		mutex_unlock(&dev->struct_mutex);
828 		atomic_dec(&dev->buf_alloc);
829 		return -ENOMEM;
830 	}
831 	dma->buflist = temp_buflist;
832 
833 	for (i = 0; i < entry->buf_count; i++) {
834 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
835 	}
836 
837 	dma->buf_count += entry->buf_count;
838 	dma->seg_count += entry->seg_count;
839 	dma->page_count += byte_count >> PAGE_SHIFT;
840 	dma->byte_count += byte_count;
841 
842 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
843 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
844 
845 	mutex_unlock(&dev->struct_mutex);
846 
847 	request->count = entry->buf_count;
848 	request->size = size;
849 
850 	dma->flags = _DRM_DMA_USE_AGP;
851 
852 	atomic_dec(&dev->buf_alloc);
853 	return 0;
854 }
855 EXPORT_SYMBOL(drm_legacy_addbufs_agp);
856 #endif /* CONFIG_AGP */
857 
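/**
 * Add consistent PCI buffers for DMA transfers.
 *
 * Allocates page-order sized chunks with drm_pci_alloc(), splits them into
 * buffers of the requested size and appends them to the device's buffer and
 * page lists.  Requires CAP_SYS_ADMIN and the DRIVER_PCI_DMA feature.
 */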
858 int drm_legacy_addbufs_pci(struct drm_device *dev,
859 			   struct drm_buf_desc *request)
860 {
861 	struct drm_device_dma *dma = dev->dma;
862 	int count;
863 	int order;
864 	int size;
865 	int total;
866 	int page_order;
867 	struct drm_buf_entry *entry;
868 	drm_dma_handle_t *dmah;
869 	struct drm_buf *buf;
870 	int alignment;
871 	unsigned long offset;
872 	int i;
873 	int byte_count;
874 	int page_count;
875 	unsigned long *temp_pagelist;
876 	struct drm_buf **temp_buflist;
877 
878 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
879 		return -EINVAL;
880 
881 	if (!dma)
882 		return -EINVAL;
883 
884 	if (!capable(CAP_SYS_ADMIN))
885 		return -EPERM;
886 
887 	count = request->count;
888 	order = order_base_2(request->size);
889 	size = 1 << order;
890 
891 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
892 		  request->count, request->size, size, order);
893 
894 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
895 		return -EINVAL;
896 
897 	alignment = (request->flags & _DRM_PAGE_ALIGN)
898 	    ? PAGE_ALIGN(size) : size;
899 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
900 	total = PAGE_SIZE << page_order;
901 
902 	spin_lock(&dev->buf_lock);
903 	if (dev->buf_use) {
904 		spin_unlock(&dev->buf_lock);
905 		return -EBUSY;
906 	}
907 	atomic_inc(&dev->buf_alloc);
908 	spin_unlock(&dev->buf_lock);
909 
910 	mutex_lock(&dev->struct_mutex);
911 	entry = &dma->bufs[order];
912 	if (entry->buf_count) {
913 		mutex_unlock(&dev->struct_mutex);
914 		atomic_dec(&dev->buf_alloc);
915 		return -ENOMEM;	/* May only be called once for each order */
916 	}
917 
918 	if (count < 0 || count > 4096) {
919 		mutex_unlock(&dev->struct_mutex);
920 		atomic_dec(&dev->buf_alloc);
921 		return -EINVAL;
922 	}
923 
924 	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
925 	if (!entry->buflist) {
926 		mutex_unlock(&dev->struct_mutex);
927 		atomic_dec(&dev->buf_alloc);
928 		return -ENOMEM;
929 	}
930 
931 	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
932 	if (!entry->seglist) {
933 		kfree(entry->buflist);
934 		mutex_unlock(&dev->struct_mutex);
935 		atomic_dec(&dev->buf_alloc);
936 		return -ENOMEM;
937 	}
938 
939 	/* Keep the original pagelist until we know all the allocations
940 	 * have succeeded
941 	 */
942 	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
943 			       sizeof(*dma->pagelist), M_DRM, M_WAITOK );
944 	if (!temp_pagelist) {
945 		kfree(entry->buflist);
946 		kfree(entry->seglist);
947 		mutex_unlock(&dev->struct_mutex);
948 		atomic_dec(&dev->buf_alloc);
949 		return -ENOMEM;
950 	}
951 	memcpy(temp_pagelist,
952 	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
953 	DRM_DEBUG("pagelist: %d entries\n",
954 		  dma->page_count + (count << page_order));
955 
956 	entry->buf_size = size;
957 	entry->page_order = page_order;
958 	byte_count = 0;
959 	page_count = 0;
960 
961 	while (entry->buf_count < count) {
962 
963 		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
964 
965 		if (!dmah) {
966 			/* Set count correctly so we free the proper amount. */
967 			entry->buf_count = count;
968 			entry->seg_count = count;
969 			drm_cleanup_buf_error(dev, entry);
970 			kfree(temp_pagelist);
971 			mutex_unlock(&dev->struct_mutex);
972 			atomic_dec(&dev->buf_alloc);
973 			return -ENOMEM;
974 		}
975 		entry->seglist[entry->seg_count++] = dmah;
976 		for (i = 0; i < (1 << page_order); i++) {
977 			DRM_DEBUG("page %d @ 0x%08lx\n",
978 				  dma->page_count + page_count,
979 				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
980 			temp_pagelist[dma->page_count + page_count++]
981 				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
982 		}
983 		for (offset = 0;
984 		     offset + size <= total && entry->buf_count < count;
985 		     offset += alignment, ++entry->buf_count) {
986 			buf = &entry->buflist[entry->buf_count];
987 			buf->idx = dma->buf_count + entry->buf_count;
988 			buf->total = alignment;
989 			buf->order = order;
990 			buf->used = 0;
991 			buf->offset = (dma->byte_count + byte_count + offset);
992 			buf->address = ((char *)dmah->vaddr + offset);
993 			buf->bus_address = dmah->busaddr + offset;
994 			buf->next = NULL;
995 			buf->waiting = 0;
996 			buf->pending = 0;
997 			buf->file_priv = NULL;
998 
999 			buf->dev_priv_size = dev->driver->dev_priv_size;
1000 			buf->dev_private = kzalloc(buf->dev_priv_size,
1001 						GFP_KERNEL);
1002 			if (!buf->dev_private) {
1003 				/* Set count correctly so we free the proper amount. */
1004 				entry->buf_count = count;
1005 				entry->seg_count = count;
1006 				drm_cleanup_buf_error(dev, entry);
1007 				kfree(temp_pagelist);
1008 				mutex_unlock(&dev->struct_mutex);
1009 				atomic_dec(&dev->buf_alloc);
1010 				return -ENOMEM;
1011 			}
1012 
1013 			DRM_DEBUG("buffer %d @ %p\n",
1014 				  entry->buf_count, buf->address);
1015 		}
1016 		byte_count += PAGE_SIZE << page_order;
1017 	}
1018 
1019 	temp_buflist = krealloc(dma->buflist,
1020 				(dma->buf_count + entry->buf_count) *
1021 				sizeof(*dma->buflist), M_DRM, M_WAITOK);
1022 	if (!temp_buflist) {
1023 		/* Free the entry because it isn't valid */
1024 		drm_cleanup_buf_error(dev, entry);
1025 		kfree(temp_pagelist);
1026 		mutex_unlock(&dev->struct_mutex);
1027 		atomic_dec(&dev->buf_alloc);
1028 		return -ENOMEM;
1029 	}
1030 	dma->buflist = temp_buflist;
1031 
1032 	for (i = 0; i < entry->buf_count; i++) {
1033 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1034 	}
1035 
1036 	/* No allocations failed, so now we can replace the original pagelist
1037 	 * with the new one.
1038 	 */
1039 	if (dma->page_count) {
1040 		kfree(dma->pagelist);
1041 	}
1042 	dma->pagelist = temp_pagelist;
1043 
1044 	dma->buf_count += entry->buf_count;
1045 	dma->seg_count += entry->seg_count;
1046 	dma->page_count += entry->seg_count << page_order;
1047 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
1048 
1049 	mutex_unlock(&dev->struct_mutex);
1050 
1051 	request->count = entry->buf_count;
1052 	request->size = size;
1053 
1054 	if (request->flags & _DRM_PCI_BUFFER_RO)
1055 		dma->flags = _DRM_DMA_USE_PCI_RO;
1056 
1057 	atomic_dec(&dev->buf_alloc);
1058 	return 0;
1059 
1060 }
1061 EXPORT_SYMBOL(drm_legacy_addbufs_pci);
1062 
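/**
 * Add scatter-gather buffers for DMA transfers.
 *
 * Like the AGP variant, but buffer addresses are computed relative to the
 * driver's scatter-gather area (dev->sg->vaddr + request->agp_start).
 * Requires CAP_SYS_ADMIN and the DRIVER_SG feature.
 */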
1063 static int drm_legacy_addbufs_sg(struct drm_device *dev,
1064 				 struct drm_buf_desc *request)
1065 {
1066 	struct drm_device_dma *dma = dev->dma;
1067 	struct drm_buf_entry *entry;
1068 	struct drm_buf *buf;
1069 	unsigned long offset;
1070 	unsigned long agp_offset;
1071 	int count;
1072 	int order;
1073 	int size;
1074 	int alignment;
1075 	int page_order;
1076 	int total;
1077 	int byte_count;
1078 	int i;
1079 	struct drm_buf **temp_buflist;
1080 
1081 	if (!drm_core_check_feature(dev, DRIVER_SG))
1082 		return -EINVAL;
1083 
1084 	if (!dma)
1085 		return -EINVAL;
1086 
1087 	if (!capable(CAP_SYS_ADMIN))
1088 		return -EPERM;
1089 
1090 	count = request->count;
1091 	order = order_base_2(request->size);
1092 	size = 1 << order;
1093 
1094 	alignment = (request->flags & _DRM_PAGE_ALIGN)
1095 	    ? PAGE_ALIGN(size) : size;
1096 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1097 	total = PAGE_SIZE << page_order;
1098 
1099 	byte_count = 0;
1100 	agp_offset = request->agp_start;
1101 
1102 	DRM_DEBUG("count:      %d\n", count);
1103 	DRM_DEBUG("order:      %d\n", order);
1104 	DRM_DEBUG("size:       %d\n", size);
1105 	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1106 	DRM_DEBUG("alignment:  %d\n", alignment);
1107 	DRM_DEBUG("page_order: %d\n", page_order);
1108 	DRM_DEBUG("total:      %d\n", total);
1109 
1110 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1111 		return -EINVAL;
1112 
1113 	spin_lock(&dev->buf_lock);
1114 	if (dev->buf_use) {
1115 		spin_unlock(&dev->buf_lock);
1116 		return -EBUSY;
1117 	}
1118 	atomic_inc(&dev->buf_alloc);
1119 	spin_unlock(&dev->buf_lock);
1120 
1121 	mutex_lock(&dev->struct_mutex);
1122 	entry = &dma->bufs[order];
1123 	if (entry->buf_count) {
1124 		mutex_unlock(&dev->struct_mutex);
1125 		atomic_dec(&dev->buf_alloc);
1126 		return -ENOMEM;	/* May only be called once for each order */
1127 	}
1128 
1129 	if (count < 0 || count > 4096) {
1130 		mutex_unlock(&dev->struct_mutex);
1131 		atomic_dec(&dev->buf_alloc);
1132 		return -EINVAL;
1133 	}
1134 
1135 	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
1136 				GFP_KERNEL);
1137 	if (!entry->buflist) {
1138 		mutex_unlock(&dev->struct_mutex);
1139 		atomic_dec(&dev->buf_alloc);
1140 		return -ENOMEM;
1141 	}
1142 
1143 	entry->buf_size = size;
1144 	entry->page_order = page_order;
1145 
1146 	offset = 0;
1147 
1148 	while (entry->buf_count < count) {
1149 		buf = &entry->buflist[entry->buf_count];
1150 		buf->idx = dma->buf_count + entry->buf_count;
1151 		buf->total = alignment;
1152 		buf->order = order;
1153 		buf->used = 0;
1154 
1155 		buf->offset = (dma->byte_count + offset);
1156 		buf->bus_address = agp_offset + offset;
1157 		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
1158 		buf->next = NULL;
1159 		buf->waiting = 0;
1160 		buf->pending = 0;
1161 		buf->file_priv = NULL;
1162 
1163 		buf->dev_priv_size = dev->driver->dev_priv_size;
1164 		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
1165 		if (!buf->dev_private) {
1166 			/* Set count correctly so we free the proper amount. */
1167 			entry->buf_count = count;
1168 			drm_cleanup_buf_error(dev, entry);
1169 			mutex_unlock(&dev->struct_mutex);
1170 			atomic_dec(&dev->buf_alloc);
1171 			return -ENOMEM;
1172 		}
1173 
1174 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1175 
1176 		offset += alignment;
1177 		entry->buf_count++;
1178 		byte_count += PAGE_SIZE << page_order;
1179 	}
1180 
1181 	DRM_DEBUG("byte_count: %d\n", byte_count);
1182 
1183 	temp_buflist = krealloc(dma->buflist,
1184 				(dma->buf_count + entry->buf_count) *
1185 				sizeof(*dma->buflist), M_DRM, M_WAITOK);
1186 	if (!temp_buflist) {
1187 		/* Free the entry because it isn't valid */
1188 		drm_cleanup_buf_error(dev, entry);
1189 		mutex_unlock(&dev->struct_mutex);
1190 		atomic_dec(&dev->buf_alloc);
1191 		return -ENOMEM;
1192 	}
1193 	dma->buflist = temp_buflist;
1194 
1195 	for (i = 0; i < entry->buf_count; i++) {
1196 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1197 	}
1198 
1199 	dma->buf_count += entry->buf_count;
1200 	dma->seg_count += entry->seg_count;
1201 	dma->page_count += byte_count >> PAGE_SHIFT;
1202 	dma->byte_count += byte_count;
1203 
1204 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1205 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1206 
1207 	mutex_unlock(&dev->struct_mutex);
1208 
1209 	request->count = entry->buf_count;
1210 	request->size = size;
1211 
1212 	dma->flags = _DRM_DMA_USE_SG;
1213 
1214 	atomic_dec(&dev->buf_alloc);
1215 	return 0;
1216 }
1217 
1218 /**
1219  * Add buffers for DMA transfers (ioctl).
1220  *
1221  * \param inode device inode.
1222  * \param file_priv DRM file private.
1223  * \param cmd command.
1224  * \param arg pointer to a struct drm_buf_desc request.
1225  * \return zero on success or a negative number on failure.
1226  *
1227  * Depending on the memory type specified in drm_buf_desc::flags and the
1228  * build options, it dispatches the call either to addbufs_agp(),
1229  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1230  * PCI memory respectively.
1231  */
1232 int drm_legacy_addbufs(struct drm_device *dev, void *data,
1233 		       struct drm_file *file_priv)
1234 {
1235 	struct drm_buf_desc *request = data;
1236 	int ret;
1237 
1238 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1239 		return -EINVAL;
1240 
1241 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1242 		return -EINVAL;
1243 
1244 #if IS_ENABLED(CONFIG_AGP)
1245 	if (request->flags & _DRM_AGP_BUFFER)
1246 		ret = drm_legacy_addbufs_agp(dev, request);
1247 	else
1248 #endif
1249 	if (request->flags & _DRM_SG_BUFFER)
1250 		ret = drm_legacy_addbufs_sg(dev, request);
1251 	else if (request->flags & _DRM_FB_BUFFER)
1252 		ret = -EINVAL;
1253 	else
1254 		ret = drm_legacy_addbufs_pci(dev, request);
1255 
1256 	return ret;
1257 }
1258 
1259 /**
1260  * Get information about the buffer mappings.
1261  *
1262  * This was originally meant for debugging purposes, or for a sophisticated
1263  * client library to determine how best to use the available buffers (e.g.,
1264  * large buffers can be used for image transfer).
1265  *
1266  * \param inode device inode.
1267  * \param file_priv DRM file private.
1268  * \param cmd command.
1269  * \param arg pointer to a drm_buf_info structure.
1270  * \return zero on success or a negative number on failure.
1271  *
1272  * Increments drm_device::buf_use while holding the drm_device::buf_lock
1273  * lock, preventing allocation of more buffers after this call. Information
1274  * about each requested buffer is then copied into user space.
1275  */
1276 int drm_legacy_infobufs(struct drm_device *dev, void *data,
1277 			struct drm_file *file_priv)
1278 {
1279 	struct drm_device_dma *dma = dev->dma;
1280 	struct drm_buf_info *request = data;
1281 	int i;
1282 	int count;
1283 
1284 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1285 		return -EINVAL;
1286 
1287 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1288 		return -EINVAL;
1289 
1290 	if (!dma)
1291 		return -EINVAL;
1292 
1293 	spin_lock(&dev->buf_lock);
1294 	if (atomic_read(&dev->buf_alloc)) {
1295 		spin_unlock(&dev->buf_lock);
1296 		return -EBUSY;
1297 	}
1298 	++dev->buf_use;		/* Can't allocate more after this call */
1299 	spin_unlock(&dev->buf_lock);
1300 
1301 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1302 		if (dma->bufs[i].buf_count)
1303 			++count;
1304 	}
1305 
1306 	DRM_DEBUG("count = %d\n", count);
1307 
1308 	if (request->count >= count) {
1309 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1310 			if (dma->bufs[i].buf_count) {
1311 				struct drm_buf_desc __user *to =
1312 				    &request->list[count];
1313 				struct drm_buf_entry *from = &dma->bufs[i];
1314 				if (copy_to_user(&to->count,
1315 						 &from->buf_count,
1316 						 sizeof(from->buf_count)) ||
1317 				    copy_to_user(&to->size,
1318 						 &from->buf_size,
1319 						 sizeof(from->buf_size)) ||
1320 				    copy_to_user(&to->low_mark,
1321 						 &from->low_mark,
1322 						 sizeof(from->low_mark)) ||
1323 				    copy_to_user(&to->high_mark,
1324 						 &from->high_mark,
1325 						 sizeof(from->high_mark)))
1326 					return -EFAULT;
1327 
1328 				DRM_DEBUG("%d %d %d %d %d\n",
1329 					  i,
1330 					  dma->bufs[i].buf_count,
1331 					  dma->bufs[i].buf_size,
1332 					  dma->bufs[i].low_mark,
1333 					  dma->bufs[i].high_mark);
1334 				++count;
1335 			}
1336 		}
1337 	}
1338 	request->count = count;
1339 
1340 	return 0;
1341 }
1342 
1343 /**
1344  * Specifies a low and high water mark for buffer allocation.
1345  *
1346  * \param inode device inode.
1347  * \param file_priv DRM file private.
1348  * \param cmd command.
1349  * \param arg a pointer to a drm_buf_desc structure.
1350  * \return zero on success or a negative number on failure.
1351  *
1352  * Verifies that the size order is within the admissible range and updates the
1353  * low and high water marks of the respective drm_device_dma::bufs entry.
1354  *
1355  * \note This ioctl is deprecated and rarely, if ever, used.
1356  */
1357 int drm_legacy_markbufs(struct drm_device *dev, void *data,
1358 			struct drm_file *file_priv)
1359 {
1360 	struct drm_device_dma *dma = dev->dma;
1361 	struct drm_buf_desc *request = data;
1362 	int order;
1363 	struct drm_buf_entry *entry;
1364 
1365 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1366 		return -EINVAL;
1367 
1368 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1369 		return -EINVAL;
1370 
1371 	if (!dma)
1372 		return -EINVAL;
1373 
1374 	DRM_DEBUG("%d, %d, %d\n",
1375 		  request->size, request->low_mark, request->high_mark);
1376 	order = order_base_2(request->size);
1377 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1378 		return -EINVAL;
1379 	entry = &dma->bufs[order];
1380 
1381 	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1382 		return -EINVAL;
1383 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1384 		return -EINVAL;
1385 
1386 	entry->low_mark = request->low_mark;
1387 	entry->high_mark = request->high_mark;
1388 
1389 	return 0;
1390 }
1391 
1392 /**
1393  * Unreserve the buffers in the list, previously reserved using drmDMA.
1394  *
1395  * \param inode device inode.
1396  * \param file_priv DRM file private.
1397  * \param cmd command.
1398  * \param arg pointer to a drm_buf_free structure.
1399  * \return zero on success or a negative number on failure.
1400  *
1401  * Calls free_buffer() for each used buffer.
1402  * This function is primarily used for debugging.
1403  */
1404 int drm_legacy_freebufs(struct drm_device *dev, void *data,
1405 			struct drm_file *file_priv)
1406 {
1407 	struct drm_device_dma *dma = dev->dma;
1408 	struct drm_buf_free *request = data;
1409 	int i;
1410 	int idx;
1411 	struct drm_buf *buf;
1412 
1413 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1414 		return -EINVAL;
1415 
1416 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1417 		return -EINVAL;
1418 
1419 	if (!dma)
1420 		return -EINVAL;
1421 
1422 	DRM_DEBUG("%d\n", request->count);
1423 	for (i = 0; i < request->count; i++) {
1424 		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1425 			return -EFAULT;
1426 		if (idx < 0 || idx >= dma->buf_count) {
1427 			DRM_ERROR("Index %d (of %d max)\n",
1428 				  idx, dma->buf_count - 1);
1429 			return -EINVAL;
1430 		}
1431 		buf = dma->buflist[idx];
1432 		if (buf->file_priv != file_priv) {
1433 			DRM_ERROR("Process %d freeing buffer not owned\n",
1434 				  DRM_CURRENTPID);
1435 			return -EINVAL;
1436 		}
1437 		drm_legacy_free_buffer(dev, buf);
1438 	}
1439 
1440 	return 0;
1441 }
1442 
1443 /**
1444  * Maps all of the DMA buffers into client-virtual space (ioctl).
1445  *
1446  * \param inode device inode.
1447  * \param file_priv DRM file private.
1448  * \param cmd command.
1449  * \param arg pointer to a drm_buf_map structure.
1450  * \return zero on success or a negative number on failure.
1451  *
1452  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1453  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1454  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1455  * drm_mmap_dma().
1456  */
1457 int drm_legacy_mapbufs(struct drm_device *dev, void *data,
1458 		       struct drm_file *file_priv)
1459 {
1460 	struct drm_device_dma *dma = dev->dma;
1461 	int retcode = 0;
1462 	const int zero = 0;
1463 	unsigned long virtual;
1464 	vm_offset_t address;
1465 	struct vmspace *vms  = DRM_CURPROC->td_proc->p_vmspace;
1466 	struct drm_buf_map *request = data;
1467 	int i;
1468 
1469 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1470 		return -EINVAL;
1471 
1472 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1473 		return -EINVAL;
1474 
1475 	if (!dma)
1476 		return -EINVAL;
1477 
1478 	spin_lock(&dev->buf_lock);
1479 	if (atomic_read(&dev->buf_alloc)) {
1480 		spin_unlock(&dev->buf_lock);
1481 		return -EBUSY;
1482 	}
1483 	dev->buf_use++;		/* Can't allocate more after this call */
1484 	spin_unlock(&dev->buf_lock);
1485 
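	/* Only map when the client asks for all buffers: map the AGP/SG
	 * aperture map when those buffer types are in use, otherwise map
	 * the PCI DMA buffer region (offset 0). */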
1486 	if (request->count >= dma->buf_count) {
1487 		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
1488 		    || (drm_core_check_feature(dev, DRIVER_SG)
1489 			&& (dma->flags & _DRM_DMA_USE_SG))) {
1490 			struct drm_local_map *map = dev->agp_buffer_map;
1491 			unsigned long token = dev->agp_buffer_token;
1492 
1493 			if (!map) {
1494 				retcode = -EINVAL;
1495 				goto done;
1496 			}
1497 			virtual = vm_mmap(&vms->vm_map, 0, map->size,
1498 					  PROT_READ | PROT_WRITE,
1499 					  VM_PROT_ALL,
1500 					  MAP_SHARED,
1501 					  SLIST_FIRST(&dev->devnode->si_hlist),
1502 					  token);
1503 		} else {
1504 			virtual = vm_mmap(&vms->vm_map, 0, dma->byte_count,
1505 					  PROT_READ | PROT_WRITE,
1506 					  VM_PROT_ALL,
1507 					  MAP_SHARED,
1508 					  SLIST_FIRST(&dev->devnode->si_hlist),
1509 					  0);
1510 		}
1511 		if (virtual > -1024UL) {
1512 			/* Real error */
1513 			retcode = (signed long)virtual;
1514 			goto done;
1515 		}
1516 		request->virtual = (void __user *)virtual;
1517 
1518 		for (i = 0; i < dma->buf_count; i++) {
1519 			if (copy_to_user(&request->list[i].idx,
1520 					 &dma->buflist[i]->idx,
1521 					 sizeof(request->list[0].idx))) {
1522 				retcode = -EFAULT;
1523 				goto done;
1524 			}
1525 			if (copy_to_user(&request->list[i].total,
1526 					 &dma->buflist[i]->total,
1527 					 sizeof(request->list[0].total))) {
1528 				retcode = -EFAULT;
1529 				goto done;
1530 			}
1531 			if (copy_to_user(&request->list[i].used,
1532 					 &zero, sizeof(zero))) {
1533 				retcode = -EFAULT;
1534 				goto done;
1535 			}
1536 			address = virtual + dma->buflist[i]->offset;	/* *** */
1537 			if (copy_to_user(&request->list[i].address,
1538 					 &address, sizeof(address))) {
1539 				retcode = -EFAULT;
1540 				goto done;
1541 			}
1542 		}
1543 	}
1544       done:
1545 	request->count = dma->buf_count;
1546 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1547 
1548 	return retcode;
1549 }
1550 
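/**
 * Generic DMA ioctl: forwards the request to the driver's dma_ioctl hook,
 * or fails with -EINVAL if the driver does not provide one.
 */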
1551 int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
1552 		  struct drm_file *file_priv)
1553 {
1554 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1555 		return -EINVAL;
1556 
1557 	if (dev->driver->dma_ioctl)
1558 		return dev->driver->dma_ioctl(dev, data, file_priv);
1559 	else
1560 		return -EINVAL;
1561 }
1562 
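/**
 * Return the SAREA map: the first _DRM_SHM map on the map list with the
 * _DRM_CONTAINS_LOCK flag set, or NULL if there is none.
 */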
1563 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
1564 {
1565 	struct drm_map_list *entry;
1566 
1567 	list_for_each_entry(entry, &dev->maplist, head) {
1568 		if (entry->map && entry->map->type == _DRM_SHM &&
1569 		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1570 			return entry->map;
1571 		}
1572 	}
1573 	return NULL;
1574 }
1575 EXPORT_SYMBOL(drm_legacy_getsarea);
1576