xref: /freebsd/sys/dev/drm2/drm_bufs.c (revision fdafd315)
1 /**
2  * \file drm_bufs.c
3  * Generic buffer template
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8 
9 /*
10  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11  *
12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  */
35 
36 #include <sys/param.h>
37 #include <sys/shm.h>
38 
39 #include <dev/pci/pcireg.h>
40 
41 #include <dev/drm2/drmP.h>
42 
43 /* Allocation of PCI memory resources (framebuffer, registers, etc.) for
44  * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
45  * address for accessing them.  Cleaned up at unload.
46  */
47 static int drm_alloc_resource(struct drm_device *dev, int resource)
48 {
49 	struct resource *res;
50 	int rid;
51 
52 	if (resource >= DRM_MAX_PCI_RESOURCE) {
53 		DRM_ERROR("Resource %d too large\n", resource);
54 		return 1;
55 	}
56 
57 	if (dev->pcir[resource] != NULL) {
58 		return 0;
59 	}
60 
61 	rid = PCIR_BAR(resource);
62 	res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
63 	    RF_SHAREABLE);
64 	if (res == NULL) {
65 		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
66 		return 1;
67 	}
68 
69 	if (dev->pcir[resource] == NULL) {
70 		dev->pcirid[resource] = rid;
71 		dev->pcir[resource] = res;
72 	}
73 
74 	return 0;
75 }
76 
77 unsigned long drm_get_resource_start(struct drm_device *dev,
78 				     unsigned int resource)
79 {
80 	unsigned long start;
81 
82 	mtx_lock(&dev->pcir_lock);
83 
84 	if (drm_alloc_resource(dev, resource) != 0)
85 		start = 0;
86 	else
87 		start = rman_get_start(dev->pcir[resource]);
88 
89 	mtx_unlock(&dev->pcir_lock);
90 
91 	return (start);
92 }
93 
94 unsigned long drm_get_resource_len(struct drm_device *dev,
95 				   unsigned int resource)
96 {
97 	unsigned long len;
98 
99 	mtx_lock(&dev->pcir_lock);
100 
101 	if (drm_alloc_resource(dev, resource) != 0)
102 		len = 0;
103 	else
104 		len = rman_get_size(dev->pcir[resource]);
105 
106 	mtx_unlock(&dev->pcir_lock);
107 
108 	return (len);
109 }
110 
111 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
112 						  struct drm_local_map *map)
113 {
114 	struct drm_map_list *entry;
115 	list_for_each_entry(entry, &dev->maplist, head) {
116 		/*
117 		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
118 		 * while PCI resources may live above that, we only compare the
119 		 * lower 32 bits of the map offset for maps of type
120 		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
121 		 * It is assumed that if a driver has more than one resource
122 		 * of each type, the lower 32 bits are different.
123 		 */
124 		if (!entry->map ||
125 		    map->type != entry->map->type ||
126 		    entry->master != dev->primary->master)
127 			continue;
128 		switch (map->type) {
129 		case _DRM_SHM:
130 			if (map->flags != _DRM_CONTAINS_LOCK)
131 				break;
132 			return entry;
133 		case _DRM_REGISTERS:
134 		case _DRM_FRAME_BUFFER:
135 			if ((entry->map->offset & 0xffffffff) ==
136 			    (map->offset & 0xffffffff))
137 				return entry;
138 		default: /* Make gcc happy */
139 			;
140 		}
141 		if (entry->map->offset == map->offset)
142 			return entry;
143 	}
144 
145 	return NULL;
146 }
147 
148 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
149 			  unsigned long user_token, int hashed_handle, int shm)
150 {
151 	int use_hashed_handle, shift;
152 	unsigned long add;
153 
154 #if (BITS_PER_LONG == 64)
155 	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
156 #elif (BITS_PER_LONG == 32)
157 	use_hashed_handle = hashed_handle;
158 #else
159 #error Unsupported long size. Neither 64 nor 32 bits.
160 #endif
161 
162 	if (!use_hashed_handle) {
163 		int ret;
164 		hash->key = user_token >> PAGE_SHIFT;
165 		ret = drm_ht_insert_item(&dev->map_hash, hash);
166 		if (ret != -EINVAL)
167 			return ret;
168 	}
169 
170 	shift = 0;
171 	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
172 	if (shm && (SHMLBA > PAGE_SIZE)) {
173 		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
174 
175 		/* For shared memory, we have to preserve the SHMLBA
176 		 * bits of the eventual vma->vm_pgoff value during
177 		 * mmap().  Otherwise we run into cache aliasing problems
178 		 * on some platforms.  On these platforms, the pgoff of
179 		 * a mmap() request is used to pick a suitable virtual
180 		 * address for the mmap() region such that it will not
181 		 * cause cache aliasing problems.
182 		 *
183 		 * Therefore, make sure the SHMLBA relevant bits of the
184 		 * hash value we use are equal to those in the original
185 		 * kernel virtual address.
186 		 */
187 		shift = bits;
188 		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
189 	}
190 
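	/*
	 * No fixed key was available: fall back to a hashed handle chosen by
	 * drm_ht_just_insert_please(), constrained to fit within the 32-bit
	 * user token space while preserving the SHMLBA-relevant low bits
	 * computed above.
	 */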
191 	return drm_ht_just_insert_please(&dev->map_hash, hash,
192 					 user_token, 32 - PAGE_SHIFT - 3,
193 					 shift, add);
194 }
195 
196 /**
197  * Core function to create a range of memory available for mapping by a
198  * non-root process.
199  *
200  * Adjusts the memory offset to its absolute value according to the mapping
201  * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
202  * applicable and if supported by the kernel.
203  */
204 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
205 			   unsigned int size, enum drm_map_type type,
206 			   enum drm_map_flags flags,
207 			   struct drm_map_list ** maplist)
208 {
209 	struct drm_local_map *map;
210 	struct drm_map_list *list;
211 	drm_dma_handle_t *dmah;
212 	unsigned long user_token;
213 	int ret;
214 	int align;
215 
216 	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_NOWAIT);
217 	if (!map)
218 		return -ENOMEM;
219 
220 	map->offset = offset;
221 	map->size = size;
222 	map->flags = flags;
223 	map->type = type;
224 
225 	/* Only allow shared memory to be removable since we only keep enough
226 	 * bookkeeping information about shared memory to allow for removal
227 	 * when processes fork.
228 	 */
229 	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
230 		free(map, DRM_MEM_MAPS);
231 		return -EINVAL;
232 	}
233 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
234 		  (unsigned long long)map->offset, map->size, map->type);
235 
236 	/* Page-align _DRM_SHM maps. They are allocated here, so this opens no
237 	 * security hole, and it works around various broken drivers that use a
238 	 * non-aligned quantity to map the SAREA. --BenH
239 	 */
240 	if (map->type == _DRM_SHM)
241 		map->size = PAGE_ALIGN(map->size);
242 
243 	/*
244 	 * FreeBSD port note: FreeBSD's PAGE_MASK is the inverse of
245 	 * Linux's. That's why the test below doesn't invert the
246 	 * constant.
247 	 */
248 	if ((map->offset & ((resource_size_t)PAGE_MASK)) || (map->size & (PAGE_MASK))) {
249 		free(map, DRM_MEM_MAPS);
250 		return -EINVAL;
251 	}
252 	map->mtrr = -1;
253 	map->handle = NULL;
254 
255 	switch (map->type) {
256 	case _DRM_REGISTERS:
257 	case _DRM_FRAME_BUFFER:
258 #ifdef __linux__
259 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
260 		if (map->offset + (map->size-1) < map->offset ||
261 		    map->offset < virt_to_phys(high_memory)) {
262 			kfree(map);
263 			return -EINVAL;
264 		}
265 #endif
266 #endif
267 		/* Some drivers preinitialize some maps, without the X Server
268 		 * needing to be aware of it.  Therefore, we just return success
269 		 * when the server tries to create a duplicate map.
270 		 */
271 		list = drm_find_matching_map(dev, map);
272 		if (list != NULL) {
273 			if (list->map->size != map->size) {
274 				DRM_DEBUG("Matching maps of type %d with "
275 					  "mismatched sizes, (%ld vs %ld)\n",
276 					  map->type, map->size,
277 					  list->map->size);
278 				list->map->size = map->size;
279 			}
280 
281 			free(map, DRM_MEM_MAPS);
282 			*maplist = list;
283 			return 0;
284 		}
285 
286 		if (drm_core_has_MTRR(dev)) {
287 			if (map->type == _DRM_FRAME_BUFFER ||
288 			    (map->flags & _DRM_WRITE_COMBINING)) {
289 				if (drm_mtrr_add(
290 				    map->offset, map->size,
291 				    DRM_MTRR_WC) == 0)
292 					map->mtrr = 1;
293 			}
294 		}
295 		if (map->type == _DRM_REGISTERS) {
296 			drm_core_ioremap(map, dev);
297 			if (!map->handle) {
298 				free(map, DRM_MEM_MAPS);
299 				return -ENOMEM;
300 			}
301 		}
302 
303 		break;
304 	case _DRM_SHM:
305 		list = drm_find_matching_map(dev, map);
306 		if (list != NULL) {
307 			if(list->map->size != map->size) {
308 				DRM_DEBUG("Matching maps of type %d with "
309 					  "mismatched sizes, (%ld vs %ld)\n",
310 					  map->type, map->size, list->map->size);
311 				list->map->size = map->size;
312 			}
313 
314 			free(map, DRM_MEM_MAPS);
315 			*maplist = list;
316 			return 0;
317 		}
318 		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
319 		DRM_DEBUG("%lu %d %p\n",
320 			  map->size, drm_order(map->size), map->handle);
321 		if (!map->handle) {
322 			free(map, DRM_MEM_MAPS);
323 			return -ENOMEM;
324 		}
325 		map->offset = (unsigned long)map->handle;
326 		if (map->flags & _DRM_CONTAINS_LOCK) {
327 			/* Prevent a 2nd X Server from creating a 2nd lock */
328 			if (dev->primary->master->lock.hw_lock != NULL) {
329 				free(map->handle, DRM_MEM_MAPS);
330 				free(map, DRM_MEM_MAPS);
331 				return -EBUSY;
332 			}
333 			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
334 		}
335 		break;
336 	case _DRM_AGP: {
337 		struct drm_agp_mem *entry;
338 		int valid = 0;
339 
340 		if (!drm_core_has_AGP(dev)) {
341 			free(map, DRM_MEM_MAPS);
342 			return -EINVAL;
343 		}
344 #ifdef __linux__
345 #ifdef __alpha__
346 		map->offset += dev->hose->mem_space->start;
347 #endif
348 #endif
349 		/* In some cases (i810 driver), user space may have already
350 		 * added the AGP base itself, because dev->agp->base previously
351 		 * only got set during AGP enable.  So, only add the base
352 		 * address if the map's offset isn't already within the
353 		 * aperture.
354 		 */
355 		if (map->offset < dev->agp->base ||
356 		    map->offset > dev->agp->base +
357 		    dev->agp->agp_info.ai_aperture_size * 1024 * 1024 - 1) {
358 			map->offset += dev->agp->base;
359 		}
360 		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
361 
362 		/* This assumes the DRM is in total control of AGP space.
363 		 * It's not always the case, as AGP can be under the control of
364 		 * user space (e.g. the i810 driver); then this loop is skipped,
365 		 * and we double-check that dev->agp->memory is actually set, as
366 		 * well as the range being invalid, before returning EPERM.
367 		 */
368 		list_for_each_entry(entry, &dev->agp->memory, head) {
369 			if ((map->offset >= entry->bound) &&
370 			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
371 				valid = 1;
372 				break;
373 			}
374 		}
375 		if (!list_empty(&dev->agp->memory) && !valid) {
376 			free(map, DRM_MEM_MAPS);
377 			return -EPERM;
378 		}
379 		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
380 			  (unsigned long long)map->offset, map->size);
381 
382 		break;
383 	}
384 	case _DRM_GEM:
385 		DRM_ERROR("tried to addmap GEM object\n");
386 		break;
387 	case _DRM_SCATTER_GATHER:
388 		if (!dev->sg) {
389 			free(map, DRM_MEM_MAPS);
390 			return -EINVAL;
391 		}
392 		map->handle = (char *)dev->sg->vaddr + offset;
393 		map->offset += (uintptr_t)dev->sg->vaddr;
394 		break;
395 	case _DRM_CONSISTENT:
396 		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
397 		 * As we're limiting the address to 2^32-1 (or less),
398 		 * casting it down to 32 bits is no problem, but we
399 		 * need to point to a 64-bit variable first. */
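		/*
		 * Use the buffer size itself as the DMA alignment when it is a
		 * power of two; otherwise fall back to page alignment.
		 */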
400 		align = map->size;
401 		if ((align & (align - 1)) != 0)
402 			align = PAGE_SIZE;
403 		dmah = drm_pci_alloc(dev, map->size, align, BUS_SPACE_MAXADDR);
404 		if (!dmah) {
405 			free(map, DRM_MEM_MAPS);
406 			return -ENOMEM;
407 		}
408 		map->handle = dmah->vaddr;
409 		map->offset = dmah->busaddr;
410 		map->dmah = dmah;
411 		break;
412 	default:
413 		free(map, DRM_MEM_MAPS);
414 		return -EINVAL;
415 	}
416 
417 	list = malloc(sizeof(*list), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
418 	if (!list) {
419 		if (map->type == _DRM_REGISTERS)
420 			drm_core_ioremapfree(map, dev);
421 		free(map, DRM_MEM_MAPS);
422 		return -EINVAL;
423 	}
424 	list->map = map;
425 
426 	DRM_LOCK(dev);
427 	list_add(&list->head, &dev->maplist);
428 
429 	/* Assign a 32-bit handle */
430 	/* We do it here so that dev->struct_mutex protects the increment */
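	/* The resulting token doubles as the offset userspace later passes to
	 * mmap(2); the mmap path uses dev->map_hash to translate it back to
	 * this map. */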
431 	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
432 		map->offset;
433 	ret = drm_map_handle(dev, &list->hash, user_token, 0,
434 			     (map->type == _DRM_SHM));
435 	if (ret) {
436 		if (map->type == _DRM_REGISTERS)
437 			drm_core_ioremapfree(map, dev);
438 		free(map, DRM_MEM_MAPS);
439 		free(list, DRM_MEM_MAPS);
440 		DRM_UNLOCK(dev);
441 		return ret;
442 	}
443 
444 	list->user_token = list->hash.key << PAGE_SHIFT;
445 	DRM_UNLOCK(dev);
446 
447 	if (!(map->flags & _DRM_DRIVER))
448 		list->master = dev->primary->master;
449 	*maplist = list;
450 	return 0;
451 }
452 
453 int drm_addmap(struct drm_device * dev, resource_size_t offset,
454 	       unsigned int size, enum drm_map_type type,
455 	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
456 {
457 	struct drm_map_list *list;
458 	int rc;
459 
460 	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
461 	if (!rc)
462 		*map_ptr = list->map;
463 	return rc;
464 }
465 
466 EXPORT_SYMBOL(drm_addmap);
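
/*
 * Minimal usage sketch (hypothetical driver attach path, not taken from this
 * file): map the first register BAR and keep the handle around.
 *
 *	struct drm_local_map *regs;
 *	int ret;
 *
 *	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *	    drm_get_resource_len(dev, 0), _DRM_REGISTERS,
 *	    _DRM_READ_ONLY, &regs);
 *	if (ret != 0)
 *		return (ret);	(drm_addmap() returns a negative errno)
 */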
467 
468 /**
469  * Ioctl to specify a range of memory that is available for mapping by a
470  * non-root process.
471  *
472  * \param inode device inode.
473  * \param file_priv DRM file private.
474  * \param cmd command.
475  * \param arg pointer to a drm_map structure.
476  * \return zero on success or a negative value on error.
477  *
478  */
479 int drm_addmap_ioctl(struct drm_device *dev, void *data,
480 		     struct drm_file *file_priv)
481 {
482 	struct drm_map *map = data;
483 	struct drm_map_list *maplist;
484 	int err;
485 
486 	if (!(DRM_SUSER(DRM_CURPROC) || map->type == _DRM_AGP || map->type == _DRM_SHM))
487 		return -EPERM;
488 
489 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
490 			      map->flags, &maplist);
491 
492 	if (err)
493 		return err;
494 
495 	/* avoid a warning on 64-bit; this casting isn't very nice, but the API is already set, so it's too late to change */
496 	map->handle = (void *)(unsigned long)maplist->user_token;
497 	return 0;
498 }
499 
500 /**
501  * Remove a map private from the list and deallocate resources if the mapping
502  * isn't in use.
503  *
504  * Searches for the map on drm_device::maplist, removes it from the list, sees
505  * if it is being used, and frees any associated resources (such as MTRRs) if
506  * it is not in use.
507  *
508  * \sa drm_addmap
509  */
510 int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
511 {
512 	struct drm_map_list *r_list = NULL, *list_t;
513 	int found = 0;
514 	struct drm_master *master;
515 
516 	/* Find the list entry for the map and remove it */
517 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
518 		if (r_list->map == map) {
519 			master = r_list->master;
520 			list_del(&r_list->head);
521 			drm_ht_remove_key(&dev->map_hash,
522 					  r_list->user_token >> PAGE_SHIFT);
523 			free(r_list, DRM_MEM_MAPS);
524 			found = 1;
525 			break;
526 		}
527 	}
528 
529 	if (!found)
530 		return -EINVAL;
531 
532 	switch (map->type) {
533 	case _DRM_REGISTERS:
534 		drm_core_ioremapfree(map, dev);
535 		/* FALLTHROUGH */
536 	case _DRM_FRAME_BUFFER:
537 		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
538 			int retcode;
539 			retcode = drm_mtrr_del(map->mtrr, map->offset,
540 			    map->size, DRM_MTRR_WC);
541 			DRM_DEBUG("mtrr_del=%d\n", retcode);
542 		}
543 		break;
544 	case _DRM_SHM:
545 		free(map->handle, DRM_MEM_MAPS);
546 		if (master) {
547 			if (dev->sigdata.lock == master->lock.hw_lock)
548 				dev->sigdata.lock = NULL;
549 			master->lock.hw_lock = NULL;   /* SHM removed */
550 			master->lock.file_priv = NULL;
551 			DRM_WAKEUP_INT((void *)&master->lock.lock_queue);
552 		}
553 		break;
554 	case _DRM_AGP:
555 	case _DRM_SCATTER_GATHER:
556 		break;
557 	case _DRM_CONSISTENT:
558 		drm_pci_free(dev, map->dmah);
559 		break;
560 	case _DRM_GEM:
561 		DRM_ERROR("tried to rmmap GEM object\n");
562 		break;
563 	}
564 	free(map, DRM_MEM_MAPS);
565 
566 	return 0;
567 }
568 EXPORT_SYMBOL(drm_rmmap_locked);
569 
570 int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
571 {
572 	int ret;
573 
574 	DRM_LOCK(dev);
575 	ret = drm_rmmap_locked(dev, map);
576 	DRM_UNLOCK(dev);
577 
578 	return ret;
579 }
580 EXPORT_SYMBOL(drm_rmmap);
581 
582 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
583  * the last close of the device, and this is necessary for cleanup when things
584  * exit uncleanly.  Therefore, having userland manually remove mappings seems
585  * like a pointless exercise since they're going away anyway.
586  *
587  * One use case might be after addmap is allowed for normal users for SHM and
588  * gets used by drivers that the server doesn't need to care about.  This seems
589  * unlikely.
590  *
591  * \param inode device inode.
592  * \param file_priv DRM file private.
593  * \param cmd command.
594  * \param arg pointer to a struct drm_map structure.
595  * \return zero on success or a negative value on error.
596  */
597 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
598 		    struct drm_file *file_priv)
599 {
600 	struct drm_map *request = data;
601 	struct drm_local_map *map = NULL;
602 	struct drm_map_list *r_list;
603 	int ret;
604 
605 	DRM_LOCK(dev);
606 	list_for_each_entry(r_list, &dev->maplist, head) {
607 		if (r_list->map &&
608 		    r_list->user_token == (unsigned long)request->handle &&
609 		    r_list->map->flags & _DRM_REMOVABLE) {
610 			map = r_list->map;
611 			break;
612 		}
613 	}
614 
615 	/* List has wrapped around to the head pointer, or it's empty and we
616 	 * didn't find anything.
617 	 */
618 	if (list_empty(&dev->maplist) || !map) {
619 		DRM_UNLOCK(dev);
620 		return -EINVAL;
621 	}
622 
623 	/* Register and framebuffer maps are permanent */
624 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
625 		DRM_UNLOCK(dev);
626 		return 0;
627 	}
628 
629 	ret = drm_rmmap_locked(dev, map);
630 
631 	DRM_UNLOCK(dev);
632 
633 	return ret;
634 }
635 
636 /**
637  * Cleanup after an error on one of the addbufs() functions.
638  *
639  * \param dev DRM device.
640  * \param entry buffer entry where the error occurred.
641  *
642  * Frees any pages and buffers associated with the given entry.
643  */
644 static void drm_cleanup_buf_error(struct drm_device * dev,
645 				  struct drm_buf_entry * entry)
646 {
647 	int i;
648 
649 	if (entry->seg_count) {
650 		for (i = 0; i < entry->seg_count; i++) {
651 			if (entry->seglist[i]) {
652 				drm_pci_free(dev, entry->seglist[i]);
653 			}
654 		}
655 		free(entry->seglist, DRM_MEM_SEGS);
656 
657 		entry->seg_count = 0;
658 	}
659 
660 	if (entry->buf_count) {
661 		for (i = 0; i < entry->buf_count; i++) {
662 			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
663 		}
664 		free(entry->buflist, DRM_MEM_BUFS);
665 
666 		entry->buf_count = 0;
667 	}
668 }
669 
670 #if __OS_HAS_AGP
671 /**
672  * Add AGP buffers for DMA transfers.
673  *
674  * \param dev struct drm_device to which the buffers are to be added.
675  * \param request pointer to a struct drm_buf_desc describing the request.
676  * \return zero on success or a negative number on failure.
677  *
678  * After some sanity checks, creates a drm_buf structure for each buffer and
679  * reallocates the buffer list of the same size order to accommodate the new
680  * buffers.
681  */
682 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
683 {
684 	struct drm_device_dma *dma = dev->dma;
685 	struct drm_buf_entry *entry;
686 	struct drm_agp_mem *agp_entry;
687 	struct drm_buf *buf;
688 	unsigned long offset;
689 	unsigned long agp_offset;
690 	int count;
691 	int order;
692 	int size;
693 	int alignment;
694 	int page_order;
695 	int total;
696 	int byte_count;
697 	int i, valid;
698 	struct drm_buf **temp_buflist;
699 
700 	if (!dma)
701 		return -EINVAL;
702 
703 	count = request->count;
704 	order = drm_order(request->size);
705 	size = 1 << order;
706 
707 	alignment = (request->flags & _DRM_PAGE_ALIGN)
708 	    ? PAGE_ALIGN(size) : size;
709 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
710 	total = PAGE_SIZE << page_order;
711 
712 	byte_count = 0;
713 	agp_offset = dev->agp->base + request->agp_start;
714 
715 	DRM_DEBUG("count:      %d\n", count);
716 	DRM_DEBUG("order:      %d\n", order);
717 	DRM_DEBUG("size:       %d\n", size);
718 	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
719 	DRM_DEBUG("alignment:  %d\n", alignment);
720 	DRM_DEBUG("page_order: %d\n", page_order);
721 	DRM_DEBUG("total:      %d\n", total);
722 
723 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
724 		return -EINVAL;
725 
726 	/* Make sure buffers are located in AGP memory that we own */
727 	valid = 0;
728 	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
729 		if ((agp_offset >= agp_entry->bound) &&
730 		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
731 			valid = 1;
732 			break;
733 		}
734 	}
735 	if (!list_empty(&dev->agp->memory) && !valid) {
736 		DRM_DEBUG("zone invalid\n");
737 		return -EINVAL;
738 	}
739 	mtx_lock(&dev->count_lock);
740 	if (dev->buf_use) {
741 		mtx_unlock(&dev->count_lock);
742 		return -EBUSY;
743 	}
744 	atomic_inc(&dev->buf_alloc);
745 	mtx_unlock(&dev->count_lock);
746 
747 	DRM_LOCK(dev);
748 	entry = &dma->bufs[order];
749 	if (entry->buf_count) {
750 		DRM_UNLOCK(dev);
751 		atomic_dec(&dev->buf_alloc);
752 		return -ENOMEM;	/* May only call once for each order */
753 	}
754 
755 	if (count < 0 || count > 4096) {
756 		DRM_UNLOCK(dev);
757 		atomic_dec(&dev->buf_alloc);
758 		return -EINVAL;
759 	}
760 
761 	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
762 	    M_NOWAIT | M_ZERO);
763 	if (!entry->buflist) {
764 		DRM_UNLOCK(dev);
765 		atomic_dec(&dev->buf_alloc);
766 		return -ENOMEM;
767 	}
768 
769 	entry->buf_size = size;
770 	entry->page_order = page_order;
771 
772 	offset = 0;
773 
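	/*
	 * Carve the region starting at agp_offset into 'count' buffers spaced
	 * 'alignment' bytes apart; buf->address holds the aperture (bus)
	 * address, as AGP buffers are not kernel-mapped here.
	 */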
774 	while (entry->buf_count < count) {
775 		buf = &entry->buflist[entry->buf_count];
776 		buf->idx = dma->buf_count + entry->buf_count;
777 		buf->total = alignment;
778 		buf->order = order;
779 		buf->used = 0;
780 
781 		buf->offset = (dma->byte_count + offset);
782 		buf->bus_address = agp_offset + offset;
783 		buf->address = (void *)(agp_offset + offset);
784 		buf->next = NULL;
785 		buf->waiting = 0;
786 		buf->pending = 0;
787 		buf->file_priv = NULL;
788 
789 		buf->dev_priv_size = dev->driver->dev_priv_size;
790 		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
791 		    M_NOWAIT | M_ZERO);
792 		if (!buf->dev_private) {
793 			/* Set count correctly so we free the proper amount. */
794 			entry->buf_count = count;
795 			drm_cleanup_buf_error(dev, entry);
796 			DRM_UNLOCK(dev);
797 			atomic_dec(&dev->buf_alloc);
798 			return -ENOMEM;
799 		}
800 
801 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
802 
803 		offset += alignment;
804 		entry->buf_count++;
805 		byte_count += PAGE_SIZE << page_order;
806 	}
807 
808 	DRM_DEBUG("byte_count: %d\n", byte_count);
809 
810 	temp_buflist = realloc(dma->buflist,
811 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
812 	    DRM_MEM_BUFS, M_NOWAIT);
813 	if (!temp_buflist) {
814 		/* Free the entry because it isn't valid */
815 		drm_cleanup_buf_error(dev, entry);
816 		DRM_UNLOCK(dev);
817 		atomic_dec(&dev->buf_alloc);
818 		return -ENOMEM;
819 	}
820 	dma->buflist = temp_buflist;
821 
822 	for (i = 0; i < entry->buf_count; i++) {
823 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
824 	}
825 
826 	dma->buf_count += entry->buf_count;
827 	dma->seg_count += entry->seg_count;
828 	dma->page_count += byte_count >> PAGE_SHIFT;
829 	dma->byte_count += byte_count;
830 
831 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
832 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
833 
834 	DRM_UNLOCK(dev);
835 
836 	request->count = entry->buf_count;
837 	request->size = size;
838 
839 	dma->flags = _DRM_DMA_USE_AGP;
840 
841 	atomic_dec(&dev->buf_alloc);
842 	return 0;
843 }
844 EXPORT_SYMBOL(drm_addbufs_agp);
845 #endif				/* __OS_HAS_AGP */
846 
847 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
848 {
849 	struct drm_device_dma *dma = dev->dma;
850 	int count;
851 	int order;
852 	int size;
853 	int total;
854 	int page_order;
855 	struct drm_buf_entry *entry;
856 	drm_dma_handle_t *dmah;
857 	struct drm_buf *buf;
858 	int alignment;
859 	unsigned long offset;
860 	int i;
861 	int byte_count;
862 	int page_count;
863 	unsigned long *temp_pagelist;
864 	struct drm_buf **temp_buflist;
865 
866 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
867 		return -EINVAL;
868 
869 	if (!dma)
870 		return -EINVAL;
871 
872 	if (!DRM_SUSER(DRM_CURPROC))
873 		return -EPERM;
874 
875 	count = request->count;
876 	order = drm_order(request->size);
877 	size = 1 << order;
878 
879 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
880 		  request->count, request->size, size, order);
881 
882 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
883 		return -EINVAL;
884 
885 	alignment = (request->flags & _DRM_PAGE_ALIGN)
886 	    ? PAGE_ALIGN(size) : size;
887 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
888 	total = PAGE_SIZE << page_order;
889 
890 	mtx_lock(&dev->count_lock);
891 	if (dev->buf_use) {
892 		mtx_unlock(&dev->count_lock);
893 		return -EBUSY;
894 	}
895 	atomic_inc(&dev->buf_alloc);
896 	mtx_unlock(&dev->count_lock);
897 
898 	DRM_LOCK(dev);
899 	entry = &dma->bufs[order];
900 	if (entry->buf_count) {
901 		DRM_UNLOCK(dev);
902 		atomic_dec(&dev->buf_alloc);
903 		return -ENOMEM;	/* May only call once for each order */
904 	}
905 
906 	if (count < 0 || count > 4096) {
907 		DRM_UNLOCK(dev);
908 		atomic_dec(&dev->buf_alloc);
909 		return -EINVAL;
910 	}
911 
912 	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
913 	    M_NOWAIT | M_ZERO);
914 	if (!entry->buflist) {
915 		DRM_UNLOCK(dev);
916 		atomic_dec(&dev->buf_alloc);
917 		return -ENOMEM;
918 	}
919 
920 	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
921 	    M_NOWAIT | M_ZERO);
922 	if (!entry->seglist) {
923 		free(entry->buflist, DRM_MEM_BUFS);
924 		DRM_UNLOCK(dev);
925 		atomic_dec(&dev->buf_alloc);
926 		return -ENOMEM;
927 	}
928 
929 	/* Keep the original pagelist until we know all the allocations
930 	 * have succeeded
931 	 */
932 	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
933 	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);
934 	if (!temp_pagelist) {
935 		free(entry->buflist, DRM_MEM_BUFS);
936 		free(entry->seglist, DRM_MEM_SEGS);
937 		DRM_UNLOCK(dev);
938 		atomic_dec(&dev->buf_alloc);
939 		return -ENOMEM;
940 	}
941 	memcpy(temp_pagelist,
942 	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
943 	DRM_DEBUG("pagelist: %d entries\n",
944 		  dma->page_count + (count << page_order));
945 
946 	entry->buf_size = size;
947 	entry->page_order = page_order;
948 	byte_count = 0;
949 	page_count = 0;
950 
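	/*
	 * Each pass allocates one physically contiguous DMA segment of
	 * PAGE_SIZE << page_order bytes, records its pages in the temporary
	 * pagelist, and carves it into buffers spaced 'alignment' bytes apart.
	 */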
951 	while (entry->buf_count < count) {
952 
953 		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, BUS_SPACE_MAXADDR);
954 
955 		if (!dmah) {
956 			/* Set count correctly so we free the proper amount. */
957 			entry->buf_count = count;
958 			entry->seg_count = count;
959 			drm_cleanup_buf_error(dev, entry);
960 			free(temp_pagelist, DRM_MEM_PAGES);
961 			DRM_UNLOCK(dev);
962 			atomic_dec(&dev->buf_alloc);
963 			return -ENOMEM;
964 		}
965 		entry->seglist[entry->seg_count++] = dmah;
966 		for (i = 0; i < (1 << page_order); i++) {
967 			DRM_DEBUG("page %d @ 0x%08lx\n",
968 				  dma->page_count + page_count,
969 				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
970 			temp_pagelist[dma->page_count + page_count++]
971 				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
972 		}
973 		for (offset = 0;
974 		     offset + size <= total && entry->buf_count < count;
975 		     offset += alignment, ++entry->buf_count) {
976 			buf = &entry->buflist[entry->buf_count];
977 			buf->idx = dma->buf_count + entry->buf_count;
978 			buf->total = alignment;
979 			buf->order = order;
980 			buf->used = 0;
981 			buf->offset = (dma->byte_count + byte_count + offset);
982 			buf->address = (void *)((char *)dmah->vaddr + offset);
983 			buf->bus_address = dmah->busaddr + offset;
984 			buf->next = NULL;
985 			buf->waiting = 0;
986 			buf->pending = 0;
987 			buf->file_priv = NULL;
988 
989 			buf->dev_priv_size = dev->driver->dev_priv_size;
990 			buf->dev_private = malloc(buf->dev_priv_size,
991 			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
992 			if (!buf->dev_private) {
993 				/* Set count correctly so we free the proper amount. */
994 				entry->buf_count = count;
995 				entry->seg_count = count;
996 				drm_cleanup_buf_error(dev, entry);
997 				free(temp_pagelist, DRM_MEM_PAGES);
998 				DRM_UNLOCK(dev);
999 				atomic_dec(&dev->buf_alloc);
1000 				return -ENOMEM;
1001 			}
1002 
1003 			DRM_DEBUG("buffer %d @ %p\n",
1004 				  entry->buf_count, buf->address);
1005 		}
1006 		byte_count += PAGE_SIZE << page_order;
1007 	}
1008 
1009 	temp_buflist = realloc(dma->buflist,
1010 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
1011 	    DRM_MEM_BUFS, M_NOWAIT);
1012 	if (!temp_buflist) {
1013 		/* Free the entry because it isn't valid */
1014 		drm_cleanup_buf_error(dev, entry);
1015 		free(temp_pagelist, DRM_MEM_PAGES);
1016 		DRM_UNLOCK(dev);
1017 		atomic_dec(&dev->buf_alloc);
1018 		return -ENOMEM;
1019 	}
1020 	dma->buflist = temp_buflist;
1021 
1022 	for (i = 0; i < entry->buf_count; i++) {
1023 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1024 	}
1025 
1026 	/* No allocations failed, so now we can replace the original pagelist
1027 	 * with the new one.
1028 	 */
1029 	if (dma->page_count) {
1030 		free(dma->pagelist, DRM_MEM_PAGES);
1031 	}
1032 	dma->pagelist = temp_pagelist;
1033 
1034 	dma->buf_count += entry->buf_count;
1035 	dma->seg_count += entry->seg_count;
1036 	dma->page_count += entry->seg_count << page_order;
1037 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
1038 
1039 	DRM_UNLOCK(dev);
1040 
1041 	request->count = entry->buf_count;
1042 	request->size = size;
1043 
1044 	if (request->flags & _DRM_PCI_BUFFER_RO)
1045 		dma->flags = _DRM_DMA_USE_PCI_RO;
1046 
1047 	atomic_dec(&dev->buf_alloc);
1048 	return 0;
1049 
1050 }
1051 EXPORT_SYMBOL(drm_addbufs_pci);
1052 
1053 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
1054 {
1055 	struct drm_device_dma *dma = dev->dma;
1056 	struct drm_buf_entry *entry;
1057 	struct drm_buf *buf;
1058 	unsigned long offset;
1059 	unsigned long agp_offset;
1060 	int count;
1061 	int order;
1062 	int size;
1063 	int alignment;
1064 	int page_order;
1065 	int total;
1066 	int byte_count;
1067 	int i;
1068 	struct drm_buf **temp_buflist;
1069 
1070 	if (!drm_core_check_feature(dev, DRIVER_SG))
1071 		return -EINVAL;
1072 
1073 	if (!dma)
1074 		return -EINVAL;
1075 
1076 	if (!DRM_SUSER(DRM_CURPROC))
1077 		return -EPERM;
1078 
1079 	count = request->count;
1080 	order = drm_order(request->size);
1081 	size = 1 << order;
1082 
1083 	alignment = (request->flags & _DRM_PAGE_ALIGN)
1084 	    ? PAGE_ALIGN(size) : size;
1085 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1086 	total = PAGE_SIZE << page_order;
1087 
1088 	byte_count = 0;
1089 	agp_offset = request->agp_start;
1090 
1091 	DRM_DEBUG("count:      %d\n", count);
1092 	DRM_DEBUG("order:      %d\n", order);
1093 	DRM_DEBUG("size:       %d\n", size);
1094 	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1095 	DRM_DEBUG("alignment:  %d\n", alignment);
1096 	DRM_DEBUG("page_order: %d\n", page_order);
1097 	DRM_DEBUG("total:      %d\n", total);
1098 
1099 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1100 		return -EINVAL;
1101 
1102 	mtx_lock(&dev->count_lock);
1103 	if (dev->buf_use) {
1104 		mtx_unlock(&dev->count_lock);
1105 		return -EBUSY;
1106 	}
1107 	atomic_inc(&dev->buf_alloc);
1108 	mtx_unlock(&dev->count_lock);
1109 
1110 	DRM_LOCK(dev);
1111 	entry = &dma->bufs[order];
1112 	if (entry->buf_count) {
1113 		DRM_UNLOCK(dev);
1114 		atomic_dec(&dev->buf_alloc);
1115 		return -ENOMEM;	/* May only call once for each order */
1116 	}
1117 
1118 	if (count < 0 || count > 4096) {
1119 		DRM_UNLOCK(dev);
1120 		atomic_dec(&dev->buf_alloc);
1121 		return -EINVAL;
1122 	}
1123 
1124 	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
1125 	    M_NOWAIT | M_ZERO);
1126 	if (!entry->buflist) {
1127 		DRM_UNLOCK(dev);
1128 		atomic_dec(&dev->buf_alloc);
1129 		return -ENOMEM;
1130 	}
1131 
1132 	entry->buf_size = size;
1133 	entry->page_order = page_order;
1134 
1135 	offset = 0;
1136 
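	/*
	 * Scatter/gather buffers live inside the already-allocated SG area:
	 * buf->address is dev->sg->vaddr plus the buffer's offset, and
	 * buf->bus_address is request->agp_start plus the same offset.
	 */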
1137 	while (entry->buf_count < count) {
1138 		buf = &entry->buflist[entry->buf_count];
1139 		buf->idx = dma->buf_count + entry->buf_count;
1140 		buf->total = alignment;
1141 		buf->order = order;
1142 		buf->used = 0;
1143 
1144 		buf->offset = (dma->byte_count + offset);
1145 		buf->bus_address = agp_offset + offset;
1146 		buf->address = (void *)(agp_offset + offset
1147 					+ (unsigned long)dev->sg->vaddr);
1148 		buf->next = NULL;
1149 		buf->waiting = 0;
1150 		buf->pending = 0;
1151 		buf->file_priv = NULL;
1152 
1153 		buf->dev_priv_size = dev->driver->dev_priv_size;
1154 		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
1155 		    M_NOWAIT | M_ZERO);
1156 		if (!buf->dev_private) {
1157 			/* Set count correctly so we free the proper amount. */
1158 			entry->buf_count = count;
1159 			drm_cleanup_buf_error(dev, entry);
1160 			DRM_UNLOCK(dev);
1161 			atomic_dec(&dev->buf_alloc);
1162 			return -ENOMEM;
1163 		}
1164 
1165 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1166 
1167 		offset += alignment;
1168 		entry->buf_count++;
1169 		byte_count += PAGE_SIZE << page_order;
1170 	}
1171 
1172 	DRM_DEBUG("byte_count: %d\n", byte_count);
1173 
1174 	temp_buflist = realloc(dma->buflist,
1175 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
1176 	    DRM_MEM_BUFS, M_NOWAIT);
1177 	if (!temp_buflist) {
1178 		/* Free the entry because it isn't valid */
1179 		drm_cleanup_buf_error(dev, entry);
1180 		DRM_UNLOCK(dev);
1181 		atomic_dec(&dev->buf_alloc);
1182 		return -ENOMEM;
1183 	}
1184 	dma->buflist = temp_buflist;
1185 
1186 	for (i = 0; i < entry->buf_count; i++) {
1187 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1188 	}
1189 
1190 	dma->buf_count += entry->buf_count;
1191 	dma->seg_count += entry->seg_count;
1192 	dma->page_count += byte_count >> PAGE_SHIFT;
1193 	dma->byte_count += byte_count;
1194 
1195 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1196 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1197 
1198 	DRM_UNLOCK(dev);
1199 
1200 	request->count = entry->buf_count;
1201 	request->size = size;
1202 
1203 	dma->flags = _DRM_DMA_USE_SG;
1204 
1205 	atomic_dec(&dev->buf_alloc);
1206 	return 0;
1207 }
1208 
1209 static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1210 {
1211 	struct drm_device_dma *dma = dev->dma;
1212 	struct drm_buf_entry *entry;
1213 	struct drm_buf *buf;
1214 	unsigned long offset;
1215 	unsigned long agp_offset;
1216 	int count;
1217 	int order;
1218 	int size;
1219 	int alignment;
1220 	int page_order;
1221 	int total;
1222 	int byte_count;
1223 	int i;
1224 	struct drm_buf **temp_buflist;
1225 
1226 	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1227 		return -EINVAL;
1228 
1229 	if (!dma)
1230 		return -EINVAL;
1231 
1232 	if (!DRM_SUSER(DRM_CURPROC))
1233 		return -EPERM;
1234 
1235 	count = request->count;
1236 	order = drm_order(request->size);
1237 	size = 1 << order;
1238 
1239 	alignment = (request->flags & _DRM_PAGE_ALIGN)
1240 	    ? PAGE_ALIGN(size) : size;
1241 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1242 	total = PAGE_SIZE << page_order;
1243 
1244 	byte_count = 0;
1245 	agp_offset = request->agp_start;
1246 
1247 	DRM_DEBUG("count:      %d\n", count);
1248 	DRM_DEBUG("order:      %d\n", order);
1249 	DRM_DEBUG("size:       %d\n", size);
1250 	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1251 	DRM_DEBUG("alignment:  %d\n", alignment);
1252 	DRM_DEBUG("page_order: %d\n", page_order);
1253 	DRM_DEBUG("total:      %d\n", total);
1254 
1255 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1256 		return -EINVAL;
1257 
1258 	mtx_lock(&dev->count_lock);
1259 	if (dev->buf_use) {
1260 		mtx_unlock(&dev->count_lock);
1261 		return -EBUSY;
1262 	}
1263 	atomic_inc(&dev->buf_alloc);
1264 	mtx_unlock(&dev->count_lock);
1265 
1266 	DRM_LOCK(dev);
1267 	entry = &dma->bufs[order];
1268 	if (entry->buf_count) {
1269 		DRM_UNLOCK(dev);
1270 		atomic_dec(&dev->buf_alloc);
1271 		return -ENOMEM;	/* May only call once for each order */
1272 	}
1273 
1274 	if (count < 0 || count > 4096) {
1275 		DRM_UNLOCK(dev);
1276 		atomic_dec(&dev->buf_alloc);
1277 		return -EINVAL;
1278 	}
1279 
1280 	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
1281 	    M_NOWAIT | M_ZERO);
1282 	if (!entry->buflist) {
1283 		DRM_UNLOCK(dev);
1284 		atomic_dec(&dev->buf_alloc);
1285 		return -ENOMEM;
1286 	}
1287 
1288 	entry->buf_size = size;
1289 	entry->page_order = page_order;
1290 
1291 	offset = 0;
1292 
1293 	while (entry->buf_count < count) {
1294 		buf = &entry->buflist[entry->buf_count];
1295 		buf->idx = dma->buf_count + entry->buf_count;
1296 		buf->total = alignment;
1297 		buf->order = order;
1298 		buf->used = 0;
1299 
1300 		buf->offset = (dma->byte_count + offset);
1301 		buf->bus_address = agp_offset + offset;
1302 		buf->address = (void *)(agp_offset + offset);
1303 		buf->next = NULL;
1304 		buf->waiting = 0;
1305 		buf->pending = 0;
1306 		buf->file_priv = NULL;
1307 
1308 		buf->dev_priv_size = dev->driver->dev_priv_size;
1309 		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
1310 		    M_NOWAIT | M_ZERO);
1311 		if (!buf->dev_private) {
1312 			/* Set count correctly so we free the proper amount. */
1313 			entry->buf_count = count;
1314 			drm_cleanup_buf_error(dev, entry);
1315 			DRM_UNLOCK(dev);
1316 			atomic_dec(&dev->buf_alloc);
1317 			return -ENOMEM;
1318 		}
1319 
1320 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1321 
1322 		offset += alignment;
1323 		entry->buf_count++;
1324 		byte_count += PAGE_SIZE << page_order;
1325 	}
1326 
1327 	DRM_DEBUG("byte_count: %d\n", byte_count);
1328 
1329 	temp_buflist = realloc(dma->buflist,
1330 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
1331 	    DRM_MEM_BUFS, M_NOWAIT);
1332 	if (!temp_buflist) {
1333 		/* Free the entry because it isn't valid */
1334 		drm_cleanup_buf_error(dev, entry);
1335 		DRM_UNLOCK(dev);
1336 		atomic_dec(&dev->buf_alloc);
1337 		return -ENOMEM;
1338 	}
1339 	dma->buflist = temp_buflist;
1340 
1341 	for (i = 0; i < entry->buf_count; i++) {
1342 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1343 	}
1344 
1345 	dma->buf_count += entry->buf_count;
1346 	dma->seg_count += entry->seg_count;
1347 	dma->page_count += byte_count >> PAGE_SHIFT;
1348 	dma->byte_count += byte_count;
1349 
1350 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1351 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1352 
1353 	DRM_UNLOCK(dev);
1354 
1355 	request->count = entry->buf_count;
1356 	request->size = size;
1357 
1358 	dma->flags = _DRM_DMA_USE_FB;
1359 
1360 	atomic_dec(&dev->buf_alloc);
1361 	return 0;
1362 }
1363 
1364 
1365 /**
1366  * Add buffers for DMA transfers (ioctl).
1367  *
1368  * \param inode device inode.
1369  * \param file_priv DRM file private.
1370  * \param cmd command.
1371  * \param arg pointer to a struct drm_buf_desc request.
1372  * \return zero on success or a negative number on failure.
1373  *
1374  * According to the memory type specified in drm_buf_desc::flags and the
1375  * build options, it dispatches the call either to addbufs_agp(),
1376  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1377  * PCI memory respectively.
1378  */
1379 int drm_addbufs(struct drm_device *dev, void *data,
1380 		struct drm_file *file_priv)
1381 {
1382 	struct drm_buf_desc *request = data;
1383 	int ret;
1384 
1385 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1386 		return -EINVAL;
1387 
1388 #if __OS_HAS_AGP
1389 	if (request->flags & _DRM_AGP_BUFFER)
1390 		ret = drm_addbufs_agp(dev, request);
1391 	else
1392 #endif
1393 	if (request->flags & _DRM_SG_BUFFER)
1394 		ret = drm_addbufs_sg(dev, request);
1395 	else if (request->flags & _DRM_FB_BUFFER)
1396 		ret = drm_addbufs_fb(dev, request);
1397 	else
1398 		ret = drm_addbufs_pci(dev, request);
1399 
1400 	return ret;
1401 }
1402 
1403 /**
1404  * Get information about the buffer mappings.
1405  *
1406  * This was originally meant for debugging purposes, or for use by a sophisticated
1407  * client library to determine how best to use the available buffers (e.g.,
1408  * large buffers can be used for image transfer).
1409  *
1410  * \param inode device inode.
1411  * \param file_priv DRM file private.
1412  * \param cmd command.
1413  * \param arg pointer to a drm_buf_info structure.
1414  * \return zero on success or a negative number on failure.
1415  *
1416  * Increments drm_device::buf_use while holding the drm_device::count_lock
1417  * lock, preventing allocation of more buffers after this call. Information
1418  * about each requested buffer is then copied into user space.
1419  */
1420 int drm_infobufs(struct drm_device *dev, void *data,
1421 		 struct drm_file *file_priv)
1422 {
1423 	struct drm_device_dma *dma = dev->dma;
1424 	struct drm_buf_info *request = data;
1425 	int i;
1426 	int count;
1427 
1428 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1429 		return -EINVAL;
1430 
1431 	if (!dma)
1432 		return -EINVAL;
1433 
1434 	mtx_lock(&dev->count_lock);
1435 	if (atomic_read(&dev->buf_alloc)) {
1436 		mtx_unlock(&dev->count_lock);
1437 		return -EBUSY;
1438 	}
1439 	++dev->buf_use;		/* Can't allocate more after this call */
1440 	mtx_unlock(&dev->count_lock);
1441 
1442 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1443 		if (dma->bufs[i].buf_count)
1444 			++count;
1445 	}
1446 
1447 	DRM_DEBUG("count = %d\n", count);
1448 
1449 	if (request->count >= count) {
1450 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1451 			if (dma->bufs[i].buf_count) {
1452 				struct drm_buf_desc __user *to =
1453 				    &request->list[count];
1454 				struct drm_buf_entry *from = &dma->bufs[i];
1455 				struct drm_freelist *list = &dma->bufs[i].freelist;
1456 				if (copy_to_user(&to->count,
1457 						 &from->buf_count,
1458 						 sizeof(from->buf_count)) ||
1459 				    copy_to_user(&to->size,
1460 						 &from->buf_size,
1461 						 sizeof(from->buf_size)) ||
1462 				    copy_to_user(&to->low_mark,
1463 						 &list->low_mark,
1464 						 sizeof(list->low_mark)) ||
1465 				    copy_to_user(&to->high_mark,
1466 						 &list->high_mark,
1467 						 sizeof(list->high_mark)))
1468 					return -EFAULT;
1469 
1470 				DRM_DEBUG("%d %d %d %d %d\n",
1471 					  i,
1472 					  dma->bufs[i].buf_count,
1473 					  dma->bufs[i].buf_size,
1474 					  dma->bufs[i].freelist.low_mark,
1475 					  dma->bufs[i].freelist.high_mark);
1476 				++count;
1477 			}
1478 		}
1479 	}
1480 	request->count = count;
1481 
1482 	return 0;
1483 }
1484 
1485 /**
1486  * Specifies a low and high water mark for buffer allocation
1487  *
1488  * \param inode device inode.
1489  * \param file_priv DRM file private.
1490  * \param cmd command.
1491  * \param arg a pointer to a drm_buf_desc structure.
1492  * \return zero on success or a negative number on failure.
1493  *
1494  * Verifies that the size order falls within the admissible range and updates
1495  * the low and high water marks of the respective drm_device_dma::bufs entry.
1496  *
1497  * \note This ioctl is deprecated and mostly never used.
1498  */
1499 int drm_markbufs(struct drm_device *dev, void *data,
1500 		 struct drm_file *file_priv)
1501 {
1502 	struct drm_device_dma *dma = dev->dma;
1503 	struct drm_buf_desc *request = data;
1504 	int order;
1505 	struct drm_buf_entry *entry;
1506 
1507 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1508 		return -EINVAL;
1509 
1510 	if (!dma)
1511 		return -EINVAL;
1512 
1513 	DRM_DEBUG("%d, %d, %d\n",
1514 		  request->size, request->low_mark, request->high_mark);
1515 	order = drm_order(request->size);
1516 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1517 		return -EINVAL;
1518 	entry = &dma->bufs[order];
1519 
1520 	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1521 		return -EINVAL;
1522 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1523 		return -EINVAL;
1524 
1525 	entry->freelist.low_mark = request->low_mark;
1526 	entry->freelist.high_mark = request->high_mark;
1527 
1528 	return 0;
1529 }
1530 
1531 /**
1532  * Unreserve the buffers in the list, previously reserved using drmDMA.
1533  *
1534  * \param inode device inode.
1535  * \param file_priv DRM file private.
1536  * \param cmd command.
1537  * \param arg pointer to a drm_buf_free structure.
1538  * \return zero on success or a negative number on failure.
1539  *
1540  * Calls drm_free_buffer() for each used buffer.
1541  * This function is primarily used for debugging.
1542  */
1543 int drm_freebufs(struct drm_device *dev, void *data,
1544 		 struct drm_file *file_priv)
1545 {
1546 	struct drm_device_dma *dma = dev->dma;
1547 	struct drm_buf_free *request = data;
1548 	int i;
1549 	int idx;
1550 	struct drm_buf *buf;
1551 
1552 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1553 		return -EINVAL;
1554 
1555 	if (!dma)
1556 		return -EINVAL;
1557 
1558 	DRM_DEBUG("%d\n", request->count);
1559 	for (i = 0; i < request->count; i++) {
1560 		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1561 			return -EFAULT;
1562 		if (idx < 0 || idx >= dma->buf_count) {
1563 			DRM_ERROR("Index %d (of %d max)\n",
1564 				  idx, dma->buf_count - 1);
1565 			return -EINVAL;
1566 		}
1567 		buf = dma->buflist[idx];
1568 		if (buf->file_priv != file_priv) {
1569 			DRM_ERROR("Process %d freeing buffer not owned\n",
1570 				  DRM_CURRENTPID);
1571 			return -EINVAL;
1572 		}
1573 		drm_free_buffer(dev, buf);
1574 	}
1575 
1576 	return 0;
1577 }
1578 
1579 /**
1580  * Maps all of the DMA buffers into client-virtual space (ioctl).
1581  *
1582  * \param inode device inode.
1583  * \param file_priv DRM file private.
1584  * \param cmd command.
1585  * \param arg pointer to a drm_buf_map structure.
1586  * \return zero on success or a negative number on failure.
1587  *
1588  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1589  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1590  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1591  * drm_mmap_dma().
1592  */
1593 int drm_mapbufs(struct drm_device *dev, void *data,
1594 	        struct drm_file *file_priv)
1595 {
1596 	struct drm_device_dma *dma = dev->dma;
1597 	int retcode = 0;
1598 	const int zero = 0;
1599 	vm_offset_t virtual;
1600 	vm_offset_t address;
1601 	struct vmspace *vms;
1602 	struct drm_buf_map *request = data;
1603 	int i;
1604 
1605 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1606 		return -EINVAL;
1607 
1608 	if (!dma)
1609 		return -EINVAL;
1610 
1611 	mtx_lock(&dev->count_lock);
1612 	if (atomic_read(&dev->buf_alloc)) {
1613 		mtx_unlock(&dev->count_lock);
1614 		return -EBUSY;
1615 	}
1616 	dev->buf_use++;		/* Can't allocate more after this call */
1617 	mtx_unlock(&dev->count_lock);
1618 
1619 	vms = DRM_CURPROC->td_proc->p_vmspace;
1620 
1621 	if (request->count >= dma->buf_count) {
1622 		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1623 		    || (drm_core_check_feature(dev, DRIVER_SG)
1624 			&& (dma->flags & _DRM_DMA_USE_SG))
1625 		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1626 			&& (dma->flags & _DRM_DMA_USE_FB))) {
1627 			struct drm_local_map *map = dev->agp_buffer_map;
1628 			vm_ooffset_t token = dev->agp_buffer_token;
1629 
1630 			if (!map) {
1631 				retcode = -EINVAL;
1632 				goto done;
1633 			}
1634 			retcode = vm_mmap(&vms->vm_map, &virtual, map->size,
1635 			    VM_PROT_RW, VM_PROT_RW, MAP_SHARED | MAP_NOSYNC,
1636 			    OBJT_DEVICE, file_priv->minor->device, token);
1637 		} else {
1638 			retcode = vm_mmap(&vms->vm_map, &virtual, dma->byte_count,
1639 			    VM_PROT_RW, VM_PROT_RW, MAP_SHARED | MAP_NOSYNC,
1640 			    OBJT_DEVICE, file_priv->minor->device, 0);
1641 		}
1642 		if (retcode) {
1643 			/* Real error */
1644 			retcode = -retcode;
1645 			goto done;
1646 		}
1647 		request->virtual = (void __user *)virtual;
1648 
1649 		for (i = 0; i < dma->buf_count; i++) {
1650 			if (copy_to_user(&request->list[i].idx,
1651 					 &dma->buflist[i]->idx,
1652 					 sizeof(request->list[0].idx))) {
1653 				retcode = -EFAULT;
1654 				goto done;
1655 			}
1656 			if (copy_to_user(&request->list[i].total,
1657 					 &dma->buflist[i]->total,
1658 					 sizeof(request->list[0].total))) {
1659 				retcode = -EFAULT;
1660 				goto done;
1661 			}
1662 			if (copy_to_user(&request->list[i].used,
1663 					 &zero, sizeof(zero))) {
1664 				retcode = -EFAULT;
1665 				goto done;
1666 			}
1667 			address = virtual + dma->buflist[i]->offset;	/* *** */
1668 			if (copy_to_user(&request->list[i].address,
1669 					 &address, sizeof(address))) {
1670 				retcode = -EFAULT;
1671 				goto done;
1672 			}
1673 		}
1674 	}
1675       done:
1676 	request->count = dma->buf_count;
1677 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1678 
1679 	return retcode;
1680 }
1681 
1682 /**
1683  * Compute size order.  Returns the exponent of the smallest power of two
1684  * which is greater than or equal to the given number.
1685  *
1686  * \param size size.
1687  * \return order.
1688  *
1689  * \todo Can be made faster.
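 *
 * For example, drm_order(4096) == 12 and drm_order(4097) == 13.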
1690  */
1691 int drm_order(unsigned long size)
1692 {
1693 	int order;
1694 	unsigned long tmp;
1695 
1696 	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1697 
1698 	if (size & (size - 1))
1699 		++order;
1700 
1701 	return order;
1702 }
1703 EXPORT_SYMBOL(drm_order);
1704