xref: /dragonfly/sys/dev/drm/drm_bufs.c (revision 2e0c716d)
1 /**
2  * \file drm_bufs.c
3  * Generic buffer template
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8 
9 /*
10  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11  *
12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  *
35  * $FreeBSD: src/sys/dev/drm2/drm_bufs.c,v 1.1 2012/05/22 11:07:44 kib Exp $
36  */
37 
38 #include <sys/conf.h>
39 #include <bus/pci/pcireg.h>
40 #include <linux/types.h>
41 #include <linux/export.h>
42 #include <drm/drmP.h>
43 
44 /* Allocation of PCI memory resources (framebuffer, registers, etc.) for
45  * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
46  * address for accessing them.  Cleaned up at unload.
47  */
48 static int drm_alloc_resource(struct drm_device *dev, int resource)
49 {
50 	struct resource *res;
51 	int rid;
52 
53 	DRM_LOCK_ASSERT(dev);
54 
55 	if (resource >= DRM_MAX_PCI_RESOURCE) {
56 		DRM_ERROR("Resource %d too large\n", resource);
57 		return 1;
58 	}
59 
60 	if (dev->pcir[resource] != NULL) {
61 		return 0;
62 	}
63 
64 	DRM_UNLOCK(dev);
65 	rid = PCIR_BAR(resource);
66 	res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
67 	    RF_SHAREABLE);
68 	DRM_LOCK(dev);
69 	if (res == NULL) {
70 		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
71 		return 1;
72 	}
73 
74 	if (dev->pcir[resource] == NULL) {
75 		dev->pcirid[resource] = rid;
76 		dev->pcir[resource] = res;
77 	} else {
		/* Another thread allocated this BAR while the lock was
		 * dropped above; release the duplicate and keep the existing
		 * resource. */
		bus_release_resource(dev->dev, SYS_RES_MEMORY, rid, res);
	}
78 
79 	return 0;
80 }
81 
82 unsigned long drm_get_resource_start(struct drm_device *dev,
83 				     unsigned int resource)
84 {
85 	if (drm_alloc_resource(dev, resource) != 0)
86 		return 0;
87 
88 	return rman_get_start(dev->pcir[resource]);
89 }
90 
91 unsigned long drm_get_resource_len(struct drm_device *dev,
92 				   unsigned int resource)
93 {
94 	if (drm_alloc_resource(dev, resource) != 0)
95 		return 0;
96 
97 	return rman_get_size(dev->pcir[resource]);
98 }
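/*
 * Illustrative sketch of a caller: a driver attach path might use the two
 * helpers above to locate a memory BAR before mapping it.  The BAR index
 * and the message below are placeholders, not taken from any particular
 * driver.
 *
 *	unsigned long base = drm_get_resource_start(dev, 2);
 *	unsigned long size = drm_get_resource_len(dev, 2);
 *
 *	if (base == 0 || size == 0)
 *		return EINVAL;
 *	DRM_INFO("register BAR at 0x%lx, %lu bytes\n", base, size);
 */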
99 
100 int drm_addmap(struct drm_device * dev, resource_size_t offset,
101 	       unsigned int size, enum drm_map_type type,
102 	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
103 {
104 	struct drm_local_map *map;
105 	struct drm_map_list *entry = NULL;
106 	drm_dma_handle_t *dmah;
107 
108 	/* Allocate a new map structure, fill it in, and do any type-specific
109 	 * initialization necessary.
110 	 */
111 	map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_WAITOK | M_NULLOK);
112 	if (!map) {
113 		return ENOMEM;
114 	}
115 
116 	map->offset = offset;
117 	map->size = size;
118 	map->type = type;
119 	map->flags = flags;
120 
121 	/* Only allow shared memory to be removable since we only keep enough
122 	 * bookkeeping information about shared memory to allow for removal
123 	 * when processes fork.
124 	 */
125 	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
126 		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
127 		drm_free(map, M_DRM);
128 		return EINVAL;
129 	}
130 	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
131 		DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
132 		    (uintmax_t)offset, size);
133 		drm_free(map, M_DRM);
134 		return EINVAL;
135 	}
136 	if (offset + size < offset) {
137 		DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
138 		    (uintmax_t)offset, size);
139 		drm_free(map, M_DRM);
140 		return EINVAL;
141 	}
142 
143 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
144 		  (unsigned long long)map->offset, map->size, map->type);
145 
146 	/* Check whether this is just another version of a kernel-allocated map;
147 	 * if so, hand the existing map back.
148 	 */
149 	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
150 	    type == _DRM_SHM) {
151 		list_for_each_entry(entry, &dev->maplist, head) {
152 			if (entry->map->type == type && (entry->map->offset == offset ||
153 			    (entry->map->type == _DRM_SHM &&
154 			    entry->map->flags == _DRM_CONTAINS_LOCK))) {
155 				entry->map->size = size;
156 				DRM_DEBUG("Found kernel map %d\n", type);
				/* Hand back the existing map rather than the
				 * duplicate allocated above. */
				drm_free(map, M_DRM);
				map = entry->map;
157 				goto done;
158 			}
159 		}
160 	}
161 
162 	switch (map->type) {
163 	case _DRM_REGISTERS:
164 		map->handle = drm_ioremap(dev, map);
165 		if (!(map->flags & _DRM_WRITE_COMBINING))
166 			break;
167 		/* FALLTHROUGH */
168 	case _DRM_FRAME_BUFFER:
169 		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
170 			map->mtrr = 1;
171 		break;
172 	case _DRM_SHM:
173 		map->handle = kmalloc(map->size, M_DRM, M_WAITOK | M_NULLOK);
174 		DRM_DEBUG("%lu %d %p\n",
175 		    map->size, drm_order(map->size), map->handle);
176 		if (!map->handle) {
177 			drm_free(map, M_DRM);
178 			return ENOMEM;
179 		}
180 		map->offset = (unsigned long)map->handle;
181 		if (map->flags & _DRM_CONTAINS_LOCK) {
182 			/* Prevent a 2nd X Server from creating a 2nd lock */
183 			DRM_LOCK(dev);
184 			if (dev->lock.hw_lock != NULL) {
185 				DRM_UNLOCK(dev);
186 				drm_free(map->handle, M_DRM);
187 				drm_free(map, M_DRM);
188 				return EBUSY;
189 			}
190 			dev->lock.hw_lock = map->handle; /* Pointer to lock */
191 			DRM_UNLOCK(dev);
192 		}
193 		break;
194 	case _DRM_AGP:
195 		/*valid = 0;*/
196 		/* In some cases (i810 driver), user space may have already
197 		 * added the AGP base itself, because dev->agp->base previously
198 		 * only got set during AGP enable.  So, only add the base
199 		 * address if the map's offset isn't already within the
200 		 * aperture.
201 		 */
202 		if (map->offset < dev->agp->base ||
203 		    map->offset > dev->agp->base +
204 		    dev->agp->agp_info.ai_aperture_size - 1) {
205 			map->offset += dev->agp->base;
206 		}
207 		map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
208 		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
209 			if ((map->offset >= entry->bound) &&
210 			    (map->offset + map->size <=
211 			    entry->bound + entry->pages * PAGE_SIZE)) {
212 				valid = 1;
213 				break;
214 			}
215 		}
216 		if (!valid) {
217 			drm_free(map, M_DRM);
218 			return EACCES;
219 		}*/
220 		break;
221 	case _DRM_SCATTER_GATHER:
222 		if (!dev->sg) {
223 			drm_free(map, M_DRM);
224 			return EINVAL;
225 		}
226 		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
227 		map->offset = dev->sg->vaddr + offset;
228 		break;
229 	case _DRM_CONSISTENT:
230 		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
231 		 * As we're limiting the address to 2^32-1 (or less),
232 		 * casting it down to 32 bits is no problem, but we
233 		 * need to point to a 64-bit variable first. */
234 		dmah = drm_pci_alloc(dev, map->size, map->size);
235 		if (!dmah) {
236 			drm_free(map, M_DRM);
237 			return ENOMEM;
238 		}
239 		map->handle = dmah->vaddr;
240 		map->offset = dmah->busaddr;
241 		break;
242 	default:
243 		DRM_ERROR("Bad map type %d\n", map->type);
244 		drm_free(map, M_DRM);
245 		return EINVAL;
246 	}
247 
	/* Track the new map on the device's map list so that drm_rmmap() and
	 * the ioctl paths can find it later. */
	entry = kmalloc(sizeof(*entry), M_DRM, M_ZERO | M_WAITOK);
	entry->map = map;
248 	list_add(&entry->head, &dev->maplist);
249 
250 done:
251 	/* Jumped to, with lock held, when a kernel map is found. */
252 
253 	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
254 	    map->size);
255 
256 	*map_ptr = map;
257 
258 	return 0;
259 }
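/*
 * Illustrative sketch of a typical in-kernel caller creating the shared
 * SAREA map, i.e. the _DRM_SHM + _DRM_CONTAINS_LOCK case handled above.
 * The lock is taken around the call, mirroring drm_addmap_ioctl() below;
 * the variable names are placeholders.
 *
 *	struct drm_local_map *sarea;
 *	int ret;
 *
 *	DRM_LOCK(dev);
 *	ret = drm_addmap(dev, 0, PAGE_SIZE, _DRM_SHM,
 *	    _DRM_CONTAINS_LOCK, &sarea);
 *	DRM_UNLOCK(dev);
 *	if (ret != 0)
 *		return ret;
 */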
260 
261 /**
262  * Ioctl to specify a range of memory that is available for mapping by a
263  * non-root process.
264  *
265  * \param inode device inode.
266  * \param file_priv DRM file private.
267  * \param cmd command.
268  * \param arg pointer to a drm_map structure.
269  * \return zero on success or a negative value on error.
270  *
271  */
272 int drm_addmap_ioctl(struct drm_device *dev, void *data,
273 		     struct drm_file *file_priv)
274 {
275 	struct drm_map *request = data;
276 	drm_local_map_t *map;
277 	int err;
278 
279 	if (!(dev->flags & (FREAD|FWRITE)))
280 		return EACCES; /* Require read/write */
281 
282 	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
283 		return EACCES;
284 
285 	DRM_LOCK(dev);
286 	err = drm_addmap(dev, request->offset, request->size, request->type,
287 	    request->flags, &map);
288 	DRM_UNLOCK(dev);
289 	if (err != 0)
290 		return err;
291 
292 	request->offset = map->offset;
293 	request->size = map->size;
294 	request->type = map->type;
295 	request->flags = map->flags;
296 	request->mtrr   = map->mtrr;
297 	request->handle = (void *)map->handle;
298 
299 	return 0;
300 }
301 
302 void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
303 {
304 	struct drm_map_list *r_list = NULL, *list_t;
305 	drm_dma_handle_t dmah;
306 	int found = 0;
307 
308 	DRM_LOCK_ASSERT(dev);
309 
310 	if (map == NULL)
311 		return;
312 
313 	/* Find the list entry for the map and remove it */
314 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
315 		if (r_list->map == map) {
316 			list_del(&r_list->head);
317 			drm_free(r_list, M_DRM);
318 			found = 1;
319 			break;
320 		}
321 	}
322 
323 	if (!found)
324 		return;
325 
326 	switch (map->type) {
327 	case _DRM_REGISTERS:
328 		drm_ioremapfree(map);
329 		/* FALLTHROUGH */
330 	case _DRM_FRAME_BUFFER:
331 		if (map->mtrr) {
332 			int __unused retcode;
333 
334 			retcode = drm_mtrr_del(0, map->offset, map->size,
335 			    DRM_MTRR_WC);
336 			DRM_DEBUG("mtrr_del = %d\n", retcode);
337 		}
338 		break;
339 	case _DRM_SHM:
340 		drm_free(map->handle, M_DRM);
341 		break;
342 	case _DRM_AGP:
343 	case _DRM_SCATTER_GATHER:
344 		break;
345 	case _DRM_CONSISTENT:
346 		dmah.vaddr = map->handle;
347 		dmah.busaddr = map->offset;
348 		drm_pci_free(dev, &dmah);
349 		break;
350 	default:
351 		DRM_ERROR("Bad map type %d\n", map->type);
352 		break;
353 	}
354 
355 	drm_free(map, M_DRM);
356 }
357 
358 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
359  * the last close of the device, and this is necessary for cleanup when things
360  * exit uncleanly.  Therefore, having userland manually remove mappings seems
361  * like a pointless exercise since they're going away anyway.
362  *
363  * One use case might be after addmap is allowed for normal users for SHM and
364  * gets used by drivers that the server doesn't need to care about.  This seems
365  * unlikely.
366  *
367  * \param inode device inode.
368  * \param file_priv DRM file private.
369  * \param cmd command.
370  * \param arg pointer to a struct drm_map structure.
371  * \return zero on success or a negative value on error.
372  */
373 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
374 		    struct drm_file *file_priv)
375 {
376 	struct drm_map *request = data;
377 	struct drm_local_map *map = NULL;
378 	struct drm_map_list *r_list;
379 
380 	DRM_LOCK(dev);
381 	list_for_each_entry(r_list, &dev->maplist, head) {
382 		if (r_list->map &&
383 		    r_list->user_token == (unsigned long)request->handle &&
384 		    r_list->map->flags & _DRM_REMOVABLE) {
385 			map = r_list->map;
386 			break;
387 		}
388 	}
389 
390 	/* List has wrapped around to the head pointer, or it's empty and we
391 	 * didn't find anything.
392 	 */
393 	if (list_empty(&dev->maplist) || !map) {
394 		DRM_UNLOCK(dev);
395 		return EINVAL;
396 	}
397 
398 	/* Register and framebuffer maps are permanent */
399 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
400 		DRM_UNLOCK(dev);
401 		return 0;
402 	}
403 
404 	drm_rmmap(dev, map);
405 
406 	DRM_UNLOCK(dev);
407 
408 	return 0;
409 }
410 
411 /**
412  * Cleanup after an error on one of the addbufs() functions.
413  *
414  * \param dev DRM device.
415  * \param entry buffer entry where the error occurred.
416  *
417  * Frees any pages and buffers associated with the given entry.
418  */
419 static void drm_cleanup_buf_error(struct drm_device * dev,
420 				  struct drm_buf_entry * entry)
421 {
422 	int i;
423 
424 	if (entry->seg_count) {
425 		for (i = 0; i < entry->seg_count; i++) {
426 			drm_pci_free(dev, entry->seglist[i]);
427 		}
428 		drm_free(entry->seglist, M_DRM);
429 
430 		entry->seg_count = 0;
431 	}
432 
433 	if (entry->buf_count) {
434 		for (i = 0; i < entry->buf_count; i++) {
435 			drm_free(entry->buflist[i].dev_private, M_DRM);
436 		}
437 		drm_free(entry->buflist, M_DRM);
438 
439 		entry->buf_count = 0;
440 	}
441 }
442 
443 static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
444 {
445 	drm_device_dma_t *dma = dev->dma;
446 	drm_buf_entry_t *entry;
447 	/*drm_agp_mem_t *agp_entry;
448 	int valid*/
449 	drm_buf_t *buf;
450 	unsigned long offset;
451 	unsigned long agp_offset;
452 	int count;
453 	int order;
454 	int size;
455 	int alignment;
456 	int page_order;
457 	int total;
458 	int byte_count;
459 	int i;
460 	drm_buf_t **temp_buflist;
461 
462 	count = request->count;
463 	order = drm_order(request->size);
464 	size = 1 << order;
465 
466 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
467 	    ? round_page(size) : size;
468 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
469 	total = PAGE_SIZE << page_order;
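	/*
	 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): a
	 * request->size of 0x10000 gives order == 16, size == 65536,
	 * page_order == 4 and total == PAGE_SIZE << 4 == 65536; with
	 * _DRM_PAGE_ALIGN set, alignment stays 65536 because the size is
	 * already a multiple of the page size.
	 */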
470 
471 	byte_count = 0;
472 	agp_offset = dev->agp->base + request->agp_start;
473 
474 	DRM_DEBUG("count:      %d\n",  count);
475 	DRM_DEBUG("order:      %d\n",  order);
476 	DRM_DEBUG("size:       %d\n",  size);
477 	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
478 	DRM_DEBUG("alignment:  %d\n",  alignment);
479 	DRM_DEBUG("page_order: %d\n",  page_order);
480 	DRM_DEBUG("total:      %d\n",  total);
481 
482 	/* Make sure buffers are located in AGP memory that we own */
483 	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
484 	 * memory.  Safe to ignore for now because these ioctls are still
485 	 * root-only.
486 	 */
487 	/*valid = 0;
488 	for (agp_entry = dev->agp->memory; agp_entry;
489 	    agp_entry = agp_entry->next) {
490 		if ((agp_offset >= agp_entry->bound) &&
491 		    (agp_offset + total * count <=
492 		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
493 			valid = 1;
494 			break;
495 		}
496 	}
497 	if (!valid) {
498 		DRM_DEBUG("zone invalid\n");
499 		return EINVAL;
500 	}*/
501 
502 	entry = &dma->bufs[order];
503 
504 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
505 				 M_WAITOK | M_NULLOK | M_ZERO);
506 	if (!entry->buflist) {
507 		return ENOMEM;
508 	}
509 
510 	entry->buf_size = size;
511 	entry->page_order = page_order;
512 
513 	offset = 0;
514 
515 	while (entry->buf_count < count) {
516 		buf          = &entry->buflist[entry->buf_count];
517 		buf->idx     = dma->buf_count + entry->buf_count;
518 		buf->total   = alignment;
519 		buf->order   = order;
520 		buf->used    = 0;
521 
522 		buf->offset  = (dma->byte_count + offset);
523 		buf->bus_address = agp_offset + offset;
524 		buf->address = (void *)(agp_offset + offset);
525 		buf->next    = NULL;
526 		buf->pending = 0;
527 		buf->file_priv = NULL;
528 
529 		buf->dev_priv_size = dev->driver->buf_priv_size;
530 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
531 					   M_WAITOK | M_NULLOK | M_ZERO);
532 		if (buf->dev_private == NULL) {
533 			/* Set count correctly so we free the proper amount. */
534 			entry->buf_count = count;
535 			drm_cleanup_buf_error(dev, entry);
536 			return ENOMEM;
537 		}
538 
539 		offset += alignment;
540 		entry->buf_count++;
541 		byte_count += PAGE_SIZE << page_order;
542 	}
543 
544 	DRM_DEBUG("byte_count: %d\n", byte_count);
545 
546 	temp_buflist = krealloc(dma->buflist,
547 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
548 	    M_DRM, M_WAITOK | M_NULLOK);
549 	if (temp_buflist == NULL) {
550 		/* Free the entry because it isn't valid */
551 		drm_cleanup_buf_error(dev, entry);
552 		return ENOMEM;
553 	}
554 	dma->buflist = temp_buflist;
555 
556 	for (i = 0; i < entry->buf_count; i++) {
557 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
558 	}
559 
560 	dma->buf_count += entry->buf_count;
561 	dma->byte_count += byte_count;
562 
563 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
564 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
565 
566 	request->count = entry->buf_count;
567 	request->size = size;
568 
569 	dma->flags = _DRM_DMA_USE_AGP;
570 
571 	return 0;
572 }
573 
574 static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
575 {
576 	drm_device_dma_t *dma = dev->dma;
577 	int count;
578 	int order;
579 	int size;
580 	int total;
581 	int page_order;
582 	drm_buf_entry_t *entry;
583 	drm_dma_handle_t *dmah;
584 	drm_buf_t *buf;
585 	int alignment;
586 	unsigned long offset;
587 	int i;
588 	int byte_count;
589 	int page_count;
590 	unsigned long *temp_pagelist;
591 	drm_buf_t **temp_buflist;
592 
593 	count = request->count;
594 	order = drm_order(request->size);
595 	size = 1 << order;
596 
597 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
598 	    request->count, request->size, size, order);
599 
600 	alignment = (request->flags & _DRM_PAGE_ALIGN)
601 	    ? round_page(size) : size;
602 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
603 	total = PAGE_SIZE << page_order;
604 
605 	entry = &dma->bufs[order];
606 
607 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
608 				 M_WAITOK | M_NULLOK | M_ZERO);
609 	entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
610 				 M_WAITOK | M_NULLOK | M_ZERO);
611 
612 	/* Keep the original pagelist until we know all the allocations
613 	 * have succeeded
614 	 */
615 	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
616 				sizeof(*dma->pagelist),
617 				M_DRM, M_WAITOK | M_NULLOK);
618 
619 	if (entry->buflist == NULL || entry->seglist == NULL ||
620 	    temp_pagelist == NULL) {
621 		drm_free(temp_pagelist, M_DRM);
622 		drm_free(entry->seglist, M_DRM);
623 		drm_free(entry->buflist, M_DRM);
624 		return ENOMEM;
625 	}
626 
627 	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
628 	    sizeof(*dma->pagelist));
629 
630 	DRM_DEBUG("pagelist: %d entries\n",
631 	    dma->page_count + (count << page_order));
632 
633 	entry->buf_size	= size;
634 	entry->page_order = page_order;
635 	byte_count = 0;
636 	page_count = 0;
637 
638 	while (entry->buf_count < count) {
639 		spin_unlock(&dev->dma_lock);
640 		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
641 		spin_lock(&dev->dma_lock);
642 
643 		if (!dmah) {
644 			/* Set count correctly so we free the proper amount. */
645 			entry->buf_count = count;
646 			entry->seg_count = count;
647 			drm_cleanup_buf_error(dev, entry);
648 			drm_free(temp_pagelist, M_DRM);
649 			return ENOMEM;
650 		}
651 
652 		entry->seglist[entry->seg_count++] = dmah;
653 		for (i = 0; i < (1 << page_order); i++) {
654 			DRM_DEBUG("page %d @ 0x%08lx\n",
655 				  dma->page_count + page_count,
656 				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
657 			temp_pagelist[dma->page_count + page_count++]
658 				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
659 		}
660 		for (offset = 0;
661 		    offset + size <= total && entry->buf_count < count;
662 		    offset += alignment, ++entry->buf_count) {
663 			buf	     = &entry->buflist[entry->buf_count];
664 			buf->idx     = dma->buf_count + entry->buf_count;
665 			buf->total   = alignment;
666 			buf->order   = order;
667 			buf->used    = 0;
668 			buf->offset  = (dma->byte_count + byte_count + offset);
669 			buf->address = ((char *)dmah->vaddr + offset);
670 			buf->bus_address = dmah->busaddr + offset;
671 			buf->next    = NULL;
672 			buf->pending = 0;
673 			buf->file_priv = NULL;
674 
675 			buf->dev_priv_size = dev->driver->buf_priv_size;
676 			buf->dev_private = kmalloc(buf->dev_priv_size,
677 						   M_DRM,
678 						   M_WAITOK | M_NULLOK |
679 						    M_ZERO);
680 			if (buf->dev_private == NULL) {
681 				/* Set count correctly so we free the proper amount. */
682 				entry->buf_count = count;
683 				entry->seg_count = count;
684 				drm_cleanup_buf_error(dev, entry);
685 				drm_free(temp_pagelist, M_DRM);
686 				return ENOMEM;
687 			}
688 
689 			DRM_DEBUG("buffer %d @ %p\n",
690 			    entry->buf_count, buf->address);
691 		}
692 		byte_count += PAGE_SIZE << page_order;
693 	}
694 
695 	temp_buflist = krealloc(dma->buflist,
696 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
697 	    M_DRM, M_WAITOK | M_NULLOK);
698 	if (temp_buflist == NULL) {
699 		/* Free the entry because it isn't valid */
700 		drm_cleanup_buf_error(dev, entry);
701 		drm_free(temp_pagelist, M_DRM);
702 		return ENOMEM;
703 	}
704 	dma->buflist = temp_buflist;
705 
706 	for (i = 0; i < entry->buf_count; i++) {
707 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
708 	}
709 
710 	/* No allocations failed, so now we can replace the original pagelist
711 	 * with the new one.
712 	 */
713 	drm_free(dma->pagelist, M_DRM);
714 	dma->pagelist = temp_pagelist;
715 
716 	dma->buf_count += entry->buf_count;
717 	dma->seg_count += entry->seg_count;
718 	dma->page_count += entry->seg_count << page_order;
719 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
720 
721 	request->count = entry->buf_count;
722 	request->size = size;
723 
724 	return 0;
725 
726 }
727 
728 static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
729 {
730 	drm_device_dma_t *dma = dev->dma;
731 	drm_buf_entry_t *entry;
732 	drm_buf_t *buf;
733 	unsigned long offset;
734 	unsigned long agp_offset;
735 	int count;
736 	int order;
737 	int size;
738 	int alignment;
739 	int page_order;
740 	int total;
741 	int byte_count;
742 	int i;
743 	drm_buf_t **temp_buflist;
744 
745 	count = request->count;
746 	order = drm_order(request->size);
747 	size = 1 << order;
748 
749 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
750 	    ? round_page(size) : size;
751 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
752 	total = PAGE_SIZE << page_order;
753 
754 	byte_count = 0;
755 	agp_offset = request->agp_start;
756 
757 	DRM_DEBUG("count:      %d\n",  count);
758 	DRM_DEBUG("order:      %d\n",  order);
759 	DRM_DEBUG("size:       %d\n",  size);
760 	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
761 	DRM_DEBUG("alignment:  %d\n",  alignment);
762 	DRM_DEBUG("page_order: %d\n",  page_order);
763 	DRM_DEBUG("total:      %d\n",  total);
764 
765 	entry = &dma->bufs[order];
766 
767 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
768 				 M_WAITOK | M_NULLOK | M_ZERO);
769 	if (entry->buflist == NULL)
770 		return ENOMEM;
771 
772 	entry->buf_size = size;
773 	entry->page_order = page_order;
774 
775 	offset = 0;
776 
777 	while (entry->buf_count < count) {
778 		buf          = &entry->buflist[entry->buf_count];
779 		buf->idx     = dma->buf_count + entry->buf_count;
780 		buf->total   = alignment;
781 		buf->order   = order;
782 		buf->used    = 0;
783 
784 		buf->offset  = (dma->byte_count + offset);
785 		buf->bus_address = agp_offset + offset;
786 		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
787 		buf->next    = NULL;
788 		buf->pending = 0;
789 		buf->file_priv = NULL;
790 
791 		buf->dev_priv_size = dev->driver->buf_priv_size;
792 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
793 					   M_WAITOK | M_NULLOK | M_ZERO);
794 		if (buf->dev_private == NULL) {
795 			/* Set count correctly so we free the proper amount. */
796 			entry->buf_count = count;
797 			drm_cleanup_buf_error(dev, entry);
798 			return ENOMEM;
799 		}
800 
801 		DRM_DEBUG("buffer %d @ %p\n",
802 		    entry->buf_count, buf->address);
803 
804 		offset += alignment;
805 		entry->buf_count++;
806 		byte_count += PAGE_SIZE << page_order;
807 	}
808 
809 	DRM_DEBUG("byte_count: %d\n", byte_count);
810 
811 	temp_buflist = krealloc(dma->buflist,
812 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
813 	    M_DRM, M_WAITOK | M_NULLOK);
814 	if (temp_buflist == NULL) {
815 		/* Free the entry because it isn't valid */
816 		drm_cleanup_buf_error(dev, entry);
817 		return ENOMEM;
818 	}
819 	dma->buflist = temp_buflist;
820 
821 	for (i = 0; i < entry->buf_count; i++) {
822 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
823 	}
824 
825 	dma->buf_count += entry->buf_count;
826 	dma->byte_count += byte_count;
827 
828 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
829 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
830 
831 	request->count = entry->buf_count;
832 	request->size = size;
833 
834 	dma->flags = _DRM_DMA_USE_SG;
835 
836 	return 0;
837 }
838 
839 /**
840  * Add AGP buffers for DMA transfers.
841  *
842  * \param dev struct drm_device to which the buffers are to be added.
843  * \param request pointer to a struct drm_buf_desc describing the request.
844  * \return zero on success or a negative number on failure.
845  *
846  * After some sanity checks, creates a drm_buf structure for each buffer and
847  * reallocates the buffer list of the same size order to accommodate the new
848  * buffers.
849  */
850 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
851 {
852 	int order, ret;
853 
854 	if (request->count < 0 || request->count > 4096)
855 		return EINVAL;
856 
857 	order = drm_order(request->size);
858 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
859 		return EINVAL;
860 
861 	spin_lock(&dev->dma_lock);
862 
863 	/* No more allocations after first buffer-using ioctl. */
864 	if (dev->buf_use != 0) {
865 		spin_unlock(&dev->dma_lock);
866 		return EBUSY;
867 	}
868 	/* No more than one allocation per order */
869 	if (dev->dma->bufs[order].buf_count != 0) {
870 		spin_unlock(&dev->dma_lock);
871 		return ENOMEM;
872 	}
873 
874 	ret = drm_do_addbufs_agp(dev, request);
875 
876 	spin_unlock(&dev->dma_lock);
877 
878 	return ret;
879 }
880 
881 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
882 {
883 	int order, ret;
884 
885 	if (!DRM_SUSER(DRM_CURPROC))
886 		return EACCES;
887 
888 	if (request->count < 0 || request->count > 4096)
889 		return EINVAL;
890 
891 	order = drm_order(request->size);
892 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
893 		return EINVAL;
894 
895 	spin_lock(&dev->dma_lock);
896 
897 	/* No more allocations after first buffer-using ioctl. */
898 	if (dev->buf_use != 0) {
899 		spin_unlock(&dev->dma_lock);
900 		return EBUSY;
901 	}
902 	/* No more than one allocation per order */
903 	if (dev->dma->bufs[order].buf_count != 0) {
904 		spin_unlock(&dev->dma_lock);
905 		return ENOMEM;
906 	}
907 
908 	ret = drm_do_addbufs_sg(dev, request);
909 
910 	spin_unlock(&dev->dma_lock);
911 
912 	return ret;
913 }
914 
915 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
916 {
917 	int order, ret;
918 
919 	if (!DRM_SUSER(DRM_CURPROC))
920 		return EACCES;
921 
922 	if (request->count < 0 || request->count > 4096)
923 		return EINVAL;
924 
925 	order = drm_order(request->size);
926 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
927 		return EINVAL;
928 
929 	spin_lock(&dev->dma_lock);
930 
931 	/* No more allocations after first buffer-using ioctl. */
932 	if (dev->buf_use != 0) {
933 		spin_unlock(&dev->dma_lock);
934 		return EBUSY;
935 	}
936 	/* No more than one allocation per order */
937 	if (dev->dma->bufs[order].buf_count != 0) {
938 		spin_unlock(&dev->dma_lock);
939 		return ENOMEM;
940 	}
941 
942 	ret = drm_do_addbufs_pci(dev, request);
943 
944 	spin_unlock(&dev->dma_lock);
945 
946 	return ret;
947 }
948 
949 /**
950  * Add buffers for DMA transfers (ioctl).
951  *
952  * \param inode device inode.
953  * \param file_priv DRM file private.
954  * \param cmd command.
955  * \param arg pointer to a struct drm_buf_desc request.
956  * \return zero on success or a negative number on failure.
957  *
958  * According to the memory type specified in drm_buf_desc::flags and the
959  * build options, it dispatches the call either to addbufs_agp(),
960  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
961  * PCI memory respectively.
962  */
963 int drm_addbufs(struct drm_device *dev, void *data,
964 		struct drm_file *file_priv)
965 {
966 	struct drm_buf_desc *request = data;
967 	int err;
968 
969 	if (request->flags & _DRM_AGP_BUFFER)
970 		err = drm_addbufs_agp(dev, request);
971 	else if (request->flags & _DRM_SG_BUFFER)
972 		err = drm_addbufs_sg(dev, request);
973 	else
974 		err = drm_addbufs_pci(dev, request);
975 
976 	return err;
977 }
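/*
 * From user space this dispatcher is normally reached through the libdrm
 * wrapper drmAddBufs() (see xf86drm.h for the exact prototype), which fills
 * in a struct drm_buf_desc and issues the DRM_IOCTL_ADD_BUFS ioctl that
 * lands here.
 */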
978 
979 /**
980  * Get information about the buffer mappings.
981  *
982  * This was originally meant for debugging purposes, or for use by a sophisticated
983  * client library to determine how best to use the available buffers (e.g.,
984  * large buffers can be used for image transfer).
985  *
986  * \param inode device inode.
987  * \param file_priv DRM file private.
988  * \param cmd command.
989  * \param arg pointer to a drm_buf_info structure.
990  * \return zero on success or a negative number on failure.
991  *
992  * Increments drm_device::buf_use while holding the drm_device::dma_lock
993  * spinlock, preventing allocation of more buffers after this call. Information
994  * about each requested buffer is then copied into user space.
995  */
996 int drm_infobufs(struct drm_device *dev, void *data,
997 		 struct drm_file *file_priv)
998 {
999 	drm_device_dma_t *dma = dev->dma;
1000 	struct drm_buf_info *request = data;
1001 	int i;
1002 	int count;
1003 	int retcode = 0;
1004 
1005 	spin_lock(&dev->dma_lock);
1006 	++dev->buf_use;		/* Can't allocate more after this call */
1007 	spin_unlock(&dev->dma_lock);
1008 
1009 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1010 		if (dma->bufs[i].buf_count)
1011 			++count;
1012 	}
1013 
1014 	DRM_DEBUG("count = %d\n", count);
1015 
1016 	if (request->count >= count) {
1017 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1018 			if (dma->bufs[i].buf_count) {
1019 				struct drm_buf_desc from;
1020 
1021 				from.count = dma->bufs[i].buf_count;
1022 				from.size = dma->bufs[i].buf_size;
1023 				from.low_mark = dma->bufs[i].freelist.low_mark;
1024 				from.high_mark = dma->bufs[i].freelist.high_mark;
1025 
1026 				if (DRM_COPY_TO_USER(&request->list[count], &from,
1027 				    sizeof(struct drm_buf_desc)) != 0) {
1028 					retcode = EFAULT;
1029 					break;
1030 				}
1031 
1032 				DRM_DEBUG("%d %d %d %d %d\n",
1033 				    i, dma->bufs[i].buf_count,
1034 				    dma->bufs[i].buf_size,
1035 				    dma->bufs[i].freelist.low_mark,
1036 				    dma->bufs[i].freelist.high_mark);
1037 				++count;
1038 			}
1039 		}
1040 	}
1041 	request->count = count;
1042 
1043 	return retcode;
1044 }
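/*
 * Sketch of the expected user-space calling convention, inferred from the
 * request->count handling above: call once with count == 0 to learn how many
 * size pools exist, allocate that many struct drm_buf_desc slots, then call
 * again to have them filled in (libdrm's drmGetBufInfo() wraps this).  The
 * snippet below is illustrative only.
 *
 *	struct drm_buf_info info = { 0, NULL };
 *
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *	info.list = calloc(info.count, sizeof(struct drm_buf_desc));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 */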
1045 
1046 /**
1047  * Specifies a low and high water mark for buffer allocation
1048  *
1049  * \param inode device inode.
1050  * \param file_priv DRM file private.
1051  * \param cmd command.
1052  * \param arg a pointer to a drm_buf_desc structure.
1053  * \return zero on success or a negative number on failure.
1054  *
1055  * Verifies that the size order is within the admissible range and updates the
1056  * low and high water marks of the corresponding drm_device_dma::bufs entry.
1057  *
1058  * \note This ioctl is deprecated and mostly never used.
1059  */
1060 int drm_markbufs(struct drm_device *dev, void *data,
1061 		 struct drm_file *file_priv)
1062 {
1063 	drm_device_dma_t *dma = dev->dma;
1064 	struct drm_buf_desc *request = data;
1065 	int order;
1066 
1067 	DRM_DEBUG("%d, %d, %d\n",
1068 		  request->size, request->low_mark, request->high_mark);
1069 
1070 
1071 	order = drm_order(request->size);
1072 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
1073 	    request->low_mark < 0 || request->high_mark < 0) {
1074 		return EINVAL;
1075 	}
1076 
1077 	spin_lock(&dev->dma_lock);
1078 	if (request->low_mark > dma->bufs[order].buf_count ||
1079 	    request->high_mark > dma->bufs[order].buf_count) {
1080 		spin_unlock(&dev->dma_lock);
1081 		return EINVAL;
1082 	}
1083 
1084 	dma->bufs[order].freelist.low_mark  = request->low_mark;
1085 	dma->bufs[order].freelist.high_mark = request->high_mark;
1086 	spin_unlock(&dev->dma_lock);
1087 
1088 	return 0;
1089 }
1090 
1091 /**
1092  * Unreserve the buffers in list, previously reserved using drmDMA.
1093  *
1094  * \param inode device inode.
1095  * \param file_priv DRM file private.
1096  * \param cmd command.
1097  * \param arg pointer to a drm_buf_free structure.
1098  * \return zero on success or a negative number on failure.
1099  *
1100  * Calls free_buffer() for each used buffer.
1101  * This function is primarily used for debugging.
1102  */
1103 int drm_freebufs(struct drm_device *dev, void *data,
1104 		 struct drm_file *file_priv)
1105 {
1106 	drm_device_dma_t *dma = dev->dma;
1107 	struct drm_buf_free *request = data;
1108 	int i;
1109 	int idx;
1110 	drm_buf_t *buf;
1111 	int retcode = 0;
1112 
1113 	DRM_DEBUG("%d\n", request->count);
1114 
1115 	spin_lock(&dev->dma_lock);
1116 	for (i = 0; i < request->count; i++) {
1117 		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
1118 			retcode = EFAULT;
1119 			break;
1120 		}
1121 		if (idx < 0 || idx >= dma->buf_count) {
1122 			DRM_ERROR("Index %d (of %d max)\n",
1123 			    idx, dma->buf_count - 1);
1124 			retcode = EINVAL;
1125 			break;
1126 		}
1127 		buf = dma->buflist[idx];
1128 		if (buf->file_priv != file_priv) {
1129 			DRM_ERROR("Process %d freeing buffer not owned\n",
1130 			    DRM_CURRENTPID);
1131 			retcode = EINVAL;
1132 			break;
1133 		}
1134 		drm_free_buffer(dev, buf);
1135 	}
1136 	spin_unlock(&dev->dma_lock);
1137 
1138 	return retcode;
1139 }
1140 
1141 /**
1142  * Maps all of the DMA buffers into client-virtual space (ioctl).
1143  *
1144  * \param inode device inode.
1145  * \param file_priv DRM file private.
1146  * \param cmd command.
1147  * \param arg pointer to a drm_buf_map structure.
1148  * \return zero on success or a negative number on failure.
1149  *
1150  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1151  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1152  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1153  * drm_mmap_dma().
1154  */
1155 int drm_mapbufs(struct drm_device *dev, void *data,
1156 	        struct drm_file *file_priv)
1157 {
1158 	drm_device_dma_t *dma = dev->dma;
1159 	int retcode = 0;
1160 	const int zero = 0;
1161 	vm_offset_t address;
1162 	struct vmspace *vms;
1163 	vm_ooffset_t foff;
1164 	vm_size_t size;
1165 	vm_offset_t vaddr;
1166 	struct drm_buf_map *request = data;
1167 	int i;
1168 
1169 	vms = DRM_CURPROC->td_proc->p_vmspace;
1170 
1171 	spin_lock(&dev->dma_lock);
1172 	dev->buf_use++;		/* Can't allocate more after this call */
1173 	spin_unlock(&dev->dma_lock);
1174 
1175 	if (request->count < dma->buf_count)
1176 		goto done;
1177 
1178 	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
1179 	    (drm_core_check_feature(dev, DRIVER_SG) &&
1180 	    (dma->flags & _DRM_DMA_USE_SG))) {
1181 		drm_local_map_t *map = dev->agp_buffer_map;
1182 
1183 		if (map == NULL) {
1184 			retcode = EINVAL;
1185 			goto done;
1186 		}
1187 		size = round_page(map->size);
1188 		foff = (unsigned long)map->handle;
1189 	} else {
1190 		size = round_page(dma->byte_count);
1191 		foff = 0;
1192 	}
1193 
1194 	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1195 	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1196 	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
1197 	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
1198 	if (retcode)
1199 		goto done;
1200 
1201 	request->virtual = (void *)vaddr;
1202 
1203 	for (i = 0; i < dma->buf_count; i++) {
1204 		if (DRM_COPY_TO_USER(&request->list[i].idx,
1205 		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
1206 			retcode = EFAULT;
1207 			goto done;
1208 		}
1209 		if (DRM_COPY_TO_USER(&request->list[i].total,
1210 		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
1211 			retcode = EFAULT;
1212 			goto done;
1213 		}
1214 		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
1215 		    sizeof(zero))) {
1216 			retcode = EFAULT;
1217 			goto done;
1218 		}
1219 		address = vaddr + dma->buflist[i]->offset; /* *** */
1220 		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
1221 		    sizeof(address))) {
1222 			retcode = EFAULT;
1223 			goto done;
1224 		}
1225 	}
1226 
1227  done:
1228 	request->count = dma->buf_count;
1229 
1230 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1231 
1232 	return retcode;
1233 }
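/*
 * The user-space counterpart is libdrm's drmMapBufs(), which issues
 * DRM_IOCTL_MAP_BUFS and hands back the per-buffer index/size/address table
 * copied out above; drmUnmapBufs() tears the mapping down again.  (Sketch
 * only; see xf86drm.h for the exact prototypes.)
 */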
1234 
1235 /**
1236  * Compute size order.  Returns the exponent of the smallest power of two which
1237  * is greater than or equal to the given number.
1238  *
1239  * \param size size.
1240  * \return order.
1241  *
1242  * \todo Can be made faster.
1243  */
1244 int drm_order(unsigned long size)
1245 {
1246 	int order;
1247 	unsigned long tmp;
1248 
1249 	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1250 
1251 	if (size & (size - 1))
1252 		++order;
1253 
1254 	return order;
1255 }
1256 EXPORT_SYMBOL(drm_order);
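/*
 * A few sample values, handy when reading the addbufs paths above and easy
 * to verify against the loop: drm_order(1) == 0, drm_order(4096) == 12,
 * drm_order(4097) == 13, drm_order(65536) == 16.
 */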
1257