/*-
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $
 */

/** @file drm_vm.c
 * Support code for mmapping of DRM maps.
 */

#include <sys/conf.h>
#include <sys/devfs.h>
#include <sys/mutex2.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <drm/drmP.h>
#include <asm/pgtable.h>
#include "drm_legacy.h"

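/*
 * dev_mmap callback for DRM character devices.  DragonFly resolves a
 * mapping one page at a time: a_offset is the byte offset into the
 * device, and on success the handler stores the backing physical page
 * index (not a byte address) in ap->a_result.
 */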
int drm_mmap(struct dev_mmap_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	vm_offset_t offset = ap->a_offset;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_file *file_priv = NULL;
	struct drm_local_map *map = NULL;
	int error;
	struct drm_hash_item *hash;

	enum drm_map_type type;
	vm_paddr_t phys;

	/* d_mmap gets called twice; we can only reference file_priv during
	 * the first call.  We need to assume that if the error is EBADF the
	 * first call was successful and the client is authenticated.
	 */
	error = devfs_get_cdevpriv(ap->a_fp, (void **)&file_priv);
	if (error == ENOENT) {
		DRM_ERROR("Could not find authenticator!\n");
		return EINVAL;
	}

	if (file_priv && !file_priv->authenticated)
		return EACCES;

	DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset);
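	/*
	 * Offsets below the end of the DMA buffer area refer to the
	 * driver's DMA buffers; dma->pagelist[] holds the physical
	 * address of each buffer page.
	 */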
	if (dev->dma && offset < ptoa(dev->dma->page_count)) {
		struct drm_device_dma *dma = dev->dma;

		spin_lock(&dev->dma_lock);

		if (dma->pagelist != NULL) {
			unsigned long page = offset >> PAGE_SHIFT;
			unsigned long phys = dma->pagelist[page];

			spin_unlock(&dev->dma_lock);
			/* a_result is a page index, as in the map path below. */
			ap->a_result = atop(phys);
			return 0;
		} else {
			spin_unlock(&dev->dma_lock);
			return -1;
		}
	}

	/* Look the offset up in the map hash.  A DRI client only has to
	 * perform this translation once per map, so the lookup does not
	 * need to be optimized for performance.
	 */
	DRM_LOCK(dev);

	if (drm_ht_find_item(&dev->map_hash, offset, &hash)) {
		DRM_ERROR("Could not find map\n");
		DRM_UNLOCK(dev);
		return -1;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (map == NULL) {
		DRM_DEBUG("Can't find map, request offset = %016jx\n",
		    (uintmax_t)offset);
		DRM_UNLOCK(dev);
		return -1;
	}
	if ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)) {
		DRM_UNLOCK(dev);
		DRM_DEBUG("restricted map\n");
		return -1;
	}

	type = map->type;
	DRM_UNLOCK(dev);

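	/*
	 * Translate the offset within the map to a physical address:
	 * frame buffer, AGP and register maps are physically contiguous,
	 * so map->offset plus the page offset is the answer, while SG,
	 * consistent and SHM maps live in kernel virtual memory and must
	 * go through vtophys().
	 */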
	switch (type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_AGP:
#if 0	/* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_REGISTERS:
		phys = map->offset + offset;
		break;
	case _DRM_SCATTER_GATHER:
#if 0	/* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_CONSISTENT:
	case _DRM_SHM:
		phys = vtophys((char *)map->handle + offset);
		break;
	default:
		DRM_ERROR("bad map type %d\n", type);
		return -1;	/* This should never happen. */
	}

	ap->a_result = atop(phys);
	return 0;
}

/* XXX The following is just a temporary hack to replace the
 * vm_phys_fictitious functions available on FreeBSD.
 */
#define VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx = MTX_INITIALIZER("vmphy");

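/*
 * Return the fake vm_page backing the fictitious physical address pa,
 * or NULL if pa lies in none of the registered segments.
 */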
vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

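/*
 * Register [start, end) as a fictitious range: allocate one zeroed
 * fake vm_page per page in the range and record them in a free slot
 * of the segment table.  Returns EBUSY once all
 * VM_PHYS_FICTITIOUS_NSEGS slots are taken.
 */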
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;

	page_count = (end - start) / PAGE_SIZE;

	fp = kmalloc(page_count * sizeof(struct vm_page), M_DRM,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		fp[i].flags &= ~(PG_BUSY | PG_UNMANAGED);
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	kfree(fp);
	return (EBUSY);
}

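/*
 * Tear down a range registered with vm_phys_fictitious_reg_range() and
 * free its fake pages.  A caller (hypothetical sketch, not taken from a
 * real driver) would bracket the lifetime of an aperture with the pair:
 *
 *	error = vm_phys_fictitious_reg_range(base, base + size,
 *	    VM_MEMATTR_WRITE_COMBINING);
 *	if (error)
 *		return (error);
 *	...
 *	vm_phys_fictitious_unreg_range(base, base + size);
 */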
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			kfree(fp);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering a fictitious range that was never registered"));
}