xref: /dragonfly/sys/dev/drm/drm_vm.c (revision 3ea159d2)
/*-
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $
 */

/** @file drm_vm.c
 * Support code for mmaping of DRM maps.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include "drm_internal.h"
#include "drm_legacy.h"

#include <sys/mutex2.h>

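/*
 * dev_mmap handler for the legacy DRM character device.  Resolves the
 * mmap offset either through the DMA buffer pagelist or through the
 * device map hash and hands the resulting translation back to the VM
 * system via ap->a_result.
 */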
int drm_mmap(struct dev_mmap_args *ap)
{
	struct file *filp = ap->a_fp;
	struct drm_file *priv = filp->private_data;
	struct cdev *kdev = ap->a_head.a_dev;
	vm_offset_t offset = ap->a_offset;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_local_map *map = NULL;
	struct drm_hash_item *hash;

	enum drm_map_type type;
	vm_paddr_t phys;

	if (!priv->authenticated)
		return -EACCES;

	DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset);
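	/*
	 * Offsets below the end of the DMA buffer area are backed by the
	 * DMA buffer pagelist; everything else is resolved through the
	 * map hash below.
	 */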
	if (dev->dma && offset < ptoa(dev->dma->page_count)) {
		struct drm_device_dma *dma = dev->dma;

		if (dma->pagelist != NULL) {
			unsigned long page = offset >> PAGE_SHIFT;
			unsigned long phys = dma->pagelist[page];

			// XXX *paddr = phys;
			ap->a_result = phys;
			return 0;
		} else {
			return -1;
		}
	}

	/* The map for this offset is looked up in the dev->map_hash
	   table.  A DRI client only has to perform this mapping once,
	   so the lookup does not need to be optimized for performance.
	*/
	DRM_LOCK(dev);

	if (drm_ht_find_item(&dev->map_hash, offset, &hash)) {
		DRM_UNLOCK(dev);
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (map == NULL) {
		DRM_DEBUG("Can't find map, request offset = %016jx\n",
		    (uintmax_t)offset);
		DRM_UNLOCK(dev);
		return -1;
	}
	if (((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		DRM_UNLOCK(dev);
		DRM_DEBUG("restricted map\n");
		return -1;
	}

	type = map->type;
	DRM_UNLOCK(dev);

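	/*
	 * Turn the map-relative offset into a physical address.  Frame
	 * buffer, AGP and register maps carry a physical base address in
	 * map->offset; shared memory, consistent and scatter/gather maps
	 * carry a kernel virtual address in map->handle and are translated
	 * through vtophys().
	 */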
	switch (type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_AGP:
#if 0	/* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_REGISTERS:
		phys = map->offset + offset;
		break;
	case _DRM_SCATTER_GATHER:
#if 0	/* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_CONSISTENT:
	case _DRM_SHM:
		phys = vtophys((char *)map->handle + offset);
		break;
	default:
		DRM_ERROR("bad map type %d\n", type);
		return -1;	/* This should never happen. */
	}

	ap->a_result = atop(phys);
	return 0;
}
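
/*
 * Illustrative sketch (not part of the driver): a legacy DRI client maps
 * one of these ranges by handing the map offset it obtained from the
 * kernel (e.g. through drmAddMap()) to mmap() on the DRM device node,
 * which ends up in drm_mmap() above:
 *
 *	void *va = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, drm_fd, map_offset);
 *
 * "drm_fd", "map_size" and "map_offset" are placeholder names for values
 * the client obtained elsewhere.
 */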

/* XXX The following is just a temporary hack to replace the
 * vm_phys_fictitious functions available on FreeBSD.
 */
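/*
 * Fictitious ranges are kept in a small fixed-size table of segments,
 * protected by vm_phys_fictitious_reg_mtx.  Each registered segment maps
 * the physical range [start, end) onto an array of fake vm_page
 * structures allocated at registration time.
 */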
#define VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx = MTX_INITIALIZER("vmphy");

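/*
 * Look up the fake vm_page backing physical address "pa", or return NULL
 * if the address falls outside every registered fictitious range.
 */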
vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

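/*
 * Register [start, end) as a fictitious range: allocate and initialize a
 * fake vm_page for every page in the range and record the range in the
 * first free segment slot.  Returns 0 on success or EBUSY if all
 * VM_PHYS_FICTITIOUS_NSEGS slots are already in use.
 */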
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
			     vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;

	page_count = (end - start) / PAGE_SIZE;

	fp = kmalloc(page_count * sizeof(struct vm_page), M_DRM,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		atomic_clear_int(&fp[i].busy_count, PBUSY_LOCKED);
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	kfree(fp);
	return (EBUSY);
}

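/*
 * Tear down a range previously registered with
 * vm_phys_fictitious_reg_range(): clear the matching segment slot and
 * free its fake vm_page array.  Asserts (KASSERT) if the range was never
 * registered.
 */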
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			kfree(fp);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering not registered fictitious range"));
}
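
/*
 * Illustrative sketch of a typical caller of the shim above (not actual
 * code from this file; "bar_start" and "bar_size" are hypothetical values
 * describing a device aperture):
 *
 *	if (vm_phys_fictitious_reg_range(bar_start, bar_start + bar_size,
 *	    VM_MEMATTR_WRITE_COMBINING) == 0) {
 *		vm_page_t m;
 *
 *		m = vm_phys_fictitious_to_vm_page(bar_start);
 *		...
 *		vm_phys_fictitious_unreg_range(bar_start,
 *		    bar_start + bar_size);
 *	}
 */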