/*-
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $
 */

/** @file drm_vm.c
 * Support code for mmaping of DRM maps.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include "drm_internal.h"
#include "drm_legacy.h"

#include <sys/mutex2.h>

int
drm_mmap(struct dev_mmap_args *ap)
{
	struct file *filp = ap->a_fp;
	struct drm_file *priv;
	struct cdev *kdev = ap->a_head.a_dev;
	vm_offset_t offset = ap->a_offset;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_local_map *map = NULL;
	struct drm_hash_item *hash;
	enum drm_map_type type;
	vm_paddr_t phys;

	/*
	 * NOTE: If ddev->drm_ttm_bdev is not set up properly, this path
	 * may be hit with a NULL filp and panic.
	 */
	priv = filp->private_data;
	if (!priv->authenticated)
		return -EACCES;

	DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset);
	if (dev->dma && offset < ptoa(dev->dma->page_count)) {
		struct drm_device_dma *dma = dev->dma;

		if (dma->pagelist != NULL) {
			unsigned long page = offset >> PAGE_SHIFT;
			unsigned long phys = dma->pagelist[page];

			// XXX *paddr = phys;
			ap->a_result = phys;
			return 0;
		} else {
			return -1;
		}
	}

	/*
	 * A sequential search of a linked list is fine here because:
	 * 1) there will only be about 5-10 entries in the list and,
	 * 2) a DRI client only has to do this mapping once, so it
	 * doesn't have to be optimized for performance, even if the
	 * list was a bit longer.
	 */
	DRM_LOCK(dev);

	if (drm_ht_find_item(&dev->map_hash, offset, &hash)) {
		DRM_ERROR("Could not find map\n");
		DRM_UNLOCK(dev);
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (map == NULL) {
		DRM_DEBUG("Can't find map, request offset = %016jx\n",
		    (uintmax_t)offset);
		DRM_UNLOCK(dev);
		return -1;
	}
	if (((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		DRM_UNLOCK(dev);
		DRM_DEBUG("restricted map\n");
		return -1;
	}

	type = map->type;
	DRM_UNLOCK(dev);

	switch (type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_AGP:
#if 0	/* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_REGISTERS:
		phys = map->offset + offset;
		break;
	case _DRM_SCATTER_GATHER:
#if 0	/* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_CONSISTENT:
	case _DRM_SHM:
		phys = vtophys((char *)map->handle + offset);
		break;
	default:
		DRM_ERROR("bad map type %d\n", type);
		return -1;	/* This should never happen. */
	}

	ap->a_result = atop(phys);
	return 0;
}
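
/*
 * Illustrative sketch, not compiled into the driver: in the map path above
 * drm_mmap() stores atop(phys) in ap->a_result -- a physical page index,
 * i.e. the address shifted right by PAGE_SHIFT.  ptoa() is the inverse,
 * recovering the page-aligned physical address.  The value below is a
 * made-up example assuming 4 KiB pages.
 */
#if 0	/* illustrative only */
	vm_paddr_t phys = 0xd0008123;	/* arbitrary example physical address */
	vm_pindex_t pidx = atop(phys);	/* 0xd0008: what drm_mmap returns */
	vm_paddr_t base = ptoa(pidx);	/* 0xd0008000: page-aligned again */
#endif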

/* XXX The following is just a temporary hack to replace the
 * vm_phys_fictitious functions available on FreeBSD
 */
#define VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx = MTX_INITIALIZER("vmphy");

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;

	page_count = (end - start) / PAGE_SIZE;

	fp = kmalloc(page_count * sizeof(struct vm_page), M_DRM,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		atomic_clear_int(&fp[i].busy_count, PBUSY_LOCKED);
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	kfree(fp);
	return (EBUSY);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			kfree(fp);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering not registered fictitious range"));
}
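
/*
 * A minimal usage sketch of the three helpers above (not compiled; the
 * aperture base and size are made-up example values).  A caller registers a
 * physical range once, translates addresses inside that range to the fake
 * vm_page structures while the range is in use, and unregisters it on
 * teardown.
 */
#if 0	/* illustrative only */
	vm_paddr_t base = 0xd0000000;		/* hypothetical MMIO aperture */
	vm_paddr_t size = 16 * 1024 * 1024;	/* hypothetical 16 MB window */
	vm_page_t m;

	if (vm_phys_fictitious_reg_range(base, base + size,
	    VM_MEMATTR_WRITE_COMBINING) != 0) {
		/* EBUSY: all VM_PHYS_FICTITIOUS_NSEGS slots are taken */
	}

	m = vm_phys_fictitious_to_vm_page(base + PAGE_SIZE);
	KASSERT(m != NULL, ("lookup inside a registered range returned NULL"));

	vm_phys_fictitious_unreg_range(base, base + size);
#endif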