xref: /dragonfly/sys/dev/drm/linux_vmalloc.c (revision d8d5b238)
1 /*
2  * Copyright (c) 2017-2019 François Tigeot <ftigeot@wolfpond.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/queue.h>
28 #include <vm/vm_extern.h>
29 
30 #include <linux/vmalloc.h>
31 #include <linux/slab.h>
32 #include <linux/mm.h>
33 
/*
 * Book-keeping record for one area created by vmap(): the kernel
 * virtual base address and the number of pages mapped there.
 * Entries are kept on vmap_list so vunmap() can recover the mapping
 * size from the address alone.
 */
struct vmap {
	void *addr;			/* KVA base returned by vmap() */
	int npages;			/* number of PAGE_SIZE pages at addr */
	SLIST_ENTRY(vmap) vm_vmaps;	/* linkage on vmap_list */
};

/*
 * Global list of live vmap() areas.
 * NOTE(review): traversed and modified without a lock; presumably the
 * callers serialize access externally — confirm.
 */
SLIST_HEAD(vmap_list_head, vmap) vmap_list = SLIST_HEAD_INITIALIZER(vmap_list);
41 
42 /* vmap: map an array of pages into virtually contiguous space */
43 void *
44 vmap(struct page **pages, unsigned int count,
45 	unsigned long flags, pgprot_t prot)
46 {
47 	struct vmap *vmp;
48 	vm_offset_t off;
49 	size_t size;
50 
51 	vmp = kmalloc(sizeof(struct vmap), M_DRM, M_WAITOK | M_ZERO);
52 
53 	size = count * PAGE_SIZE;
54 	off = kmem_alloc_nofault(&kernel_map, size,
55 				 VM_SUBSYS_DRM_VMAP, PAGE_SIZE);
56 	if (off == 0)
57 		return (NULL);
58 
59 	vmp->addr = (void *)off;
60 	vmp->npages = count;
61 	pmap_qenter(off, (struct vm_page **)pages, count);
62 	SLIST_INSERT_HEAD(&vmap_list, vmp, vm_vmaps);
63 
64 	return (void *)off;
65 }
66 
67 void
68 vunmap(const void *addr)
69 {
70 	struct vmap *vmp, *tmp_vmp;
71 	size_t size;
72 
73 	SLIST_FOREACH_MUTABLE(vmp, &vmap_list, vm_vmaps, tmp_vmp) {
74 		if (vmp->addr == addr) {
75 			size = vmp->npages * PAGE_SIZE;
76 
77 			pmap_qremove((vm_offset_t)addr, vmp->npages);
78 			kmem_free(&kernel_map, (vm_offset_t)addr, size);
79 			goto found;
80 		}
81 	}
82 
83 found:
84 	SLIST_REMOVE(&vmap_list, vmp, vmap, vm_vmaps);
85 	kfree(vmp);
86 }
87 
88 int
89 is_vmalloc_addr(const void *x)
90 {
91 	struct vmap *vmp, *tmp_vmp;
92 
93 	SLIST_FOREACH_MUTABLE(vmp, &vmap_list, vm_vmaps, tmp_vmp) {
94 		if (vmp->addr == x)
95 			return 1;
96 	}
97 
98 	return false;
99 }
100 
101 void *
102 vmalloc(unsigned long size)
103 {
104 	return kmalloc(size, M_DRM, M_WAITOK);
105 }
106 
107 void *
108 vzalloc(unsigned long size)
109 {
110 	return kmalloc(size, M_DRM, M_WAITOK | M_ZERO);
111 }
112 
113 /* allocate zeroed virtually contiguous memory for userspace */
114 void *
115 vmalloc_user(unsigned long size)
116 {
117 	return kmalloc(size, M_DRM, M_WAITOK | M_ZERO);
118 }
119 
/*
 * vfree: release memory obtained from vmalloc()/vzalloc().
 * The union strips the const qualifier without a cast warning,
 * since kfree() takes a non-const pointer.
 */
void
vfree(const void *addr)
{
	union {
		const void *ro;
		void *rw;
	} u;

	u.ro = addr;
	kfree(u.rw);
}
128