xref: /dragonfly/sys/dev/drm/include/linux/mm.h (revision 5ca0a96d)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6  * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
7  * Copyright (c) 2015-2020 François Tigeot <ftigeot@wolfpond.org>
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice unmodified, this list of conditions, and the following
15  *    disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 #ifndef	_LINUX_MM_H_
32 #define	_LINUX_MM_H_
33 
34 #include <linux/errno.h>
35 
36 #include <linux/mmdebug.h>
37 #include <linux/gfp.h>
38 #include <linux/bug.h>
39 #include <linux/list.h>
40 #include <linux/mmzone.h>
41 #include <linux/rbtree.h>
42 #include <linux/atomic.h>
43 #include <linux/mm_types.h>
44 #include <linux/err.h>
45 #include <linux/shrinker.h>
46 
47 #include <asm/page.h>
48 #include <asm/pgtable.h>
49 #include <asm/processor.h>
50 
51 static inline struct page *
52 nth_page(struct page *page, int n)
53 {
54 	return page + n;
55 }
56 
/* Round addr up to the next page boundary (maps onto BSD round_page()). */
#define PAGE_ALIGN(addr) round_page(addr)

/* fault() return bit: caller should retry the fault. */
#define VM_FAULT_RETRY		0x0400

/* vm_fault.flags bits (subset of Linux's FAULT_FLAG_* used by drm). */
#define FAULT_FLAG_ALLOW_RETRY		0x04	/* caller tolerates VM_FAULT_RETRY */
#define FAULT_FLAG_RETRY_NOWAIT		0x08	/* do not sleep while retrying */
63 
/*
 * Minimal Linux-compatible fault descriptor handed to
 * vm_operations_struct.fault().
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* vma in which the fault occurred */
	unsigned int flags;		/* FAULT_FLAG_* bits */
	void __user *virtual_address;	/* faulting user-space address */
};
69 
/* fault() handler return codes. */
#define VM_FAULT_NOPAGE		0x0001	/* handler installed the page itself */
#define VM_FAULT_SIGBUS		0x0002	/* deliver SIGBUS to the faulter */
#define VM_FAULT_OOM		0x0004	/* allocation failure during fault */

/* vm_area_struct vm_flags bits (compat-local values). */
#define VM_DONTDUMP	0x0001
#define VM_DONTEXPAND	0x0002
#define VM_IO		0x0004
#define VM_MIXEDMAP	0x0008
78 
/*
 * Linux-compatible vma operations vector; drm drivers supply these
 * callbacks for page-fault handling and vma lifetime events.
 */
struct vm_operations_struct {
	/* Resolve a fault in the vma; returns VM_FAULT_* bits. */
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	void (*open)(struct vm_area_struct *vma);	/* vma opened/duplicated */
	void (*close)(struct vm_area_struct *vma);	/* vma being torn down */
	/* Read/write vma memory out of band (len bytes at addr via buf). */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
};
86 
87 /*
88  * Compute log2 of the power of two rounded up count of pages
89  * needed for size bytes.
90  */
91 static inline int
92 get_order(unsigned long size)
93 {
94 	int order;
95 
96 	size = (size - 1) >> PAGE_SHIFT;
97 	order = 0;
98 	while (size) {
99 		order++;
100 		size >>= 1;
101 	}
102 	return (order);
103 }
104 
105 /*
106  * This only works via mmap ops.
107  */
108 static inline int
109 io_remap_pfn_range(struct vm_area_struct *vma,
110     unsigned long addr, unsigned long pfn, unsigned long size,
111     vm_memattr_t prot)
112 {
113 	vma->vm_page_prot = prot;
114 	vma->vm_pfn = pfn;
115 
116 	return (0);
117 }
118 
119 static inline unsigned long
120 vma_pages(struct vm_area_struct *vma)
121 {
122 	unsigned long size;
123 
124 	size = vma->vm_end - vma->vm_start;
125 
126 	return size >> PAGE_SHIFT;
127 }
128 
/*
 * Byte offset of 'off' within its page.  Assumes the BSD convention
 * PAGE_MASK == (PAGE_SIZE - 1); Linux defines PAGE_MASK as the complement,
 * which is why this is '& PAGE_MASK' and not '& ~PAGE_MASK' -- TODO
 * confirm against the platform's param.h.
 */
#define offset_in_page(off)	((unsigned long)(off) & PAGE_MASK)
130 
/* Mark the whole page dirty (Linux set_page_dirty() -> vm_page_dirty()). */
static inline void
set_page_dirty(struct page *page)
{
	struct vm_page *vmp = (struct vm_page *)page;

	vm_page_dirty(vmp);
}
136 
/*
 * Take an additional reference on a page (Linux get_page()), implemented
 * with vm_page_hold().  NOTE(review): put_page() below releases via
 * vm_page_unwire(), not vm_page_drop/unhold -- the two are not a
 * symmetric pair; callers must ensure their acquire/release primitives
 * actually match up.
 */
static inline void
get_page(struct vm_page *page)
{
	vm_page_hold(page);
}
142 
143 extern vm_paddr_t Realmem;
144 
145 static inline unsigned long get_num_physpages(void)
146 {
147 	return Realmem / PAGE_SIZE;
148 }
149 
/* Non-zero if 'x' was allocated by vmalloc(); implemented out of line. */
int is_vmalloc_addr(const void *x);
151 
152 static inline void
153 unmap_mapping_range(struct address_space *mapping,
154 	loff_t const holebegin, loff_t const holelen, int even_cows)
155 {
156 }
157 
/* Additional vm_area_struct vm_flags bits. */
#define VM_SHARED	0x00000008

#define VM_PFNMAP	0x00000400	/* vma maps raw page frames (see vm_pfn) */
161 
162 static inline struct page *
163 vmalloc_to_page(const void *addr)
164 {
165 	vm_paddr_t paddr;
166 
167 	paddr = pmap_kextract((vm_offset_t)addr);
168 	return (struct page *)(PHYS_TO_VM_PAGE(paddr));
169 }
170 
/*
 * Release a page reference (Linux put_page()).  The sequence is
 * order-sensitive: the page is busied first, the wiring is dropped while
 * busy, and the busy state is cleared (waking any waiters) last.
 * NOTE(review): the "i915gem" wait message is historical -- this helper
 * serves all drm drivers, not just i915.
 */
static inline void
put_page(struct page *page)
{
	vm_page_busy_wait((struct vm_page *)page, FALSE, "i915gem");
	vm_page_unwire((struct vm_page *)page, 1);
	vm_page_wakeup((struct vm_page *)page);
}
178 
/*
 * Return an address for the page's data.
 *
 * NOTE(review): this yields the page's *physical* address
 * (VM_PAGE_TO_PHYS) cast to a pointer, not a kernel virtual address as
 * Linux's page_address() does.  Presumably callers translate it (e.g.
 * through the direct map) or use it only as an opaque value -- confirm
 * before dereferencing the result directly.
 */
static inline void *
page_address(const struct page *page)
{
	return (void *)VM_PAGE_TO_PHYS((const struct vm_page *)page);
}
184 
/* Allocate an array of n elements of 'size' bytes; implemented out of line. */
void * kvmalloc_array(size_t n, size_t size, gfp_t flags);

/* Free kvmalloc_array() memory; plain kfree() suffices in this layer. */
#define kvfree(addr)	kfree(addr)

/* get_user_pages() flag: caller intends to write to the pages. */
#define FOLL_WRITE	0x01
190 
191 #endif	/* _LINUX_MM_H_ */
192