/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 2015-2019 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUX_MM_H_
#define	_LINUX_MM_H_

#include <linux/errno.h>

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/err.h>
#include <linux/shrinker.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

static inline struct page *
nth_page(struct page *page, int n)
{
	return page + n;
}

#define PAGE_ALIGN(addr) round_page(addr)

#define VM_FAULT_RETRY		0x0400

#define FAULT_FLAG_ALLOW_RETRY		0x04
#define FAULT_FLAG_RETRY_NOWAIT		0x08

struct vm_fault {
	unsigned int flags;
	void __user *virtual_address;
};

#define VM_FAULT_NOPAGE		0x0001
#define VM_FAULT_SIGBUS		0x0002
#define VM_FAULT_OOM		0x0004

#define VM_DONTDUMP	0x0001
#define VM_DONTEXPAND	0x0002
#define VM_IO		0x0004
#define VM_MIXEDMAP	0x0008

struct vm_operations_struct {
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	void (*open)(struct vm_area_struct *vma);
	void (*close)(struct vm_area_struct *vma);
};
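
/*
 * Illustrative sketch (not part of the original header) of how a driver
 * typically fills in these hooks and uses the VM_FAULT_* / FAULT_FLAG_*
 * constants above; the example_* names are hypothetical helpers.
 *
 *	static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		if (example_object_is_busy(vma) &&
 *		    (vmf->flags & FAULT_FLAG_ALLOW_RETRY))
 *			return VM_FAULT_RETRY;	// back off, fault will be retried
 *		if (example_insert_page(vma, vmf->virtual_address) != 0)
 *			return VM_FAULT_SIGBUS;	// could not map the page
 *		return VM_FAULT_NOPAGE;		// page installed by the handler
 *	}
 *
 *	static const struct vm_operations_struct example_vm_ops = {
 *		.fault	= example_fault,
 *	};
 */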

/*
 * Compute the allocation order: the base-2 log of the number of pages
 * needed for size bytes, rounded up to a power of two.
 */
static inline int
get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> PAGE_SHIFT;
	order = 0;
	while (size) {
		order++;
		size >>= 1;
	}
	return (order);
}
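
/*
 * Worked example (illustration only, not part of the original header):
 * get_order(PAGE_SIZE) == 0 (one page), get_order(PAGE_SIZE + 1) == 1
 * (two pages), and get_order(5 * PAGE_SIZE) == 3, since five pages round
 * up to the next power of two, eight pages:
 *
 *	size_t bytes = 5 * PAGE_SIZE;
 *	int order = get_order(bytes);		// order == 3
 *	size_t alloc = PAGE_SIZE << order;	// 8 pages actually covered
 */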

/*
 * This only works via mmap ops.
 */
static inline int
io_remap_pfn_range(struct vm_area_struct *vma,
    unsigned long addr, unsigned long pfn, unsigned long size,
    vm_memattr_t prot)
{
	vma->vm_page_prot = prot;
	vma->vm_pfn = pfn;

	return (0);
}
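
/*
 * Usage sketch (an assumption, not part of the original header): on this
 * compatibility layer the call merely records the pfn and memory attributes
 * on the vma, and the real mapping is established later by the mmap path.
 * A hypothetical driver mmap op could look like:
 *
 *	static int example_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = example_bar_phys >> PAGE_SHIFT;	// hypothetical BAR address
 *
 *		return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */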

static inline unsigned long
vma_pages(struct vm_area_struct *vma)
{
	unsigned long size;

	size = vma->vm_end - vma->vm_start;

	return size >> PAGE_SHIFT;
}

#define offset_in_page(off)	((off) & PAGE_MASK)

static inline void
set_page_dirty(struct page *page)
{
	vm_page_dirty((struct vm_page *)page);
}

/*
 * Allocate multiple contiguous pages.  The DragonFly code can only do
 * multiple allocations via the free page reserve.  Linux does not appear
 * to restrict the address space, so neither do we.
 */
static inline struct vm_page *
alloc_pages(int flags, u_int order)
{
	size_t bytes = PAGE_SIZE << order;
	struct vm_page *pgs;

	pgs = vm_page_alloc_contig(0LLU, ~0LLU, bytes, bytes, bytes,
				   VM_MEMATTR_DEFAULT);
	kprintf("alloc_pages order %u vm_pages=%p\n", order, pgs);
	return pgs;
}

/*
 * Free multiple contiguous pages
 */
static inline void
__free_pages(struct vm_page *pgs, u_int order)
{
	size_t bytes = PAGE_SIZE << order;

	vm_page_free_contig(pgs, bytes);
}
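
/*
 * Pairing sketch (illustration only, not part of the original header):
 * alloc_pages() and __free_pages() must be called with the same order so
 * the same number of bytes is returned to the contiguous reserve.
 *
 *	struct vm_page *pgs;
 *
 *	pgs = alloc_pages(GFP_KERNEL, 2);	// 4 contiguous pages, NULL on failure
 *	if (pgs != NULL) {
 *		// use the pages here
 *		__free_pages(pgs, 2);		// release all 4 pages
 *	}
 */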

static inline void
get_page(struct vm_page *page)
{
	vm_page_hold(page);
}

extern vm_paddr_t Realmem;

static inline unsigned long get_num_physpages(void)
{
	return Realmem / PAGE_SIZE;
}

int is_vmalloc_addr(const void *x);

static inline void
unmap_mapping_range(struct address_space *mapping,
	loff_t const holebegin, loff_t const holelen, int even_cows)
{
}

#define VM_SHARED	0x00000008

#define VM_PFNMAP	0x00000400

static inline struct page *
vmalloc_to_page(const void *addr)
{
	vm_paddr_t paddr;

	paddr = pmap_kextract((vm_offset_t)addr);
	return (struct page *)(PHYS_TO_VM_PAGE(paddr));
}

#endif	/* _LINUX_MM_H_ */