xref: /dragonfly/sys/dev/drm/include/linux/mm.h (revision 207ba670)
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 2015-2019 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUX_MM_H_
#define	_LINUX_MM_H_

#include <linux/errno.h>

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/err.h>
#include <linux/shrinker.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

struct vm_operations_struct;

static inline struct page *
nth_page(struct page *page, int n)
{
	return page + n;
}

#define PAGE_ALIGN(addr) round_page(addr)

#define VM_FAULT_RETRY		0x0400

#define FAULT_FLAG_ALLOW_RETRY		0x04
#define FAULT_FLAG_RETRY_NOWAIT		0x08

struct vm_fault {
	unsigned int flags;
	void __user *virtual_address;
};

#define VM_FAULT_NOPAGE		0x0001
#define VM_FAULT_SIGBUS		0x0002
#define VM_FAULT_OOM		0x0004

#define VM_DONTDUMP	0x0001
#define VM_DONTEXPAND	0x0002
#define VM_IO		0x0004
#define VM_MIXEDMAP	0x0008

struct vm_operations_struct {
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	void (*open)(struct vm_area_struct *vma);
	void (*close)(struct vm_area_struct *vma);
};

/*
 * Compute the base-2 logarithm (allocation order) of the number of pages,
 * rounded up to a power of two, needed to hold size bytes.
 */
static inline int
get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> PAGE_SHIFT;
	order = 0;
	while (size) {
		order++;
		size >>= 1;
	}
	return (order);
}
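
/*
 * Worked example (illustrative only, not part of the original header);
 * the values hold for any power-of-two PAGE_SIZE:
 *
 *	get_order(1)			-> 0	(a single page)
 *	get_order(PAGE_SIZE)		-> 0
 *	get_order(PAGE_SIZE + 1)	-> 1	(rounds up to two pages)
 *	get_order(8 * PAGE_SIZE)	-> 3	(eight pages, 2^3)
 */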

/*
 * This only works via mmap ops.
 */
static inline int
io_remap_pfn_range(struct vm_area_struct *vma,
    unsigned long addr, unsigned long pfn, unsigned long size,
    vm_memattr_t prot)
{
	vma->vm_page_prot = prot;
	vma->vm_pfn = pfn;

	return (0);
}
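
/*
 * Illustrative call as it might appear in a driver's mmap handler, mapping
 * "size" bytes of device memory whose physical base is "phys" (the variable
 * names and the write-combining attribute are assumptions, not taken from
 * this file):
 *
 *	io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
 *			   size, VM_MEMATTR_WRITE_COMBINING);
 *
 * Only prot and pfn are recorded here; consistent with the comment above,
 * the mapping itself is established by the mmap ops later on.
 */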

static inline unsigned long
vma_pages(struct vm_area_struct *vma)
{
	unsigned long size;

	size = vma->vm_end - vma->vm_start;

	return size >> PAGE_SHIFT;
}

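/*
 * DragonFly defines PAGE_MASK as (PAGE_SIZE - 1), so masking with it yields
 * the offset within the page, which matches what Linux's offset_in_page()
 * computes (Linux masks with ~PAGE_MASK under its inverted convention).
 */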
#define offset_in_page(off)	((off) & PAGE_MASK)

static inline void
set_page_dirty(struct page *page)
{
	vm_page_dirty((struct vm_page *)page);
}

/*
 * Allocate multiple contiguous pages.  The DragonFly code can only do
 * multiple allocations via the free page reserve.  Linux does not appear
 * to restrict the address space, so neither do we.
 */
static inline struct vm_page *
alloc_pages(int flags, u_int order)
{
	size_t bytes = PAGE_SIZE << order;
	struct vm_page *pgs;

	pgs = vm_page_alloc_contig(0LLU, ~0LLU, bytes, bytes, bytes,
				   VM_MEMATTR_DEFAULT);
	kprintf("alloc_pages order %u vm_pages=%p\n", order, pgs);
	return pgs;
}

/*
 * Free multiple contiguous pages
 */
static inline void
__free_pages(struct vm_page *pgs, u_int order)
{
	size_t bytes = PAGE_SIZE << order;

	vm_page_free_contig(pgs, bytes);
}
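
/*
 * Illustrative pairing of the two helpers above (a sketch, not part of the
 * original header): allocate a physically contiguous order-2 buffer, i.e.
 * four pages, and release it with the same order.
 *
 *	struct vm_page *pgs;
 *
 *	pgs = alloc_pages(GFP_KERNEL, 2);
 *	if (pgs != NULL) {
 *		... use the four contiguous pages ...
 *		__free_pages(pgs, 2);
 *	}
 */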

static inline void
get_page(struct vm_page *page)
{
	vm_page_hold(page);
}

extern vm_paddr_t Realmem;

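/*
 * Realmem is DragonFly's total physical memory size in bytes, so this
 * returns the number of physical pages present in the system.
 */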
static inline unsigned long get_num_physpages(void)
{
	return Realmem / PAGE_SIZE;
}

int is_vmalloc_addr(const void *x);

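/*
 * No-op stub: unlike Linux's unmap_mapping_range(), this compatibility
 * layer does not tear down existing user mappings of the address space.
 */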
static inline void
unmap_mapping_range(struct address_space *mapping,
	loff_t const holebegin, loff_t const holelen, int even_cows)
{
}

#define VM_SHARED	0x00000008

#define VM_PFNMAP	0x00000400

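/*
 * Translate a kernel virtual address to its struct page.  This relies on
 * pmap_kextract(), so the address must currently be mapped in the kernel
 * pmap; no Linux-style vmalloc page-table walk is performed.
 */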
static inline struct page *
vmalloc_to_page(const void *addr)
{
	vm_paddr_t paddr;

	paddr = pmap_kextract((vm_offset_t)addr);
	return (struct page *)(PHYS_TO_VM_PAGE(paddr));
}

#endif	/* _LINUX_MM_H_ */