xref: /dragonfly/sys/dev/drm/include/linux/mm.h (revision eb67213a)
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 2015-2019 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUX_MM_H_
#define	_LINUX_MM_H_

#include <linux/errno.h>

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/err.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

struct vm_operations_struct;

static inline struct page *
nth_page(struct page *page, int n)
{
	return page + n;
}

#define PAGE_ALIGN(addr) round_page(addr)

struct vm_fault {
	uintptr_t	virtual_address;
};

#define VM_FAULT_NOPAGE		0x0001
#define VM_FAULT_SIGBUS		0x0002
#define VM_FAULT_OOM		0x0004

struct vm_area_struct {
	vm_offset_t	vm_start;
	vm_offset_t	vm_end;
	vm_offset_t	vm_pgoff;
	vm_paddr_t	vm_pfn;		/* PFN for mmap. */
	vm_memattr_t	vm_page_prot;
	void		*vm_private_data;
	int		vm_flags;
	const struct vm_operations_struct *vm_ops;
};

#define VM_DONTDUMP	0x0001
#define VM_DONTEXPAND	0x0002
#define VM_IO		0x0004
#define VM_MIXEDMAP	0x0008

struct vm_operations_struct {
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	void (*open)(struct vm_area_struct *vma);
	void (*close)(struct vm_area_struct *vma);
};

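/*
 * Usage sketch (illustrative only; the mydrv_* names are hypothetical):
 * a consumer of this shim wires up a fault handler much as it would on
 * Linux, returning one of the VM_FAULT_* codes above.
 *
 *	static int
 *	mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct mydrv_obj *obj = vma->vm_private_data;
 *
 *		if (obj == NULL)
 *			return VM_FAULT_SIGBUS;
 *		... resolve vmf->virtual_address to a backing page ...
 *		return VM_FAULT_NOPAGE;
 *	}
 *
 *	static const struct vm_operations_struct mydrv_vm_ops = {
 *		.fault = mydrv_fault,
 *	};
 */
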
/*
 * Compute the allocation order for size bytes: the log2 of the number
 * of pages needed, with the page count rounded up to a power of two.
 */
static inline int
get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> PAGE_SHIFT;
	order = 0;
	while (size) {
		order++;
		size >>= 1;
	}
	return (order);
}

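/*
 * A few worked values (a quick sanity check of the math above):
 *
 *	get_order(1)              == 0	(a single page)
 *	get_order(PAGE_SIZE)      == 0
 *	get_order(PAGE_SIZE + 1)  == 1	(rounds up to two pages)
 *	get_order(8 * PAGE_SIZE)  == 3
 */
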
/*
 * This only works via mmap ops: rather than establishing a mapping, it
 * just records the pfn and protection in the vma for the mmap machinery
 * to consume later.
 */
static inline int
io_remap_pfn_range(struct vm_area_struct *vma,
    unsigned long addr, unsigned long pfn, unsigned long size,
    vm_memattr_t prot)
{
	vma->vm_page_prot = prot;
	vma->vm_pfn = pfn;

	return (0);
}

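/*
 * Usage sketch (illustrative only; 'phys' is a hypothetical physical
 * base address): a driver's mmap path typically hands the whole window
 * to this helper and lets the generic code pick up vma->vm_pfn and
 * vma->vm_page_prot afterwards.
 *
 *	io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
 *	    vma->vm_end - vma->vm_start, VM_MEMATTR_DEFAULT);
 */
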
static inline unsigned long
vma_pages(struct vm_area_struct *vma)
{
	unsigned long size;

	size = vma->vm_end - vma->vm_start;

	return size >> PAGE_SHIFT;
}

/*
 * Note: DragonFly's PAGE_MASK is the low-bits mask (PAGE_SIZE - 1),
 * unlike Linux's, so masking directly yields the intra-page offset.
 */
#define offset_in_page(off)	((off) & PAGE_MASK)

static inline void
set_page_dirty(struct page *page)
{
	vm_page_dirty((struct vm_page *)page);
}

/*
 * Allocate multiple contiguous pages.  The DragonFly code can only do
 * multiple allocations via the free page reserve.  Linux does not appear
 * to restrict the address space, so neither do we.
 */
static inline struct vm_page *
alloc_pages(int flags, u_int order)
{
	size_t bytes = PAGE_SIZE << order;
	struct vm_page *pgs;

	pgs = vm_page_alloc_contig(0LLU, ~0LLU, bytes, bytes, bytes,
				   VM_MEMATTR_DEFAULT);
	kprintf("alloc_pages order %u vm_pages=%p\n", order, pgs);
	return pgs;
}

/*
 * Free multiple contiguous pages.
 */
static inline void
__free_pages(struct vm_page *pgs, u_int order)
{
	size_t bytes = PAGE_SIZE << order;

	vm_page_free_contig(pgs, bytes);
}

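/*
 * Usage sketch (illustrative only): allocate and release an order-2
 * block, i.e. four physically contiguous pages.  Note that the flags
 * argument is ignored by this shim.
 *
 *	struct vm_page *pgs = alloc_pages(GFP_KERNEL, 2);
 *	if (pgs != NULL) {
 *		... use the PAGE_SIZE << 2 bytes starting at pgs ...
 *		__free_pages(pgs, 2);
 *	}
 */
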
/*
 * Linux get_page() takes a reference on the page; approximate it with
 * a DragonFly page hold.
 */
static inline void
get_page(struct vm_page *page)
{
	vm_page_hold(page);
}

extern vm_paddr_t Realmem;

static inline unsigned long
get_num_physpages(void)
{
	return Realmem / PAGE_SIZE;
}

int is_vmalloc_addr(const void *x);

static inline void
unmap_mapping_range(struct address_space *mapping,
	loff_t const holebegin, loff_t const holelen, int even_cows)
{
	/* nothing to do in this shim */
}

#endif	/* _LINUX_MM_H_ */