/*	$OpenBSD: uvm_pmemrange.h,v 1.11 2011/07/08 18:25:56 ariane Exp $	*/

/*
 * Copyright (c) 2009 Ariane van der Steldt <ariane@stack.nl>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * uvm_pmemrange.h: describe and manage free physical memory.
 */

#ifndef _UVM_UVM_PMEMRANGE_H_
#define _UVM_UVM_PMEMRANGE_H_

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

RB_HEAD(uvm_pmr_addr, vm_page);
RB_HEAD(uvm_pmr_size, vm_page);

/*
 * Page types available:
 * - DIRTY: this page may contain random data.
 * - ZERO: this page has been zeroed.
 */
#define UVM_PMR_MEMTYPE_DIRTY	0
#define UVM_PMR_MEMTYPE_ZERO	1
#define UVM_PMR_MEMTYPE_MAX	2

/*
 * An address range of memory.
 */
struct uvm_pmemrange {
	struct	uvm_pmr_addr addr;	/* Free page chunks, sorted by addr. */
	struct	uvm_pmr_size size[UVM_PMR_MEMTYPE_MAX];
					/* Free page chunks, sorted by size. */
	TAILQ_HEAD(, vm_page) single[UVM_PMR_MEMTYPE_MAX];
					/* single page regions (uses pageq) */

	paddr_t	low;			/* Start of address range (pgno). */
	paddr_t	high;			/* End +1 (pgno). */
	int	use;			/* Use counter. */
	psize_t	nsegs;			/* Current range count. */

	TAILQ_ENTRY(uvm_pmemrange) pmr_use;
					/* pmr, sorted by use */
	RB_ENTRY(uvm_pmemrange) pmr_addr;
					/* pmr, sorted by address */
};
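
/*
 * Illustrative only, not part of the kernel interface: a range keeps its
 * free chunks in two shapes. Multi-page chunks sit in the addr tree and,
 * per memtype, in a size tree; lone pages sit on the single[] queues.
 * Assuming fpageq is held and "pmr" points at a range, grabbing any one
 * zeroed free page could look roughly like this:
 *
 *	struct vm_page *pg;
 *
 *	pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_ZERO]);
 *	if (pg != NULL)
 *		uvm_pmr_remove(pmr, pg);
 */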

/*
 * Description of failing memory allocation.
 *
 * Two ways new pages can become available:
 * [1] page daemon drops them (we notice because they are freed)
 * [2] a process calls free
 *
 * The buffer cache and page daemon can decide that they don't have the
 * ability to make pages available in the requested range. In that case,
 * the FAIL bit will be set.
 * XXX There's a possibility that a page is no longer on the queues but
 * XXX has not yet been freed, or that a page was busy.
 * XXX Also, wired pages are not considered for paging, so they could
 * XXX cause a failure that may be recoverable.
 */
struct uvm_pmalloc {
	TAILQ_ENTRY(uvm_pmalloc) pmq;

	/*
	 * Allocation request parameters.
	 */
	struct uvm_constraint_range pm_constraint;
	psize_t	pm_size;

	/*
	 * State flags.
	 */
	int	pm_flags;
};

/*
 * uvm_pmalloc flags.
 */
#define UVM_PMA_LINKED	0x01	/* uvm_pmalloc is on list */
#define UVM_PMA_BUSY	0x02	/* entry is busy with fpageq unlocked */
#define UVM_PMA_FAIL	0x10	/* page daemon cannot free pages */
#define UVM_PMA_FREED	0x20	/* at least one page in the range was freed */
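
/*
 * Hedged sketch of the handshake these flags implement; the authoritative
 * version is uvm_wait_pla() in uvm_pmemrange.c. A waiter links its
 * uvm_pmalloc on the allocs queue and sleeps; the free path or page
 * daemon sets UVM_PMA_FREED (or UVM_PMA_FAIL), clears UVM_PMA_LINKED
 * and wakes it. With fpageq held, and local names illustrative:
 *
 *	struct uvm_pmalloc pma;
 *
 *	pma.pm_constraint.ucr_low = low;
 *	pma.pm_constraint.ucr_high = high;
 *	pma.pm_size = size;
 *	pma.pm_flags = UVM_PMA_LINKED;
 *	TAILQ_INSERT_TAIL(&uvm.pmr_control.allocs, &pma, pmq);
 *	while (pma.pm_flags & UVM_PMA_LINKED)
 *		msleep(&pma, &uvm.fpageqlock, PVM, "pmrwait", 0);
 */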

RB_HEAD(uvm_pmemrange_addr, uvm_pmemrange);
TAILQ_HEAD(uvm_pmemrange_use, uvm_pmemrange);

/*
 * pmr control structure. Contained in uvm.pmr_control.
 */
struct uvm_pmr_control {
	struct	uvm_pmemrange_addr addr;
	struct	uvm_pmemrange_use use;

	/* Only changed while fpageq is locked. */
	TAILQ_HEAD(, uvm_pmalloc) allocs;
};
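
/*
 * Illustrative: the allocator walks ranges cheapest-first via the
 * use-sorted list, e.g. (fpageq held, "pmr" a local):
 *
 *	struct uvm_pmemrange *pmr;
 *
 *	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
 *		(try to satisfy the request from this range)
 *	}
 */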

void	uvm_pmr_freepages(struct vm_page *, psize_t);
void	uvm_pmr_freepageq(struct pglist *);
int	uvm_pmr_getpages(psize_t, paddr_t, paddr_t, paddr_t, paddr_t,
	    int, int, struct pglist *);
void	uvm_pmr_init(void);
int	uvm_wait_pla(paddr_t, paddr_t, paddr_t, int);
void	uvm_wakeup_pla(paddr_t, psize_t);
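
/*
 * Hedged usage sketch for the entry points above, after their definitions
 * in uvm_pmemrange.c: uvm_pmr_getpages takes (count, start, end, align,
 * boundary, maxseg, flags, result), where start/end bound the physical
 * addresses (0 and (paddr_t)-1 mirroring the unconstrained case), align
 * is in pages (1 = none) and the UVM_PLA_* flags come from uvm_extern.h.
 * Error handling elided:
 *
 *	struct pglist pgl;
 *
 *	TAILQ_INIT(&pgl);
 *	if (uvm_pmr_getpages(4, 0, (paddr_t)-1, 1, 0, 1,
 *	    UVM_PLA_WAITOK | UVM_PLA_ZERO, &pgl) == 0) {
 *		(use the 4 contiguous zeroed pages on pgl)
 *		uvm_pmr_freepageq(&pgl);
 *	}
 */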

#if defined(DDB) || defined(DEBUG)
int	uvm_pmr_isfree(struct vm_page *pg);
#endif

/*
 * Internal tree logic.
 */

int	uvm_pmr_addr_cmp(struct vm_page *, struct vm_page *);
int	uvm_pmr_size_cmp(struct vm_page *, struct vm_page *);

RB_PROTOTYPE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
RB_PROTOTYPE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
RB_PROTOTYPE(uvm_pmemrange_addr, uvm_pmemrange, pmr_addr,
    uvm_pmemrange_addr_cmp);
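
/*
 * Conceptual sketch of the ordering the trees rely on (the real
 * comparators live in uvm_pmemrange.c; "example_addr_cmp" is a
 * hypothetical name). The address comparator reduces to comparing the
 * pages' physical addresses:
 *
 *	int
 *	example_addr_cmp(struct vm_page *lhs, struct vm_page *rhs)
 *	{
 *		paddr_t lpa = VM_PAGE_TO_PHYS(lhs);
 *		paddr_t rpa = VM_PAGE_TO_PHYS(rhs);
 *
 *		return (lpa < rpa ? -1 : lpa > rpa);
 *	}
 */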

struct vm_page		*uvm_pmr_insert_addr(struct uvm_pmemrange *,
			    struct vm_page *, int);
void			 uvm_pmr_insert_size(struct uvm_pmemrange *,
			    struct vm_page *);
struct vm_page		*uvm_pmr_insert(struct uvm_pmemrange *,
			    struct vm_page *, int);
void			 uvm_pmr_remove_addr(struct uvm_pmemrange *,
			    struct vm_page *);
void			 uvm_pmr_remove_size(struct uvm_pmemrange *,
			    struct vm_page *);
void			 uvm_pmr_remove(struct uvm_pmemrange *,
			    struct vm_page *);
struct vm_page		*uvm_pmr_extract_range(struct uvm_pmemrange *,
			    struct vm_page *, paddr_t, paddr_t,
			    struct pglist *);
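
/*
 * Sketch of how the helpers above pair up ("pmr" and "pg" assumed to be
 * a range and a free chunk, fpageq held): uvm_pmr_insert enters a chunk
 * into both the address and size indexes, uvm_pmr_remove undoes both, so
 *
 *	pg = uvm_pmr_insert_addr(pmr, pg, no_join);
 *	uvm_pmr_insert_size(pmr, pg);
 *
 * is the long form of
 *
 *	pg = uvm_pmr_insert(pmr, pg, no_join);
 *
 * where, per uvm_pmemrange.c, a nonzero final argument suppresses
 * merging with adjacent free chunks.
 */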

#endif /* _UVM_UVM_PMEMRANGE_H_ */