/*	$OpenBSD: uvm_pmemrange.h,v 1.19 2024/11/07 11:12:46 mpi Exp $	*/

/*
 * Copyright (c) 2009 Ariane van der Steldt <ariane@stack.nl>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * uvm_pmemrange.h: describe and manage free physical memory.
 */

#ifndef _UVM_UVM_PMEMRANGE_H_
#define _UVM_UVM_PMEMRANGE_H_

RBT_HEAD(uvm_pmr_addr, vm_page);
RBT_HEAD(uvm_pmr_size, vm_page);

/*
 * Page types available:
 * - DIRTY: this page may contain random data.
 * - ZERO: this page has been zeroed.
 */
#define UVM_PMR_MEMTYPE_DIRTY	0
#define UVM_PMR_MEMTYPE_ZERO	1
#define UVM_PMR_MEMTYPE_MAX	2

/*
 * An address range of memory.
 */
struct uvm_pmemrange {
	struct	uvm_pmr_addr addr;	/* Free page chunks, sorted by addr. */
	struct	uvm_pmr_size size[UVM_PMR_MEMTYPE_MAX];
					/* Free page chunks, sorted by size. */
	TAILQ_HEAD(, vm_page) single[UVM_PMR_MEMTYPE_MAX];
					/* single page regions (uses pageq) */

	paddr_t	low;			/* Start of address range (pgno). */
	paddr_t	high;			/* End +1 (pgno). */
	int	use;			/* Use counter. */
	psize_t	nsegs;			/* Current range count. */

	TAILQ_ENTRY(uvm_pmemrange) pmr_use;
					/* pmr, sorted by use */
	RBT_ENTRY(uvm_pmemrange) pmr_addr;
					/* pmr, sorted by address */
};

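/*
 * Illustrative traversal (a sketch, not part of this header): walk the
 * single-page freelist for zeroed pages in one range.  The `pageq'
 * linkage is the one named in the member comment above; `pmr' is an
 * assumed pointer to a struct uvm_pmemrange obtained elsewhere.
 *
 *	struct vm_page *pg;
 *
 *	TAILQ_FOREACH(pg, &pmr->single[UVM_PMR_MEMTYPE_ZERO], pageq) {
 *		... inspect pg ...
 *	}
 */
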
/*
 * Description of a failing memory allocation.
 *
 * Every descriptor corresponds to a request for the page daemon to release
 * pages in a given memory range.  There is one global descriptor for nowait
 * allocations; all others sit on the stack of processes waiting for
 * physical pages.
 *
 * There are multiple ways physical pages can become available:
 * [1] unmanaged pages are released by shrinkers (bufbackoff(), drmbackoff()...)
 * [2] the page daemon drops them (we notice because they are freed)
 * [3] a process frees memory or exits
 *
 * The buffer cache and the page daemon may decide that they cannot make
 * pages available in the requested range.  In that case, the FAIL bit
 * will be set.
 * XXX There's a possibility that a page is no longer on the queues but
 * XXX has not yet been freed, or that a page was busy.
 * XXX Also, wired pages are not considered for paging, so they could
 * XXX cause a failure that may be recoverable.
 *
 * Locks used to protect struct members in this file:
 *	F	uvm_lock_fpageq
 *	I	immutable after creation
 */
struct uvm_pmalloc {
	TAILQ_ENTRY(uvm_pmalloc) pmq;			/* [F] next request */
	struct uvm_constraint_range pm_constraint;	/* [I] memory range */
	psize_t	pm_size;				/* [I] # pages */
	int	pm_flags;				/* [F] state flags */
};

/*
 * Indicate to the page daemon that a nowait call failed and it should
 * recover at least some memory in the most restricted region (assumed
 * to be dma_constraint).
 */
extern struct uvm_pmalloc nowait_pma;			/* [F] */


/*
 * uvm_pmalloc flags.
 */
#define UVM_PMA_LINKED	0x01	/* uvm_pmalloc is on list */
#define UVM_PMA_BUSY	0x02	/* entry is busy with fpageq unlocked */
#define UVM_PMA_FAIL	0x10	/* page daemon cannot free pages */
#define UVM_PMA_FREED	0x20	/* at least one page in the range was freed */

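/*
 * Sketch of the waiter pattern described above, simplified from what
 * uvm_wait_pla() does in uvm_pmemrange.c; locking (fpageq must be
 * held) and error handling are elided, and `low', `high' and `size'
 * are hypothetical inputs:
 *
 *	struct uvm_pmalloc pma;
 *
 *	pma.pm_constraint.ucr_low = low;
 *	pma.pm_constraint.ucr_high = high;
 *	pma.pm_size = size;
 *	pma.pm_flags = UVM_PMA_LINKED;
 *	TAILQ_INSERT_TAIL(&uvm.pmr_control.allocs, &pma, pmq);
 *
 *	while (pma.pm_flags & UVM_PMA_LINKED) {
 *		wakeup(&uvm.pagedaemon);
 *		msleep_nsec(&pma, &uvm.fpageqlock, PVM, "pmrwait", INFSLP);
 *	}
 *	if (pma.pm_flags & UVM_PMA_FAIL)
 *		return ENOMEM;
 */
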
RBT_HEAD(uvm_pmemrange_addr, uvm_pmemrange);
TAILQ_HEAD(uvm_pmemrange_use, uvm_pmemrange);

/*
 * pmr control structure. Contained in uvm.pmr_control.
 */
struct uvm_pmr_control {
	struct	uvm_pmemrange_addr addr;
	struct	uvm_pmemrange_use use;

	/* Only changed while fpageq is locked. */
	TAILQ_HEAD(, uvm_pmalloc) allocs;
};

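/*
 * The two indexes above allow lookup by address (the RBT) and
 * iteration in use order (the TAILQ).  A sketch of the latter,
 * assuming the global `uvm' object that contains pmr_control:
 *
 *	struct uvm_pmemrange *pmr;
 *
 *	TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use)
 *		... try to allocate from pmr ...
 */
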
void	uvm_pmr_freepages(struct vm_page *, psize_t);
void	uvm_pmr_freepageq(struct pglist *);
int	uvm_pmr_getpages(psize_t, paddr_t, paddr_t, paddr_t, paddr_t,
	    int, int, struct pglist *);
void	uvm_pmr_init(void);
int	uvm_wait_pla(paddr_t, paddr_t, paddr_t, int);
void	uvm_wakeup_pla(paddr_t, psize_t);

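/*
 * Example call (a hedged sketch; the parameter roles of count, start,
 * end, alignment, boundary, maximum segment count and flags are taken
 * from the definition in uvm_pmemrange.c): allocate `count' pages
 * reachable by DMA, then release them again.
 *
 *	struct pglist pgl;
 *
 *	TAILQ_INIT(&pgl);
 *	if (uvm_pmr_getpages(count, dma_constraint.ucr_low,
 *	    dma_constraint.ucr_high, 0, 0, 1, UVM_PLA_WAITOK, &pgl) == 0)
 *		uvm_pmr_freepageq(&pgl);
 */
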
#if defined(DDB) || defined(DEBUG)
int	uvm_pmr_isfree(struct vm_page *pg);
#endif

/*
 * Internal tree logic.
 */

int	uvm_pmr_addr_cmp(const struct vm_page *, const struct vm_page *);
int	uvm_pmr_size_cmp(const struct vm_page *, const struct vm_page *);

RBT_PROTOTYPE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
RBT_PROTOTYPE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
RBT_PROTOTYPE(uvm_pmemrange_addr, uvm_pmemrange, pmr_addr,
    uvm_pmemrange_addr_cmp);

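/*
 * The comparison functions order pages by physical address and by
 * free-chunk size respectively.  A minimal sketch of the address
 * variant (the real implementation lives in uvm_pmemrange.c):
 *
 *	int
 *	uvm_pmr_addr_cmp(const struct vm_page *a, const struct vm_page *b)
 *	{
 *		paddr_t pa = VM_PAGE_TO_PHYS(a), pb = VM_PAGE_TO_PHYS(b);
 *
 *		return pa < pb ? -1 : pa > pb;
 *	}
 */
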
struct vm_page		*uvm_pmr_insert_addr(struct uvm_pmemrange *,
			    struct vm_page *, int);
void			 uvm_pmr_insert_size(struct uvm_pmemrange *,
			    struct vm_page *);
struct vm_page		*uvm_pmr_insert(struct uvm_pmemrange *,
			    struct vm_page *, int);
void			 uvm_pmr_remove_addr(struct uvm_pmemrange *,
			    struct vm_page *);
void			 uvm_pmr_remove_size(struct uvm_pmemrange *,
			    struct vm_page *);
void			 uvm_pmr_remove(struct uvm_pmemrange *,
			    struct vm_page *);
struct vm_page		*uvm_pmr_extract_range(struct uvm_pmemrange *,
			    struct vm_page *, paddr_t, paddr_t,
			    struct pglist *);
struct vm_page		*uvm_pmr_cache_get(int);
void			 uvm_pmr_cache_put(struct vm_page *);
unsigned int		 uvm_pmr_cache_drain(void);

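/*
 * Usage sketch for the per-CPU page cache functions above; passing a
 * UVM_PLA_* flag value and treating a NULL return as a cache miss are
 * assumptions here, not guarantees made by this header:
 *
 *	struct vm_page *pg;
 *
 *	pg = uvm_pmr_cache_get(UVM_PLA_NOWAIT);
 *	if (pg != NULL)
 *		uvm_pmr_cache_put(pg);
 */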

#endif /* _UVM_UVM_PMEMRANGE_H_ */