/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_KMEM_H
#define	_SPL_KMEM_H

#include <sys/debug.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

extern int kmem_debugging(void);
extern char *kmem_vasprintf(const char *fmt, va_list ap);
extern char *kmem_asprintf(const char *fmt, ...);
extern char *kmem_strdup(const char *str);
extern void kmem_strfree(char *str);
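
/*
 * Illustrative usage sketch (not part of this header's declarations): strings
 * returned by kmem_asprintf(), kmem_vasprintf(), and kmem_strdup() are owned
 * by the caller and must be released with kmem_strfree().  The format string
 * and names below are purely hypothetical.
 *
 *	char *label = kmem_asprintf("%s-%d", "tank", 42);
 *	char *copy = kmem_strdup(label);
 *	...
 *	kmem_strfree(copy);
 *	kmem_strfree(label);
 */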

/*
 * Memory allocation interfaces
 */
#define	KM_SLEEP	0x0000	/* can block for memory; success guaranteed */
#define	KM_NOSLEEP	0x0001	/* cannot block for memory; may fail */
#define	KM_PUSHPAGE	0x0004	/* can block for memory; may use reserve */
#define	KM_ZERO		0x1000	/* zero the allocation */
#define	KM_VMEM		0x2000	/* caller is vmem_* wrapper */

#define	KM_PUBLIC_MASK	(KM_SLEEP | KM_NOSLEEP | KM_PUSHPAGE)
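
/*
 * Illustrative sketch of the flag semantics above (hypothetical size and
 * variable names).  A KM_SLEEP allocation may block but is guaranteed to
 * succeed, so its result needs no NULL check; a KM_NOSLEEP allocation may
 * fail rather than block, so it must be checked:
 *
 *	void *buf = kmem_alloc(4096, KM_SLEEP);
 *	void *nb = kmem_alloc(4096, KM_NOSLEEP);
 *	if (nb == NULL)
 *		return (ENOMEM);
 *	...
 *	kmem_free(nb, 4096);
 *	kmem_free(buf, 4096);
 */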

static int spl_fstrans_check(void);
void *spl_kvmalloc(size_t size, gfp_t flags);

/*
 * Convert a KM_* flags mask to its Linux GFP_* counterpart.  The conversion
 * is context aware: KM_SLEEP allocations can be used safely in syncing
 * contexts that have set PF_FSTRANS, because __GFP_IO and __GFP_FS are
 * cleared for them.
 */
static inline gfp_t
kmem_flags_convert(int flags)
{
	gfp_t lflags = __GFP_NOWARN | __GFP_COMP;

	if (flags & KM_NOSLEEP) {
		lflags |= GFP_ATOMIC | __GFP_NORETRY;
	} else {
		lflags |= GFP_KERNEL;
		if (spl_fstrans_check())
			lflags &= ~(__GFP_IO|__GFP_FS);
	}

	if (flags & KM_PUSHPAGE)
		lflags |= __GFP_HIGH;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return (lflags);
}
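
/*
 * Worked examples of the mapping above (a sketch; the exact expansion depends
 * on the running kernel's definitions of GFP_KERNEL and GFP_ATOMIC):
 *
 *	kmem_flags_convert(KM_SLEEP)
 *	    -> GFP_KERNEL | __GFP_NOWARN | __GFP_COMP
 *	       (minus __GFP_IO and __GFP_FS when the task is marked FSTRANS)
 *	kmem_flags_convert(KM_NOSLEEP)
 *	    -> GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP
 *	kmem_flags_convert(KM_PUSHPAGE | KM_ZERO)
 *	    -> GFP_KERNEL | __GFP_HIGH | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP
 */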

typedef struct {
	struct task_struct *fstrans_thread;
	unsigned int saved_flags;
} fstrans_cookie_t;

/*
 * PF_MEMALLOC_NOIO was introduced in Linux 3.9.  However, it cannot be relied
 * on by itself before Linux 3.18, because until then it did not clear
 * __GFP_FS as it should.
 */
#ifdef PF_MEMALLOC_NOIO
#define	__SPL_PF_MEMALLOC_NOIO (PF_MEMALLOC_NOIO)
#else
#define	__SPL_PF_MEMALLOC_NOIO (0)
#endif

/*
 * PF_FSTRANS was removed in Linux 4.12.
 */
#ifdef PF_FSTRANS
#define	__SPL_PF_FSTRANS (PF_FSTRANS)
#else
#define	__SPL_PF_FSTRANS (0)
#endif

#define	SPL_FSTRANS (__SPL_PF_FSTRANS|__SPL_PF_MEMALLOC_NOIO)

static inline fstrans_cookie_t
spl_fstrans_mark(void)
{
	fstrans_cookie_t cookie;

	BUILD_BUG_ON(SPL_FSTRANS == 0);

	cookie.fstrans_thread = current;
	cookie.saved_flags = current->flags & SPL_FSTRANS;
	current->flags |= SPL_FSTRANS;

	return (cookie);
}

static inline void
spl_fstrans_unmark(fstrans_cookie_t cookie)
{
	ASSERT3P(cookie.fstrans_thread, ==, current);
	ASSERT((current->flags & SPL_FSTRANS) == SPL_FSTRANS);

	current->flags &= ~SPL_FSTRANS;
	current->flags |= cookie.saved_flags;
}

static inline int
spl_fstrans_check(void)
{
	return (current->flags & SPL_FSTRANS);
}
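
/*
 * Usage sketch (hypothetical function; not part of this header): a thread
 * entering a syncing context marks itself so that any KM_SLEEP allocations
 * made while the cookie is held are converted without __GFP_IO and __GFP_FS,
 * preventing memory reclaim from re-entering the filesystem.
 *
 *	static void
 *	example_sync_task(void)
 *	{
 *		fstrans_cookie_t cookie = spl_fstrans_mark();
 *		void *buf = kmem_alloc(1024, KM_SLEEP);
 *		...
 *		kmem_free(buf, 1024);
 *		spl_fstrans_unmark(cookie);
 *	}
 */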

/*
 * Used specifically to check the PF_FSTRANS flag; it cannot be relied on
 * for detecting spl_fstrans_mark().
 */
static inline int
__spl_pf_fstrans_check(void)
{
	return (current->flags & __SPL_PF_FSTRANS);
}

/*
 * Kernel compatibility for GFP flags
 */
/* < 4.13 */
#ifndef __GFP_RETRY_MAYFAIL
#define	__GFP_RETRY_MAYFAIL	__GFP_REPEAT
#endif
/* < 4.4 */
#ifndef __GFP_RECLAIM
#define	__GFP_RECLAIM		__GFP_WAIT
#endif

#ifdef HAVE_ATOMIC64_T
#define	kmem_alloc_used_add(size)	atomic64_add(size, &kmem_alloc_used)
#define	kmem_alloc_used_sub(size)	atomic64_sub(size, &kmem_alloc_used)
#define	kmem_alloc_used_read()		atomic64_read(&kmem_alloc_used)
#define	kmem_alloc_used_set(size)	atomic64_set(&kmem_alloc_used, size)
extern atomic64_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
#else  /* HAVE_ATOMIC64_T */
#define	kmem_alloc_used_add(size)	atomic_add(size, &kmem_alloc_used)
#define	kmem_alloc_used_sub(size)	atomic_sub(size, &kmem_alloc_used)
#define	kmem_alloc_used_read()		atomic_read(&kmem_alloc_used)
#define	kmem_alloc_used_set(size)	atomic_set(&kmem_alloc_used, size)
extern atomic_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
#endif /* HAVE_ATOMIC64_T */
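
/*
 * Sketch of how the accounting wrappers above are intended to be used in a
 * debug allocation path (illustrative only; the actual bookkeeping lives in
 * the kmem implementation, not in this header):
 *
 *	kmem_alloc_used_add(size);
 *	if (kmem_alloc_used_read() > kmem_alloc_max)
 *		kmem_alloc_max = kmem_alloc_used_read();
 *	...
 *	kmem_alloc_used_sub(size);
 */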

extern unsigned int spl_kmem_alloc_warn;
extern unsigned int spl_kmem_alloc_max;

#define	kmem_alloc(sz, fl)	spl_kmem_alloc((sz), (fl), __func__, __LINE__)
#define	kmem_zalloc(sz, fl)	spl_kmem_zalloc((sz), (fl), __func__, __LINE__)
#define	kmem_free(ptr, sz)	spl_kmem_free((ptr), (sz))
#define	kmem_cache_reap_active	spl_kmem_cache_reap_active

extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line);
extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line);
extern void spl_kmem_free(const void *ptr, size_t sz);
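
/*
 * Illustrative allocation round trip (hypothetical type and names): callers
 * pass the same size to kmem_free() that they passed to kmem_alloc() or
 * kmem_zalloc(), since the allocation size is not tracked by the pointer.
 *
 *	typedef struct example { int x; int y; } example_t;
 *
 *	example_t *ep = kmem_zalloc(sizeof (example_t), KM_SLEEP);
 *	ep->x = 1;
 *	...
 *	kmem_free(ep, sizeof (example_t));
 */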

/*
 * Linux 5.8 API change: the pgprot_t argument was removed from __vmalloc().
 */
#ifdef HAVE_VMALLOC_PAGE_KERNEL
#define	spl_vmalloc(size, flags)	__vmalloc(size, flags, PAGE_KERNEL)
#else
#define	spl_vmalloc(size, flags)	__vmalloc(size, flags)
#endif
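
/*
 * Illustrative call (hypothetical size): spl_vmalloc() takes Linux GFP_*
 * flags, so a KM_* mask is converted first.  The same expression compiles on
 * kernels with and without the pgprot_t argument to __vmalloc(), and the
 * buffer is released with vfree().
 *
 *	void *vbuf = spl_vmalloc(1UL << 20, kmem_flags_convert(KM_SLEEP));
 *	...
 *	vfree(vbuf);
 */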

/*
 * The following functions are only available for internal use.
 */
extern void *spl_kmem_alloc_impl(size_t size, int flags, int node);
extern void *spl_kmem_alloc_debug(size_t size, int flags, int node);
extern void *spl_kmem_alloc_track(size_t size, int flags,
    const char *func, int line, int node);
extern void spl_kmem_free_impl(const void *buf, size_t size);
extern void spl_kmem_free_debug(const void *buf, size_t size);
extern void spl_kmem_free_track(const void *buf, size_t size);

extern int spl_kmem_init(void);
extern void spl_kmem_fini(void);
extern int spl_kmem_cache_reap_active(void);

#endif	/* _SPL_KMEM_H */