/* Public domain. */

#ifndef _LINUX_GFP_H
#define _LINUX_GFP_H

#include <sys/types.h>
#include <sys/malloc.h>
#include <uvm/uvm_extern.h>

#include <linux/mmzone.h>

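/*
 * Linux GFP allocation flags mapped onto OpenBSD malloc(9) M_* flags.
 * Flags with no local equivalent are defined to 0 so callers can still
 * pass them; only the wait/nowait, can-fail and zeroing semantics carry
 * over.
 */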
#define __GFP_ZERO M_ZERO
#define __GFP_DMA32 0x00010000
#define __GFP_NOWARN 0
#define __GFP_NORETRY 0
#define __GFP_RETRY_MAYFAIL 0
#define __GFP_MOVABLE 0
#define __GFP_COMP 0
#define __GFP_KSWAPD_RECLAIM M_NOWAIT
#define __GFP_HIGHMEM 0
#define __GFP_RECLAIMABLE 0
#define __GFP_NOMEMALLOC 0

#define GFP_ATOMIC M_NOWAIT
#define GFP_NOWAIT M_NOWAIT
#define GFP_KERNEL (M_WAITOK | M_CANFAIL)
#define GFP_USER (M_WAITOK | M_CANFAIL)
#define GFP_HIGHUSER 0
#define GFP_DMA32 __GFP_DMA32
#define GFP_TRANSHUGE_LIGHT 0

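/*
 * gfpflags_allow_blocking() reports whether an allocation with the given
 * flags may sleep, i.e. whether M_WAITOK was requested.
 */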
static inline bool
gfpflags_allow_blocking(const unsigned int flags)
{
	return (flags & M_WAITOK) != 0;
}

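/*
 * Page allocation interface.  These operate on uvm struct vm_page rather
 * than the Linux struct page; they are declared here and implemented
 * elsewhere in the compat layer.
 */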
struct vm_page *alloc_pages(unsigned int, unsigned int);
void __free_pages(struct vm_page *, unsigned int);

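/* alloc_page() and __free_page() below are the single-page (order 0) cases. */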
static inline struct vm_page *
alloc_page(unsigned int gfp_mask)
{
	return alloc_pages(gfp_mask, 0);
}

static inline void
__free_page(struct vm_page *page)
{
	__free_pages(page, 0);
}

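/*
 * __get_free_page() returns the kernel virtual address of a freshly
 * allocated page.  The gfp_mask is ignored: the km_alloc(9) call below
 * never sleeps (kd_nowait) and does not zero the page (kp_dirty).
 */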
static inline unsigned long
__get_free_page(unsigned int gfp_mask)
{
	void *addr = km_alloc(PAGE_SIZE, &kv_page, &kp_dirty, &kd_nowait);
	return (unsigned long)addr;
}

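/* Release a page obtained from __get_free_page(). */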
static inline void
free_page(unsigned long addr)
{
	km_free((void *)addr, PAGE_SIZE, &kv_page, &kp_dirty);
}

#endif