xref: /linux/include/linux/zsmalloc.h (revision 7c2af309)
/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifndef _ZS_MALLOC_H_
#define _ZS_MALLOC_H_

#include <linux/types.h>

/*
 * zsmalloc mapping modes
 *
 * NOTE: These only make a difference when a mapped object spans pages.
 */
enum zs_mapmode {
	ZS_MM_RW, /* normal read-write mapping */
	ZS_MM_RO, /* read-only (no copy-out at unmap time) */
	ZS_MM_WO /* write-only (no copy-in at map time) */
	/*
	 * NOTE: ZS_MM_WO should only be used for initializing new
	 * (uninitialized) allocations.  Partial writes to already
	 * initialized allocations should use ZS_MM_RW to preserve the
	 * existing data.
	 */
};
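
/*
 * Illustrative sketch (not part of the original header): a brand-new object
 * that is about to be completely overwritten can be mapped ZS_MM_WO, a
 * partial update of already-initialized data needs ZS_MM_RW, and a pure
 * read can use ZS_MM_RO.  The names pool, handle, src and len below are
 * placeholders.
 *
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 */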

struct zs_pool_stats {
	/* How many pages were migrated (freed) */
	atomic_long_t pages_compacted;
};

struct zs_pool;

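/*
 * Pool lifetime.  zs_create_pool() returns a new pool on success or NULL on
 * failure; @name is used to identify the pool in statistics output.
 * zs_destroy_pool() tears the pool down and releases its metadata; all
 * objects are expected to have been freed already.
 */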
struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);

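/*
 * zs_malloc() allocates @size bytes from the pool and returns an opaque
 * handle; the handle is not a pointer and must be mapped with
 * zs_map_object() before the memory can be accessed.  zs_free() releases
 * the object identified by a handle previously returned by zs_malloc().
 */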
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);

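/*
 * zs_huge_class_size() reports the size (in bytes) of the first "huge"
 * size class; objects of this size or larger are each stored in a zspage
 * built from a single physical page.
 */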
size_t zs_huge_class_size(struct zs_pool *pool);

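/*
 * zs_map_object() turns a handle into a kernel virtual address that is
 * valid only until the matching zs_unmap_object() call.  Mappings are
 * meant to be short-lived: only one object may be mapped per CPU at a
 * time, and the caller must not sleep between map and unmap.
 */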
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);

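/*
 * zs_get_total_pages() returns the number of pages currently used by the
 * pool.  zs_compact() triggers compaction, migrating objects so that
 * sparsely used zspages can be released back to the system.
 */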
unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);

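/*
 * zs_lookup_class_index() returns the index of the size class that an
 * allocation of @size bytes would be placed in.
 */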
unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);

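/*
 * zs_pool_stats() copies the pool's statistics (currently only
 * pages_compacted) into @stats.
 */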
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
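
/*
 * Minimal usage sketch (not part of the original header; error handling is
 * trimmed and the identifiers pool, handle, buf, addr and len are
 * placeholders): write an object once with ZS_MM_WO, read it back with
 * ZS_MM_RO, then free it and destroy the pool.
 *
 *	struct zs_pool *pool = zs_create_pool("example");
 *	unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);
 *	void *addr;
 *
 *	addr = zs_map_object(pool, handle, ZS_MM_WO);
 *	memcpy(addr, buf, len);
 *	zs_unmap_object(pool, handle);
 *
 *	...
 *
 *	addr = zs_map_object(pool, handle, ZS_MM_RO);
 *	memcpy(buf, addr, len);
 *	zs_unmap_object(pool, handle);
 *
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 */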
#endif