/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifndef _ZS_MALLOC_H_
#define _ZS_MALLOC_H_

#include <linux/types.h>

/*
 * zsmalloc mapping modes
 *
 * NOTE: These only make a difference when a mapped object spans pages.
 * They also have no effect when PGTABLE_MAPPING is selected.
 */
enum zs_mapmode {
	ZS_MM_RW, /* normal read-write mapping */
	ZS_MM_RO, /* read-only (no copy-out at unmap time) */
	ZS_MM_WO /* write-only (no copy-in at map time) */
	/*
	 * NOTE: ZS_MM_WO should only be used for initializing new
	 * (uninitialized) allocations.  Partial writes to already
	 * initialized allocations should use ZS_MM_RW to preserve the
	 * existing data.
	 */
};
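
/*
 * Mapping-mode selection sketch (illustrative only; "pool", "handle",
 * "src" and "len" are assumed caller state, not part of this header).
 *
 * A new, uninitialized object that is about to be completely overwritten
 * can be mapped with ZS_MM_WO, which skips the copy-in at map time:
 *
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *
 * A partial update of an already initialized object must use ZS_MM_RW so
 * that the untouched bytes are copied in and preserved:
 *
 *	dst = zs_map_object(pool, handle, ZS_MM_RW);
 *	((unsigned char *)dst)[0] = 0xff;
 *	zs_unmap_object(pool, handle);
 */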

struct zs_pool_stats {
	/* How many pages were freed by compaction (object migration) */
	unsigned long pages_compacted;
};

struct zs_pool;

struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);
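
/*
 * Pool lifecycle sketch (illustrative only; the pool name and the -ENOMEM
 * error path are choices of the example, and a NULL return from
 * zs_create_pool() is assumed to signal failure):
 *
 *	struct zs_pool *pool = zs_create_pool("example");
 *
 *	if (!pool)
 *		return -ENOMEM;
 *	... allocate, map and free objects ...
 *	zs_destroy_pool(pool);
 */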

unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);
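
/*
 * zs_huge_class_size() reports the smallest object size (in bytes) that
 * zsmalloc treats as "huge", i.e. stored alone in its own page.  A sketch
 * of one possible use by a compressed-storage caller ("compressed_len" is
 * an assumption of the example):
 *
 *	if (compressed_len >= zs_huge_class_size(pool))
 *		compressed_len = PAGE_SIZE;
 *
 * i.e. data that would fall into a huge class is simply stored
 * uncompressed.
 */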
size_t zs_huge_class_size(struct zs_pool *pool);

void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
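
/*
 * End-to-end usage sketch (illustrative only; "src", "len" and GFP_KERNEL
 * are choices of the example, and a handle of 0 is assumed to signal
 * allocation failure, as in this version of the allocator).  The handle is
 * an opaque value, not a pointer: the object is only addressable between
 * zs_map_object() and zs_unmap_object(), and the mapping is short-lived
 * and per-CPU, so the caller should not sleep while the object is mapped.
 *
 *	unsigned long handle;
 *	void *dst;
 *
 *	handle = zs_malloc(pool, len, GFP_KERNEL);
 *	if (!handle)
 *		return -ENOMEM;
 *
 *	dst = zs_map_object(pool, handle, ZS_MM_WO);
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 */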

unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);

void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
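
/*
 * Compaction and statistics sketch (illustrative only; the pr_info() call
 * is a choice of the example).  zs_compact() migrates objects so that
 * sparsely used zspages can be freed, zs_get_total_pages() reports how
 * many pages currently back the pool, and zs_pool_stats() exposes the
 * pages_compacted counter:
 *
 *	struct zs_pool_stats stats;
 *
 *	zs_compact(pool);
 *	zs_pool_stats(pool, &stats);
 *	pr_info("pool: %lu pages in use, %lu pages compacted\n",
 *		zs_get_total_pages(pool), stats.pages_compacted);
 */
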
#endif /* _ZS_MALLOC_H_ */