// SPDX-License-Identifier: GPL-2.0-only
/*
 * zpool memory storage api
 *
 * Copyright (C) 2014 Dan Streetman
 *
 * This is a common frontend for memory storage pool implementations.
 * Typically, this is used to store compressed memory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/zpool.h>

struct zpool {
	struct zpool_driver *driver;
	void *pool;
	const struct zpool_ops *ops;
	bool evictable;

	struct list_head list;
};

static LIST_HEAD(drivers_head);
static DEFINE_SPINLOCK(drivers_lock);

static LIST_HEAD(pools_head);
static DEFINE_SPINLOCK(pools_lock);

/**
 * zpool_register_driver() - register a zpool implementation.
 * @driver:	driver to register
 */
void zpool_register_driver(struct zpool_driver *driver)
{
	spin_lock(&drivers_lock);
	atomic_set(&driver->refcount, 0);
	list_add(&driver->list, &drivers_head);
	spin_unlock(&drivers_lock);
}
EXPORT_SYMBOL(zpool_register_driver);

/**
 * zpool_unregister_driver() - unregister a zpool implementation.
 * @driver:	driver to unregister.
 *
 * Module usage counting is used to prevent a driver from being used
 * while or after it is unloaded, so if this is called from the
 * module's exit function it should never fail; if it is called from
 * anywhere else and returns failure, the driver is still in use and
 * must remain available.
 */
int zpool_unregister_driver(struct zpool_driver *driver)
{
	int ret = 0, refcount;

	spin_lock(&drivers_lock);
	refcount = atomic_read(&driver->refcount);
	WARN_ON(refcount < 0);
	if (refcount > 0)
		ret = -EBUSY;
	else
		list_del(&driver->list);
	spin_unlock(&drivers_lock);

	return ret;
}
EXPORT_SYMBOL(zpool_unregister_driver);
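
/*
 * Illustrative sketch only (not used by this file): a backend driver
 * module would normally pair the two calls above in its init/exit hooks.
 * The "example" type and the callback names below are assumptions for
 * the sketch; real backends such as zbud or zsmalloc follow the same
 * pattern.  The MODULE_ALIAS is what lets the request_module("zpool-%s")
 * calls below autoload the backend.
 *
 *	static struct zpool_driver example_driver = {
 *		.type		= "example",
 *		.owner		= THIS_MODULE,
 *		.create		= example_create,
 *		.destroy	= example_destroy,
 *		.malloc		= example_malloc,
 *		.free		= example_free,
 *		.map		= example_map,
 *		.unmap		= example_unmap,
 *		.total_size	= example_total_size,
 *	};
 *	MODULE_ALIAS("zpool-example");
 *
 *	static int __init example_init(void)
 *	{
 *		zpool_register_driver(&example_driver);
 *		return 0;
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		zpool_unregister_driver(&example_driver);
 *	}
 *	module_exit(example_exit);
 */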

/* this assumes @type is null-terminated. */
static struct zpool_driver *zpool_get_driver(const char *type)
{
	struct zpool_driver *driver;

	spin_lock(&drivers_lock);
	list_for_each_entry(driver, &drivers_head, list) {
		if (!strcmp(driver->type, type)) {
			bool got = try_module_get(driver->owner);

			if (got)
				atomic_inc(&driver->refcount);
			spin_unlock(&drivers_lock);
			return got ? driver : NULL;
		}
	}

	spin_unlock(&drivers_lock);
	return NULL;
}

static void zpool_put_driver(struct zpool_driver *driver)
{
	atomic_dec(&driver->refcount);
	module_put(driver->owner);
}

/**
 * zpool_has_pool() - Check if the pool driver is available
 * @type:	The type of the zpool to check (e.g. zbud, zsmalloc)
 *
 * This checks if the @type pool driver is available.  It will try to load
 * the requested module, if needed, but there is no guarantee the module
 * will still be loaded and available immediately after calling.  If this
 * returns true, the caller should assume the pool is available, but must
 * be prepared to handle @zpool_create_pool() returning failure.  If this
 * returns false, the caller should assume the requested pool type is not
 * available: either the module for the pool type does not exist or it
 * could not be loaded, and calling @zpool_create_pool() with that type
 * will fail.
 *
 * The @type string must be null-terminated.
 *
 * Returns: true if @type pool is available, false if not
 */
bool zpool_has_pool(char *type)
{
	struct zpool_driver *driver = zpool_get_driver(type);

	if (!driver) {
		request_module("zpool-%s", type);
		driver = zpool_get_driver(type);
	}

	if (!driver)
		return false;

	zpool_put_driver(driver);
	return true;
}
EXPORT_SYMBOL(zpool_has_pool);
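
/*
 * Illustrative sketch (not used here): a consumer such as zswap can probe
 * for its preferred backend and fall back to another.  The type names are
 * assumptions for the example.
 *
 *	char *type = "z3fold";
 *
 *	if (!zpool_has_pool(type)) {
 *		pr_warn("%s unavailable, falling back to zbud\n", type);
 *		type = "zbud";
 *	}
 */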

/**
 * zpool_create_pool() - Create a new zpool
 * @type:	The type of the zpool to create (e.g. zbud, zsmalloc)
 * @name:	The name of the zpool (e.g. zram0, zswap)
 * @gfp:	The GFP flags to use when allocating the pool.
 * @ops:	The optional ops callback.
 *
 * This creates a new zpool of the specified type.  The gfp flags will be
 * used when allocating memory, if the implementation supports it.  If the
 * ops param is NULL, then the created zpool will not be evictable.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * The @type and @name strings must be null-terminated.
 *
 * Returns: New zpool on success, NULL on failure.
 */
struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
		const struct zpool_ops *ops)
{
	struct zpool_driver *driver;
	struct zpool *zpool;

	pr_debug("creating pool type %s\n", type);

	driver = zpool_get_driver(type);

	if (!driver) {
		request_module("zpool-%s", type);
		driver = zpool_get_driver(type);
	}

	if (!driver) {
		pr_err("no driver for type %s\n", type);
		return NULL;
	}

	zpool = kmalloc(sizeof(*zpool), gfp);
	if (!zpool) {
		pr_err("couldn't create zpool - out of memory\n");
		zpool_put_driver(driver);
		return NULL;
	}

	zpool->driver = driver;
	zpool->pool = driver->create(name, gfp, ops, zpool);
	zpool->ops = ops;
	zpool->evictable = driver->shrink && ops && ops->evict;

	if (!zpool->pool) {
		pr_err("couldn't create %s pool\n", type);
		zpool_put_driver(driver);
		kfree(zpool);
		return NULL;
	}

	pr_debug("created pool type %s\n", type);

	spin_lock(&pools_lock);
	list_add(&zpool->list, &pools_head);
	spin_unlock(&pools_lock);

	return zpool;
}
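
/*
 * Illustrative sketch of the create/destroy life cycle (not part of this
 * file): the "zbud" type, the "example" name, example_ops and the
 * hypothetical writeback_entry() helper are assumptions here.  Passing
 * NULL ops instead creates a pool that zpool_shrink() cannot evict from.
 *
 *	static int example_evict(struct zpool *pool, unsigned long handle)
 *	{
 *		return writeback_entry(pool, handle);
 *	}
 *
 *	static const struct zpool_ops example_ops = {
 *		.evict = example_evict,
 *	};
 *
 *	struct zpool *pool;
 *
 *	pool = zpool_create_pool("zbud", "example", GFP_KERNEL, &example_ops);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	(allocate, map and free objects here)
 *
 *	zpool_destroy_pool(pool);
 */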

/**
 * zpool_destroy_pool() - Destroy a zpool
 * @zpool:	The zpool to destroy.
 *
 * Implementations must guarantee this to be thread-safe,
 * however only when destroying different pools.  The same
 * pool should only be destroyed once, and should not be used
 * after it is destroyed.
 *
 * This destroys an existing zpool.  The zpool should not be in use.
 */
void zpool_destroy_pool(struct zpool *zpool)
{
	pr_debug("destroying pool type %s\n", zpool->driver->type);

	spin_lock(&pools_lock);
	list_del(&zpool->list);
	spin_unlock(&pools_lock);
	zpool->driver->destroy(zpool->pool);
	zpool_put_driver(zpool->driver);
	kfree(zpool);
}

/**
 * zpool_get_type() - Get the type of the zpool
 * @zpool:	The zpool to check
 *
 * This returns the type of the pool.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: The type of zpool.
 */
const char *zpool_get_type(struct zpool *zpool)
{
	return zpool->driver->type;
}

/**
 * zpool_malloc_support_movable() - Check if the zpool supports
 *	allocating movable memory
 * @zpool:	The zpool to check
 *
 * This returns whether the zpool supports allocating movable memory.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: true if the zpool supports allocating movable memory, false if not
 */
bool zpool_malloc_support_movable(struct zpool *zpool)
{
	return zpool->driver->malloc_support_movable;
}

/**
 * zpool_malloc() - Allocate memory
 * @zpool:	The zpool to allocate from.
 * @size:	The amount of memory to allocate.
 * @gfp:	The GFP flags to use when allocating memory.
 * @handle:	Pointer to the handle to set
 *
 * This allocates the requested amount of memory from the pool.
 * The gfp flags will be used when allocating memory, if the
 * implementation supports it.  The provided @handle will be
 * set to the allocated object handle.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: 0 on success, negative value on error.
 */
int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return zpool->driver->malloc(zpool->pool, size, gfp, handle);
}
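
/*
 * Illustrative sketch (not used here): allocate an object and keep the
 * handle for a later map or free.  The compressed_len size and the gfp
 * flags are assumptions for the example.
 *
 *	unsigned long handle;
 *	int ret;
 *
 *	ret = zpool_malloc(pool, compressed_len,
 *			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
 *			   &handle);
 *	if (ret)
 *		return ret;
 */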

/**
 * zpool_free() - Free previously allocated memory
 * @zpool:	The zpool that allocated the memory.
 * @handle:	The handle to the memory to free.
 *
 * This frees previously allocated memory.  This does not guarantee
 * that the pool will actually free memory, only that the memory
 * in the pool will become available for use by the pool.
 *
 * Implementations must guarantee this to be thread-safe,
 * however only when freeing different handles.  The same
 * handle should only be freed once, and should not be used
 * after freeing.
 */
void zpool_free(struct zpool *zpool, unsigned long handle)
{
	zpool->driver->free(zpool->pool, handle);
}

/**
 * zpool_shrink() - Shrink the pool size
 * @zpool:	The zpool to shrink.
 * @pages:	The number of pages to shrink the pool.
 * @reclaimed:	The number of pages successfully evicted.
 *
 * This attempts to shrink the actual memory size of the pool
 * by evicting currently used handle(s).  If the pool was
 * created with no zpool_ops, or the evict call fails for any
 * of the handles, this will fail.  If non-NULL, the @reclaimed
 * parameter will be set to the number of pages reclaimed,
 * which may be more than the number of pages requested.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: 0 on success, negative value on error/failure.
 */
int zpool_shrink(struct zpool *zpool, unsigned int pages,
			unsigned int *reclaimed)
{
	return zpool->driver->shrink ?
	       zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
}
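
/*
 * Illustrative sketch (not used here): ask the pool to give one page back
 * under memory pressure.  This only succeeds for pools created with an
 * evict callback and backed by a driver that implements shrink; see
 * zpool_evictable() below.
 *
 *	unsigned int reclaimed = 0;
 *
 *	if (zpool_evictable(pool) && !zpool_shrink(pool, 1, &reclaimed))
 *		pr_debug("reclaimed %u pages\n", reclaimed);
 */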

/**
 * zpool_map_handle() - Map a previously allocated handle into memory
 * @zpool:	The zpool that the handle was allocated from
 * @handle:	The handle to map
 * @mapmode:	How the memory should be mapped
 *
 * This maps a previously allocated handle into memory.  The @mapmode
 * param indicates to the implementation how the memory will be
 * used, i.e. read-only, write-only, read-write.  If the
 * implementation does not support it, the memory will be treated
 * as read-write.
 *
 * This may hold locks, disable interrupts, and/or preemption,
 * and zpool_unmap_handle() must be called to undo those
 * actions.  The code that uses the mapped handle should complete
 * its operations on the mapped handle memory quickly and unmap
 * as soon as possible.  As the implementation may use per-cpu
 * data, multiple handles should not be mapped concurrently on
 * any cpu.
 *
 * Returns: A pointer to the handle's mapped memory area.
 */
void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
			enum zpool_mapmode mapmode)
{
	return zpool->driver->map(zpool->pool, handle, mapmode);
}

/**
 * zpool_unmap_handle() - Unmap a previously mapped handle
 * @zpool:	The zpool that the handle was allocated from
 * @handle:	The handle to unmap
 *
 * This unmaps a previously mapped handle.  Any locks or other
 * actions that the implementation took in zpool_map_handle()
 * will be undone here.  The memory area returned from
 * zpool_map_handle() should no longer be used after this.
 */
void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
{
	zpool->driver->unmap(zpool->pool, handle);
}
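
/*
 * Illustrative sketch of the map/copy/unmap pattern (not used here); the
 * handle is assumed to come from a successful zpool_malloc(), and src/len
 * are assumptions for the example.  Keep the mapped window short-lived and
 * do not sleep while it is held, since the backend may have disabled
 * preemption or taken per-cpu locks.
 *
 *	void *dst;
 *
 *	dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
 *	memcpy(dst, src, len);
 *	zpool_unmap_handle(pool, handle);
 */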

/**
 * zpool_get_total_size() - The total size of the pool
 * @zpool:	The zpool to check
 *
 * This returns the total size in bytes of the pool.
 *
 * Returns: Total size of the zpool in bytes.
 */
u64 zpool_get_total_size(struct zpool *zpool)
{
	return zpool->driver->total_size(zpool->pool);
}

/**
 * zpool_evictable() - Test if zpool is potentially evictable
 * @zpool:	The zpool to test
 *
 * Zpool is only potentially evictable when it's created with struct
 * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
 *
 * However, that does not necessarily mean the driver will use
 * zpool_ops.evict in its implementation of zpool_driver.shrink; it
 * could do internal defragmentation instead.
 *
 * Returns: true if potentially evictable; false otherwise.
 */
bool zpool_evictable(struct zpool *zpool)
{
	return zpool->evictable;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");