// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* Enable/disable handling same-value filled pages (enabled by default) */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);

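/*
 * All of the tunables above are exposed as module parameters; with the
 * 0644 permissions used here they can be changed at runtime under
 * /sys/module/zswap/parameters/, or set on the kernel command line, e.g.
 * (illustrative values only; availability depends on the kernel config):
 *
 *   zswap.enabled=1 zswap.compressor=lzo zswap.zpool=zbud
 */
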
/*********************************
* data structures
**********************************/

struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct work_struct work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry.  This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression.  For a same-value filled page, length is 0.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - the value of a same-value filled page
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
};

struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/* fatal error during init */
static bool zswap_init_failed;

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

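/*
 * Returns true when the total compressed pool size exceeds
 * zswap_max_pool_percent of system RAM.
 */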
static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

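/*
 * Recompute the cached total of all pools' sizes; called after every
 * store and free so that zswap_is_full() and the pool_total_size
 * debugfs attribute stay current.
 */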
static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
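/* standard rbtree lookup, keyed by the swap offset */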
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST.
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zpool_free(entry->pool->zpool, entry->handle);
		zswap_pool_put(entry->pool);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/*
 * Caller must hold the tree lock.
 * Removes the entry from the tree and frees it if nobody else holds a
 * reference.
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

/*********************************
* per-cpu code
**********************************/
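/*
 * Per-CPU scratch buffer for compression output.  Two pages are
 * allocated, presumably to leave headroom for algorithms whose worst
 * case output can exceed PAGE_SIZE.
 */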
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

static int zswap_dstmem_prepare(unsigned int cpu)
{
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst)
		return -ENOMEM;

	per_cpu(zswap_dstmem, cpu) = dst;
	return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
	u8 *dst;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
		return 0;

	tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
	if (IS_ERR_OR_NULL(tfm)) {
		pr_err("could not alloc crypto comp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(tfm));
		return -ENOMEM;
	}
	*per_cpu_ptr(pool->tfm, cpu) = tfm;
	return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(pool->tfm, cpu);
	if (!IS_ERR_OR_NULL(tfm))
		crypto_free_comp(tfm);
	*per_cpu_ptr(pool->tfm, cpu) = NULL;
	return 0;
}

/*********************************
* pool functions
**********************************/

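/*
 * Returns the current (first) pool on zswap_pools without taking a
 * reference; the caller must be in an RCU read-side section or hold
 * zswap_pools_lock.
 */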
static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

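/*
 * Get a reference on the last pool on the list.  New pools are added at
 * the head, so this is the oldest pool; zswap_shrink() reclaims from it
 * first.
 */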
static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

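/*
 * Allocate a new pool backed by the given zpool type and compressor.
 * Returns NULL on any failure; the caller is expected to make the new
 * pool current (creation itself accounts for that initial reference).
 */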
static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either is unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

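/*
 * Create the initial pool at init time, falling back to the default
 * compressor and zpool if the requested ones are not available.
 */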
static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_comp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_comp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

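/*
 * Deferred from __zswap_pool_empty() so that synchronize_rcu() is not
 * called under zswap_pools_lock; once the grace period has elapsed no
 * RCU reader can still see the pool and it can be destroyed.
 */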
static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool), work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->work, __zswap_pool_release);
	schedule_work(&pool->work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret;

	if (zswap_init_failed) {
		pr_err("can't set param, initialization failed\n");
		return -ENODEV;
	}

	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return 0;

	/* if this is load-time (pre-init) param setting,
	 * don't create a pool; that's done during init.
	 */
	if (!zswap_init_started)
		return param_set_charp(s, kp);

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_comp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_comp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	if (zswap_init_failed) {
		pr_err("can't enable, initialization failed\n");
		return -ENODEV;
	}
	if (!zswap_has_pool && zswap_init_started) {
		pr_err("can't enable, no pool configured\n");
		return -ENODEV;
	}

	return param_set_bool(val, kp);
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async().
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * Returns ZSWAP_SWAPCACHE_EXIST if the page was already in the swap cache.
 * Returns ZSWAP_SWAPCACHE_NEW if a new page was allocated and needs to be
 * populated; the new page is added to the swap cache and locked.
 * Returns ZSWAP_SWAPCACHE_FAIL on error.
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		zpool_unmap_handle(pool, handle);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zhdr + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	put_page(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for entry here:
	 * (1) refcount is 1 (normal case): the entry is valid and on the tree
	 * (2) refcount is 0: the entry was freed and removed from the tree
	 *     because an invalidate happened during writeback
	 * Search the tree and, if the entry is still present, drop the
	 * tree's reference so it is freed.
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently, so it is safe and okay not to free the
	 * entry here.  If the put below does end up freeing the entry, it
	 * is still okay to return !0.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	zpool_unmap_handle(pool, handle);
	return ret;
}

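/*
 * Ask the oldest pool to evict one entry; the zpool calls back into
 * zswap_writeback_entry() via zswap_zpool_ops to do the actual work.
 */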
static int zswap_shrink(void)
{
	struct zswap_pool *pool;
	int ret;

	pool = zswap_pool_last_get();
	if (!pool)
		return -ENOENT;

	ret = zpool_shrink(pool->zpool, 1, NULL);

	zswap_pool_put(pool);

	return ret;
}

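/*
 * Returns 1 and sets *value if every machine word in the page has the
 * same value, so the page can be stored as just that word.
 */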
static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;
	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos] != page[0])
			return 0;
	}
	*value = page[0];
	return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int hlen, dlen = PAGE_SIZE;
	unsigned long handle, value;
	char *buf;
	u8 *src, *dst;
	struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
	gfp_t gfp;

	/* THP isn't supported */
	if (PageTransHuge(page)) {
		ret = -EINVAL;
		goto reject;
	}

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zswap_shrink()) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}

		/* A second zswap_is_full() check after
		 * zswap_shrink() to make sure it's now
		 * under the max_pool_percent
		 */
		if (zswap_is_full()) {
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	if (zswap_same_filled_pages_enabled) {
		src = kmap_atomic(page);
		if (zswap_is_page_same_filled(src, &value)) {
			kunmap_atomic(src);
			entry->offset = offset;
			entry->length = 0;
			entry->value = value;
			atomic_inc(&zswap_same_filled_pages);
			goto insert_entry;
		}
		kunmap_atomic(src);
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(entry->pool->zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	memcpy(buf, &zhdr, hlen);
	memcpy(buf + hlen, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

insert_entry:
	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}

/*
 * Returns 0 if the page was successfully decompressed.
 * Returns -1 on entry not found or error.
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	if (!entry->length) {
		dst = kmap_atomic(page);
		zswap_fill_page(dst, entry->value);
		kunmap_atomic(dst);
		goto freeentry;
	}

	/* decompress */
	dlen = PAGE_SIZE;
	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
	if (zpool_evictable(entry->pool->zpool))
		src += sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	BUG_ON(ret);

freeentry:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

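/* hooks registered with frontswap at init time */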
static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", 0444,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", 0444,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
				zswap_dstmem_prepare, zswap_dstmem_dead);
	if (ret) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

hp_fail:
	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_failed = true;
	zswap_enabled = false;
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");