Lines matching refs: zram (drivers/block/zram/zram_drv.c). Each entry below gives the source line number, the matching code, the enclosing function, and the role of the reference (argument, local, member).

54 static void zram_free_page(struct zram *zram, size_t index);
55 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
59 static int zram_slot_trylock(struct zram *zram, u32 index) in zram_slot_trylock() argument
61 return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags); in zram_slot_trylock()
64 static void zram_slot_lock(struct zram *zram, u32 index) in zram_slot_lock() argument
66 bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags); in zram_slot_lock()
69 static void zram_slot_unlock(struct zram *zram, u32 index) in zram_slot_unlock() argument
71 bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags); in zram_slot_unlock()
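
The three helpers above implement the per-slot lock: ZRAM_LOCK is a single bit in table[index].flags, taken and released with the kernel's bit_spin_lock primitives, so no separate lock word is needed per slot. A minimal caller-side sketch (hypothetical helper name, assuming the accessors in this listing):

    /* Sketch only: how the slot lock brackets flag access. */
    static bool slot_is_idle(struct zram *zram, u32 index)
    {
        bool idle;

        zram_slot_lock(zram, index);    /* bit_spin_lock(ZRAM_LOCK, ...) */
        idle = zram_test_flag(zram, index, ZRAM_IDLE);
        zram_slot_unlock(zram, index);  /* bit_spin_unlock(ZRAM_LOCK, ...) */
        return idle;
    }
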
74 static inline bool init_done(struct zram *zram) in init_done() argument
76 return zram->disksize; in init_done()
79 static inline struct zram *dev_to_zram(struct device *dev) in dev_to_zram()
81 return (struct zram *)dev_to_disk(dev)->private_data; in dev_to_zram()
84 static unsigned long zram_get_handle(struct zram *zram, u32 index) in zram_get_handle() argument
86 return zram->table[index].handle; in zram_get_handle()
89 static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) in zram_set_handle() argument
91 zram->table[index].handle = handle; in zram_set_handle()
95 static bool zram_test_flag(struct zram *zram, u32 index, in zram_test_flag() argument
98 return zram->table[index].flags & BIT(flag); in zram_test_flag()
101 static void zram_set_flag(struct zram *zram, u32 index, in zram_set_flag() argument
104 zram->table[index].flags |= BIT(flag); in zram_set_flag()
107 static void zram_clear_flag(struct zram *zram, u32 index, in zram_clear_flag() argument
110 zram->table[index].flags &= ~BIT(flag); in zram_clear_flag()
113 static inline void zram_set_element(struct zram *zram, u32 index, in zram_set_element() argument
116 zram->table[index].element = element; in zram_set_element()
119 static unsigned long zram_get_element(struct zram *zram, u32 index) in zram_get_element() argument
121 return zram->table[index].element; in zram_get_element()
124 static size_t zram_get_obj_size(struct zram *zram, u32 index) in zram_get_obj_size() argument
126 return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1); in zram_get_obj_size()
129 static void zram_set_obj_size(struct zram *zram, in zram_set_obj_size() argument
132 unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT; in zram_set_obj_size()
134 zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; in zram_set_obj_size()
137 static inline bool zram_allocated(struct zram *zram, u32 index) in zram_allocated() argument
139 return zram_get_obj_size(zram, index) || in zram_allocated()
140 zram_test_flag(zram, index, ZRAM_SAME) || in zram_allocated()
141 zram_test_flag(zram, index, ZRAM_WB); in zram_allocated()
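
Lines 95-141 show how one flags word per slot does double duty: the low ZRAM_FLAG_SHIFT bits hold the compressed object size, and the bits above hold the state flags (ZRAM_LOCK, ZRAM_SAME, ZRAM_WB, ZRAM_UNDER_WB, ZRAM_HUGE, ZRAM_IDLE). That is why zram_set_obj_size() at lines 129-134 shifts the flags out and back in rather than assigning the size directly, and why zram_allocated() can treat "size is zero and neither SAME nor WB is set" as a free slot:

    /* flags word layout (sketch; the exact shift value lives in zram_drv.h):
     *
     *   bits [0, ZRAM_FLAG_SHIFT)   compressed object size
     *   bits [ZRAM_FLAG_SHIFT, ..)  zram_pageflags state bits
     */
    zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
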
159 static inline bool valid_io_request(struct zram *zram, in valid_io_request() argument
171 bound = zram->disksize >> SECTOR_SHIFT; in valid_io_request()
186 static inline void update_used_max(struct zram *zram, in update_used_max() argument
191 old_max = atomic_long_read(&zram->stats.max_used_pages); in update_used_max()
197 &zram->stats.max_used_pages, cur_max, pages); in update_used_max()
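
update_used_max() is lock-free; only two of its lines survive the match above, but the surrounding loop is the usual compare-and-swap maximum. A plausible reconstruction of lines 186-197 (not a verbatim excerpt):

    /* Plausible reconstruction of the lock-free max update; not verbatim. */
    static inline void update_used_max(struct zram *zram,
                                       const unsigned long pages)
    {
        unsigned long old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);
        do {
            cur_max = old_max;
            /* retry until the stored maximum is at least `pages` */
            if (pages > cur_max)
                old_max = atomic_long_cmpxchg(
                        &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
    }
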
231 struct zram *zram = dev_to_zram(dev); in initstate_show() local
233 down_read(&zram->init_lock); in initstate_show()
234 val = init_done(zram); in initstate_show()
235 up_read(&zram->init_lock); in initstate_show()
243 struct zram *zram = dev_to_zram(dev); in disksize_show() local
245 return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize); in disksize_show()
253 struct zram *zram = dev_to_zram(dev); in mem_limit_store() local
259 down_write(&zram->init_lock); in mem_limit_store()
260 zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT; in mem_limit_store()
261 up_write(&zram->init_lock); in mem_limit_store()
271 struct zram *zram = dev_to_zram(dev); in mem_used_max_store() local
277 down_read(&zram->init_lock); in mem_used_max_store()
278 if (init_done(zram)) { in mem_used_max_store()
279 atomic_long_set(&zram->stats.max_used_pages, in mem_used_max_store()
280 zs_get_total_pages(zram->mem_pool)); in mem_used_max_store()
282 up_read(&zram->init_lock); in mem_used_max_store()
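
Both attributes are driven from user space; per the zram admin documentation, mem_limit accepts a memparse()-style size (0 removes the limit), and writing to mem_used_max resets the watermark, which is what the atomic_long_set() to the current pool size at lines 279-280 implements:

    echo 1G > /sys/block/zram0/mem_limit      # cap compressed memory
    echo 0  > /sys/block/zram0/mem_used_max   # reset the high-water mark
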
290 struct zram *zram = dev_to_zram(dev); in idle_store() local
291 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; in idle_store()
297 down_read(&zram->init_lock); in idle_store()
298 if (!init_done(zram)) { in idle_store()
299 up_read(&zram->init_lock); in idle_store()
308 zram_slot_lock(zram, index); in idle_store()
309 if (zram_allocated(zram, index) && in idle_store()
310 !zram_test_flag(zram, index, ZRAM_UNDER_WB)) in idle_store()
311 zram_set_flag(zram, index, ZRAM_IDLE); in idle_store()
312 zram_slot_unlock(zram, index); in idle_store()
315 up_read(&zram->init_lock); in idle_store()
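
idle_store() walks every slot under the slot lock and tags anything that is allocated and not mid-writeback with ZRAM_IDLE (lines 308-312); a later access clears the bit again via zram_accessed(). In this kernel the trigger is:

    echo all > /sys/block/zram0/idle
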
324 struct zram *zram = dev_to_zram(dev); in writeback_limit_enable_store() local
331 down_read(&zram->init_lock); in writeback_limit_enable_store()
332 spin_lock(&zram->wb_limit_lock); in writeback_limit_enable_store()
333 zram->wb_limit_enable = val; in writeback_limit_enable_store()
334 spin_unlock(&zram->wb_limit_lock); in writeback_limit_enable_store()
335 up_read(&zram->init_lock); in writeback_limit_enable_store()
345 struct zram *zram = dev_to_zram(dev); in writeback_limit_enable_show() local
347 down_read(&zram->init_lock); in writeback_limit_enable_show()
348 spin_lock(&zram->wb_limit_lock); in writeback_limit_enable_show()
349 val = zram->wb_limit_enable; in writeback_limit_enable_show()
350 spin_unlock(&zram->wb_limit_lock); in writeback_limit_enable_show()
351 up_read(&zram->init_lock); in writeback_limit_enable_show()
359 struct zram *zram = dev_to_zram(dev); in writeback_limit_store() local
366 down_read(&zram->init_lock); in writeback_limit_store()
367 spin_lock(&zram->wb_limit_lock); in writeback_limit_store()
368 zram->bd_wb_limit = val; in writeback_limit_store()
369 spin_unlock(&zram->wb_limit_lock); in writeback_limit_store()
370 up_read(&zram->init_lock); in writeback_limit_store()
380 struct zram *zram = dev_to_zram(dev); in writeback_limit_show() local
382 down_read(&zram->init_lock); in writeback_limit_show()
383 spin_lock(&zram->wb_limit_lock); in writeback_limit_show()
384 val = zram->bd_wb_limit; in writeback_limit_show()
385 spin_unlock(&zram->wb_limit_lock); in writeback_limit_show()
386 up_read(&zram->init_lock); in writeback_limit_show()
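
Note the lock nesting shared by all four writeback-limit handlers: init_lock is taken for read (so the device cannot be reset underneath), and the short wb_limit_lock spinlock protects the two fields themselves. bd_wb_limit is accounted in 4 KiB units, matching the "1UL << (PAGE_SHIFT - 12)" decrement at line 757, so per the documentation:

    echo 1     > /sys/block/zram0/writeback_limit_enable
    echo 25600 > /sys/block/zram0/writeback_limit   # 25600 * 4KiB = 100 MiB budget
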
391 static void reset_bdev(struct zram *zram) in reset_bdev() argument
395 if (!zram->backing_dev) in reset_bdev()
398 bdev = zram->bdev; in reset_bdev()
399 if (zram->old_block_size) in reset_bdev()
400 set_blocksize(bdev, zram->old_block_size); in reset_bdev()
403 filp_close(zram->backing_dev, NULL); in reset_bdev()
404 zram->backing_dev = NULL; in reset_bdev()
405 zram->old_block_size = 0; in reset_bdev()
406 zram->bdev = NULL; in reset_bdev()
407 zram->disk->queue->backing_dev_info->capabilities |= in reset_bdev()
409 kvfree(zram->bitmap); in reset_bdev()
410 zram->bitmap = NULL; in reset_bdev()
417 struct zram *zram = dev_to_zram(dev); in backing_dev_show() local
421 down_read(&zram->init_lock); in backing_dev_show()
422 file = zram->backing_dev; in backing_dev_show()
425 up_read(&zram->init_lock); in backing_dev_show()
439 up_read(&zram->init_lock); in backing_dev_show()
455 struct zram *zram = dev_to_zram(dev); in backing_dev_store() local
461 down_write(&zram->init_lock); in backing_dev_store()
462 if (init_done(zram)) { in backing_dev_store()
491 err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); in backing_dev_store()
510 reset_bdev(zram); in backing_dev_store()
512 zram->old_block_size = old_block_size; in backing_dev_store()
513 zram->bdev = bdev; in backing_dev_store()
514 zram->backing_dev = backing_dev; in backing_dev_store()
515 zram->bitmap = bitmap; in backing_dev_store()
516 zram->nr_pages = nr_pages; in backing_dev_store()
527 zram->disk->queue->backing_dev_info->capabilities &= in backing_dev_store()
529 up_write(&zram->init_lock); in backing_dev_store()
545 up_write(&zram->init_lock); in backing_dev_store()
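
backing_dev_store() opens the file, claims the block device exclusively with blkdev_get() (line 491), allocates the block bitmap, and only then publishes everything into the zram struct with init_lock held for write (lines 512-516); the init_done() check at line 462 refuses a device that is already initialized. Per the documentation the backing device must therefore be configured before disksize:

    echo /dev/sda5 > /sys/block/zram0/backing_dev   # before setting disksize
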
552 static unsigned long alloc_block_bdev(struct zram *zram) in alloc_block_bdev() argument
557 blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx); in alloc_block_bdev()
558 if (blk_idx == zram->nr_pages) in alloc_block_bdev()
561 if (test_and_set_bit(blk_idx, zram->bitmap)) in alloc_block_bdev()
564 atomic64_inc(&zram->stats.bd_count); in alloc_block_bdev()
568 static void free_block_bdev(struct zram *zram, unsigned long blk_idx) in free_block_bdev() argument
572 was_set = test_and_clear_bit(blk_idx, zram->bitmap); in free_block_bdev()
574 atomic64_dec(&zram->stats.bd_count); in free_block_bdev()
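
alloc_block_bdev() is a tiny bitmap allocator over the backing device: scan for a zero bit, claim it with test_and_set_bit(), and rescan if another writer won the race. Index 0 is never handed out so that 0 can double as "no block". A plausible reconstruction of lines 552-564 (not a verbatim excerpt):

    /* Sketch of the bitmap allocator; not a verbatim excerpt. */
    static unsigned long alloc_block_bdev(struct zram *zram)
    {
        unsigned long blk_idx = 1;
    retry:
        /* skip bit 0 so a zero element can mean "no backing block" */
        blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
        if (blk_idx == zram->nr_pages)
            return 0;
        if (test_and_set_bit(blk_idx, zram->bitmap))
            goto retry;     /* lost the race; scan again */

        atomic64_inc(&zram->stats.bd_count);
        return blk_idx;
    }
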
589 static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, in read_from_bdev_async() argument
599 bio_set_dev(bio, zram->bdev); in read_from_bdev_async()
623 struct zram *zram = dev_to_zram(dev); in writeback_store() local
624 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; in writeback_store()
640 down_read(&zram->init_lock); in writeback_store()
641 if (!init_done(zram)) { in writeback_store()
646 if (!zram->backing_dev) { in writeback_store()
664 spin_lock(&zram->wb_limit_lock); in writeback_store()
665 if (zram->wb_limit_enable && !zram->bd_wb_limit) { in writeback_store()
666 spin_unlock(&zram->wb_limit_lock); in writeback_store()
670 spin_unlock(&zram->wb_limit_lock); in writeback_store()
673 blk_idx = alloc_block_bdev(zram); in writeback_store()
680 zram_slot_lock(zram, index); in writeback_store()
681 if (!zram_allocated(zram, index)) in writeback_store()
684 if (zram_test_flag(zram, index, ZRAM_WB) || in writeback_store()
685 zram_test_flag(zram, index, ZRAM_SAME) || in writeback_store()
686 zram_test_flag(zram, index, ZRAM_UNDER_WB)) in writeback_store()
690 !zram_test_flag(zram, index, ZRAM_IDLE)) in writeback_store()
693 !zram_test_flag(zram, index, ZRAM_HUGE)) in writeback_store()
699 zram_set_flag(zram, index, ZRAM_UNDER_WB); in writeback_store()
701 zram_set_flag(zram, index, ZRAM_IDLE); in writeback_store()
702 zram_slot_unlock(zram, index); in writeback_store()
703 if (zram_bvec_read(zram, &bvec, index, 0, NULL)) { in writeback_store()
704 zram_slot_lock(zram, index); in writeback_store()
705 zram_clear_flag(zram, index, ZRAM_UNDER_WB); in writeback_store()
706 zram_clear_flag(zram, index, ZRAM_IDLE); in writeback_store()
707 zram_slot_unlock(zram, index); in writeback_store()
712 bio_set_dev(&bio, zram->bdev); in writeback_store()
724 zram_slot_lock(zram, index); in writeback_store()
725 zram_clear_flag(zram, index, ZRAM_UNDER_WB); in writeback_store()
726 zram_clear_flag(zram, index, ZRAM_IDLE); in writeback_store()
727 zram_slot_unlock(zram, index); in writeback_store()
731 atomic64_inc(&zram->stats.bd_writes); in writeback_store()
741 zram_slot_lock(zram, index); in writeback_store()
742 if (!zram_allocated(zram, index) || in writeback_store()
743 !zram_test_flag(zram, index, ZRAM_IDLE)) { in writeback_store()
744 zram_clear_flag(zram, index, ZRAM_UNDER_WB); in writeback_store()
745 zram_clear_flag(zram, index, ZRAM_IDLE); in writeback_store()
749 zram_free_page(zram, index); in writeback_store()
750 zram_clear_flag(zram, index, ZRAM_UNDER_WB); in writeback_store()
751 zram_set_flag(zram, index, ZRAM_WB); in writeback_store()
752 zram_set_element(zram, index, blk_idx); in writeback_store()
754 atomic64_inc(&zram->stats.pages_stored); in writeback_store()
755 spin_lock(&zram->wb_limit_lock); in writeback_store()
756 if (zram->wb_limit_enable && zram->bd_wb_limit > 0) in writeback_store()
757 zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12); in writeback_store()
758 spin_unlock(&zram->wb_limit_lock); in writeback_store()
760 zram_slot_unlock(zram, index); in writeback_store()
764 free_block_bdev(zram, blk_idx); in writeback_store()
768 up_read(&zram->init_lock); in writeback_store()
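
writeback_store() is the whole writeback state machine: mark the slot ZRAM_UNDER_WB and ZRAM_IDLE under the slot lock (lines 699-702), drop the lock to decompress the page and submit a synchronous bio to the backing device (703-712), then relock and either roll the flags back on an error or a racing write (724-745), or free the compressed copy and repoint the slot at the backing block with ZRAM_WB plus element = blk_idx (749-752). User space drives it with:

    echo idle > /sys/block/zram0/writeback   # or: echo huge
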
775 struct zram *zram; member
785 struct zram *zram = zw->zram; in zram_sync_read() local
789 read_from_bdev_async(zram, &zw->bvec, entry, bio); in zram_sync_read()
797 static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, in read_from_bdev_sync() argument
803 work.zram = zram; in read_from_bdev_sync()
815 static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, in read_from_bdev_sync() argument
823 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, in read_from_bdev() argument
826 atomic64_inc(&zram->stats.bd_reads); in read_from_bdev()
828 return read_from_bdev_sync(zram, bvec, entry, parent); in read_from_bdev()
830 return read_from_bdev_async(zram, bvec, entry, parent); in read_from_bdev()
833 static inline void reset_bdev(struct zram *zram) {}; in reset_bdev() argument
834 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, in read_from_bdev() argument
840 static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {}; in free_block_bdev() argument
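
Lines 833-840 are the !CONFIG_ZRAM_WRITEBACK stubs for reset_bdev(), read_from_bdev() and free_block_bdev(). The zram_work indirection at 775-803 exists so a synchronous backing-device read can be submitted from a worker thread (chaining the bio from the caller's own context could deadlock); the second read_from_bdev_sync() at 815 is the variant for configs where that path is never expected to run.
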
857 static void zram_accessed(struct zram *zram, u32 index) in zram_accessed() argument
859 zram_clear_flag(zram, index, ZRAM_IDLE); in zram_accessed()
860 zram->table[index].ac_time = ktime_get_boottime(); in zram_accessed()
868 struct zram *zram = file->private_data; in read_block_state() local
869 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; in read_block_state()
876 down_read(&zram->init_lock); in read_block_state()
877 if (!init_done(zram)) { in read_block_state()
878 up_read(&zram->init_lock); in read_block_state()
886 zram_slot_lock(zram, index); in read_block_state()
887 if (!zram_allocated(zram, index)) in read_block_state()
890 ts = ktime_to_timespec64(zram->table[index].ac_time); in read_block_state()
895 zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.', in read_block_state()
896 zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.', in read_block_state()
897 zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.', in read_block_state()
898 zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.'); in read_block_state()
901 zram_slot_unlock(zram, index); in read_block_state()
907 zram_slot_unlock(zram, index); in read_block_state()
911 up_read(&zram->init_lock); in read_block_state()
925 static void zram_debugfs_register(struct zram *zram) in zram_debugfs_register() argument
930 zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name, in zram_debugfs_register()
932 debugfs_create_file("block_state", 0400, zram->debugfs_dir, in zram_debugfs_register()
933 zram, &proc_zram_block_state_op); in zram_debugfs_register()
936 static void zram_debugfs_unregister(struct zram *zram) in zram_debugfs_unregister() argument
938 debugfs_remove_recursive(zram->debugfs_dir); in zram_debugfs_unregister()
943 static void zram_accessed(struct zram *zram, u32 index) in zram_accessed() argument
945 zram_clear_flag(zram, index, ZRAM_IDLE); in zram_accessed()
947 static void zram_debugfs_register(struct zram *zram) {}; in zram_debugfs_register() argument
948 static void zram_debugfs_unregister(struct zram *zram) {}; in zram_debugfs_unregister() argument
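
With CONFIG_ZRAM_MEMORY_TRACKING the driver records an access timestamp per slot (line 860) and exposes per-slot state through debugfs; without it, zram_accessed() degrades to just clearing ZRAM_IDLE (lines 943-945). read_block_state() prints one line per allocated slot: index, last access time, then the same/written-back/huge/idle flag columns from lines 895-898, e.g.:

    cat /sys/kernel/debug/zram/zram0/block_state
      300    75.033841 .wh.
      301    63.806904 s...
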
976 struct zram *zram = dev_to_zram(dev); in comp_algorithm_show() local
978 down_read(&zram->init_lock); in comp_algorithm_show()
979 sz = zcomp_available_show(zram->compressor, buf); in comp_algorithm_show()
980 up_read(&zram->init_lock); in comp_algorithm_show()
988 struct zram *zram = dev_to_zram(dev); in comp_algorithm_store() local
989 char compressor[ARRAY_SIZE(zram->compressor)]; in comp_algorithm_store()
1001 down_write(&zram->init_lock); in comp_algorithm_store()
1002 if (init_done(zram)) { in comp_algorithm_store()
1003 up_write(&zram->init_lock); in comp_algorithm_store()
1008 strcpy(zram->compressor, compressor); in comp_algorithm_store()
1009 up_write(&zram->init_lock); in comp_algorithm_store()
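
comp_algorithm_store() copies the new name into zram->compressor but refuses once the device is initialized (lines 1002-1003), so the algorithm has to be picked before disksize is set; the show side lists what zcomp knows about, with the active algorithm bracketed:

    cat /sys/block/zram0/comp_algorithm       # e.g.: lzo [lzo-rle] lz4 zstd
    echo lz4 > /sys/block/zram0/comp_algorithm
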
1016 struct zram *zram = dev_to_zram(dev); in compact_store() local
1018 down_read(&zram->init_lock); in compact_store()
1019 if (!init_done(zram)) { in compact_store()
1020 up_read(&zram->init_lock); in compact_store()
1024 zs_compact(zram->mem_pool); in compact_store()
1025 up_read(&zram->init_lock); in compact_store()
1033 struct zram *zram = dev_to_zram(dev); in io_stat_show() local
1036 down_read(&zram->init_lock); in io_stat_show()
1039 (u64)atomic64_read(&zram->stats.failed_reads), in io_stat_show()
1040 (u64)atomic64_read(&zram->stats.failed_writes), in io_stat_show()
1041 (u64)atomic64_read(&zram->stats.invalid_io), in io_stat_show()
1042 (u64)atomic64_read(&zram->stats.notify_free)); in io_stat_show()
1043 up_read(&zram->init_lock); in io_stat_show()
1051 struct zram *zram = dev_to_zram(dev); in mm_stat_show() local
1059 down_read(&zram->init_lock); in mm_stat_show()
1060 if (init_done(zram)) { in mm_stat_show()
1061 mem_used = zs_get_total_pages(zram->mem_pool); in mm_stat_show()
1062 zs_pool_stats(zram->mem_pool, &pool_stats); in mm_stat_show()
1065 orig_size = atomic64_read(&zram->stats.pages_stored); in mm_stat_show()
1066 max_used = atomic_long_read(&zram->stats.max_used_pages); in mm_stat_show()
1071 (u64)atomic64_read(&zram->stats.compr_data_size), in mm_stat_show()
1073 zram->limit_pages << PAGE_SHIFT, in mm_stat_show()
1075 (u64)atomic64_read(&zram->stats.same_pages), in mm_stat_show()
1077 (u64)atomic64_read(&zram->stats.huge_pages)); in mm_stat_show()
1078 up_read(&zram->init_lock); in mm_stat_show()
1088 struct zram *zram = dev_to_zram(dev); in bd_stat_show() local
1091 down_read(&zram->init_lock); in bd_stat_show()
1094 FOUR_K((u64)atomic64_read(&zram->stats.bd_count)), in bd_stat_show()
1095 FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)), in bd_stat_show()
1096 FOUR_K((u64)atomic64_read(&zram->stats.bd_writes))); in bd_stat_show()
1097 up_read(&zram->init_lock); in bd_stat_show()
1107 struct zram *zram = dev_to_zram(dev); in debug_stat_show() local
1110 down_read(&zram->init_lock); in debug_stat_show()
1114 (u64)atomic64_read(&zram->stats.writestall), in debug_stat_show()
1115 (u64)atomic64_read(&zram->stats.miss_free)); in debug_stat_show()
1116 up_read(&zram->init_lock); in debug_stat_show()
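
The four stat files map straight onto the counters read above: io_stat is failed_reads, failed_writes, invalid_io and notify_free; mm_stat is orig_data_size, compr_data_size, mem_used_total, mem_limit, mem_used_max, same_pages, pages_compacted and huge_pages; bd_stat reports bd_count, bd_reads and bd_writes scaled to 4 KiB units by the FOUR_K() macro; debug_stat carries a version line plus writestall and miss_free.
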
1128 static void zram_meta_free(struct zram *zram, u64 disksize) in zram_meta_free() argument
1135 zram_free_page(zram, index); in zram_meta_free()
1137 zs_destroy_pool(zram->mem_pool); in zram_meta_free()
1138 vfree(zram->table); in zram_meta_free()
1141 static bool zram_meta_alloc(struct zram *zram, u64 disksize) in zram_meta_alloc() argument
1146 zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table))); in zram_meta_alloc()
1147 if (!zram->table) in zram_meta_alloc()
1150 zram->mem_pool = zs_create_pool(zram->disk->disk_name); in zram_meta_alloc()
1151 if (!zram->mem_pool) { in zram_meta_alloc()
1152 vfree(zram->table); in zram_meta_alloc()
1157 huge_class_size = zs_huge_class_size(zram->mem_pool); in zram_meta_alloc()
1166 static void zram_free_page(struct zram *zram, size_t index) in zram_free_page() argument
1171 zram->table[index].ac_time = 0; in zram_free_page()
1173 if (zram_test_flag(zram, index, ZRAM_IDLE)) in zram_free_page()
1174 zram_clear_flag(zram, index, ZRAM_IDLE); in zram_free_page()
1176 if (zram_test_flag(zram, index, ZRAM_HUGE)) { in zram_free_page()
1177 zram_clear_flag(zram, index, ZRAM_HUGE); in zram_free_page()
1178 atomic64_dec(&zram->stats.huge_pages); in zram_free_page()
1181 if (zram_test_flag(zram, index, ZRAM_WB)) { in zram_free_page()
1182 zram_clear_flag(zram, index, ZRAM_WB); in zram_free_page()
1183 free_block_bdev(zram, zram_get_element(zram, index)); in zram_free_page()
1191 if (zram_test_flag(zram, index, ZRAM_SAME)) { in zram_free_page()
1192 zram_clear_flag(zram, index, ZRAM_SAME); in zram_free_page()
1193 atomic64_dec(&zram->stats.same_pages); in zram_free_page()
1197 handle = zram_get_handle(zram, index); in zram_free_page()
1201 zs_free(zram->mem_pool, handle); in zram_free_page()
1203 atomic64_sub(zram_get_obj_size(zram, index), in zram_free_page()
1204 &zram->stats.compr_data_size); in zram_free_page()
1206 atomic64_dec(&zram->stats.pages_stored); in zram_free_page()
1207 zram_set_handle(zram, index, 0); in zram_free_page()
1208 zram_set_obj_size(zram, index, 0); in zram_free_page()
1209 WARN_ON_ONCE(zram->table[index].flags & in zram_free_page()
1213 static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, in __zram_bvec_read() argument
1221 zram_slot_lock(zram, index); in __zram_bvec_read()
1222 if (zram_test_flag(zram, index, ZRAM_WB)) { in __zram_bvec_read()
1225 zram_slot_unlock(zram, index); in __zram_bvec_read()
1230 return read_from_bdev(zram, &bvec, in __zram_bvec_read()
1231 zram_get_element(zram, index), in __zram_bvec_read()
1235 handle = zram_get_handle(zram, index); in __zram_bvec_read()
1236 if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { in __zram_bvec_read()
1240 value = handle ? zram_get_element(zram, index) : 0; in __zram_bvec_read()
1244 zram_slot_unlock(zram, index); in __zram_bvec_read()
1248 size = zram_get_obj_size(zram, index); in __zram_bvec_read()
1250 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); in __zram_bvec_read()
1257 struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); in __zram_bvec_read()
1262 zcomp_stream_put(zram->comp); in __zram_bvec_read()
1264 zs_unmap_object(zram->mem_pool, handle); in __zram_bvec_read()
1265 zram_slot_unlock(zram, index); in __zram_bvec_read()
1274 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, in zram_bvec_read() argument
1288 ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec)); in zram_bvec_read()
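
__zram_bvec_read() resolves a slot from one of three sources, all decided under the slot lock: a ZRAM_WB slot is fetched from the backing device (lines 1222-1231, dropping the lock first since block IO can sleep); a same-filled slot has no zsmalloc handle, so the stored element value is expanded into the page (1235-1244); otherwise the zsmalloc object is mapped and either copied (huge pages are stored uncompressed) or decompressed through a zcomp stream (1248-1265). zram_bvec_read() at 1274-1288 just adds bounce-page handling for partial requests.
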
1307 static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, in __zram_bvec_write() argument
1325 atomic64_inc(&zram->stats.same_pages); in __zram_bvec_write()
1331 zstrm = zcomp_stream_get(zram->comp); in __zram_bvec_write()
1337 zcomp_stream_put(zram->comp); in __zram_bvec_write()
1339 zs_free(zram->mem_pool, handle); in __zram_bvec_write()
1359 handle = zs_malloc(zram->mem_pool, comp_len, in __zram_bvec_write()
1365 zcomp_stream_put(zram->comp); in __zram_bvec_write()
1366 atomic64_inc(&zram->stats.writestall); in __zram_bvec_write()
1367 handle = zs_malloc(zram->mem_pool, comp_len, in __zram_bvec_write()
1375 alloced_pages = zs_get_total_pages(zram->mem_pool); in __zram_bvec_write()
1376 update_used_max(zram, alloced_pages); in __zram_bvec_write()
1378 if (zram->limit_pages && alloced_pages > zram->limit_pages) { in __zram_bvec_write()
1379 zcomp_stream_put(zram->comp); in __zram_bvec_write()
1380 zs_free(zram->mem_pool, handle); in __zram_bvec_write()
1384 dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); in __zram_bvec_write()
1393 zcomp_stream_put(zram->comp); in __zram_bvec_write()
1394 zs_unmap_object(zram->mem_pool, handle); in __zram_bvec_write()
1395 atomic64_add(comp_len, &zram->stats.compr_data_size); in __zram_bvec_write()
1401 zram_slot_lock(zram, index); in __zram_bvec_write()
1402 zram_free_page(zram, index); in __zram_bvec_write()
1405 zram_set_flag(zram, index, ZRAM_HUGE); in __zram_bvec_write()
1406 atomic64_inc(&zram->stats.huge_pages); in __zram_bvec_write()
1410 zram_set_flag(zram, index, flags); in __zram_bvec_write()
1411 zram_set_element(zram, index, element); in __zram_bvec_write()
1413 zram_set_handle(zram, index, handle); in __zram_bvec_write()
1414 zram_set_obj_size(zram, index, comp_len); in __zram_bvec_write()
1416 zram_slot_unlock(zram, index); in __zram_bvec_write()
1419 atomic64_inc(&zram->stats.pages_stored); in __zram_bvec_write()
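
__zram_bvec_write() special-cases two kinds of page before touching zsmalloc: same-filled pages are recorded as a flag plus element only (line 1325), and pages that compress poorly (comp_len >= huge_class_size) are stored uncompressed and flagged ZRAM_HUGE (lines 1405-1406). The allocation itself is two-pass: an optimistic zs_malloc() while still holding the compression stream, then, if that fails, put the stream, count a writestall, and retry with a mask that may enter direct reclaim. A plausible reconstruction of lines 1359-1367 (GFP masks as in kernels of this era; not a verbatim excerpt):

    /* Sketch of the two-pass allocation; not verbatim. */
    handle = zs_malloc(zram->mem_pool, comp_len,
                       __GFP_KSWAPD_RECLAIM | __GFP_NOWARN |
                       __GFP_HIGHMEM | __GFP_MOVABLE);
    if (!handle) {
        zcomp_stream_put(zram->comp);
        atomic64_inc(&zram->stats.writestall);
        handle = zs_malloc(zram->mem_pool, comp_len,
                           GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
        if (handle)
            goto compress_again;   /* recompress into the new object */
        return -ENOMEM;
    }
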
1423 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, in zram_bvec_write() argument
1442 ret = __zram_bvec_read(zram, page, index, bio, true); in zram_bvec_write()
1457 ret = __zram_bvec_write(zram, &vec, index, bio); in zram_bvec_write()
1469 static void zram_bio_discard(struct zram *zram, u32 index, in zram_bio_discard() argument
1493 zram_slot_lock(zram, index); in zram_bio_discard()
1494 zram_free_page(zram, index); in zram_bio_discard()
1495 zram_slot_unlock(zram, index); in zram_bio_discard()
1496 atomic64_inc(&zram->stats.notify_free); in zram_bio_discard()
1507 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, in zram_bvec_rw() argument
1511 struct request_queue *q = zram->disk->queue; in zram_bvec_rw()
1515 &zram->disk->part0); in zram_bvec_rw()
1518 atomic64_inc(&zram->stats.num_reads); in zram_bvec_rw()
1519 ret = zram_bvec_read(zram, bvec, index, offset, bio); in zram_bvec_rw()
1522 atomic64_inc(&zram->stats.num_writes); in zram_bvec_rw()
1523 ret = zram_bvec_write(zram, bvec, index, offset, bio); in zram_bvec_rw()
1526 generic_end_io_acct(q, op, &zram->disk->part0, start_time); in zram_bvec_rw()
1528 zram_slot_lock(zram, index); in zram_bvec_rw()
1529 zram_accessed(zram, index); in zram_bvec_rw()
1530 zram_slot_unlock(zram, index); in zram_bvec_rw()
1534 atomic64_inc(&zram->stats.failed_reads); in zram_bvec_rw()
1536 atomic64_inc(&zram->stats.failed_writes); in zram_bvec_rw()
1542 static void __zram_make_request(struct zram *zram, struct bio *bio) in __zram_make_request() argument
1556 zram_bio_discard(zram, index, offset, bio); in __zram_make_request()
1570 if (zram_bvec_rw(zram, &bv, index, offset, in __zram_make_request()
1593 struct zram *zram = queue->queuedata; in zram_make_request() local
1595 if (!valid_io_request(zram, bio->bi_iter.bi_sector, in zram_make_request()
1597 atomic64_inc(&zram->stats.invalid_io); in zram_make_request()
1601 __zram_make_request(zram, bio); in zram_make_request()
1612 struct zram *zram; in zram_slot_free_notify() local
1614 zram = bdev->bd_disk->private_data; in zram_slot_free_notify()
1616 atomic64_inc(&zram->stats.notify_free); in zram_slot_free_notify()
1617 if (!zram_slot_trylock(zram, index)) { in zram_slot_free_notify()
1618 atomic64_inc(&zram->stats.miss_free); in zram_slot_free_notify()
1622 zram_free_page(zram, index); in zram_slot_free_notify()
1623 zram_slot_unlock(zram, index); in zram_slot_free_notify()
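
zram_slot_free_notify() is called from the swap path in a context that cannot wait on the slot lock, so it only trylocks; on contention it bumps stats.miss_free and returns (lines 1617-1618), leaving the slot to be reclaimed by the next write to that index.
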
1631 struct zram *zram; in zram_rw_page() local
1636 zram = bdev->bd_disk->private_data; in zram_rw_page()
1638 if (!valid_io_request(zram, sector, PAGE_SIZE)) { in zram_rw_page()
1639 atomic64_inc(&zram->stats.invalid_io); in zram_rw_page()
1651 ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL); in zram_rw_page()
1677 static void zram_reset_device(struct zram *zram) in zram_reset_device() argument
1682 down_write(&zram->init_lock); in zram_reset_device()
1684 zram->limit_pages = 0; in zram_reset_device()
1686 if (!init_done(zram)) { in zram_reset_device()
1687 up_write(&zram->init_lock); in zram_reset_device()
1691 comp = zram->comp; in zram_reset_device()
1692 disksize = zram->disksize; in zram_reset_device()
1693 zram->disksize = 0; in zram_reset_device()
1695 set_capacity(zram->disk, 0); in zram_reset_device()
1696 part_stat_set_all(&zram->disk->part0, 0); in zram_reset_device()
1698 up_write(&zram->init_lock); in zram_reset_device()
1700 zram_meta_free(zram, disksize); in zram_reset_device()
1701 memset(&zram->stats, 0, sizeof(zram->stats)); in zram_reset_device()
1703 reset_bdev(zram); in zram_reset_device()
1711 struct zram *zram = dev_to_zram(dev); in disksize_store() local
1718 down_write(&zram->init_lock); in disksize_store()
1719 if (init_done(zram)) { in disksize_store()
1726 if (!zram_meta_alloc(zram, disksize)) { in disksize_store()
1731 comp = zcomp_create(zram->compressor); in disksize_store()
1734 zram->compressor); in disksize_store()
1739 zram->comp = comp; in disksize_store()
1740 zram->disksize = disksize; in disksize_store()
1741 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); in disksize_store()
1743 revalidate_disk(zram->disk); in disksize_store()
1744 up_write(&zram->init_lock); in disksize_store()
1749 zram_meta_free(zram, disksize); in disksize_store()
1751 up_write(&zram->init_lock); in disksize_store()
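
disksize_store() is the initialization point: the metadata table and the compressor backend are created first, and only when both succeed are zram->comp and zram->disksize published and the gendisk capacity set (lines 1739-1741). The usual bring-up sequence is:

    echo lz4 > /sys/block/zram0/comp_algorithm   # optional, before disksize
    echo 2G  > /sys/block/zram0/disksize
    mkswap /dev/zram0 && swapon /dev/zram0
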
1760 struct zram *zram; in reset_store() local
1770 zram = dev_to_zram(dev); in reset_store()
1771 bdev = bdget_disk(zram->disk, 0); in reset_store()
1777 if (bdev->bd_openers || zram->claim) { in reset_store()
1784 zram->claim = true; in reset_store()
1789 zram_reset_device(zram); in reset_store()
1790 revalidate_disk(zram->disk); in reset_store()
1794 zram->claim = false; in reset_store()
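
reset_store() claims the block device, refusing while it still has openers (line 1777), sets zram->claim to fence off zram_open(), and then tears everything down through zram_reset_device():

    swapoff /dev/zram0               # or umount, first
    echo 1 > /sys/block/zram0/reset
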
1803 struct zram *zram; in zram_open() local
1807 zram = bdev->bd_disk->private_data; in zram_open()
1809 if (zram->claim) in zram_open()
1878 struct zram *zram; in zram_add() local
1882 zram = kzalloc(sizeof(struct zram), GFP_KERNEL); in zram_add()
1883 if (!zram) in zram_add()
1886 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL); in zram_add()
1891 init_rwsem(&zram->init_lock); in zram_add()
1893 spin_lock_init(&zram->wb_limit_lock); in zram_add()
1906 zram->disk = alloc_disk(1); in zram_add()
1907 if (!zram->disk) { in zram_add()
1914 zram->disk->major = zram_major; in zram_add()
1915 zram->disk->first_minor = device_id; in zram_add()
1916 zram->disk->fops = &zram_devops; in zram_add()
1917 zram->disk->queue = queue; in zram_add()
1918 zram->disk->queue->queuedata = zram; in zram_add()
1919 zram->disk->private_data = zram; in zram_add()
1920 snprintf(zram->disk->disk_name, 16, "zram%d", device_id); in zram_add()
1923 set_capacity(zram->disk, 0); in zram_add()
1925 blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue); in zram_add()
1926 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); in zram_add()
1932 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); in zram_add()
1933 blk_queue_logical_block_size(zram->disk->queue, in zram_add()
1935 blk_queue_io_min(zram->disk->queue, PAGE_SIZE); in zram_add()
1936 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); in zram_add()
1937 zram->disk->queue->limits.discard_granularity = PAGE_SIZE; in zram_add()
1938 blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); in zram_add()
1939 blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue); in zram_add()
1950 blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX); in zram_add()
1952 zram->disk->queue->backing_dev_info->capabilities |= in zram_add()
1954 device_add_disk(NULL, zram->disk, zram_disk_attr_groups); in zram_add()
1956 strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); in zram_add()
1958 zram_debugfs_register(zram); in zram_add()
1959 pr_info("Added device: %s\n", zram->disk->disk_name); in zram_add()
1967 kfree(zram); in zram_add()
1971 static int zram_remove(struct zram *zram) in zram_remove() argument
1975 bdev = bdget_disk(zram->disk, 0); in zram_remove()
1980 if (bdev->bd_openers || zram->claim) { in zram_remove()
1986 zram->claim = true; in zram_remove()
1989 zram_debugfs_unregister(zram); in zram_remove()
1993 zram_reset_device(zram); in zram_remove()
1996 pr_info("Removed device: %s\n", zram->disk->disk_name); in zram_remove()
1998 del_gendisk(zram->disk); in zram_remove()
1999 blk_cleanup_queue(zram->disk->queue); in zram_remove()
2000 put_disk(zram->disk); in zram_remove()
2001 kfree(zram); in zram_remove()
2034 struct zram *zram; in hot_remove_store() local
2046 zram = idr_find(&zram_index_idr, dev_id); in hot_remove_store()
2047 if (zram) { in hot_remove_store()
2048 ret = zram_remove(zram); in hot_remove_store()
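
zram_add() and zram_remove() back the hot-plug control interface; per the documentation, reading hot_add allocates the next free device id and returns it, and writing an id to hot_remove tears that device down (zram_remove() refuses, like reset, while the device is still open, line 1980):

    cat /sys/class/zram-control/hot_add       # prints the new device id, e.g. 1
    echo 1 > /sys/class/zram-control/hot_remove
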