// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/utils.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include "internal.h"
#include <linux/pagevec.h>

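/*
 * Reuse a page from the caller-supplied pool if one is available;
 * otherwise fall back to the page allocator. Since __GFP_NOFAIL is
 * passed, the allocation below cannot return NULL.
 */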
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{
	struct page *page;

	if (!list_empty(pool)) {
		page = lru_to_page(pool);
		list_del(&page->lru);
	} else {
		page = alloc_pages(gfp | __GFP_NOFAIL, 0);

		BUG_ON(page == NULL);
		BUG_ON(page->mapping != NULL);
	}
	return page;
}

/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;

#ifdef CONFIG_EROFS_FS_ZIP

/* radix_tree and the future XArray both don't use tagptr_t yet */
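/*
 * Look up a workgroup by @index under the RCU read lock. The low bit of
 * the stored pointer is a radix-tree exceptional-entry tag: it is
 * reported back through *tag and masked off before the pointer is used.
 * If grabbing a reference fails (e.g. the group is about to be freed),
 * the RCU read lock is dropped and the lookup is retried.
 */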
struct erofs_workgroup *erofs_find_workgroup(
	struct super_block *sb, pgoff_t index, bool *tag)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;
	int oldcount;

repeat:
	rcu_read_lock();
	grp = radix_tree_lookup(&sbi->workstn_tree, index);
	if (grp != NULL) {
		*tag = radix_tree_exceptional_entry(grp);
		grp = (void *)((unsigned long)grp &
			~RADIX_TREE_EXCEPTIONAL_ENTRY);
		if (erofs_workgroup_get(grp, &oldcount)) {
			/* prefer to relax the RCU read side and retry */
			rcu_read_unlock();
			goto repeat;
		}

		/* decrease the shrink count (incremented by erofs_workgroup_put) */
		if (unlikely(oldcount == 1))
			atomic_long_dec(&erofs_global_shrink_cnt);
		BUG_ON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}

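/*
 * Insert a workgroup into the per-superblock radix tree at grp->index.
 * When @tag is set, the exceptional-entry bit is encoded into the stored
 * pointer so that erofs_find_workgroup() can recover it later. The tree
 * takes its own reference on the workgroup only if insertion succeeds.
 */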
int erofs_register_workgroup(struct super_block *sb,
			     struct erofs_workgroup *grp,
			     bool tag)
{
	struct erofs_sb_info *sbi;
	int err;

	/* grp->refcount should not be less than 1 */
	BUG_ON(!atomic_read(&grp->refcount));

	err = radix_tree_preload(GFP_NOFS);
	if (err)
		return err;

	sbi = EROFS_SB(sb);
	erofs_workstn_lock(sbi);

	if (tag)
		grp = (void *)((unsigned long)grp |
			1UL << RADIX_TREE_EXCEPTIONAL_SHIFT);

	err = radix_tree_insert(&sbi->workstn_tree,
		grp->index, grp);

	if (!err)
		__erofs_workgroup_get(grp);

	erofs_workstn_unlock(sbi);
	radix_tree_preload_end();
	return err;
}

extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);

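/*
 * Drop one reference. When the count falls to 1, only the radix tree
 * still holds the workgroup, so it becomes reclaimable and the global
 * shrink count is bumped; when it falls to 0, the workgroup is freed
 * after an RCU grace period.
 */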
int erofs_workgroup_put(struct erofs_workgroup *grp)
{
	int count = atomic_dec_return(&grp->refcount);

	if (count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	else if (!count) {
		atomic_long_dec(&erofs_global_shrink_cnt);
		erofs_workgroup_free_rcu(grp);
	}
	return count;
}

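/*
 * Walk the workstation radix tree in batches of up to PAGEVEC_SIZE
 * entries, trying to detach and release at most @nr_shrink workgroups.
 * With @cleanup set, every group is required to hold exactly one
 * reference, i.e. the tree's own.
 */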
unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
				       unsigned long nr_shrink,
				       bool cleanup)
{
	pgoff_t first_index = 0;
	void *batch[PAGEVEC_SIZE];
	unsigned int freed = 0;

	int i, found;
repeat:
	erofs_workstn_lock(sbi);

	found = radix_tree_gang_lookup(&sbi->workstn_tree,
		batch, first_index, PAGEVEC_SIZE);

	for (i = 0; i < found; ++i) {
		int cnt;
		struct erofs_workgroup *grp = (void *)
			((unsigned long)batch[i] &
				~RADIX_TREE_EXCEPTIONAL_ENTRY);

		first_index = grp->index + 1;

		cnt = atomic_read(&grp->refcount);
		BUG_ON(cnt <= 0);

		if (cleanup)
			BUG_ON(cnt != 1);

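		/*
		 * Without a managed cache, groups still referenced by
		 * others (cnt > 1) are simply skipped; with a managed
		 * cache, the group must first be frozen to a refcount
		 * of 1, and is skipped if freezing fails.
		 */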
#ifndef EROFS_FS_HAS_MANAGED_CACHE
		else if (cnt > 1)
#else
		if (!erofs_workgroup_try_to_freeze(grp, 1))
#endif
			continue;

		if (radix_tree_delete(&sbi->workstn_tree,
			grp->index) != grp) {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
skip:
			erofs_workgroup_unfreeze(grp, 1);
#endif
			continue;
		}

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (erofs_try_to_free_all_cached_pages(sbi, grp))
			goto skip;

		erofs_workgroup_unfreeze(grp, 1);
#endif
		/* (rarely) grabbed again when freeing */
		erofs_workgroup_put(grp);

		++freed;
		if (unlikely(!--nr_shrink))
			break;
	}
	erofs_workstn_unlock(sbi);

	if (i && nr_shrink)
		goto repeat;
	return freed;
}

#endif

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);

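/*
 * Make a mounted superblock visible to the global shrinker by adding it
 * to erofs_sb_list.
 */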
void erofs_register_super(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

void erofs_unregister_super(struct super_block *sb)
{
	spin_lock(&erofs_sb_list_lock);
	list_del(&EROFS_SB(sb)->list);
	spin_unlock(&erofs_sb_list_lock);
}

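/*
 * The shrinker "count" callback: report how many workgroups are
 * currently freeable across all mounted EROFS instances.
 */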
unsigned long erofs_shrink_count(struct shrinker *shrink,
				 struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}

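/*
 * The shrinker "scan" callback: walk the mounted superblocks round-robin
 * and shrink each workstation in turn until sc->nr_to_scan workgroups
 * have been freed or every superblock has been visited in this run.
 */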
unsigned long erofs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
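	/*
	 * Pick a non-zero run number: 0 is skipped on wraparound so that
	 * it never matches a freshly initialized sbi->shrinker_run_no.
	 */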
	do
		run_no = ++shrinker_run_no;
	while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

#ifdef CONFIG_EROFS_FS_ZIP
		freed += erofs_shrink_workstation(sbi, nr, false);
#endif

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}
