// SPDX-License-Identifier: GPL-2.0
/* Generic part */

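/*
 * One link in a chain of block pointers: p points at the pointer itself
 * (in the inode's i_data array or inside an indirect block), key caches
 * the value it held when it was read, and bh is the buffer holding the
 * indirect block (NULL when the pointer lives in the inode).
 */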
typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;

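/*
 * Protects the block pointer chains: readers (get_branch) and writers
 * (splice_branch, find_shared) must not see a branch change under them.
 */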
static DEFINE_RWLOCK(pointers_lock);

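/* Record one link of the lookup chain: where the pointer lives and its value. */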
static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

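/*
 * Check that every cached key in [from, to] still matches the pointer it
 * was read from, i.e. the branch has not been changed under us.
 */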
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

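/* First address past the block pointers stored in an indirect block. */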
static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char*)bh->b_data + bh->b_size);
}

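/*
 * Walk the chain of indirect blocks leading to the requested block.
 * Returns NULL if the whole chain could be resolved, or a pointer to the
 * first missing link (its key is zero) for a hole.  On failure *err is
 * set to -EIO (read error) or -EAGAIN (chain changed while we read it).
 */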
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

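/*
 * Allocate num blocks and link them into a branch: each newly allocated
 * indirect block is zeroed and given a single pointer to the block below
 * it.  Returns 0 on success; on failure everything allocated so far is
 * freed and -ENOSPC is returned.  The new branch is not yet visible to
 * the rest of the tree - splice_branch() does that.
 */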
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		branch[n].bh = bh;
		branch[n].p = (block_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return -ENOSPC;
}

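/*
 * Connect a freshly allocated branch (from alloc_branch) to the tree by
 * storing the new block number in the still-vacant slot found earlier.
 * If that slot changed in the meantime, the new blocks are freed and
 * -EAGAIN is returned so the caller can retry.
 */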
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = current_time(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}

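/*
 * Map a file block to a device block, allocating a new branch if
 * 'create' is set and the block does not exist yet.  This has the
 * get_block_t shape used by the generic buffer/page code.
 */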
static int get_block(struct inode * inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

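/* True if no block pointer in [p, q) is set. */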
static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

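/*
 * Find the indirect chain that is shared between the part of the file
 * that survives truncation and the part being removed, and detach the
 * doomed subtree from it under pointers_lock so concurrent get_block()
 * calls see a consistent tree.  *top receives the block number of the
 * detached subtree (0 if there is nothing to detach at this level).
 */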
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

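/* Free the data blocks pointed to by [p, q) and clear the pointers. */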
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}

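/*
 * Recursively free a range of branches: at depth > 0 each pointer names
 * an indirect block whose contents are freed first; at depth 0 the
 * pointers name data blocks.
 */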
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t*)bh->b_data,
				      block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

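/*
 * Truncate the block tree to the current i_size: free everything past
 * the last remaining block, including partially and fully unused
 * indirect blocks.
 */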
static inline void truncate (struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	iblock = (inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}

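/*
 * Worst-case number of blocks, including indirect blocks, needed to hold
 * a file of the given size.
 */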
static inline unsigned nblocks(loff_t size, struct super_block *sb)
{
	int k = sb->s_blocksize_bits - 10;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
	res = blocks;
	while (--i && blocks > direct) {
		blocks -= direct;
		blocks += sb->s_blocksize/sizeof(block_t) - 1;
		blocks /= sb->s_blocksize/sizeof(block_t);
		res += blocks;
		direct = 1;
	}
	return res;
}