1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/ext4/resize.c
4  *
5  * Support for resizing an ext4 filesystem while it is mounted.
6  *
7  * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
8  *
9  * This could probably be made into a module, because it is not often in use.
10  */
11 
12 
13 #define EXT4FS_DEBUG
14 
15 #include <linux/errno.h>
16 #include <linux/slab.h>
17 #include <linux/jiffies.h>
18 
19 #include "ext4_jbd2.h"
20 
21 struct ext4_rcu_ptr {
22 	struct rcu_head rcu;
23 	void *ptr;
24 };
25 
static void ext4_rcu_ptr_callback(struct rcu_head *head)
27 {
28 	struct ext4_rcu_ptr *ptr;
29 
30 	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
31 	kvfree(ptr->ptr);
32 	kfree(ptr);
33 }
34 
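/*
 * Free @to_free only after an RCU grace period has elapsed, so that readers
 * which picked up the old array pointer under rcu_read_lock() can still
 * dereference it safely.  If the small wrapper cannot be allocated, fall
 * back to synchronize_rcu() and free synchronously.
 */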
void ext4_kvfree_array_rcu(void *to_free)
36 {
37 	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
38 
39 	if (ptr) {
40 		ptr->ptr = to_free;
41 		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
42 		return;
43 	}
44 	synchronize_rcu();
45 	kvfree(to_free);
46 }
47 
int ext4_resize_begin(struct super_block *sb)
49 {
50 	struct ext4_sb_info *sbi = EXT4_SB(sb);
51 	int ret = 0;
52 
53 	if (!capable(CAP_SYS_RESOURCE))
54 		return -EPERM;
55 
56 	/*
57 	 * If the reserved GDT blocks is non-zero, the resize_inode feature
58 	 * should always be set.
59 	 */
60 	if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks &&
61 	    !ext4_has_feature_resize_inode(sb)) {
62 		ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
63 		return -EFSCORRUPTED;
64 	}
65 
	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyway.
	 */
71 	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
72 	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
73 		ext4_warning(sb, "won't resize using backup superblock at %llu",
74 			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
75 		return -EPERM;
76 	}
77 
78 	/*
79 	 * We are not allowed to do online-resizing on a filesystem mounted
80 	 * with error, because it can destroy the filesystem easily.
81 	 */
82 	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
83 		ext4_warning(sb, "There are errors in the filesystem, "
84 			     "so online resizing is not allowed");
85 		return -EPERM;
86 	}
87 
88 	if (ext4_has_feature_sparse_super2(sb)) {
89 		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
90 		return -EOPNOTSUPP;
91 	}
92 
93 	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
94 				  &EXT4_SB(sb)->s_ext4_flags))
95 		ret = -EBUSY;
96 
97 	return ret;
98 }
99 
int ext4_resize_end(struct super_block *sb, bool update_backups)
101 {
102 	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
103 	smp_mb__after_atomic();
104 	if (update_backups)
105 		return ext4_update_overhead(sb, true);
106 	return 0;
107 }
108 
static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
110 					     ext4_group_t group) {
111 	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
112 	       EXT4_DESC_PER_BLOCK_BITS(sb);
113 }
114 
static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
116 					     ext4_group_t group) {
117 	group = ext4_meta_bg_first_group(sb, group);
118 	return ext4_group_first_block_no(sb, group);
119 }
120 
static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
122 						ext4_group_t group) {
123 	ext4_grpblk_t overhead;
124 	overhead = ext4_bg_num_gdb(sb, group);
125 	if (ext4_bg_has_super(sb, group))
126 		overhead += 1 +
127 			  le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
128 	return overhead;
129 }
130 
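/*
 * Both range helpers below treat [first, last) as a half-open interval:
 * inside() means first <= b < last, and outside() is its complement.
 */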
131 #define outside(b, first, last)	((b) < (first) || (b) >= (last))
132 #define inside(b, first, last)	((b) >= (first) && (b) < (last))
133 
static int verify_group_input(struct super_block *sb,
135 			      struct ext4_new_group_data *input)
136 {
137 	struct ext4_sb_info *sbi = EXT4_SB(sb);
138 	struct ext4_super_block *es = sbi->s_es;
139 	ext4_fsblk_t start = ext4_blocks_count(es);
140 	ext4_fsblk_t end = start + input->blocks_count;
141 	ext4_group_t group = input->group;
142 	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
143 	unsigned overhead;
144 	ext4_fsblk_t metaend;
145 	struct buffer_head *bh = NULL;
146 	ext4_grpblk_t free_blocks_count, offset;
147 	int err = -EINVAL;
148 
149 	if (group != sbi->s_groups_count) {
150 		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
151 			     input->group, sbi->s_groups_count);
152 		return -EINVAL;
153 	}
154 
155 	overhead = ext4_group_overhead_blocks(sb, group);
156 	metaend = start + overhead;
157 	input->free_clusters_count = free_blocks_count =
158 		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
159 
160 	if (test_opt(sb, DEBUG))
161 		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
162 		       "(%d free, %u reserved)\n",
163 		       ext4_bg_has_super(sb, input->group) ? "normal" :
164 		       "no-super", input->group, input->blocks_count,
165 		       free_blocks_count, input->reserved_blocks);
166 
167 	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
170 	else if (input->reserved_blocks > input->blocks_count / 5)
171 		ext4_warning(sb, "Reserved blocks too high (%u)",
172 			     input->reserved_blocks);
173 	else if (free_blocks_count < 0)
174 		ext4_warning(sb, "Bad blocks count %u",
175 			     input->blocks_count);
176 	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
177 		err = PTR_ERR(bh);
178 		bh = NULL;
179 		ext4_warning(sb, "Cannot read last block (%llu)",
180 			     end - 1);
181 	} else if (outside(input->block_bitmap, start, end))
182 		ext4_warning(sb, "Block bitmap not in group (block %llu)",
183 			     (unsigned long long)input->block_bitmap);
184 	else if (outside(input->inode_bitmap, start, end))
185 		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
186 			     (unsigned long long)input->inode_bitmap);
187 	else if (outside(input->inode_table, start, end) ||
188 		 outside(itend - 1, start, end))
189 		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
190 			     (unsigned long long)input->inode_table, itend - 1);
191 	else if (input->inode_bitmap == input->block_bitmap)
192 		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
193 			     (unsigned long long)input->block_bitmap);
194 	else if (inside(input->block_bitmap, input->inode_table, itend))
195 		ext4_warning(sb, "Block bitmap (%llu) in inode table "
196 			     "(%llu-%llu)",
197 			     (unsigned long long)input->block_bitmap,
198 			     (unsigned long long)input->inode_table, itend - 1);
199 	else if (inside(input->inode_bitmap, input->inode_table, itend))
200 		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
201 			     "(%llu-%llu)",
202 			     (unsigned long long)input->inode_bitmap,
203 			     (unsigned long long)input->inode_table, itend - 1);
204 	else if (inside(input->block_bitmap, start, metaend))
205 		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
206 			     (unsigned long long)input->block_bitmap,
207 			     start, metaend - 1);
208 	else if (inside(input->inode_bitmap, start, metaend))
209 		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
210 			     (unsigned long long)input->inode_bitmap,
211 			     start, metaend - 1);
212 	else if (inside(input->inode_table, start, metaend) ||
213 		 inside(itend - 1, start, metaend))
214 		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
215 			     "(%llu-%llu)",
216 			     (unsigned long long)input->inode_table,
217 			     itend - 1, start, metaend - 1);
218 	else
219 		err = 0;
220 	brelse(bh);
221 
222 	return err;
223 }
224 
225 /*
226  * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex
227  * group each time.
228  */
229 struct ext4_new_flex_group_data {
230 	struct ext4_new_group_data *groups;	/* new_group_data for groups
231 						   in the flex group */
232 	__u16 *bg_flags;			/* block group flags of groups
233 						   in @groups */
234 	ext4_group_t count;			/* number of groups in @groups
235 						 */
236 };
237 
238 /*
239  * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of
240  * @flexbg_size.
241  *
242  * Returns NULL on failure otherwise address of the allocated structure.
243  */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
245 {
246 	struct ext4_new_flex_group_data *flex_gd;
247 
248 	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
249 	if (flex_gd == NULL)
250 		goto out3;
251 
252 	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
253 		goto out2;
254 	flex_gd->count = flexbg_size;
255 
256 	flex_gd->groups = kmalloc_array(flexbg_size,
257 					sizeof(struct ext4_new_group_data),
258 					GFP_NOFS);
259 	if (flex_gd->groups == NULL)
260 		goto out2;
261 
262 	flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
263 					  GFP_NOFS);
264 	if (flex_gd->bg_flags == NULL)
265 		goto out1;
266 
267 	return flex_gd;
268 
269 out1:
270 	kfree(flex_gd->groups);
271 out2:
272 	kfree(flex_gd);
273 out3:
274 	return NULL;
275 }
276 
static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
278 {
279 	kfree(flex_gd->bg_flags);
280 	kfree(flex_gd->groups);
281 	kfree(flex_gd);
282 }
283 
284 /*
285  * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
286  * and inode tables for a flex group.
287  *
288  * This function is used by 64bit-resize.  Note that this function allocates
 * group tables from the 1st group of groups contained by @flexgd, which may
 * be only part of a flex group.
291  *
292  * @sb: super block of fs to which the groups belongs
293  *
294  * Returns 0 on a successful allocation of the metadata blocks in the
295  * block group.
296  */
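/*
 * Roughly, the layout produced below: all block bitmaps for the flex group
 * are allocated first, then all inode bitmaps, then the inode tables, packed
 * into contiguous runs of blocks that skip each group's superblock/GDT
 * overhead.
 */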
static int ext4_alloc_group_tables(struct super_block *sb,
298 				struct ext4_new_flex_group_data *flex_gd,
299 				int flexbg_size)
300 {
301 	struct ext4_new_group_data *group_data = flex_gd->groups;
302 	ext4_fsblk_t start_blk;
303 	ext4_fsblk_t last_blk;
304 	ext4_group_t src_group;
305 	ext4_group_t bb_index = 0;
306 	ext4_group_t ib_index = 0;
307 	ext4_group_t it_index = 0;
308 	ext4_group_t group;
309 	ext4_group_t last_group;
310 	unsigned overhead;
311 	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
312 	int i;
313 
314 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
315 
316 	src_group = group_data[0].group;
317 	last_group  = src_group + flex_gd->count - 1;
318 
319 	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
320 	       (last_group & ~(flexbg_size - 1))));
321 next_group:
322 	group = group_data[0].group;
323 	if (src_group >= group_data[0].group + flex_gd->count)
324 		return -ENOSPC;
325 	start_blk = ext4_group_first_block_no(sb, src_group);
326 	last_blk = start_blk + group_data[src_group - group].blocks_count;
327 
328 	overhead = ext4_group_overhead_blocks(sb, src_group);
329 
330 	start_blk += overhead;
331 
332 	/* We collect contiguous blocks as much as possible. */
333 	src_group++;
334 	for (; src_group <= last_group; src_group++) {
335 		overhead = ext4_group_overhead_blocks(sb, src_group);
336 		if (overhead == 0)
337 			last_blk += group_data[src_group - group].blocks_count;
338 		else
339 			break;
340 	}
341 
342 	/* Allocate block bitmaps */
343 	for (; bb_index < flex_gd->count; bb_index++) {
344 		if (start_blk >= last_blk)
345 			goto next_group;
346 		group_data[bb_index].block_bitmap = start_blk++;
347 		group = ext4_get_group_number(sb, start_blk - 1);
348 		group -= group_data[0].group;
349 		group_data[group].mdata_blocks++;
350 		flex_gd->bg_flags[group] &= uninit_mask;
351 	}
352 
353 	/* Allocate inode bitmaps */
354 	for (; ib_index < flex_gd->count; ib_index++) {
355 		if (start_blk >= last_blk)
356 			goto next_group;
357 		group_data[ib_index].inode_bitmap = start_blk++;
358 		group = ext4_get_group_number(sb, start_blk - 1);
359 		group -= group_data[0].group;
360 		group_data[group].mdata_blocks++;
361 		flex_gd->bg_flags[group] &= uninit_mask;
362 	}
363 
364 	/* Allocate inode tables */
365 	for (; it_index < flex_gd->count; it_index++) {
366 		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
367 		ext4_fsblk_t next_group_start;
368 
369 		if (start_blk + itb > last_blk)
370 			goto next_group;
371 		group_data[it_index].inode_table = start_blk;
372 		group = ext4_get_group_number(sb, start_blk);
373 		next_group_start = ext4_group_first_block_no(sb, group + 1);
374 		group -= group_data[0].group;
375 
376 		if (start_blk + itb > next_group_start) {
377 			flex_gd->bg_flags[group + 1] &= uninit_mask;
378 			overhead = start_blk + itb - next_group_start;
379 			group_data[group + 1].mdata_blocks += overhead;
380 			itb -= overhead;
381 		}
382 
383 		group_data[group].mdata_blocks += itb;
384 		flex_gd->bg_flags[group] &= uninit_mask;
385 		start_blk += EXT4_SB(sb)->s_itb_per_group;
386 	}
387 
388 	/* Update free clusters count to exclude metadata blocks */
389 	for (i = 0; i < flex_gd->count; i++) {
390 		group_data[i].free_clusters_count -=
391 				EXT4_NUM_B2C(EXT4_SB(sb),
392 					     group_data[i].mdata_blocks);
393 	}
394 
395 	if (test_opt(sb, DEBUG)) {
396 		int i;
397 		group = group_data[0].group;
398 
399 		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
400 		       "%d groups, flexbg size is %d:\n", flex_gd->count,
401 		       flexbg_size);
402 
403 		for (i = 0; i < flex_gd->count; i++) {
404 			ext4_debug(
405 			       "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
406 			       ext4_bg_has_super(sb, group + i) ? "normal" :
407 			       "no-super", group + i,
408 			       group_data[i].blocks_count,
409 			       group_data[i].free_clusters_count,
410 			       group_data[i].mdata_blocks);
411 		}
412 	}
413 	return 0;
414 }
415 
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
417 				  ext4_fsblk_t blk)
418 {
419 	struct buffer_head *bh;
420 	int err;
421 
422 	bh = sb_getblk(sb, blk);
423 	if (unlikely(!bh))
424 		return ERR_PTR(-ENOMEM);
425 	BUFFER_TRACE(bh, "get_write_access");
426 	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
427 	if (err) {
428 		brelse(bh);
429 		bh = ERR_PTR(err);
430 	} else {
431 		memset(bh->b_data, 0, sb->s_blocksize);
432 		set_buffer_uptodate(bh);
433 	}
434 
435 	return bh;
436 }
437 
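/*
 * Make sure @handle has at least @credits journal credits left, extending or
 * restarting the transaction (with up to EXT4_MAX_TRANS_DATA credits) if it
 * does not.  A negative return value indicates an error; callers here only
 * check for that case.
 */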
static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
439 {
440 	return ext4_journal_ensure_credits_fn(handle, credits,
441 		EXT4_MAX_TRANS_DATA, 0, 0);
442 }
443 
444 /*
 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster] used.
 *
 * Helper function for ext4_setup_new_group_blocks().
448  *
449  * @sb: super block
450  * @handle: journal handle
451  * @flex_gd: flex group data
452  */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
454 			struct ext4_new_flex_group_data *flex_gd,
455 			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
456 {
457 	struct ext4_sb_info *sbi = EXT4_SB(sb);
458 	ext4_group_t count = last_cluster - first_cluster + 1;
459 	ext4_group_t count2;
460 
461 	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
462 		   last_cluster);
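	/* Walk the range, one block group's worth of clusters at a time. */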
463 	for (count2 = count; count > 0;
464 	     count -= count2, first_cluster += count2) {
465 		ext4_fsblk_t start;
466 		struct buffer_head *bh;
467 		ext4_group_t group;
468 		int err;
469 
470 		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
471 		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
472 		group -= flex_gd->groups[0].group;
473 
474 		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
475 		if (count2 > count)
476 			count2 = count;
477 
478 		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
479 			BUG_ON(flex_gd->count > 1);
480 			continue;
481 		}
482 
483 		err = ext4_resize_ensure_credits_batch(handle, 1);
484 		if (err < 0)
485 			return err;
486 
487 		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
488 		if (unlikely(!bh))
489 			return -ENOMEM;
490 
491 		BUFFER_TRACE(bh, "get_write_access");
492 		err = ext4_journal_get_write_access(handle, sb, bh,
493 						    EXT4_JTR_NONE);
494 		if (err) {
495 			brelse(bh);
496 			return err;
497 		}
498 		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
499 			   first_cluster, first_cluster - start, count2);
500 		mb_set_bits(bh->b_data, first_cluster - start, count2);
501 
502 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
503 		brelse(bh);
504 		if (unlikely(err))
505 			return err;
506 	}
507 
508 	return 0;
509 }
510 
511 /*
512  * Set up the block and inode bitmaps, and the inode table for the new groups.
513  * This doesn't need to be part of the main transaction, since we are only
514  * changing blocks outside the actual filesystem.  We still do journaling to
515  * ensure the recovery is correct in case of a failure just after resize.
516  * If any part of this fails, we simply abort the resize.
517  *
 * setup_new_flex_group_blocks handles a flex group as follows:
 *  1. copy super block and GDT, and initialize group tables if necessary.
 *     In this step, we only set bits in block bitmaps for blocks taken by
521  *     super block and GDT.
522  *  2. allocate group tables in block bitmaps, that is, set bits in block
523  *     bitmap for blocks taken by group tables.
524  */
static int setup_new_flex_group_blocks(struct super_block *sb,
526 				struct ext4_new_flex_group_data *flex_gd)
527 {
528 	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
529 	ext4_fsblk_t start;
530 	ext4_fsblk_t block;
531 	struct ext4_sb_info *sbi = EXT4_SB(sb);
532 	struct ext4_super_block *es = sbi->s_es;
533 	struct ext4_new_group_data *group_data = flex_gd->groups;
534 	__u16 *bg_flags = flex_gd->bg_flags;
535 	handle_t *handle;
536 	ext4_group_t group, count;
537 	struct buffer_head *bh = NULL;
538 	int reserved_gdb, i, j, err = 0, err2;
539 	int meta_bg;
540 
541 	BUG_ON(!flex_gd->count || !group_data ||
542 	       group_data[0].group != sbi->s_groups_count);
543 
544 	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
545 	meta_bg = ext4_has_feature_meta_bg(sb);
546 
547 	/* This transaction may be extended/restarted along the way */
548 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
549 	if (IS_ERR(handle))
550 		return PTR_ERR(handle);
551 
552 	group = group_data[0].group;
553 	for (i = 0; i < flex_gd->count; i++, group++) {
554 		unsigned long gdblocks;
555 		ext4_grpblk_t overhead;
556 
557 		gdblocks = ext4_bg_num_gdb(sb, group);
558 		start = ext4_group_first_block_no(sb, group);
559 
560 		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
561 			goto handle_itb;
562 
563 		if (meta_bg == 1) {
564 			ext4_group_t first_group;
565 			first_group = ext4_meta_bg_first_group(sb, group);
566 			if (first_group != group + 1 &&
567 			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
568 				goto handle_itb;
569 		}
570 
571 		block = start + ext4_bg_has_super(sb, group);
572 		/* Copy all of the GDT blocks into the backup in this group */
573 		for (j = 0; j < gdblocks; j++, block++) {
574 			struct buffer_head *gdb;
575 
576 			ext4_debug("update backup group %#04llx\n", block);
577 			err = ext4_resize_ensure_credits_batch(handle, 1);
578 			if (err < 0)
579 				goto out;
580 
581 			gdb = sb_getblk(sb, block);
582 			if (unlikely(!gdb)) {
583 				err = -ENOMEM;
584 				goto out;
585 			}
586 
587 			BUFFER_TRACE(gdb, "get_write_access");
588 			err = ext4_journal_get_write_access(handle, sb, gdb,
589 							    EXT4_JTR_NONE);
590 			if (err) {
591 				brelse(gdb);
592 				goto out;
593 			}
594 			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
595 				s_group_desc, j)->b_data, gdb->b_size);
596 			set_buffer_uptodate(gdb);
597 
598 			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
599 			if (unlikely(err)) {
600 				brelse(gdb);
601 				goto out;
602 			}
603 			brelse(gdb);
604 		}
605 
606 		/* Zero out all of the reserved backup group descriptor
607 		 * table blocks
608 		 */
609 		if (ext4_bg_has_super(sb, group)) {
610 			err = sb_issue_zeroout(sb, gdblocks + start + 1,
611 					reserved_gdb, GFP_NOFS);
612 			if (err)
613 				goto out;
614 		}
615 
616 handle_itb:
		/* Initialize group tables of the group @group */
618 		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
619 			goto handle_bb;
620 
621 		/* Zero out all of the inode table blocks */
622 		block = group_data[i].inode_table;
623 		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
624 			   block, sbi->s_itb_per_group);
625 		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
626 				       GFP_NOFS);
627 		if (err)
628 			goto out;
629 
630 handle_bb:
631 		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
632 			goto handle_ib;
633 
634 		/* Initialize block bitmap of the @group */
635 		block = group_data[i].block_bitmap;
636 		err = ext4_resize_ensure_credits_batch(handle, 1);
637 		if (err < 0)
638 			goto out;
639 
640 		bh = bclean(handle, sb, block);
641 		if (IS_ERR(bh)) {
642 			err = PTR_ERR(bh);
643 			goto out;
644 		}
645 		overhead = ext4_group_overhead_blocks(sb, group);
646 		if (overhead != 0) {
647 			ext4_debug("mark backup superblock %#04llx (+0)\n",
648 				   start);
649 			mb_set_bits(bh->b_data, 0,
650 				      EXT4_NUM_B2C(sbi, overhead));
651 		}
652 		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
653 				     sb->s_blocksize * 8, bh->b_data);
654 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
655 		brelse(bh);
656 		if (err)
657 			goto out;
658 
659 handle_ib:
660 		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
661 			continue;
662 
663 		/* Initialize inode bitmap of the @group */
664 		block = group_data[i].inode_bitmap;
665 		err = ext4_resize_ensure_credits_batch(handle, 1);
666 		if (err < 0)
667 			goto out;
668 		/* Mark unused entries in inode bitmap used */
669 		bh = bclean(handle, sb, block);
670 		if (IS_ERR(bh)) {
671 			err = PTR_ERR(bh);
672 			goto out;
673 		}
674 
675 		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
676 				     sb->s_blocksize * 8, bh->b_data);
677 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
678 		brelse(bh);
679 		if (err)
680 			goto out;
681 	}
682 
683 	/* Mark group tables in block bitmap */
684 	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
685 		count = group_table_count[j];
686 		start = (&group_data[0].block_bitmap)[j];
687 		block = start;
688 		for (i = 1; i < flex_gd->count; i++) {
689 			block += group_table_count[j];
690 			if (block == (&group_data[i].block_bitmap)[j]) {
691 				count += group_table_count[j];
692 				continue;
693 			}
694 			err = set_flexbg_block_bitmap(sb, handle,
695 						      flex_gd,
696 						      EXT4_B2C(sbi, start),
697 						      EXT4_B2C(sbi,
698 							       start + count
699 							       - 1));
700 			if (err)
701 				goto out;
702 			count = group_table_count[j];
703 			start = (&group_data[i].block_bitmap)[j];
704 			block = start;
705 		}
706 
707 		if (count) {
708 			err = set_flexbg_block_bitmap(sb, handle,
709 						      flex_gd,
710 						      EXT4_B2C(sbi, start),
711 						      EXT4_B2C(sbi,
712 							       start + count
713 							       - 1));
714 			if (err)
715 				goto out;
716 		}
717 	}
718 
719 out:
720 	err2 = ext4_journal_stop(handle);
721 	if (err2 && !err)
722 		err = err2;
723 
724 	return err;
725 }
726 
727 /*
728  * Iterate through the groups which hold BACKUP superblock/GDT copies in an
729  * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
730  * calling this for the first time.  In a sparse filesystem it will be the
731  * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
732  * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
733  */
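/*
 * Typical usage, as in verify_reserved_gdb() below:
 *
 *	unsigned three = 1, five = 5, seven = 7;
 *	unsigned grp;
 *
 *	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end)
 *		...use backup group grp...
 */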
unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
735 			       unsigned int *five, unsigned int *seven)
736 {
737 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
738 	unsigned int *min = three;
739 	int mult = 3;
740 	unsigned int ret;
741 
742 	if (ext4_has_feature_sparse_super2(sb)) {
743 		do {
744 			if (*min > 2)
745 				return UINT_MAX;
746 			ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
747 			*min += 1;
748 		} while (!ret);
749 		return ret;
750 	}
751 
752 	if (!ext4_has_feature_sparse_super(sb)) {
753 		ret = *min;
754 		*min += 1;
755 		return ret;
756 	}
757 
758 	if (*five < *min) {
759 		min = five;
760 		mult = 5;
761 	}
762 	if (*seven < *min) {
763 		min = seven;
764 		mult = 7;
765 	}
766 
767 	ret = *min;
768 	*min *= mult;
769 
770 	return ret;
771 }
772 
773 /*
774  * Check that all of the backup GDT blocks are held in the primary GDT block.
775  * It is assumed that they are stored in group order.  Returns the number of
 * groups in the current filesystem that have BACKUPS, or a negative error
 * code.
777  */
static int verify_reserved_gdb(struct super_block *sb,
779 			       ext4_group_t end,
780 			       struct buffer_head *primary)
781 {
782 	const ext4_fsblk_t blk = primary->b_blocknr;
783 	unsigned three = 1;
784 	unsigned five = 5;
785 	unsigned seven = 7;
786 	unsigned grp;
787 	__le32 *p = (__le32 *)primary->b_data;
788 	int gdbackups = 0;
789 
790 	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
791 		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
793 			ext4_warning(sb, "reserved GDT %llu"
794 				     " missing grp %d (%llu)",
795 				     blk, grp,
796 				     grp *
797 				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
798 				     blk);
799 			return -EINVAL;
800 		}
801 		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
802 			return -EFBIG;
803 	}
804 
805 	return gdbackups;
806 }
807 
808 /*
809  * Called when we need to bring a reserved group descriptor table block into
810  * use from the resize inode.  The primary copy of the new GDT block currently
811  * is an indirect block (under the double indirect block in the resize inode).
812  * The new backup GDT blocks will be stored as leaf blocks in this indirect
813  * block, in group order.  Even though we know all the block numbers we need,
814  * we check to ensure that the resize inode has actually reserved these blocks.
815  *
816  * Don't need to update the block bitmaps because the blocks are still in use.
817  *
818  * We get all of the error cases out of the way, so that we are sure to not
819  * fail once we start modifying the data on disk, because JBD has no rollback.
820  */
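/*
 * Rough sketch of the on-disk chain walked below:
 *
 *	resize inode i_data[EXT4_DIND_BLOCK]
 *	  -> double indirect block ("dind")
 *	       -> reserved primary GDT block (gdb_bh, becomes the new GDT block)
 *	            -> backup GDT block numbers, one per sparse/backup group,
 *	               stored in group order
 */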
static int add_new_gdb(handle_t *handle, struct inode *inode,
822 		       ext4_group_t group)
823 {
824 	struct super_block *sb = inode->i_sb;
825 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
826 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
827 	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
828 	struct buffer_head **o_group_desc, **n_group_desc = NULL;
829 	struct buffer_head *dind = NULL;
830 	struct buffer_head *gdb_bh = NULL;
831 	int gdbackups;
832 	struct ext4_iloc iloc = { .bh = NULL };
833 	__le32 *data;
834 	int err;
835 
836 	if (test_opt(sb, DEBUG))
837 		printk(KERN_DEBUG
838 		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
839 		       gdb_num);
840 
841 	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
842 	if (IS_ERR(gdb_bh))
843 		return PTR_ERR(gdb_bh);
844 
845 	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
846 	if (gdbackups < 0) {
847 		err = gdbackups;
848 		goto errout;
849 	}
850 
851 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
852 	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
853 	if (IS_ERR(dind)) {
854 		err = PTR_ERR(dind);
855 		dind = NULL;
856 		goto errout;
857 	}
858 
859 	data = (__le32 *)dind->b_data;
860 	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
861 		ext4_warning(sb, "new group %u GDT block %llu not reserved",
862 			     group, gdblock);
863 		err = -EINVAL;
864 		goto errout;
865 	}
866 
867 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
868 	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
869 					    EXT4_JTR_NONE);
870 	if (unlikely(err))
871 		goto errout;
872 
873 	BUFFER_TRACE(gdb_bh, "get_write_access");
874 	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
875 	if (unlikely(err))
876 		goto errout;
877 
878 	BUFFER_TRACE(dind, "get_write_access");
879 	err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE);
880 	if (unlikely(err)) {
881 		ext4_std_error(sb, err);
882 		goto errout;
883 	}
884 
885 	/* ext4_reserve_inode_write() gets a reference on the iloc */
886 	err = ext4_reserve_inode_write(handle, inode, &iloc);
887 	if (unlikely(err))
888 		goto errout;
889 
890 	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
891 				GFP_KERNEL);
892 	if (!n_group_desc) {
893 		err = -ENOMEM;
894 		ext4_warning(sb, "not enough memory for %lu groups",
895 			     gdb_num + 1);
896 		goto errout;
897 	}
898 
899 	/*
900 	 * Finally, we have all of the possible failures behind us...
901 	 *
902 	 * Remove new GDT block from inode double-indirect block and clear out
903 	 * the new GDT block for use (which also "frees" the backup GDT blocks
904 	 * from the reserved inode).  We don't need to change the bitmaps for
905 	 * these blocks, because they are marked as in-use from being in the
906 	 * reserved inode, and will become GDT blocks (primary and backup).
907 	 */
908 	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
909 	err = ext4_handle_dirty_metadata(handle, NULL, dind);
910 	if (unlikely(err)) {
911 		ext4_std_error(sb, err);
912 		goto errout;
913 	}
914 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
915 			   (9 - EXT4_SB(sb)->s_cluster_bits);
916 	ext4_mark_iloc_dirty(handle, inode, &iloc);
917 	memset(gdb_bh->b_data, 0, sb->s_blocksize);
918 	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
919 	if (unlikely(err)) {
920 		ext4_std_error(sb, err);
921 		iloc.bh = NULL;
922 		goto errout;
923 	}
924 	brelse(dind);
925 
926 	rcu_read_lock();
927 	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
928 	memcpy(n_group_desc, o_group_desc,
929 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
930 	rcu_read_unlock();
931 	n_group_desc[gdb_num] = gdb_bh;
932 	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
933 	EXT4_SB(sb)->s_gdb_count++;
934 	ext4_kvfree_array_rcu(o_group_desc);
935 
936 	lock_buffer(EXT4_SB(sb)->s_sbh);
937 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
938 	ext4_superblock_csum_set(sb);
939 	unlock_buffer(EXT4_SB(sb)->s_sbh);
940 	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
941 	if (err)
942 		ext4_std_error(sb, err);
943 	return err;
944 errout:
945 	kvfree(n_group_desc);
946 	brelse(iloc.bh);
947 	brelse(dind);
948 	brelse(gdb_bh);
949 
950 	ext4_debug("leaving with error %d\n", err);
951 	return err;
952 }
953 
/*
 * add_new_gdb_meta_bg is the meta_bg counterpart of add_new_gdb(): the new
 * group descriptor block lives inside the new group itself, so the resize
 * inode is not involved.
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
958 			       handle_t *handle, ext4_group_t group) {
959 	ext4_fsblk_t gdblock;
960 	struct buffer_head *gdb_bh;
961 	struct buffer_head **o_group_desc, **n_group_desc;
962 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
963 	int err;
964 
965 	gdblock = ext4_meta_bg_first_block_no(sb, group) +
966 		   ext4_bg_has_super(sb, group);
967 	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
968 	if (IS_ERR(gdb_bh))
969 		return PTR_ERR(gdb_bh);
970 	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
971 				GFP_KERNEL);
972 	if (!n_group_desc) {
973 		brelse(gdb_bh);
974 		err = -ENOMEM;
975 		ext4_warning(sb, "not enough memory for %lu groups",
976 			     gdb_num + 1);
977 		return err;
978 	}
979 
980 	rcu_read_lock();
981 	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
982 	memcpy(n_group_desc, o_group_desc,
983 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
984 	rcu_read_unlock();
985 	n_group_desc[gdb_num] = gdb_bh;
986 
987 	BUFFER_TRACE(gdb_bh, "get_write_access");
988 	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
989 	if (err) {
990 		kvfree(n_group_desc);
991 		brelse(gdb_bh);
992 		return err;
993 	}
994 
995 	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
996 	EXT4_SB(sb)->s_gdb_count++;
997 	ext4_kvfree_array_rcu(o_group_desc);
998 	return err;
999 }
1000 
1001 /*
1002  * Called when we are adding a new group which has a backup copy of each of
1003  * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
1004  * We need to add these reserved backup GDT blocks to the resize inode, so
1005  * that they are kept for future resizing and not allocated to files.
1006  *
1007  * Each reserved backup GDT block will go into a different indirect block.
1008  * The indirect blocks are actually the primary reserved GDT blocks,
1009  * so we know in advance what their block numbers are.  We only get the
1010  * double-indirect block to verify it is pointing to the primary reserved
1011  * GDT blocks so we don't overwrite a data block by accident.  The reserved
1012  * backup GDT blocks are stored in their reserved primary GDT block.
1013  */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
1015 			      ext4_group_t group)
1016 {
1017 	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
1019 	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
1020 	struct buffer_head **primary;
1021 	struct buffer_head *dind;
1022 	struct ext4_iloc iloc;
1023 	ext4_fsblk_t blk;
1024 	__le32 *data, *end;
1025 	int gdbackups = 0;
1026 	int res, i;
1027 	int err;
1028 
1029 	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
1030 	if (!primary)
1031 		return -ENOMEM;
1032 
1033 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
1034 	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
1035 	if (IS_ERR(dind)) {
1036 		err = PTR_ERR(dind);
1037 		dind = NULL;
1038 		goto exit_free;
1039 	}
1040 
1041 	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
1042 	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
1043 					 EXT4_ADDR_PER_BLOCK(sb));
1044 	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
1045 
1046 	/* Get each reserved primary GDT block and verify it holds backups */
1047 	for (res = 0; res < reserved_gdb; res++, blk++) {
1048 		if (le32_to_cpu(*data) != blk) {
1049 			ext4_warning(sb, "reserved block %llu"
1050 				     " not at offset %ld",
1051 				     blk,
1052 				     (long)(data - (__le32 *)dind->b_data));
1053 			err = -EINVAL;
1054 			goto exit_bh;
1055 		}
1056 		primary[res] = ext4_sb_bread(sb, blk, 0);
1057 		if (IS_ERR(primary[res])) {
1058 			err = PTR_ERR(primary[res]);
1059 			primary[res] = NULL;
1060 			goto exit_bh;
1061 		}
1062 		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
1063 		if (gdbackups < 0) {
1064 			brelse(primary[res]);
1065 			err = gdbackups;
1066 			goto exit_bh;
1067 		}
1068 		if (++data >= end)
1069 			data = (__le32 *)dind->b_data;
1070 	}
1071 
1072 	for (i = 0; i < reserved_gdb; i++) {
1073 		BUFFER_TRACE(primary[i], "get_write_access");
1074 		if ((err = ext4_journal_get_write_access(handle, sb, primary[i],
1075 							 EXT4_JTR_NONE)))
1076 			goto exit_bh;
1077 	}
1078 
1079 	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
1080 		goto exit_bh;
1081 
1082 	/*
1083 	 * Finally we can add each of the reserved backup GDT blocks from
1084 	 * the new group to its reserved primary GDT block.
1085 	 */
1086 	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
1087 	for (i = 0; i < reserved_gdb; i++) {
1088 		int err2;
1089 		data = (__le32 *)primary[i]->b_data;
1090 		/* printk("reserving backup %lu[%u] = %lu\n",
1091 		       primary[i]->b_blocknr, gdbackups,
1092 		       blk + primary[i]->b_blocknr); */
1093 		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
1094 		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
1095 		if (!err)
1096 			err = err2;
1097 	}
1098 
1099 	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
1100 	ext4_mark_iloc_dirty(handle, inode, &iloc);
1101 
1102 exit_bh:
1103 	while (--res >= 0)
1104 		brelse(primary[res]);
1105 	brelse(dind);
1106 
1107 exit_free:
1108 	kfree(primary);
1109 
1110 	return err;
1111 }
1112 
1113 /*
1114  * Update the backup copies of the ext4 metadata.  These don't need to be part
1115  * of the main resize transaction, because e2fsck will re-write them if there
1116  * is a problem (basically only OOM will cause a problem).  However, we
1117  * _should_ update the backups if possible, in case the primary gets trashed
1118  * for some reason and we need to run e2fsck from a backup superblock.  The
1119  * important part is that the new block and inode counts are in the backup
1120  * superblocks, and the location of the new group metadata in the GDT backups.
1121  *
 * We do not need to take the s_resize_lock for this, because these
1123  * blocks are not otherwise touched by the filesystem code when it is
1124  * mounted.  We don't need to worry about last changing from
1125  * sbi->s_groups_count, because the worst that can happen is that we
1126  * do not copy the full number of backups at this time.  The resize
1127  * which changed s_groups_count will backup again.
1128  */
static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
1130 			   int size, int meta_bg)
1131 {
1132 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1133 	ext4_group_t last;
1134 	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
1135 	unsigned three = 1;
1136 	unsigned five = 5;
1137 	unsigned seven = 7;
1138 	ext4_group_t group = 0;
1139 	int rest = sb->s_blocksize - size;
1140 	handle_t *handle;
1141 	int err = 0, err2;
1142 
1143 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
1144 	if (IS_ERR(handle)) {
1145 		group = 1;
1146 		err = PTR_ERR(handle);
1147 		goto exit_err;
1148 	}
1149 
1150 	if (meta_bg == 0) {
1151 		group = ext4_list_backups(sb, &three, &five, &seven);
1152 		last = sbi->s_groups_count;
1153 	} else {
1154 		group = ext4_get_group_number(sb, blk_off) + 1;
1155 		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
1156 	}
1157 
1158 	while (group < sbi->s_groups_count) {
1159 		struct buffer_head *bh;
1160 		ext4_fsblk_t backup_block;
1161 		struct ext4_super_block *es;
1162 
1163 		/* Out of journal space, and can't get more - abort - so sad */
1164 		err = ext4_resize_ensure_credits_batch(handle, 1);
1165 		if (err < 0)
1166 			break;
1167 
1168 		if (meta_bg == 0)
1169 			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
1170 		else
1171 			backup_block = (ext4_group_first_block_no(sb, group) +
1172 					ext4_bg_has_super(sb, group));
1173 
1174 		bh = sb_getblk(sb, backup_block);
1175 		if (unlikely(!bh)) {
1176 			err = -ENOMEM;
1177 			break;
1178 		}
1179 		ext4_debug("update metadata backup %llu(+%llu)\n",
1180 			   backup_block, backup_block -
1181 			   ext4_group_first_block_no(sb, group));
1182 		BUFFER_TRACE(bh, "get_write_access");
1183 		if ((err = ext4_journal_get_write_access(handle, sb, bh,
1184 							 EXT4_JTR_NONE)))
1185 			break;
1186 		lock_buffer(bh);
1187 		memcpy(bh->b_data, data, size);
1188 		if (rest)
1189 			memset(bh->b_data + size, 0, rest);
1190 		es = (struct ext4_super_block *) bh->b_data;
1191 		es->s_block_group_nr = cpu_to_le16(group);
1192 		if (ext4_has_metadata_csum(sb))
1193 			es->s_checksum = ext4_superblock_csum(sb, es);
1194 		set_buffer_uptodate(bh);
1195 		unlock_buffer(bh);
1196 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
1197 		if (unlikely(err))
1198 			ext4_std_error(sb, err);
1199 		brelse(bh);
1200 
1201 		if (meta_bg == 0)
1202 			group = ext4_list_backups(sb, &three, &five, &seven);
1203 		else if (group == last)
1204 			break;
1205 		else
1206 			group = last;
1207 	}
1208 	if ((err2 = ext4_journal_stop(handle)) && !err)
1209 		err = err2;
1210 
1211 	/*
1212 	 * Ugh! Need to have e2fsck write the backup copies.  It is too
1213 	 * late to revert the resize, we shouldn't fail just because of
1214 	 * the backup copies (they are only needed in case of corruption).
1215 	 *
1216 	 * However, if we got here we have a journal problem too, so we
1217 	 * can't really start a transaction to mark the superblock.
1218 	 * Chicken out and just set the flag on the hope it will be written
1219 	 * to disk, and if not - we will simply wait until next fsck.
1220 	 */
1221 exit_err:
1222 	if (err) {
1223 		ext4_warning(sb, "can't update backup for group %u (err %d), "
1224 			     "forcing fsck on next reboot", group, err);
1225 		sbi->s_mount_state &= ~EXT4_VALID_FS;
1226 		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1227 		mark_buffer_dirty(sbi->s_sbh);
1228 	}
1229 }
1230 
1231 /*
 * ext4_add_new_descs() adds @count group descriptors of groups
1233  * starting at @group
1234  *
1235  * @handle: journal handle
1236  * @sb: super block
1237  * @group: the group no. of the first group desc to be added
1238  * @resize_inode: the resize inode
1239  * @count: number of group descriptors to be added
1240  */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
1242 			      ext4_group_t group, struct inode *resize_inode,
1243 			      ext4_group_t count)
1244 {
1245 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1246 	struct ext4_super_block *es = sbi->s_es;
1247 	struct buffer_head *gdb_bh;
1248 	int i, gdb_off, gdb_num, err = 0;
1249 	int meta_bg;
1250 
1251 	meta_bg = ext4_has_feature_meta_bg(sb);
1252 	for (i = 0; i < count; i++, group++) {
1253 		int reserved_gdb = ext4_bg_has_super(sb, group) ?
1254 			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1255 
1256 		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1257 		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1258 
1259 		/*
1260 		 * We will only either add reserved group blocks to a backup group
1261 		 * or remove reserved blocks for the first group in a new group block.
		 * Doing both would mean more complex code, and sane people don't
1263 		 * use non-sparse filesystems anymore.  This is already checked above.
1264 		 */
1265 		if (gdb_off) {
1266 			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1267 						     gdb_num);
1268 			BUFFER_TRACE(gdb_bh, "get_write_access");
1269 			err = ext4_journal_get_write_access(handle, sb, gdb_bh,
1270 							    EXT4_JTR_NONE);
1271 
1272 			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
1273 				err = reserve_backup_gdb(handle, resize_inode, group);
1274 		} else if (meta_bg != 0) {
1275 			err = add_new_gdb_meta_bg(sb, handle, group);
1276 		} else {
1277 			err = add_new_gdb(handle, resize_inode, group);
1278 		}
1279 		if (err)
1280 			break;
1281 	}
1282 	return err;
1283 }
1284 
static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
1286 {
1287 	struct buffer_head *bh = sb_getblk(sb, block);
1288 	if (unlikely(!bh))
1289 		return NULL;
1290 	if (!bh_uptodate_or_lock(bh)) {
1291 		if (ext4_read_bh(bh, 0, NULL) < 0) {
1292 			brelse(bh);
1293 			return NULL;
1294 		}
1295 	}
1296 
1297 	return bh;
1298 }
1299 
static int ext4_set_bitmap_checksums(struct super_block *sb,
1301 				     ext4_group_t group,
1302 				     struct ext4_group_desc *gdp,
1303 				     struct ext4_new_group_data *group_data)
1304 {
1305 	struct buffer_head *bh;
1306 
1307 	if (!ext4_has_metadata_csum(sb))
1308 		return 0;
1309 
1310 	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
1311 	if (!bh)
1312 		return -EIO;
1313 	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
1314 				   EXT4_INODES_PER_GROUP(sb) / 8);
1315 	brelse(bh);
1316 
1317 	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
1318 	if (!bh)
1319 		return -EIO;
1320 	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
1321 	brelse(bh);
1322 
1323 	return 0;
1324 }
1325 
1326 /*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
1328  */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
1330 				struct ext4_new_flex_group_data *flex_gd)
1331 {
1332 	struct ext4_new_group_data	*group_data = flex_gd->groups;
1333 	struct ext4_group_desc		*gdp;
1334 	struct ext4_sb_info		*sbi = EXT4_SB(sb);
1335 	struct buffer_head		*gdb_bh;
1336 	ext4_group_t			group;
1337 	__u16				*bg_flags = flex_gd->bg_flags;
1338 	int				i, gdb_off, gdb_num, err = 0;
1339 
1340 
1341 	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
1342 		group = group_data->group;
1343 
1344 		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1345 		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1346 
1347 		/*
		 * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
1349 		 */
1350 		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
1351 		/* Update group descriptor block for new group */
1352 		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
1353 						 gdb_off * EXT4_DESC_SIZE(sb));
1354 
1355 		memset(gdp, 0, EXT4_DESC_SIZE(sb));
1356 		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
1357 		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
1358 		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
1359 		if (err) {
1360 			ext4_std_error(sb, err);
1361 			break;
1362 		}
1363 
1364 		ext4_inode_table_set(sb, gdp, group_data->inode_table);
1365 		ext4_free_group_clusters_set(sb, gdp,
1366 					     group_data->free_clusters_count);
1367 		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
1368 		if (ext4_has_group_desc_csum(sb))
1369 			ext4_itable_unused_set(sb, gdp,
1370 					       EXT4_INODES_PER_GROUP(sb));
1371 		gdp->bg_flags = cpu_to_le16(*bg_flags);
1372 		ext4_group_desc_csum_set(sb, group, gdp);
1373 
1374 		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
1375 		if (unlikely(err)) {
1376 			ext4_std_error(sb, err);
1377 			break;
1378 		}
1379 
1380 		/*
1381 		 * We can allocate memory for mb_alloc based on the new group
1382 		 * descriptor
1383 		 */
1384 		err = ext4_mb_add_groupinfo(sb, group, gdp);
1385 		if (err)
1386 			break;
1387 	}
1388 	return err;
1389 }
1390 
static void ext4_add_overhead(struct super_block *sb,
			      const ext4_fsblk_t overhead)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sbi->s_overhead += overhead;
	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
	smp_wmb();
}
1401 
1402 /*
1403  * ext4_update_super() updates the super block so that the newly added
1404  * groups can be seen by the filesystem.
1405  *
1406  * @sb: super block
1407  * @flex_gd: new added groups
1408  */
static void ext4_update_super(struct super_block *sb,
1410 			     struct ext4_new_flex_group_data *flex_gd)
1411 {
1412 	ext4_fsblk_t blocks_count = 0;
1413 	ext4_fsblk_t free_blocks = 0;
1414 	ext4_fsblk_t reserved_blocks = 0;
1415 	struct ext4_new_group_data *group_data = flex_gd->groups;
1416 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1417 	struct ext4_super_block *es = sbi->s_es;
1418 	int i;
1419 
1420 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
1421 	/*
1422 	 * Make the new blocks and inodes valid next.  We do this before
1423 	 * increasing the group count so that once the group is enabled,
1424 	 * all of its blocks and inodes are already valid.
1425 	 *
1426 	 * We always allocate group-by-group, then block-by-block or
1427 	 * inode-by-inode within a group, so enabling these
1428 	 * blocks/inodes before the group is live won't actually let us
1429 	 * allocate the new space yet.
1430 	 */
1431 	for (i = 0; i < flex_gd->count; i++) {
1432 		blocks_count += group_data[i].blocks_count;
1433 		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
1434 	}
1435 
1436 	reserved_blocks = ext4_r_blocks_count(es) * 100;
1437 	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
1438 	reserved_blocks *= blocks_count;
1439 	do_div(reserved_blocks, 100);
1440 
1441 	lock_buffer(sbi->s_sbh);
1442 	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
1443 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
1444 	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1445 		     flex_gd->count);
1446 	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1447 		     flex_gd->count);
1448 
1449 	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
1450 	/*
1451 	 * We need to protect s_groups_count against other CPUs seeing
1452 	 * inconsistent state in the superblock.
1453 	 *
1454 	 * The precise rules we use are:
1455 	 *
1456 	 * * Writers must perform a smp_wmb() after updating all
1457 	 *   dependent data and before modifying the groups count
1458 	 *
1459 	 * * Readers must perform an smp_rmb() after reading the groups
1460 	 *   count and before reading any dependent data.
1461 	 *
1462 	 * NB. These rules can be relaxed when checking the group count
1463 	 * while freeing data, as we can only allocate from a block
1464 	 * group after serialising against the group count, and we can
1465 	 * only then free after serialising in turn against that
1466 	 * allocation.
1467 	 */
1468 	smp_wmb();
1469 
1470 	/* Update the global fs size fields */
1471 	sbi->s_groups_count += flex_gd->count;
1472 	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
1473 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1474 
1475 	/* Update the reserved block counts only once the new group is
1476 	 * active. */
1477 	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
1478 				reserved_blocks);
1479 	ext4_superblock_csum_set(sb);
1480 	unlock_buffer(sbi->s_sbh);
1481 
1482 	/* Update the free space counts */
1483 	percpu_counter_add(&sbi->s_freeclusters_counter,
1484 			   EXT4_NUM_B2C(sbi, free_blocks));
1485 	percpu_counter_add(&sbi->s_freeinodes_counter,
1486 			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
1487 
1488 	ext4_debug("free blocks count %llu",
1489 		   percpu_counter_read(&sbi->s_freeclusters_counter));
1490 	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
1491 		ext4_group_t flex_group;
1492 		struct flex_groups *fg;
1493 
1494 		flex_group = ext4_flex_group(sbi, group_data[0].group);
1495 		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
1496 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
1497 			     &fg->free_clusters);
1498 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
1499 			   &fg->free_inodes);
1500 	}
1501 
1502 	/*
1503 	 * Update the fs overhead information.
1504 	 *
1505 	 * For bigalloc, if the superblock already has a properly calculated
1506 	 * overhead, update it with a value based on numbers already computed
1507 	 * above for the newly allocated capacity.
1508 	 */
1509 	if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0))
1510 		ext4_add_overhead(sb,
1511 			EXT4_NUM_B2C(sbi, blocks_count - free_blocks));
1512 	else
1513 		ext4_calculate_overhead(sb);
1514 	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
1515 
1516 	if (test_opt(sb, DEBUG))
1517 		printk(KERN_DEBUG "EXT4-fs: added group %u:"
1518 		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
1519 		       blocks_count, free_blocks, reserved_blocks);
1520 }
1521 
1522 /* Add a flex group to an fs. Ensure we handle all possible error conditions
1523  * _before_ we start modifying the filesystem, because we cannot abort the
1524  * transaction and not have it write the data to disk.
1525  */
static int ext4_flex_group_add(struct super_block *sb,
1527 			       struct inode *resize_inode,
1528 			       struct ext4_new_flex_group_data *flex_gd)
1529 {
1530 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1531 	struct ext4_super_block *es = sbi->s_es;
1532 	ext4_fsblk_t o_blocks_count;
1533 	ext4_grpblk_t last;
1534 	ext4_group_t group;
1535 	handle_t *handle;
1536 	unsigned reserved_gdb;
1537 	int err = 0, err2 = 0, credit;
1538 
1539 	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
1540 
1541 	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
1542 	o_blocks_count = ext4_blocks_count(es);
1543 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1544 	BUG_ON(last);
1545 
1546 	err = setup_new_flex_group_blocks(sb, flex_gd);
1547 	if (err)
1548 		goto exit;
1549 	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups, we will also
1554 	 * modify each of the reserved GDT dindirect blocks.
1555 	 */
1556 	credit = 3;	/* sb, resize inode, resize inode dindirect */
1557 	/* GDT blocks */
1558 	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
1559 	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
1560 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
1561 	if (IS_ERR(handle)) {
1562 		err = PTR_ERR(handle);
1563 		goto exit;
1564 	}
1565 
1566 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1567 	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
1568 					    EXT4_JTR_NONE);
1569 	if (err)
1570 		goto exit_journal;
1571 
1572 	group = flex_gd->groups[0].group;
1573 	BUG_ON(group != sbi->s_groups_count);
1574 	err = ext4_add_new_descs(handle, sb, group,
1575 				resize_inode, flex_gd->count);
1576 	if (err)
1577 		goto exit_journal;
1578 
1579 	err = ext4_setup_new_descs(handle, sb, flex_gd);
1580 	if (err)
1581 		goto exit_journal;
1582 
1583 	ext4_update_super(sb, flex_gd);
1584 
1585 	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1586 
1587 exit_journal:
1588 	err2 = ext4_journal_stop(handle);
1589 	if (!err)
1590 		err = err2;
1591 
1592 	if (!err) {
1593 		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1594 		int gdb_num_end = ((group + flex_gd->count - 1) /
1595 				   EXT4_DESC_PER_BLOCK(sb));
1596 		int meta_bg = ext4_has_feature_meta_bg(sb);
1597 		sector_t old_gdb = 0;
1598 
1599 		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
1600 			       sizeof(struct ext4_super_block), 0);
1601 		for (; gdb_num <= gdb_num_end; gdb_num++) {
1602 			struct buffer_head *gdb_bh;
1603 
1604 			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1605 						     gdb_num);
1606 			if (old_gdb == gdb_bh->b_blocknr)
1607 				continue;
1608 			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
1609 				       gdb_bh->b_size, meta_bg);
1610 			old_gdb = gdb_bh->b_blocknr;
1611 		}
1612 	}
1613 exit:
1614 	return err;
1615 }
1616 
1617 static int ext4_setup_next_flex_gd(struct super_block *sb,
1618 				    struct ext4_new_flex_group_data *flex_gd,
1619 				    ext4_fsblk_t n_blocks_count,
1620 				    unsigned long flexbg_size)
1621 {
1622 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1623 	struct ext4_super_block *es = sbi->s_es;
1624 	struct ext4_new_group_data *group_data = flex_gd->groups;
1625 	ext4_fsblk_t o_blocks_count;
1626 	ext4_group_t n_group;
1627 	ext4_group_t group;
1628 	ext4_group_t last_group;
1629 	ext4_grpblk_t last;
1630 	ext4_grpblk_t clusters_per_group;
1631 	unsigned long i;
1632 
1633 	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);
1634 
1635 	o_blocks_count = ext4_blocks_count(es);
1636 
1637 	if (o_blocks_count == n_blocks_count)
1638 		return 0;
1639 
1640 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1641 	BUG_ON(last);
1642 	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
1643 
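	/*
	 * flexbg_size is a power of two, so this rounds @group up to the
	 * last group of its flex group; clamp it to the last new group.
	 */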
1644 	last_group = group | (flexbg_size - 1);
1645 	if (last_group > n_group)
1646 		last_group = n_group;
1647 
1648 	flex_gd->count = last_group - group + 1;
1649 
1650 	for (i = 0; i < flex_gd->count; i++) {
1651 		int overhead;
1652 
1653 		group_data[i].group = group + i;
1654 		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
1655 		overhead = ext4_group_overhead_blocks(sb, group + i);
1656 		group_data[i].mdata_blocks = overhead;
1657 		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
1658 		if (ext4_has_group_desc_csum(sb)) {
1659 			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
1660 					       EXT4_BG_INODE_UNINIT;
1661 			if (!test_opt(sb, INIT_INODE_TABLE))
1662 				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
1663 		} else
1664 			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
1665 	}
1666 
1667 	if (last_group == n_group && ext4_has_group_desc_csum(sb))
1668 		/* We need to initialize the block bitmap of the last group. */
1669 		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
1670 
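	/*
	 * If the new size stops short of a full last group, trim that
	 * group's block and free cluster counts to the partial size.
	 */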
1671 	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
1672 		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
1673 		group_data[i - 1].free_clusters_count -= clusters_per_group -
1674 						       last - 1;
1675 	}
1676 
1677 	return 1;
1678 }
1679 
1680 /* Add group descriptor data to an existing or new group descriptor block.
1681  * Ensure we handle all possible error conditions _before_ we start modifying
1682  * the filesystem, because we cannot abort the transaction and not have it
1683  * write the data to disk.
1684  *
1685  * If we are on a GDT block boundary, we need to get the reserved GDT block.
1686  * Otherwise, we may need to add backup GDT blocks for a sparse group.
1687  *
1688  * We only need to hold the superblock lock while we are actually adding
1689  * in the new group's counts to the superblock.  Prior to that we have
1690  * not really "added" the group at all.  We re-check that we are still
1691  * adding in the last group in case things have changed since verifying.
1692  */
1693 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
1694 {
1695 	struct ext4_new_flex_group_data flex_gd;
1696 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1697 	struct ext4_super_block *es = sbi->s_es;
1698 	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
1699 		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1700 	struct inode *inode = NULL;
1701 	int gdb_off;
1702 	int err;
1703 	__u16 bg_flags = 0;
1704 
1705 	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
1706 
1707 	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
1708 		ext4_warning(sb, "Can't resize non-sparse filesystem further");
1709 		return -EPERM;
1710 	}
1711 
1712 	if (ext4_blocks_count(es) + input->blocks_count <
1713 	    ext4_blocks_count(es)) {
1714 		ext4_warning(sb, "blocks_count overflow");
1715 		return -EINVAL;
1716 	}
1717 
1718 	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
1719 	    le32_to_cpu(es->s_inodes_count)) {
1720 		ext4_warning(sb, "inodes_count overflow");
1721 		return -EINVAL;
1722 	}
1723 
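	/*
	 * Adding a group that carries superblock/GDT backups, or whose
	 * descriptor starts a new GDT block, requires the resize inode
	 * and reserved GDT blocks.
	 */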
1724 	if (reserved_gdb || gdb_off == 0) {
1725 		if (!ext4_has_feature_resize_inode(sb) ||
1726 		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
1727 			ext4_warning(sb,
1728 				     "No reserved GDT blocks, can't resize");
1729 			return -EPERM;
1730 		}
1731 		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
1732 		if (IS_ERR(inode)) {
1733 			ext4_warning(sb, "Error opening resize inode");
1734 			return PTR_ERR(inode);
1735 		}
1736 	}
1737 
1738 
1739 	err = verify_group_input(sb, input);
1740 	if (err)
1741 		goto out;
1742 
1743 	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
1744 	if (err)
1745 		goto out;
1746 
1747 	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
1748 	if (err)
1749 		goto out;
1750 
1751 	flex_gd.count = 1;
1752 	flex_gd.groups = input;
1753 	flex_gd.bg_flags = &bg_flags;
1754 	err = ext4_flex_group_add(sb, inode, &flex_gd);
1755 out:
1756 	iput(inode);
1757 	return err;
1758 } /* ext4_group_add */
1759 
1760 /*
1761  * Extend a group without checking, assuming that checking has already been done.
1762  */
1763 static int ext4_group_extend_no_check(struct super_block *sb,
1764 				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
1765 {
1766 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1767 	handle_t *handle;
1768 	int err = 0, err2;
1769 
1770 	/* We will update the superblock, one block bitmap, and
1771 	 * one group descriptor via ext4_group_add_blocks().
1772 	 */
1773 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
1774 	if (IS_ERR(handle)) {
1775 		err = PTR_ERR(handle);
1776 		ext4_warning(sb, "error %d on journal start", err);
1777 		return err;
1778 	}
1779 
1780 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
1781 	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
1782 					    EXT4_JTR_NONE);
1783 	if (err) {
1784 		ext4_warning(sb, "error %d on journal write access", err);
1785 		goto errout;
1786 	}
1787 
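	/*
	 * Bump the total and free block counts under the buffer lock so
	 * the superblock checksum is computed over a consistent state.
	 */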
1788 	lock_buffer(EXT4_SB(sb)->s_sbh);
1789 	ext4_blocks_count_set(es, o_blocks_count + add);
1790 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
1791 	ext4_superblock_csum_set(sb);
1792 	unlock_buffer(EXT4_SB(sb)->s_sbh);
1793 	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
1794 		   o_blocks_count + add);
1795 	/* We add the blocks to the bitmap and set the group need init bit */
1796 	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
1797 	if (err)
1798 		goto errout;
1799 	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
1800 	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
1801 		   o_blocks_count + add);
1802 errout:
1803 	err2 = ext4_journal_stop(handle);
1804 	if (err2 && !err)
1805 		err = err2;
1806 
1807 	if (!err) {
1808 		if (test_opt(sb, DEBUG))
1809 			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
1810 			       "blocks\n", ext4_blocks_count(es));
1811 		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
1812 			       (char *)es, sizeof(struct ext4_super_block), 0);
1813 	}
1814 	return err;
1815 }
1816 
1817 /*
1818  * Extend the filesystem to the new number of blocks specified.  This entry
1819  * point is only used to extend the current filesystem to the end of the last
1820  * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
1821  * for emergencies (because it has no dependencies on reserved blocks).
1822  *
1823  * If we _really_ wanted, we could use default values to call ext4_group_add(),
1824  * allowing the "remount" trick to work for arbitrary resizing, assuming enough
1825  * GDT blocks are reserved to grow to the desired size.
1826  */
1827 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1828 		      ext4_fsblk_t n_blocks_count)
1829 {
1830 	ext4_fsblk_t o_blocks_count;
1831 	ext4_grpblk_t last;
1832 	ext4_grpblk_t add;
1833 	struct buffer_head *bh;
1834 	int err;
1835 	ext4_group_t group;
1836 
1837 	o_blocks_count = ext4_blocks_count(es);
1838 
1839 	if (test_opt(sb, DEBUG))
1840 		ext4_msg(sb, KERN_DEBUG,
1841 			 "extending last group from %llu to %llu blocks",
1842 			 o_blocks_count, n_blocks_count);
1843 
1844 	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
1845 		return 0;
1846 
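	/*
	 * The block layer addresses 512-byte sectors with a sector_t, so
	 * refuse sizes whose last block would not be addressable.
	 */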
1847 	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
1848 		ext4_msg(sb, KERN_ERR,
1849 			 "filesystem too large to resize to %llu blocks safely",
1850 			 n_blocks_count);
1851 		return -EINVAL;
1852 	}
1853 
1854 	if (n_blocks_count < o_blocks_count) {
1855 		ext4_warning(sb, "can't shrink FS - resize aborted");
1856 		return -EINVAL;
1857 	}
1858 
1859 	/* Handle the remaining blocks in the last group only. */
1860 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1861 
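	/*
	 * last == 0 means the filesystem already ends on a group boundary,
	 * so there are no tail blocks to add, only whole groups.
	 */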
1862 	if (last == 0) {
1863 		ext4_warning(sb, "need to use ext2online to resize further");
1864 		return -EPERM;
1865 	}
1866 
1867 	add = EXT4_BLOCKS_PER_GROUP(sb) - last;
1868 
1869 	if (o_blocks_count + add < o_blocks_count) {
1870 		ext4_warning(sb, "blocks_count overflow");
1871 		return -EINVAL;
1872 	}
1873 
1874 	if (o_blocks_count + add > n_blocks_count)
1875 		add = n_blocks_count - o_blocks_count;
1876 
1877 	if (o_blocks_count + add < n_blocks_count)
1878 		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
1879 			     o_blocks_count + add, add);
1880 
1881 	/* See if the device is actually as big as what was requested */
1882 	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
1883 	if (IS_ERR(bh)) {
1884 		ext4_warning(sb, "can't read last block, resize aborted");
1885 		return -ENOSPC;
1886 	}
1887 	brelse(bh);
1888 
1889 	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
1890 	return err;
1891 } /* ext4_group_extend */
1892 
1893 
1894 static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
1895 {
1896 	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
1897 }
1898 
1899 /*
1900  * Release the resize inode and drop the resize_inode feature if there
1901  * are no more reserved gdt blocks, and then convert the file system
1902  * to enable meta_bg
1903  */
1904 static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
1905 {
1906 	handle_t *handle;
1907 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1908 	struct ext4_super_block *es = sbi->s_es;
1909 	struct ext4_inode_info *ei = EXT4_I(inode);
1910 	ext4_fsblk_t nr;
1911 	int i, ret, err = 0;
1912 	int credits = 1;
1913 
1914 	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
1915 	if (inode) {
1916 		if (es->s_reserved_gdt_blocks) {
1917 			ext4_error(sb, "Unexpected non-zero "
1918 				   "s_reserved_gdt_blocks");
1919 			return -EPERM;
1920 		}
1921 
1922 		/* Do a quick sanity check of the resize inode */
1923 		if (inode->i_blocks != 1 << (inode->i_blkbits -
1924 					     (9 - sbi->s_cluster_bits)))
1925 			goto invalid_resize_inode;
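		/*
		 * Only the double indirect block may be in use; any other
		 * mapped block means the resize inode is corrupted.
		 */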
1926 		for (i = 0; i < EXT4_N_BLOCKS; i++) {
1927 			if (i == EXT4_DIND_BLOCK) {
1928 				if (ei->i_data[i])
1929 					continue;
1930 				else
1931 					goto invalid_resize_inode;
1932 			}
1933 			if (ei->i_data[i])
1934 				goto invalid_resize_inode;
1935 		}
1936 		credits += 3;	/* block bitmap, bg descriptor, resize inode */
1937 	}
1938 
1939 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
1940 	if (IS_ERR(handle))
1941 		return PTR_ERR(handle);
1942 
1943 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1944 	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
1945 					    EXT4_JTR_NONE);
1946 	if (err)
1947 		goto errout;
1948 
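	/*
	 * Switch the feature flags and record the first meta_bg descriptor
	 * block, so the existing descriptor blocks keep their current layout.
	 */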
1949 	lock_buffer(sbi->s_sbh);
1950 	ext4_clear_feature_resize_inode(sb);
1951 	ext4_set_feature_meta_bg(sb);
1952 	sbi->s_es->s_first_meta_bg =
1953 		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
1954 	ext4_superblock_csum_set(sb);
1955 	unlock_buffer(sbi->s_sbh);
1956 
1957 	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1958 	if (err) {
1959 		ext4_std_error(sb, err);
1960 		goto errout;
1961 	}
1962 
1963 	if (inode) {
1964 		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
1965 		ext4_free_blocks(handle, inode, NULL, nr, 1,
1966 				 EXT4_FREE_BLOCKS_METADATA |
1967 				 EXT4_FREE_BLOCKS_FORGET);
1968 		ei->i_data[EXT4_DIND_BLOCK] = 0;
1969 		inode->i_blocks = 0;
1970 
1971 		err = ext4_mark_inode_dirty(handle, inode);
1972 		if (err)
1973 			ext4_std_error(sb, err);
1974 	}
1975 
1976 errout:
1977 	ret = ext4_journal_stop(handle);
1978 	if (!err)
1979 		err = ret;
1980 	return err;
1981 
1982 invalid_resize_inode:
1983 	ext4_error(sb, "corrupted/inconsistent resize inode");
1984 	return -EINVAL;
1985 }
1986 
1987 /*
1988  * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
1989  *
1990  * @sb: super block of the fs to be resized
1991  * @n_blocks_count: the number of blocks in the resized fs
1992  */
1993 int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1994 {
1995 	struct ext4_new_flex_group_data *flex_gd = NULL;
1996 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1997 	struct ext4_super_block *es = sbi->s_es;
1998 	struct buffer_head *bh;
1999 	struct inode *resize_inode = NULL;
2000 	ext4_grpblk_t add, offset;
2001 	unsigned long n_desc_blocks;
2002 	unsigned long o_desc_blocks;
2003 	ext4_group_t o_group;
2004 	ext4_group_t n_group;
2005 	ext4_fsblk_t o_blocks_count;
2006 	ext4_fsblk_t n_blocks_count_retry = 0;
2007 	unsigned long last_update_time = 0;
2008 	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
2009 	int meta_bg;
2010 
2011 	/* See if the device is actually as big as what was requested */
2012 	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
2013 	if (IS_ERR(bh)) {
2014 		ext4_warning(sb, "can't read last block, resize aborted");
2015 		return -ENOSPC;
2016 	}
2017 	brelse(bh);
2018 
2019 	/*
2020 	 * For bigalloc, trim the requested size to the nearest cluster
2021 	 * boundary to avoid creating an unusable filesystem. We do this
2022 	 * silently, instead of returning an error, to avoid breaking
2023 	 * callers that blindly resize the filesystem to the full size of
2024 	 * the underlying block device.
2025 	 */
2026 	if (ext4_has_feature_bigalloc(sb))
2027 		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
2028 
2029 retry:
2030 	o_blocks_count = ext4_blocks_count(es);
2031 
2032 	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
2033 		 "to %llu blocks", o_blocks_count, n_blocks_count);
2034 
2035 	if (n_blocks_count < o_blocks_count) {
2036 		/* On-line shrinking not supported */
2037 		ext4_warning(sb, "can't shrink FS - resize aborted");
2038 		return -EINVAL;
2039 	}
2040 
2041 	if (n_blocks_count == o_blocks_count)
2042 		/* Nothing to do */
2043 		return 0;
2044 
2045 	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
2046 	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
2047 		ext4_warning(sb, "resize would cause inodes_count overflow");
2048 		return -EINVAL;
2049 	}
2050 	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
2051 
2052 	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
2053 	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
2054 
2055 	meta_bg = ext4_has_feature_meta_bg(sb);
2056 
2057 	if (ext4_has_feature_resize_inode(sb)) {
2058 		if (meta_bg) {
2059 			ext4_error(sb, "resize_inode and meta_bg enabled "
2060 				   "simultaneously");
2061 			return -EINVAL;
2062 		}
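		/*
		 * The resize inode can only map as many new descriptor
		 * blocks as were reserved at mkfs time.  Clamp this pass to
		 * that limit and remember the full target so the remainder
		 * is retried later (converting to meta_bg once the reserved
		 * blocks run out).
		 */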
2063 		if (n_desc_blocks > o_desc_blocks +
2064 		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
2065 			n_blocks_count_retry = n_blocks_count;
2066 			n_desc_blocks = o_desc_blocks +
2067 				le16_to_cpu(es->s_reserved_gdt_blocks);
2068 			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
2069 			n_blocks_count = (ext4_fsblk_t)n_group *
2070 				EXT4_BLOCKS_PER_GROUP(sb) +
2071 				le32_to_cpu(es->s_first_data_block);
2072 			n_group--; /* set to last group number */
2073 		}
2074 
2075 		if (!resize_inode)
2076 			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
2077 						 EXT4_IGET_SPECIAL);
2078 		if (IS_ERR(resize_inode)) {
2079 			ext4_warning(sb, "Error opening resize inode");
2080 			return PTR_ERR(resize_inode);
2081 		}
2082 	}
2083 
2084 	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
2085 		err = ext4_convert_meta_bg(sb, resize_inode);
2086 		if (err)
2087 			goto out;
2088 		if (resize_inode) {
2089 			iput(resize_inode);
2090 			resize_inode = NULL;
2091 		}
2092 		if (n_blocks_count_retry) {
2093 			n_blocks_count = n_blocks_count_retry;
2094 			n_blocks_count_retry = 0;
2095 			goto retry;
2096 		}
2097 	}
2098 
2099 	/*
2100 	 * Make sure the last group has enough space so that it's
2101 	 * guaranteed to have enough space for all metadata blocks
2102 	 * that it might need to hold.  (We might not need to store
2103 	 * the inode table blocks in the last block group, but there
2104 	 * will be cases where this might be needed.)
2105 	 */
2106 	if ((ext4_group_first_block_no(sb, n_group) +
2107 	     ext4_group_overhead_blocks(sb, n_group) + 2 +
2108 	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
2109 		n_blocks_count = ext4_group_first_block_no(sb, n_group);
2110 		n_group--;
2111 		n_blocks_count_retry = 0;
2112 		if (resize_inode) {
2113 			iput(resize_inode);
2114 			resize_inode = NULL;
2115 		}
2116 		goto retry;
2117 	}
2118 
2119 	/* extend the last group */
2120 	if (n_group == o_group)
2121 		add = n_blocks_count - o_blocks_count;
2122 	else
2123 		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
2124 	if (add > 0) {
2125 		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
2126 		if (err)
2127 			goto out;
2128 	}
2129 
2130 	if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
2131 		goto out;
2132 
2133 	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
2134 	if (err)
2135 		goto out;
2136 
2137 	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
2138 	if (err)
2139 		goto out;
2140 
2141 	flex_gd = alloc_flex_gd(flexbg_size);
2142 	if (flex_gd == NULL) {
2143 		err = -ENOMEM;
2144 		goto out;
2145 	}
2146 
2147 	/* Add flex groups. Note that a regular group is a
2148 	 * flex group with 1 group.
2149 	 */
2150 	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
2151 					      flexbg_size)) {
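		/* Log progress at most once every ten seconds. */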
2152 		if (time_is_before_jiffies(last_update_time + HZ * 10)) {
2153 			if (last_update_time)
2154 				ext4_msg(sb, KERN_INFO,
2155 					 "resized to %llu blocks",
2156 					 ext4_blocks_count(es));
2157 			last_update_time = jiffies;
2158 		}
2159 		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2160 			break;
2161 		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2162 		if (unlikely(err))
2163 			break;
2164 	}
2165 
2166 	if (!err && n_blocks_count_retry) {
2167 		n_blocks_count = n_blocks_count_retry;
2168 		n_blocks_count_retry = 0;
2169 		free_flex_gd(flex_gd);
2170 		flex_gd = NULL;
2171 		if (resize_inode) {
2172 			iput(resize_inode);
2173 			resize_inode = NULL;
2174 		}
2175 		goto retry;
2176 	}
2177 
2178 out:
2179 	if (flex_gd)
2180 		free_flex_gd(flex_gd);
2181 	if (resize_inode != NULL)
2182 		iput(resize_inode);
2183 	if (err)
2184 		ext4_warning(sb, "error (%d) occurred during "
2185 			     "file system resize", err);
2186 	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
2187 		 ext4_blocks_count(es));
2188 	return err;
2189 }
2190