// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <trace/events/jbd2.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;
int __init jbd2_journal_init_transaction_cache(void)
{
	J_ASSERT(!transaction_cache);
	transaction_cache = kmem_cache_create("jbd2_transaction_s",
					sizeof(transaction_t),
					0,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
					NULL);
	if (!transaction_cache) {
		pr_emerg("JBD2: failed to create transaction cache\n");
		return -ENOMEM;
	}
	return 0;
}

void jbd2_journal_destroy_transaction_cache(void)
{
	kmem_cache_destroy(transaction_cache);
	transaction_cache = NULL;
}

void jbd2_journal_free_transaction(transaction_t *transaction)
{
	if (unlikely(ZERO_OR_NULL_PTR(transaction)))
		return;
	kmem_cache_free(transaction_cache, transaction);
}

/*
 * Base amount of descriptor blocks we reserve for each transaction.
 */
static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
{
	int tag_space = journal->j_blocksize - sizeof(journal_header_t);
	int tags_per_block;

	/* Subtract UUID */
	tag_space -= 16;
	if (jbd2_journal_has_csum_v2or3(journal))
		tag_space -= sizeof(struct jbd2_journal_block_tail);
	/* Commit code leaves a slack space of 16 bytes at the end of block */
	tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
	/*
	 * Revoke descriptors are accounted separately so we need to reserve
	 * space for commit block and normal transaction descriptor blocks.
	 */
	return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers,
				tags_per_block);
}

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply initialise a new transaction: set it up in RUNNING state and
 * add it to the current journal (which should not have an existing
 * running transaction: we only make a new transaction once we have
 * started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked.  We don't perform atomic mallocs on the
 *	new transaction and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 *
 */

static void jbd2_get_transaction(journal_t *journal,
				transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_start_time = ktime_get();
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	spin_lock_init(&transaction->t_handle_lock);
	atomic_set(&transaction->t_updates, 0);
	atomic_set(&transaction->t_outstanding_credits,
		   jbd2_descriptor_blocks_per_trans(journal) +
		   atomic_read(&journal->j_reserved_credits));
	atomic_set(&transaction->t_outstanding_revokes, 0);
	atomic_set(&transaction->t_handle_count, 0);
	INIT_LIST_HEAD(&transaction->t_inode_list);
	INIT_LIST_HEAD(&transaction->t_private_list);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;
	transaction->t_max_wait = 0;
	transaction->t_start = jiffies;
	transaction->t_requested = 0;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * In order for t_max_wait to be reliable, it must be protected by a
 * lock.  But doing so will mean that start_this_handle() can not be
 * run in parallel on SMP systems, which limits our scalability.  So
 * unless debugging is enabled, we no longer update t_max_wait, which
 * means that maximum wait time reported by the jbd2_run_stats
 * tracepoint will always be zero.
 */
static inline void update_t_max_wait(transaction_t *transaction,
				     unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
	if (jbd2_journal_enable_debug &&
	    time_after(transaction->t_start, ts)) {
		ts = jbd2_time_diff(ts, transaction->t_start);
		spin_lock(&transaction->t_handle_lock);
		if (ts > transaction->t_max_wait)
			transaction->t_max_wait = ts;
		spin_unlock(&transaction->t_handle_lock);
	}
#endif
}

/*
 * Wait until running transaction passes to T_FLUSH state and new transaction
 * can thus be started. Also starts the commit if needed. The function expects
 * running transaction to exist and releases j_state_lock.
 */
static void wait_transaction_locked(journal_t *journal)
	__releases(journal->j_state_lock)
{
	DEFINE_WAIT(wait);
	int need_to_start;
	tid_t tid = journal->j_running_transaction->t_tid;

	prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
			TASK_UNINTERRUPTIBLE);
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);
	jbd2_might_wait_for_commit(journal);
	schedule();
	finish_wait(&journal->j_wait_transaction_locked, &wait);
}

/*
 * Wait until running transaction transitions from T_SWITCH to T_FLUSH
 * state and new transaction can thus be started. The function releases
 * j_state_lock.
 */
static void wait_transaction_switching(journal_t *journal)
	__releases(journal->j_state_lock)
{
	DEFINE_WAIT(wait);

	if (WARN_ON(!journal->j_running_transaction ||
		    journal->j_running_transaction->t_state != T_SWITCH)) {
		read_unlock(&journal->j_state_lock);
		return;
	}
	prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
			TASK_UNINTERRUPTIBLE);
	read_unlock(&journal->j_state_lock);
	/*
	 * We don't call jbd2_might_wait_for_commit() here as there's no
	 * waiting for outstanding handles happening anymore in T_SWITCH state
	 * and handling of reserved handles actually relies on that for
	 * correctness.
	 */
	schedule();
	finish_wait(&journal->j_wait_transaction_locked, &wait);
}

static void sub_reserved_credits(journal_t *journal, int blocks)
{
	atomic_sub(blocks, &journal->j_reserved_credits);
	wake_up(&journal->j_wait_reserved);
}

/*
 * Wait until we can add credits for handle to the running transaction.  Called
 * with j_state_lock held for reading. Returns 0 if handle joined the running
 * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and
 * caller must retry.
 *
 * Note: because j_state_lock may be dropped depending on the return
 * value, we need to fake out sparse so it doesn't complain about a
 * locking imbalance.  Callers of add_transaction_credits will need to
 * make a similar accommodation.
 */
static int add_transaction_credits(journal_t *journal, int blocks,
				   int rsv_blocks)
__must_hold(&journal->j_state_lock)
{
	transaction_t *t = journal->j_running_transaction;
	int needed;
	int total = blocks + rsv_blocks;

	/*
	 * If the current transaction is locked down for commit, wait
	 * for the lock to be released.
	 */
	if (t->t_state != T_RUNNING) {
		WARN_ON_ONCE(t->t_state >= T_FLUSH);
		wait_transaction_locked(journal);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}

	/*
	 * If there is not enough space left in the log to write all
	 * potential buffers requested by this operation, we need to
	 * stall pending a log checkpoint to free some more log space.
	 */
	needed = atomic_add_return(total, &t->t_outstanding_credits);
	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large,
		 * then start to commit it: we can then go back and
		 * attach this handle to a new transaction.
		 */
		atomic_sub(total, &t->t_outstanding_credits);

		/*
		 * Is the number of reserved credits in the current transaction too
		 * big to fit this handle? Wait until reserved credits are freed.
		 */
		if (atomic_read(&journal->j_reserved_credits) + total >
		    journal->j_max_transaction_buffers) {
			read_unlock(&journal->j_state_lock);
			jbd2_might_wait_for_commit(journal);
			wait_event(journal->j_wait_reserved,
				   atomic_read(&journal->j_reserved_credits) + total <=
				   journal->j_max_transaction_buffers);
			__acquire(&journal->j_state_lock); /* fake out sparse */
			return 1;
		}

		wait_transaction_locked(journal);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.  This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 */
	if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers) {
		atomic_sub(total, &t->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		write_lock(&journal->j_state_lock);
		if (jbd2_log_space_left(journal) <
					journal->j_max_transaction_buffers)
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}

	/* No reservation? We are done... */
	if (!rsv_blocks)
		return 0;

	needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
	/* We allow at most half of a transaction to be reserved */
	if (needed > journal->j_max_transaction_buffers / 2) {
		sub_reserved_credits(journal, rsv_blocks);
		atomic_sub(total, &t->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		wait_event(journal->j_wait_reserved,
			 atomic_read(&journal->j_reserved_credits) + rsv_blocks
			 <= journal->j_max_transaction_buffers / 2);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}
	return 0;
}

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
			     gfp_t gfp_mask)
{
	transaction_t	*transaction, *new_transaction = NULL;
	int		blocks = handle->h_total_credits;
	int		rsv_blocks = 0;
	unsigned long ts = jiffies;

	if (handle->h_rsv_handle)
		rsv_blocks = handle->h_rsv_handle->h_total_credits;

	/*
	 * Limit the number of reserved credits to 1/2 of maximum transaction
	 * size and limit the number of total credits to not exceed maximum
	 * transaction size per operation.
	 */
	if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
	    (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
		printk(KERN_ERR "JBD2: %s wants too many credits "
		       "credits:%d rsv_credits:%d max:%d\n",
		       current->comm, blocks, rsv_blocks,
		       journal->j_max_transaction_buffers);
		WARN_ON(1);
		return -ENOSPC;
	}

alloc_transaction:
	/*
	 * This check is racy but it is just an optimization of allocating a
	 * new transaction early if there is a high chance we'll need it. If
	 * we guess wrong, we'll retry or free the unused transaction.
	 */
	if (!data_race(journal->j_running_transaction)) {
		/*
		 * If __GFP_FS is not present, then we may be being called from
		 * inside the fs writeback layer, so we MUST NOT fail.
		 */
		if ((gfp_mask & __GFP_FS) == 0)
			gfp_mask |= __GFP_NOFAIL;
		new_transaction = kmem_cache_zalloc(transaction_cache,
						    gfp_mask);
		if (!new_transaction)
			return -ENOMEM;
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
repeat:
	read_lock(&journal->j_state_lock);
	BUG_ON(journal->j_flags & JBD2_UNMOUNT);
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		read_unlock(&journal->j_state_lock);
		jbd2_journal_free_transaction(new_transaction);
		return -EROFS;
	}

	/*
	 * Wait on the journal's transaction barrier if necessary. Specifically
	 * we allow reserved handles to proceed because otherwise commit could
	 * deadlock on page writeback not being able to complete.
	 */
	if (!handle->h_reserved && journal->j_barrier_count) {
		read_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
				journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);
		if (!new_transaction)
			goto alloc_transaction;
		write_lock(&journal->j_state_lock);
		if (!journal->j_running_transaction &&
		    (handle->h_reserved || !journal->j_barrier_count)) {
			jbd2_get_transaction(journal, new_transaction);
			new_transaction = NULL;
		}
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	transaction = journal->j_running_transaction;

	if (!handle->h_reserved) {
		/* We may have dropped j_state_lock - restart in that case */
		if (add_transaction_credits(journal, blocks, rsv_blocks)) {
			/*
			 * add_transaction_credits releases
			 * j_state_lock on a non-zero return
			 */
			__release(&journal->j_state_lock);
			goto repeat;
		}
	} else {
		/*
		 * We have handle reserved so we are allowed to join T_LOCKED
		 * transaction and we don't have to check for transaction size
		 * and journal space. But we still have to wait while running
		 * transaction is being switched to a committing one as it
		 * won't wait for any handles anymore.
		 */
		if (transaction->t_state == T_SWITCH) {
			wait_transaction_switching(journal);
			goto repeat;
		}
		sub_reserved_credits(journal, blocks);
		handle->h_reserved = 0;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction.
	 */
	update_t_max_wait(transaction, ts);
	handle->h_transaction = transaction;
	handle->h_requested_credits = blocks;
	handle->h_revoke_credits_requested = handle->h_revoke_credits;
	handle->h_start_jiffies = jiffies;
	atomic_inc(&transaction->t_updates);
	atomic_inc(&transaction->t_handle_count);
	jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
		  handle, blocks,
		  atomic_read(&transaction->t_outstanding_credits),
		  jbd2_log_space_left(journal));
	read_unlock(&journal->j_state_lock);
	current->journal_info = handle;

	rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
	jbd2_journal_free_transaction(new_transaction);
	/*
	 * Ensure that no allocations done while the transaction is open are
	 * going to recurse back to the fs layer.
	 */
	handle->saved_alloc_context = memalloc_nofs_save();
	return 0;
}

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	handle->h_total_credits = nblocks;
	handle->h_ref = 1;

	return handle;
}

handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
			      int revoke_records, gfp_t gfp_mask,
			      unsigned int type, unsigned int line_no)
{
	handle_t *handle = journal_current_handle();
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	nblocks += DIV_ROUND_UP(revoke_records,
				journal->j_revoke_records_per_block);
	handle = new_handle(nblocks);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	if (rsv_blocks) {
		handle_t *rsv_handle;

		rsv_handle = new_handle(rsv_blocks);
		if (!rsv_handle) {
			jbd2_free_handle(handle);
			return ERR_PTR(-ENOMEM);
		}
		rsv_handle->h_reserved = 1;
		rsv_handle->h_journal = journal;
		handle->h_rsv_handle = rsv_handle;
	}
	handle->h_revoke_credits = revoke_records;

	err = start_this_handle(journal, handle, gfp_mask);
	if (err < 0) {
		if (handle->h_rsv_handle)
			jbd2_free_handle(handle->h_rsv_handle);
		jbd2_free_handle(handle);
		return ERR_PTR(err);
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid, type,
				line_no, nblocks);

	return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);


/**
 * jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space. Additionally, if rsv_blocks > 0, we also create another
 * handle with rsv_blocks reserved blocks in the journal. This handle is
 * stored in h_rsv_handle. It is not attached to any particular transaction
 * and thus doesn't block transaction commit. If the caller uses this reserved
 * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
 * on the parent handle will dispose the reserved one. The reserved handle has
 * to be converted to a normal handle using jbd2_journal_start_reserved()
 * before it can be used.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
	return jbd2__journal_start(journal, nblocks, 0, 0, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
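
/*
 * Illustrative caller-side sketch (not part of this file; journal, bh and
 * err come from the assumed surrounding filesystem code): a typical
 * metadata update brackets the modification between handle start and stop:
 *
 *	handle_t *handle = jbd2_journal_start(journal, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify bh->b_data ...
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	jbd2_journal_stop(handle);
 */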

static void __jbd2_journal_unreserve_handle(handle_t *handle, transaction_t *t)
{
	journal_t *journal = handle->h_journal;

	WARN_ON(!handle->h_reserved);
	sub_reserved_credits(journal, handle->h_total_credits);
	if (t)
		atomic_sub(handle->h_total_credits, &t->t_outstanding_credits);
}

void jbd2_journal_free_reserved(handle_t *handle)
{
	journal_t *journal = handle->h_journal;

	/* Get j_state_lock to pin running transaction if it exists */
	read_lock(&journal->j_state_lock);
	__jbd2_journal_unreserve_handle(handle, journal->j_running_transaction);
	read_unlock(&journal->j_state_lock);
	jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);

/**
 * jbd2_journal_start_reserved() - start reserved handle
 * @handle: handle to start
 * @type: for handle statistics
 * @line_no: for handle statistics
 *
 * Start a handle that has been previously reserved with jbd2_journal_reserve().
 * This attaches @handle to the running transaction (or creates one if there's
 * no transaction running). Unlike jbd2_journal_start() this function cannot
 * block on journal commit, checkpointing, or similar stuff. It can block on
 * memory allocation or a frozen journal though.
 *
 * Return 0 on success, non-zero on error - handle is freed in that case.
 */
int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
				unsigned int line_no)
{
	journal_t *journal = handle->h_journal;
	int ret = -EIO;

	if (WARN_ON(!handle->h_reserved)) {
		/* Someone passed in a normal handle? Just stop it. */
		jbd2_journal_stop(handle);
		return ret;
	}
	/*
	 * The usefulness of mixing reserved and unreserved handles is
	 * questionable. So far nobody seems to need it so just error out.
	 */
	if (WARN_ON(current->journal_info)) {
		jbd2_journal_free_reserved(handle);
		return ret;
	}

	handle->h_journal = NULL;
	/*
	 * GFP_NOFS is here because callers are likely from writeback or
	 * similarly constrained call sites
	 */
	ret = start_this_handle(journal, handle, GFP_NOFS);
	if (ret < 0) {
		handle->h_journal = journal;
		jbd2_journal_free_reserved(handle);
		return ret;
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid, type,
				line_no, handle->h_total_credits);
	return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
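
/*
 * Sketch of the intended lifecycle (assumption: the caller obtained the
 * reserved handle via the rsv_blocks argument of jbd2__journal_start(),
 * per the jbd2_journal_start() description above). The reserved handle is
 * detached from its parent before the parent is stopped, then activated
 * later from a context that must not block on transaction commit:
 *
 *	handle_t *rsv = handle->h_rsv_handle;
 *
 *	handle->h_rsv_handle = NULL;
 *	jbd2_journal_stop(handle);
 *	...
 *	err = jbd2_journal_start_reserved(rsv, type, line_no);
 */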

/**
 * jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 * @revoke_records: number of revoke records to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee the allocation - this is best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int result;
	int wanted;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	result = 1;

	read_lock(&journal->j_state_lock);

	/* Don't extend a locked-down transaction! */
	if (transaction->t_state != T_RUNNING) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction not running\n", handle, nblocks);
		goto error_out;
	}

	nblocks += DIV_ROUND_UP(
			handle->h_revoke_credits_requested + revoke_records,
			journal->j_revoke_records_per_block) -
		DIV_ROUND_UP(
			handle->h_revoke_credits_requested,
			journal->j_revoke_records_per_block);
	spin_lock(&transaction->t_handle_lock);
	wanted = atomic_add_return(nblocks,
				   &transaction->t_outstanding_credits);

	if (wanted > journal->j_max_transaction_buffers) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction too large\n", handle, nblocks);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		goto unlock;
	}

	trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
				 transaction->t_tid,
				 handle->h_type, handle->h_line_no,
				 handle->h_total_credits,
				 nblocks);

	handle->h_total_credits += nblocks;
	handle->h_requested_credits += nblocks;
	handle->h_revoke_credits += revoke_records;
	handle->h_revoke_credits_requested += revoke_records;
	result = 0;

	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
	spin_unlock(&transaction->t_handle_lock);
error_out:
	read_unlock(&journal->j_state_lock);
	return result;
}
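
/*
 * Caller-side sketch (illustrative; err and nblocks are assumed): because
 * extension is best-effort, callers typically fall back to restarting the
 * handle when the running transaction is full (return value > 0):
 *
 *	err = jbd2_journal_extend(handle, nblocks, 0);
 *	if (err > 0)
 *		err = jbd2_journal_restart(handle, nblocks);
 */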

static void stop_this_handle(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int revokes;

	J_ASSERT(journal_current_handle() == handle);
	J_ASSERT(atomic_read(&transaction->t_updates) > 0);
	current->journal_info = NULL;
	/*
	 * Subtract necessary revoke descriptor blocks from handle credits. We
	 * take care to account only for revoke descriptor blocks the
	 * transaction will really need as large sequences of transactions with
	 * small numbers of revokes are relatively common.
	 */
	revokes = handle->h_revoke_credits_requested - handle->h_revoke_credits;
	if (revokes) {
		int t_revokes, revoke_descriptors;
		int rr_per_blk = journal->j_revoke_records_per_block;

		WARN_ON_ONCE(DIV_ROUND_UP(revokes, rr_per_blk)
				> handle->h_total_credits);
		t_revokes = atomic_add_return(revokes,
				&transaction->t_outstanding_revokes);
		revoke_descriptors =
			DIV_ROUND_UP(t_revokes, rr_per_blk) -
			DIV_ROUND_UP(t_revokes - revokes, rr_per_blk);
		handle->h_total_credits -= revoke_descriptors;
	}
	atomic_sub(handle->h_total_credits,
		   &transaction->t_outstanding_credits);
	if (handle->h_rsv_handle)
		__jbd2_journal_unreserve_handle(handle->h_rsv_handle,
						transaction);
	if (atomic_dec_and_test(&transaction->t_updates))
		wake_up(&journal->j_wait_updates);

	rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
	/*
	 * Scope of the GFP_NOFS context is over here and so we can restore the
	 * original alloc context.
	 */
	memalloc_nofs_restore(handle->saved_alloc_context);
}

/**
 * jbd2__journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 * @revoke_records: number of revoke record credits requested
 * @gfp_mask: memory allocation flags (for start_this_handle)
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits. We preserve the reserved handle if one is attached to the
 * passed-in handle.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records,
			  gfp_t gfp_mask)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	tid_t		tid;
	int		need_to_start;
	int		ret;

	/* If we've had an abort of any type, don't even think about
	 * actually doing the restart! */
	if (is_handle_aborted(handle))
		return 0;
	journal = transaction->t_journal;
	tid = transaction->t_tid;

	/*
	 * First unlink the handle from its current transaction, and start the
	 * commit on that.
	 */
	jbd_debug(2, "restarting handle %p\n", handle);
	stop_this_handle(handle);
	handle->h_transaction = NULL;

	/*
	 * TODO: If we use READ_ONCE / WRITE_ONCE for j_commit_request we can
	 * get rid of pointless j_state_lock traffic like this.
	 */
	read_lock(&journal->j_state_lock);
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);
	handle->h_total_credits = nblocks +
		DIV_ROUND_UP(revoke_records,
			     journal->j_revoke_records_per_block);
	handle->h_revoke_credits = revoke_records;
	ret = start_this_handle(journal, handle, gfp_mask);
	trace_jbd2_handle_restart(journal->j_fs_dev->bd_dev,
				 ret ? 0 : handle->h_transaction->t_tid,
				 handle->h_type, handle->h_line_no,
				 handle->h_total_credits);
	return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);


int jbd2_journal_restart(handle_t *handle, int nblocks)
{
	return jbd2__journal_restart(handle, nblocks, 0, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);

/**
 * jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
	DEFINE_WAIT(wait);

	jbd2_might_wait_for_commit(journal);

	write_lock(&journal->j_state_lock);
	++journal->j_barrier_count;

	/* Wait until there are no reserved handles */
	if (atomic_read(&journal->j_reserved_credits)) {
		write_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_reserved,
			   atomic_read(&journal->j_reserved_credits) == 0);
		write_lock(&journal->j_state_lock);
	}

	/* Wait until there are no running updates */
	while (1) {
		transaction_t *transaction = journal->j_running_transaction;

		if (!transaction)
			break;

		spin_lock(&transaction->t_handle_lock);
		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&transaction->t_updates)) {
			spin_unlock(&transaction->t_handle_lock);
			finish_wait(&journal->j_wait_updates, &wait);
			break;
		}
		spin_unlock(&transaction->t_handle_lock);
		write_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_updates, &wait);
		write_lock(&journal->j_state_lock);
	}
	write_unlock(&journal->j_state_lock);

	/*
	 * We have now established a barrier against other normal updates, but
	 * we also need to barrier against other jbd2_journal_lock_updates() calls
	 * to make sure that we serialise special journal-locked operations
	 * too.
	 */
	mutex_lock(&journal->j_barrier);
}

/**
 * jbd2_journal_unlock_updates () - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates(journal_t *journal)
{
	J_ASSERT(journal->j_barrier_count != 0);

	mutex_unlock(&journal->j_barrier);
	write_lock(&journal->j_state_lock);
	--journal->j_barrier_count;
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_transaction_locked);
}
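
/*
 * Typical pairing (illustrative sketch): a journal-wide operation that must
 * not race with running handles is bracketed by the barrier calls:
 *
 *	jbd2_journal_lock_updates(journal);
 *	... operate with no handles running ...
 *	jbd2_journal_unlock_updates(journal);
 */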

static void warn_dirty_buffer(struct buffer_head *bh)
{
	printk(KERN_WARNING
	       "JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). "
	       "There's a risk of filesystem corruption in case of system "
	       "crash.\n",
	       bh->b_bdev, (unsigned long long)bh->b_blocknr);
}

/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
static void jbd2_freeze_jh_data(struct journal_head *jh)
{
	struct page *page;
	int offset;
	char *source;
	struct buffer_head *bh = jh2bh(jh);

	J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
	page = bh->b_page;
	offset = offset_in_page(bh->b_data);
	source = kmap_atomic(page);
	/* Fire data frozen trigger just before we copy the data */
	jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
	memcpy(jh->b_frozen_data, source + offset, bh->b_size);
	kunmap_atomic(source);

	/*
	 * Now that the frozen data is saved off, we need to store any matching
	 * triggers.
	 */
	jh->b_frozen_triggers = jh->b_triggers;
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 *
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
			int force_copy)
{
	struct buffer_head *bh;
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int error;
	char *frozen_buffer = NULL;
	unsigned long start_lock, time_lock;

	journal = transaction->t_journal;

	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

	JBUFFER_TRACE(jh, "entry");
repeat:
	bh = jh2bh(jh);

	/* @@@ Need to check for errors here at some point. */

	start_lock = jiffies;
	lock_buffer(bh);
	spin_lock(&jh->b_state_lock);

	/* If it takes too long to lock the buffer, trace it */
	time_lock = jbd2_time_diff(start_lock, jiffies);
	if (time_lock > HZ/10)
		trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
			jiffies_to_msecs(time_lock));

	/* We now hold the buffer lock so it is safe to query the buffer
	 * state.  Is the buffer dirty?
	 *
	 * If so, there are two possibilities.  The buffer may be
	 * non-journaled, and undergoing a quite legitimate writeback.
	 * Otherwise, it is journaled, and we don't expect dirty buffers
	 * in that state (the buffers should be marked JBD_Dirty
	 * instead.)  So either the IO is being done under our own
	 * control and this is a bug, or it's a third party IO such as
	 * dump(8) (which may leave the buffer scheduled for read ---
	 * ie. locked but not dirty) or tune2fs (which may actually have
	 * the buffer dirtied, ugh.)  */

	if (buffer_dirty(bh)) {
		/*
		 * First question: is this buffer already part of the current
		 * transaction or the existing committing transaction?
		 */
		if (jh->b_transaction) {
			J_ASSERT_JH(jh,
				jh->b_transaction == transaction ||
				jh->b_transaction ==
					journal->j_committing_transaction);
			if (jh->b_next_transaction)
				J_ASSERT_JH(jh, jh->b_next_transaction ==
							transaction);
			warn_dirty_buffer(bh);
		}
		/*
		 * In any case we need to clean the dirty flag and we must
		 * do it under the buffer lock to be sure we don't race
		 * with running write-out.
		 */
		JBUFFER_TRACE(jh, "Journalling dirty buffer");
		clear_buffer_dirty(bh);
		set_buffer_jbddirty(bh);
	}

	unlock_buffer(bh);

	error = -EROFS;
	if (is_handle_aborted(handle)) {
		spin_unlock(&jh->b_state_lock);
		goto out;
	}
	error = 0;

	/*
	 * The buffer is already part of this transaction if b_transaction or
	 * b_next_transaction points to it
	 */
	if (jh->b_transaction == transaction ||
	    jh->b_next_transaction == transaction)
		goto done;

	/*
	 * this is the first time this transaction is touching this buffer,
	 * reset the modified flag
	 */
	jh->b_modified = 0;

	/*
	 * If the buffer is not journaled right now, we need to make sure it
	 * doesn't get written to disk before the caller actually commits the
	 * new data
	 */
	if (!jh->b_transaction) {
		JBUFFER_TRACE(jh, "no transaction");
		J_ASSERT_JH(jh, !jh->b_next_transaction);
		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		/*
		 * Make sure all stores to jh (b_modified, b_frozen_data) are
		 * visible before attaching it to the running transaction.
		 * Paired with barrier in jbd2_write_access_granted()
		 */
		smp_wmb();
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
		goto done;
	}
	/*
	 * If there is already a copy-out version of this buffer, then we don't
	 * need to make another one
	 */
	if (jh->b_frozen_data) {
		JBUFFER_TRACE(jh, "has frozen data");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		goto attach_next;
	}

	JBUFFER_TRACE(jh, "owned by older transaction");
	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction);

	/*
	 * There is one case we have to be very careful about.  If the
	 * committing transaction is currently writing this buffer out to disk
	 * and has NOT made a copy-out, then we cannot modify the buffer
	 * contents at all right now.  The essence of copy-out is that it is
	 * the extra copy, not the primary copy, which gets journaled.  If the
	 * primary copy is already going to disk then we cannot do copy-out
	 * here.
	 */
	if (buffer_shadow(bh)) {
		JBUFFER_TRACE(jh, "on shadow: sleep");
		spin_unlock(&jh->b_state_lock);
		wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
		goto repeat;
	}

	/*
	 * Only do the copy if the currently-owning transaction still needs it.
	 * If buffer isn't on BJ_Metadata list, the committing transaction is
	 * past that stage (here we use the fact that BH_Shadow is set under
	 * bh_state lock together with refiling to BJ_Shadow list and at this
	 * point we know the buffer doesn't have BH_Shadow set).
	 *
	 * Subtle point, though: if this is a get_undo_access, then we will be
	 * relying on the frozen_data to contain the new value of the
	 * committed_data record after the transaction, so we HAVE to force the
	 * frozen_data copy in that case.
	 */
	if (jh->b_jlist == BJ_Metadata || force_copy) {
		JBUFFER_TRACE(jh, "generate frozen data");
		if (!frozen_buffer) {
			JBUFFER_TRACE(jh, "allocate memory for buffer");
			spin_unlock(&jh->b_state_lock);
			frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			goto repeat;
		}
		jh->b_frozen_data = frozen_buffer;
		frozen_buffer = NULL;
		jbd2_freeze_jh_data(jh);
	}
attach_next:
	/*
	 * Make sure all stores to jh (b_modified, b_frozen_data) are visible
	 * before attaching it to the running transaction. Paired with barrier
	 * in jbd2_write_access_granted()
	 */
	smp_wmb();
	jh->b_next_transaction = transaction;

done:
	spin_unlock(&jh->b_state_lock);

	/*
	 * If we are about to journal a buffer, then any revoke pending on it is
	 * no longer valid
	 */
	jbd2_journal_cancel_revoke(handle, jh);

out:
	if (unlikely(frozen_buffer))	/* It's usually NULL */
		jbd2_free(frozen_buffer, bh->b_size);

	JBUFFER_TRACE(jh, "exit");
	return error;
}

/* Fast check whether buffer is already attached to the required transaction */
static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
							bool undo)
{
	struct journal_head *jh;
	bool ret = false;

	/* Dirty buffers require special handling... */
	if (buffer_dirty(bh))
		return false;

	/*
	 * RCU protects us from dereferencing freed pages. So the checks we do
	 * are guaranteed not to oops. However the jh slab object can get freed
	 * & reallocated while we work with it. So we have to be careful. When
	 * we see jh attached to the running transaction, we know it must stay
	 * so until the transaction is committed. Thus jh won't be freed and
	 * will be attached to the same bh while we run.  However it can
	 * happen jh gets freed, reallocated, and attached to the transaction
	 * just after we get pointer to it from bh. So we have to be careful
	 * and recheck jh still belongs to our bh before we return success.
	 */
	rcu_read_lock();
	if (!buffer_jbd(bh))
		goto out;
	/* This should be bh2jh() but that doesn't work with inline functions */
	jh = READ_ONCE(bh->b_private);
	if (!jh)
		goto out;
	/* For undo access buffer must have data copied */
	if (undo && !jh->b_committed_data)
		goto out;
	if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
	    READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
		goto out;
	/*
	 * There are two reasons for the barrier here:
	 * 1) Make sure to fetch b_bh after we did previous checks so that we
	 * detect when jh went through free, realloc, attach to transaction
	 * while we were checking. Paired with implicit barrier in that path.
	 * 2) So that access to bh done after jbd2_write_access_granted()
	 * doesn't get reordered and see inconsistent state of concurrent
	 * do_get_write_access().
	 */
	smp_mb();
	if (unlikely(jh->b_bh != bh))
		goto out;
	ret = true;
out:
	rcu_read_unlock();
	return ret;
}

/**
 * jbd2_journal_get_write_access() - notify intent to modify a buffer
 *				     for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns: error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're ``write()ing`` a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	struct journal_head *jh;
	int rc;

	if (is_handle_aborted(handle))
		return -EROFS;

	if (jbd2_write_access_granted(handle, bh, false))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	/* We do not want to get caught playing with fields which the
	 * log thread also manipulates.  Make sure that the buffer
	 * completes any outstanding IO before proceeding. */
	rc = do_get_write_access(handle, jh, 0);
	jbd2_journal_put_journal_head(jh);
	return rc;
}


/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int err;

	jbd_debug(5, "journal_head %p\n", jh);
	err = -EROFS;
	if (is_handle_aborted(handle))
		goto out;
	journal = transaction->t_journal;
	err = 0;

	JBUFFER_TRACE(jh, "entry");
	/*
	 * The buffer may already belong to this transaction due to pre-zeroing
	 * in the filesystem's new_block code.  It may also be on the previous,
	 * committing transaction's lists, but it HAS to be in Forget state in
	 * that case: the transaction must have deleted the buffer for it to be
	 * reused here.
	 */
	spin_lock(&jh->b_state_lock);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
			  jh->b_jlist == BJ_Forget)));

	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

	if (jh->b_transaction == NULL) {
		/*
		 * Previous jbd2_journal_forget() could have left the buffer
		 * with jbddirty bit set because it was being committed. When
		 * the commit finished, we've filed the buffer for
		 * checkpointing and marked it dirty. Now we are reallocating
		 * the buffer so the transaction freeing it must have
		 * committed and so it's safe to clear the dirty bit.
		 */
		clear_buffer_dirty(jh2bh(jh));
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		spin_lock(&journal->j_list_lock);
		jh->b_next_transaction = transaction;
		spin_unlock(&journal->j_list_lock);
	}
	spin_unlock(&jh->b_state_lock);

	/*
	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
	 * blocks which contain freed but then revoked metadata.  We need
	 * to cancel the revoke in case we end up freeing it yet again
	 * and then reallocating it as data - this would cause a second
	 * revoke, which hits an assertion error.
	 */
	JBUFFER_TRACE(jh, "cancelling revoke");
	jbd2_journal_cancel_revoke(handle, jh);
out:
	jbd2_journal_put_journal_head(jh);
	return err;
}
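
/*
 * Caller-side sketch (illustrative; sb and blocknr are assumed): a freshly
 * allocated metadata block stays locked until it has been filled, as the
 * comment above jbd2_journal_get_create_access() requires:
 *
 *	bh = sb_getblk(sb, blocknr);
 *	lock_buffer(bh);
 *	err = jbd2_journal_get_create_access(handle, bh);
 *	if (!err) {
 *		memset(bh->b_data, 0, bh->b_size);
 *		set_buffer_uptodate(bh);
 *	}
 *	unlock_buffer(bh);
 *	if (!err)
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 */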

/**
 * jbd2_journal_get_undo_access() -  Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
	int err;
	struct journal_head *jh;
	char *committed_data = NULL;

	if (is_handle_aborted(handle))
		return -EROFS;

	if (jbd2_write_access_granted(handle, bh, true))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	JBUFFER_TRACE(jh, "entry");

	/*
	 * Do this first --- it can drop the journal lock, so we want to
	 * make sure that obtaining the committed_data is done
	 * atomically wrt. completion of any outstanding commits.
	 */
	err = do_get_write_access(handle, jh, 1);
	if (err)
		goto out;

repeat:
	if (!jh->b_committed_data)
		committed_data = jbd2_alloc(jh2bh(jh)->b_size,
					    GFP_NOFS|__GFP_NOFAIL);

	spin_lock(&jh->b_state_lock);
	if (!jh->b_committed_data) {
		/* Copy out the current buffer contents into the
		 * preserved, committed copy. */
		JBUFFER_TRACE(jh, "generate b_committed data");
		if (!committed_data) {
			spin_unlock(&jh->b_state_lock);
			goto repeat;
		}

		jh->b_committed_data = committed_data;
		committed_data = NULL;
		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
	}
	spin_unlock(&jh->b_state_lock);
out:
	jbd2_journal_put_journal_head(jh);
	if (unlikely(committed_data))
		jbd2_free(committed_data, bh->b_size);
	return err;
}
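
/*
 * Illustrative use (sketch; bitmap_bh and the bit-clearing step are
 * assumed): block-freeing paths take undo access before clearing bitmap
 * bits so the committed copy survives until the deallocation commits:
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	if (!err) {
 *		... clear bits in bitmap_bh->b_data ...
 *		err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 *	}
 */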

/**
 * jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
			       struct jbd2_buffer_trigger_type *type)
{
	struct journal_head *jh = jbd2_journal_grab_journal_head(bh);

	if (WARN_ON_ONCE(!jh))
		return;
	jh->b_triggers = type;
	jbd2_journal_put_journal_head(jh);
}
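
/*
 * Sketch of a client-defined trigger table (my_triggers, my_frozen and
 * my_abort are hypothetical names; a client filesystem such as ocfs2 uses
 * this mechanism to recompute block checksums before commit):
 *
 *	static struct jbd2_buffer_trigger_type my_triggers = {
 *		.t_frozen = my_frozen,	// fix up data about to hit the journal
 *		.t_abort  = my_abort,	// clean up if the handle is aborted
 *	};
 *
 *	jbd2_journal_set_triggers(bh, &my_triggers);
 */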
1429 
jbd2_buffer_frozen_trigger(struct journal_head * jh,void * mapped_data,struct jbd2_buffer_trigger_type * triggers)1430 void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
1431 				struct jbd2_buffer_trigger_type *triggers)
1432 {
1433 	struct buffer_head *bh = jh2bh(jh);
1434 
1435 	if (!triggers || !triggers->t_frozen)
1436 		return;
1437 
1438 	triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
1439 }
1440 
jbd2_buffer_abort_trigger(struct journal_head * jh,struct jbd2_buffer_trigger_type * triggers)1441 void jbd2_buffer_abort_trigger(struct journal_head *jh,
1442 			       struct jbd2_buffer_trigger_type *triggers)
1443 {
1444 	if (!triggers || !triggers->t_abort)
1445 		return;
1446 
1447 	triggers->t_abort(triggers, jh2bh(jh));
1448 }

/**
 * jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add the buffer to.
 * @bh: buffer to mark
 *
 * Mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer must have previously had jbd2_journal_get_write_access()
 * called so that it has a valid journal_head attached to the buffer
 * head.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh;
	int ret = 0;

	if (is_handle_aborted(handle))
		return -EROFS;
	if (!buffer_jbd(bh))
		return -EUCLEAN;

	/*
	 * We don't grab jh reference here since the buffer must be part
	 * of the running transaction.
	 */
	jh = bh2jh(bh);
	jbd_debug(5, "journal_head %p\n", jh);
	JBUFFER_TRACE(jh, "entry");

	/*
	 * This and the following assertions are unreliable since we may see jh
	 * in inconsistent state unless we grab bh_state lock. But this is
	 * crucial to catch bugs so let's do a reliable check until the
	 * lockless handling is fully proven.
	 */
	if (data_race(jh->b_transaction != transaction &&
	    jh->b_next_transaction != transaction)) {
		spin_lock(&jh->b_state_lock);
		J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_next_transaction == transaction);
		spin_unlock(&jh->b_state_lock);
	}
	if (jh->b_modified == 1) {
		/* If it's in our transaction it must be in BJ_Metadata list. */
		if (data_race(jh->b_transaction == transaction &&
		    jh->b_jlist != BJ_Metadata)) {
			spin_lock(&jh->b_state_lock);
			if (jh->b_transaction == transaction &&
			    jh->b_jlist != BJ_Metadata)
				pr_err("JBD2: assertion failure: h_type=%u "
				       "h_line_no=%u block_no=%llu jlist=%u\n",
				       handle->h_type, handle->h_line_no,
				       (unsigned long long) bh->b_blocknr,
				       jh->b_jlist);
			J_ASSERT_JH(jh, jh->b_transaction != transaction ||
					jh->b_jlist == BJ_Metadata);
			spin_unlock(&jh->b_state_lock);
		}
		goto out;
	}

	journal = transaction->t_journal;
	spin_lock(&jh->b_state_lock);

	if (jh->b_modified == 0) {
		/*
		 * This buffer has just been modified and is becoming
		 * part of the transaction. This needs to be done
		 * once per transaction -bzzz
		 */
		if (WARN_ON_ONCE(jbd2_handle_buffer_credits(handle) <= 0)) {
			ret = -ENOSPC;
			goto out_unlock_bh;
		}
		jh->b_modified = 1;
		handle->h_total_credits--;
	}

	/*
	 * Fastpath, to avoid expensive locking.  If this buffer is already
	 * on the running transaction's metadata list there is nothing to do.
	 * Nobody can take it off again because there is a handle open.
	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
	 * result in this test being false, so we go in and take the locks.
	 */
	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
		JBUFFER_TRACE(jh, "fastpath");
		if (unlikely(jh->b_transaction !=
			     journal->j_running_transaction)) {
			printk(KERN_ERR "JBD2: %s: "
			       "jh->b_transaction (%llu, %p, %u) != "
			       "journal->j_running_transaction (%p, %u)\n",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       jh->b_transaction,
			       jh->b_transaction ? jh->b_transaction->t_tid : 0,
			       journal->j_running_transaction,
			       journal->j_running_transaction ?
			       journal->j_running_transaction->t_tid : 0);
			ret = -EINVAL;
		}
		goto out_unlock_bh;
	}

	set_buffer_jbddirty(bh);

	/*
	 * Metadata already on the current transaction list doesn't
	 * need to be filed.  Metadata on another transaction's list must
	 * be committing, and will be refiled once the commit completes:
	 * leave it alone for now.
	 */
	if (jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "already on other transaction");
		if (unlikely(((jh->b_transaction !=
			       journal->j_committing_transaction)) ||
			     (jh->b_next_transaction != transaction))) {
			printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
			       "bad jh for block %llu: "
			       "transaction (%p, %u), "
			       "jh->b_transaction (%p, %u), "
			       "jh->b_next_transaction (%p, %u), jlist %u\n",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       transaction, transaction->t_tid,
			       jh->b_transaction,
			       jh->b_transaction ?
			       jh->b_transaction->t_tid : 0,
			       jh->b_next_transaction,
			       jh->b_next_transaction ?
			       jh->b_next_transaction->t_tid : 0,
			       jh->b_jlist);
			WARN_ON(1);
			ret = -EINVAL;
		}
		/* And this case is illegal: we can't reuse another
		 * transaction's data buffer, ever. */
		goto out_unlock_bh;
	}

	/* That test should have eliminated the following case: */
	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

	JBUFFER_TRACE(jh, "file as BJ_Metadata");
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
	spin_unlock(&journal->j_list_lock);
out_unlock_bh:
	spin_unlock(&jh->b_state_lock);
out:
	JBUFFER_TRACE(jh, "exit");
	return ret;
}
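
/*
 * Illustrative sketch (not part of this file): the canonical sequence a
 * filesystem follows to journal a metadata update.  Error handling is
 * elided and myfs_update_block() is a hypothetical helper that modifies
 * the buffer contents.
 *
 *	handle = jbd2_journal_start(journal, 1);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	myfs_update_block(bh);
 *	err = jbd2_journal_dirty_metadata(handle, bh);
 *	err = jbd2_journal_stop(handle);
 */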

/**
 * jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh;
	int drop_reserve = 0;
	int err = 0;
	int was_modified = 0;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	BUFFER_TRACE(bh, "entry");

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh) {
		__bforget(bh);
		return 0;
	}

	spin_lock(&jh->b_state_lock);

	/* Critical error: attempting to delete a bitmap buffer, maybe?
	 * Don't do any jbd operations, and return an error. */
	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
			 "inconsistent data on disk")) {
		err = -EIO;
		goto drop;
	}

	/* keep track of whether or not this transaction modified us */
	was_modified = jh->b_modified;

	/*
	 * The buffer is leaving the transaction, so we must drop all
	 * references to it -bzzz
	 */
	jh->b_modified = 0;

	if (jh->b_transaction == transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		/*
		 * we only want to drop a reference if this transaction
		 * modified the buffer
		 */
		if (was_modified)
			drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */

		spin_lock(&journal->j_list_lock);
		if (jh->b_cp_transaction) {
			__jbd2_journal_temp_unlink_buffer(jh);
			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__jbd2_journal_unfile_buffer(jh);
			jbd2_journal_put_journal_head(jh);
		}
		spin_unlock(&journal->j_list_lock);
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction by
		 * marking the buffer as freed and setting b_next_transaction
		 * to the new transaction, so that not only does the commit
		 * code know it should clear dirty bits when it is done with
		 * the buffer, but the buffer can also be checkpointed only
		 * after the new transaction commits. */

		set_buffer_freed(bh);

		if (!jh->b_next_transaction) {
			spin_lock(&journal->j_list_lock);
			jh->b_next_transaction = transaction;
			spin_unlock(&journal->j_list_lock);
		} else {
			J_ASSERT(jh->b_next_transaction == transaction);

			/*
			 * only drop a reference if this transaction modified
			 * the buffer
			 */
			if (was_modified)
				drop_reserve = 1;
		}
	} else {
		/*
		 * Finally, if the buffer does not belong to any
		 * transaction, we can just drop it now if it has no
		 * checkpoint.
		 */
		spin_lock(&journal->j_list_lock);
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "belongs to no transaction");
			spin_unlock(&journal->j_list_lock);
			goto drop;
		}

		/*
		 * Otherwise, if the buffer has been written to disk,
		 * it is safe to remove the checkpoint and drop it.
		 */
		if (!buffer_dirty(bh)) {
			__jbd2_journal_remove_checkpoint(jh);
			spin_unlock(&journal->j_list_lock);
			goto drop;
		}

		/*
		 * The buffer has not yet been written to disk; attach
		 * it to the current transaction so that it can only be
		 * checkpointed after the current transaction commits.
		 */
		clear_buffer_dirty(bh);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		spin_unlock(&journal->j_list_lock);
	}
drop:
	__brelse(bh);
	spin_unlock(&jh->b_state_lock);
	jbd2_journal_put_journal_head(jh);
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_total_credits++;
	}
	return err;
}
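
/*
 * Illustrative sketch (not part of this file): when a filesystem frees a
 * metadata block, it "forgets" the buffer instead of writing it.
 * myfs_free_block() is a hypothetical helper that clears the block from
 * the allocation structures; note that jbd2_journal_forget() also drops
 * our b_count reference on the buffer.
 *
 *	bh = sb_bread(sb, blocknr);
 *	myfs_free_block(handle, blocknr);
 *	err = jbd2_journal_forget(handle, bh);
 */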

/**
 * jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int err = 0, wait_for_commit = 0;
	tid_t tid;
	pid_t pid;

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
						 handle->h_ref);
		if (is_handle_aborted(handle))
			return -EIO;
		return 0;
	}
	if (!transaction) {
		/*
		 * Handle is already detached from the transaction so there is
		 * nothing to do other than free the handle.
		 */
		memalloc_nofs_restore(handle->saved_alloc_context);
		goto free_and_exit;
	}
	journal = transaction->t_journal;
	tid = transaction->t_tid;

	if (is_handle_aborted(handle))
		err = -EIO;

	jbd_debug(4, "Handle %p going down\n", handle);
	trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
				tid, handle->h_type, handle->h_line_no,
				jiffies - handle->h_start_jiffies,
				handle->h_sync, handle->h_requested_credits,
				(handle->h_requested_credits -
				 handle->h_total_credits));

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this
	 * transaction.  Keep doing that while new threads continue to
	 * arrive.  It doesn't cost much - we're about to run a commit
	 * and sleep on IO anyway.  Speeds up many-threaded, many-dir
	 * operations by 30x or more...
	 *
	 * We try to optimize the sleep time against what the
	 * underlying disk can do, instead of having a static sleep
	 * time.  This is useful for the case where our storage is so
	 * fast that it is better to go ahead and force a flush
	 * and wait for the transaction to be committed than it is to
	 * wait for an arbitrary amount of time for new writers to
	 * join the transaction.  We achieve this by measuring how
	 * long it takes to commit a transaction, and compare it with
	 * how long this transaction has been running, and if run time
	 * < commit time then we sleep for the delta and commit.  This
	 * greatly helps super fast disks that would see slowdowns as
	 * more threads started doing fsyncs.
	 *
	 * But don't do this if this process was the most recent one
	 * to perform a synchronous write.  We do this to detect the
	 * case where a single process is doing a stream of sync
	 * writes.  No point in waiting for joiners in that case.
	 *
	 * Setting max_batch_time to 0 disables this completely.
	 */
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid &&
	    journal->j_max_batch_time) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		read_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		read_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		commit_time = max_t(u64, commit_time,
				    1000*journal->j_min_batch_time);
		commit_time = min_t(u64, commit_time,
				    1000*journal->j_max_batch_time);

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}

	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the transaction is too
	 * old now.
	 */
	if (handle->h_sync ||
	    time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */

		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		/* This is non-blocking */
		jbd2_log_start_commit(journal, tid);

		/*
		 * Special case: JBD2_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			wait_for_commit = 1;
	}

	/*
	 * Once stop_this_handle() drops t_updates, the transaction could start
	 * committing on us and eventually disappear.  So we must not
	 * dereference the transaction pointer again after calling
	 * stop_this_handle().
	 */
	stop_this_handle(handle);

	if (wait_for_commit)
		err = jbd2_log_wait_commit(journal, tid);

free_and_exit:
	if (handle->h_rsv_handle)
		jbd2_free_handle(handle->h_rsv_handle);
	jbd2_free_handle(handle);
	return err;
}
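
/*
 * Illustrative sketch (not part of this file): an fsync-like path marks the
 * handle synchronous, so jbd2_journal_stop() starts a commit and waits for
 * it (this is what ext4_handle_sync() arranges in ext4).
 *
 *	handle = jbd2_journal_start(journal, credits);
 *	... journalled metadata updates ...
 *	handle->h_sync = 1;
 *	err = jbd2_journal_stop(handle);	(commits and waits here)
 */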

/*
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jh->b_state_lock is held.
 */
static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}
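
/*
 * Worked example: starting from an empty list, adding buffers A and then B
 * yields the circular list A <-> B with *list == A, A->b_tprev == B and
 * B->b_tnext == A, so each new entry lands at the tail while *list keeps
 * pointing at the oldest entry.
 */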

/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jh->b_state_lock is held.
 */
static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}

/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
 * t_reserved_list.  If the caller is holding onto a copy of one of these
 * pointers, it could go bad.  Generally the caller needs to re-read the
 * pointer from the transaction_t.
 *
 * Called under j_list_lock.
 */
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	lockdep_assert_held(&jh->b_state_lock);
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (transaction && is_journal_aborted(transaction->t_journal))
		clear_buffer_jbddirty(bh);
	else if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}

/*
 * Remove a buffer from all transactions. The caller is responsible for
 * dropping the jh reference that belonged to the transaction.
 *
 * Called with the b_state_lock and j_list_lock held.
 */
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
	J_ASSERT_JH(jh, jh->b_transaction != NULL);
	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);

	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = NULL;
}

void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	/* Get reference so that buffer cannot be freed before we unlock it */
	get_bh(bh);
	spin_lock(&jh->b_state_lock);
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_unfile_buffer(jh);
	spin_unlock(&journal->j_list_lock);
	spin_unlock(&jh->b_state_lock);
	jbd2_journal_put_journal_head(jh);
	__brelse(bh);
}

/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jh->b_state_lock.
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
	struct journal_head *jh;

	jh = bh2jh(bh);

	if (buffer_locked(bh) || buffer_dirty(bh))
		goto out;

	if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
		goto out;

	spin_lock(&journal->j_list_lock);
	if (jh->b_cp_transaction != NULL) {
		/* written-back checkpointed metadata buffer */
		JBUFFER_TRACE(jh, "remove from checkpoint list");
		__jbd2_journal_remove_checkpoint(jh);
	}
	spin_unlock(&journal->j_list_lock);
out:
	return;
}

/**
 * jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: page to try and free
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	int ret = 0;

	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	do {
		struct journal_head *jh;

		/*
		 * We take our own ref against the journal_head here to avoid
		 * having to add tons of locking around each instance of
		 * jbd2_journal_put_journal_head().
		 */
		jh = jbd2_journal_grab_journal_head(bh);
		if (!jh)
			continue;

		spin_lock(&jh->b_state_lock);
		__journal_try_to_free_buffer(journal, bh);
		spin_unlock(&jh->b_state_lock);
		jbd2_journal_put_journal_head(jh);
		if (buffer_jbd(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);

	ret = try_to_free_buffers(page);
busy:
	return ret;
}
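
/*
 * Illustrative sketch (not part of this file): the natural shape of a
 * filesystem's ->releasepage() hook for journalled pages, roughly what
 * ext4 does.  MYFS_JOURNAL() is a hypothetical accessor for the
 * journal of the inode's filesystem.
 *
 *	static int myfs_releasepage(struct page *page, gfp_t wait)
 *	{
 *		journal_t *journal = MYFS_JOURNAL(page->mapping->host);
 *
 *		if (PageChecked(page))
 *			return 0;
 *		return jbd2_journal_try_to_free_buffers(journal, page);
 *	}
 */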

/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jh->b_state_lock.
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
	int may_free = 1;
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_cp_transaction) {
		JBUFFER_TRACE(jh, "on running+cp transaction");
		__jbd2_journal_temp_unlink_buffer(jh);
		/*
		 * We don't want to write the buffer anymore, clear the
		 * bit so that we don't confuse checks in
		 * __journal_file_buffer
		 */
		clear_buffer_dirty(bh);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		may_free = 0;
	} else {
		JBUFFER_TRACE(jh, "on running transaction");
		__jbd2_journal_unfile_buffer(jh);
		jbd2_journal_put_journal_head(jh);
	}
	return may_free;
}

/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage
 * on the data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction.
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */
/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
				int partial_page)
{
	transaction_t *transaction;
	struct journal_head *jh;
	int may_free = 1;

	BUFFER_TRACE(bh, "entry");

	/*
	 * It is safe to proceed here without the j_list_lock because the
	 * buffers cannot be stolen by try_to_free_buffers as long as we are
	 * holding the page lock. --sct
	 */

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh)
		goto zap_buffer_unlocked;

	/* OK, we have data buffer in journaled mode */
	write_lock(&journal->j_state_lock);
	spin_lock(&jh->b_state_lock);
	spin_lock(&journal->j_list_lock);

	/*
	 * We cannot remove the buffer from checkpoint lists until the
	 * transaction adding the inode to the orphan list (let's call it T)
	 * is committed.  Otherwise, if the transaction changing the buffer
	 * were cleaned from the journal before T is committed, a crash
	 * would cause the correct contents of the buffer to be lost.  On
	 * the other hand, we have to clear the buffer dirty bit no later
	 * than the moment when the transaction marking the buffer as freed
	 * in the filesystem structures is committed, because from that
	 * moment on the block can be reallocated and used by a different
	 * page.  Since the block hasn't been freed yet but the inode has
	 * already been added to the orphan list, it is safe for us to add
	 * the buffer to the BJ_Forget list of the newest transaction.
	 *
	 * Also we have to clear the buffer_mapped flag of a truncated buffer
	 * because the buffer_head may be attached to the page straddling
	 * i_size (can happen only when blocksize < pagesize) and thus the
	 * buffer_head can be reused when the file is extended again. So we end
	 * up keeping around invalidated buffers attached to transactions'
	 * BJ_Forget list just to stop checkpointing code from cleaning up
	 * the transaction this buffer was modified in.
	 */
	transaction = jh->b_transaction;
	if (transaction == NULL) {
		/* First case: not on any transaction.  If it
		 * has no checkpoint link, then we can zap it:
		 * it's a writeback-mode buffer so we don't care
		 * if it hits disk safely. */
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "not on any transaction: zap");
			goto zap_buffer;
		}

		if (!buffer_dirty(bh)) {
			/* bdflush has written it.  We can drop it now */
			__jbd2_journal_remove_checkpoint(jh);
			goto zap_buffer;
		}

		/* OK, it must be in the journal but still not
		 * written fully to disk: it's metadata or
		 * journaled data... */

		if (journal->j_running_transaction) {
			/* ... and once the current transaction has
			 * committed, the buffer won't be needed any
			 * longer. */
			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
			may_free = __dispose_buffer(jh,
					journal->j_running_transaction);
			goto zap_buffer;
		} else {
			/* There is no currently-running transaction. So the
			 * orphan record which we wrote for this file must have
			 * passed into commit.  We must attach this buffer to
			 * the committing transaction, if it exists. */
			if (journal->j_committing_transaction) {
				JBUFFER_TRACE(jh, "give to committing trans");
				may_free = __dispose_buffer(jh,
					journal->j_committing_transaction);
				goto zap_buffer;
			} else {
				/* The orphan record's transaction has
				 * committed.  We can cleanse this buffer */
				clear_buffer_jbddirty(bh);
				__jbd2_journal_remove_checkpoint(jh);
				goto zap_buffer;
			}
		}
	} else if (transaction == journal->j_committing_transaction) {
		JBUFFER_TRACE(jh, "on committing transaction");
		/*
		 * The buffer is committing, we simply cannot touch
		 * it. If the page is straddling i_size we have to wait
		 * for commit and try again.
		 */
		if (partial_page) {
			spin_unlock(&journal->j_list_lock);
			spin_unlock(&jh->b_state_lock);
			write_unlock(&journal->j_state_lock);
			jbd2_journal_put_journal_head(jh);
			return -EBUSY;
		}
		/*
		 * OK, buffer won't be reachable after truncate. We just clear
		 * b_modified to not confuse transaction credit accounting, and
		 * set b_next_transaction to the running transaction (if there
		 * is one) and mark buffer as freed so that commit code knows
		 * it should clear dirty bits when it is done with the buffer.
		 */
		set_buffer_freed(bh);
		if (journal->j_running_transaction && buffer_jbddirty(bh))
			jh->b_next_transaction = journal->j_running_transaction;
		jh->b_modified = 0;
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&jh->b_state_lock);
		write_unlock(&journal->j_state_lock);
		jbd2_journal_put_journal_head(jh);
		return 0;
	} else {
		/* Good, the buffer belongs to the running transaction.
		 * We are writing our own transaction's data, not any
		 * previous one's, so it is safe to throw it away
		 * (remember that we expect the filesystem to have set
		 * i_size already for this truncate so recovery will not
		 * expose the disk blocks we are discarding here.) */
		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
		JBUFFER_TRACE(jh, "on running transaction");
		may_free = __dispose_buffer(jh, transaction);
	}

zap_buffer:
	/*
	 * This is tricky. Although the buffer is truncated, it may be reused
	 * if blocksize < pagesize and it is attached to the page straddling
	 * EOF. Since the buffer might have been added to the BJ_Forget list
	 * of the running transaction, journal_get_write_access() won't clear
	 * b_modified and credit accounting gets confused. So clear b_modified
	 * here.
	 */
	jh->b_modified = 0;
	spin_unlock(&journal->j_list_lock);
	spin_unlock(&jh->b_state_lock);
	write_unlock(&journal->j_state_lock);
	jbd2_journal_put_journal_head(jh);
zap_buffer_unlocked:
	clear_buffer_dirty(bh);
	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	bh->b_bdev = NULL;
	return may_free;
}

/**
 * jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  start of the range to invalidate
 * @length:  length of the range to invalidate
 *
 * Reap page buffers containing data in the specified range of the page.
 * Can return -EBUSY if buffers are part of the committing transaction and
 * the page is straddling i_size. The caller then has to wait for the
 * current commit and try again.
 */
int jbd2_journal_invalidatepage(journal_t *journal,
				struct page *page,
				unsigned int offset,
				unsigned int length)
{
	struct buffer_head *head, *bh, *next;
	unsigned int stop = offset + length;
	unsigned int curr_off = 0;
	int partial_page = (offset || length < PAGE_SIZE);
	int may_free = 1;
	int ret = 0;

	if (!PageLocked(page))
		BUG();
	if (!page_has_buffers(page))
		return 0;

	BUG_ON(stop > PAGE_SIZE || stop < length);

	/* We will potentially be playing with lists other than just the
	 * data lists (especially for journaled data mode), so be
	 * cautious in our locking. */

	head = bh = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (next_off > stop)
			return 0;

		if (offset <= curr_off) {
			/* This block is wholly outside the truncation point */
			lock_buffer(bh);
			ret = journal_unmap_buffer(journal, bh, partial_page);
			unlock_buffer(bh);
			if (ret < 0)
				return ret;
			may_free &= ret;
		}
		curr_off = next_off;
		bh = next;

	} while (bh != head);

	if (!partial_page) {
		if (may_free && try_to_free_buffers(page))
			J_ASSERT(!page_has_buffers(page));
	}
	return 0;
}
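
/*
 * Illustrative sketch (not part of this file): a journalling filesystem
 * forwards its ->invalidatepage() hook here and, on -EBUSY (a committing
 * transaction still owns a buffer and the page straddles i_size), waits
 * for that commit before retrying, roughly as ext4 does for journalled
 * data.  The myfs_* names are hypothetical.
 *
 *	static void myfs_invalidatepage(struct page *page, unsigned int offset,
 *					unsigned int length)
 *	{
 *		journal_t *journal = MYFS_JOURNAL(page->mapping->host);
 *		int ret;
 *
 *		do {
 *			ret = jbd2_journal_invalidatepage(journal, page,
 *							  offset, length);
 *			if (ret == -EBUSY)
 *				(wait for the running commit, then retry)
 *		} while (ret == -EBUSY);
 *	}
 */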

/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
			transaction_t *transaction, int jlist)
{
	struct journal_head **list = NULL;
	int was_dirty = 0;
	struct buffer_head *bh = jh2bh(jh);

	lockdep_assert_held(&jh->b_state_lock);
	assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_transaction == NULL);

	if (jh->b_transaction && jh->b_jlist == jlist)
		return;

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		/*
		 * For metadata buffers, we track the dirty bit in
		 * buffer_jbddirty instead of buffer_dirty. We should not see
		 * a dirty bit set here because we clear it in
		 * do_get_write_access but e.g. tune2fs can modify the sb and
		 * set the dirty bit at any time so we try to gracefully
		 * handle that.
		 */
		if (buffer_dirty(bh))
			warn_dirty_buffer(bh);
		if (test_clear_buffer_dirty(bh) ||
		    test_clear_buffer_jbddirty(bh))
			was_dirty = 1;
	}

	if (jh->b_transaction)
		__jbd2_journal_temp_unlink_buffer(jh);
	else
		jbd2_journal_grab_journal_head(bh);
	jh->b_transaction = transaction;

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_add_buffer(list, jh);
	jh->b_jlist = jlist;

	if (was_dirty)
		set_buffer_jbddirty(bh);
}

void jbd2_journal_file_buffer(struct journal_head *jh,
				transaction_t *transaction, int jlist)
{
	spin_lock(&jh->b_state_lock);
	spin_lock(&transaction->t_journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, jlist);
	spin_unlock(&transaction->t_journal->j_list_lock);
	spin_unlock(&jh->b_state_lock);
}

/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely.  If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under j_list_lock
 * Called under jh->b_state_lock
 *
 * When this function returns true, there's no next transaction to refile to
 * and the caller has to drop the jh reference through
 * jbd2_journal_put_journal_head().
 */
bool __jbd2_journal_refile_buffer(struct journal_head *jh)
{
	int was_dirty, jlist;
	struct buffer_head *bh = jh2bh(jh);

	lockdep_assert_held(&jh->b_state_lock);
	if (jh->b_transaction)
		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

	/* If the buffer is now unused, just drop it. */
	if (jh->b_next_transaction == NULL) {
		__jbd2_journal_unfile_buffer(jh);
		return true;
	}

	/*
	 * It has been modified by a later transaction: add it to the new
	 * transaction's metadata list.
	 */

	was_dirty = test_clear_buffer_jbddirty(bh);
	__jbd2_journal_temp_unlink_buffer(jh);

	/*
	 * b_transaction must be set, otherwise the new b_transaction won't
	 * be holding a jh reference
	 */
	J_ASSERT_JH(jh, jh->b_transaction != NULL);

	/*
	 * We set b_transaction here because b_next_transaction will inherit
	 * our jh reference and thus __jbd2_journal_file_buffer() must not
	 * take a new one.
	 */
	WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
	WRITE_ONCE(jh->b_next_transaction, NULL);
	if (buffer_freed(bh))
		jlist = BJ_Forget;
	else if (jh->b_modified)
		jlist = BJ_Metadata;
	else
		jlist = BJ_Reserved;
	__jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

	if (was_dirty)
		set_buffer_jbddirty(bh);
	return false;
}

/*
 * __jbd2_journal_refile_buffer() with necessary locking added. We take our
 * bh reference so that we can safely unlock bh.
 *
 * The jh and bh may be freed by this call.
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
	bool drop;

	spin_lock(&jh->b_state_lock);
	spin_lock(&journal->j_list_lock);
	drop = __jbd2_journal_refile_buffer(jh);
	spin_unlock(&jh->b_state_lock);
	spin_unlock(&journal->j_list_lock);
	if (drop)
		jbd2_journal_put_journal_head(jh);
}
/*
 * File an inode in the inode list of the handle's transaction.
 */
static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
		unsigned long flags, loff_t start_byte, loff_t end_byte)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
			transaction->t_tid);

	spin_lock(&journal->j_list_lock);
	jinode->i_flags |= flags;

	if (jinode->i_dirty_end) {
		jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte);
		jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte);
	} else {
		jinode->i_dirty_start = start_byte;
		jinode->i_dirty_end = end_byte;
	}

	/* Is the inode already attached where we need it? */
	if (jinode->i_transaction == transaction ||
	    jinode->i_next_transaction == transaction)
		goto done;

	/*
	 * We only ever set this variable to 1, so the test is safe. Since
	 * t_need_data_flush is likely to be set, we do the test to save some
	 * cacheline bouncing.
	 */
	if (!transaction->t_need_data_flush)
		transaction->t_need_data_flush = 1;
	/* On some different transaction's list - should be
	 * the committing one */
	if (jinode->i_transaction) {
		J_ASSERT(jinode->i_next_transaction == NULL);
		J_ASSERT(jinode->i_transaction ==
					journal->j_committing_transaction);
		jinode->i_next_transaction = transaction;
		goto done;
	}
	/* Not on any transaction list... */
	J_ASSERT(!jinode->i_next_transaction);
	jinode->i_transaction = transaction;
	list_add(&jinode->i_list, &transaction->t_inode_list);
done:
	spin_unlock(&journal->j_list_lock);

	return 0;
}

int jbd2_journal_inode_ranged_write(handle_t *handle,
		struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
{
	return jbd2_journal_file_inode(handle, jinode,
			JI_WRITE_DATA | JI_WAIT_DATA, start_byte,
			start_byte + length - 1);
}

int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode,
		loff_t start_byte, loff_t length)
{
	return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA,
			start_byte, start_byte + length - 1);
}
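
/*
 * Illustrative sketch (not part of this file): in ordered mode a filesystem
 * records the byte range it is about to write so that the committing
 * transaction flushes (JI_WRITE_DATA) and waits on (JI_WAIT_DATA) that
 * range before the metadata is committed, e.g. when mapping new blocks in
 * the write path.  myfs_jinode() is a hypothetical accessor for the
 * inode's struct jbd2_inode.
 *
 *	err = jbd2_journal_inode_ranged_write(handle, myfs_jinode(inode),
 *					      pos, len);
 */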

/*
 * File truncate and transaction commit interact with each other in a
 * non-trivial way.  If a transaction writing data block A is
 * committing, we cannot discard the data by truncate until we have
 * written them.  Otherwise if we crashed after the transaction with
 * write has committed but before the transaction with truncate has
 * committed, we could see stale data in block A.  This function is a
 * helper to solve this problem.  It starts writeout of the truncated
 * part in case it is in the committing transaction.
 *
 * Filesystem code must call this function when the inode is journaled in
 * ordered mode before truncation happens and after the inode has been
 * placed on the orphan list with the new inode size. The second condition
 * avoids the race where someone writes new data and we start
 * committing the transaction after this function has been called but
 * before a transaction for truncate is started (and furthermore it
 * allows us to optimize the case where the addition to the orphan list
 * happens in the same transaction as the write --- we don't have to write
 * any data in such a case).
 */
int jbd2_journal_begin_ordered_truncate(journal_t *journal,
					struct jbd2_inode *jinode,
					loff_t new_size)
{
	transaction_t *inode_trans, *commit_trans;
	int ret = 0;

	/* This is a quick check to avoid locking if not necessary */
	if (!jinode->i_transaction)
		goto out;
	/* Locks are here just to force reading of recent values; it is
	 * enough that the transaction was not committing before we started
	 * a transaction adding the inode to the orphan list */
	read_lock(&journal->j_state_lock);
	commit_trans = journal->j_committing_transaction;
	read_unlock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	inode_trans = jinode->i_transaction;
	spin_unlock(&journal->j_list_lock);
	if (inode_trans == commit_trans) {
		ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
			new_size, LLONG_MAX);
		if (ret)
			jbd2_journal_abort(journal, ret);
	}
out:
	return ret;
}
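
/*
 * Illustrative sketch (not part of this file): a truncate path in ordered
 * mode calls this after putting the inode on the orphan list with the new
 * size and before starting the truncating transaction, roughly as
 * ext4_setattr() does.  myfs_jinode() is a hypothetical accessor.
 *
 *	err = jbd2_journal_begin_ordered_truncate(journal, myfs_jinode(inode),
 *						  attr->ia_size);
 */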