Lines Matching full:log
35 struct xlog *log,
39 struct xlog *log);
45 struct xlog *log);
48 struct xlog *log,
55 struct xlog *log,
59 struct xlog *log,
65 struct xlog *log);
68 struct xlog *log,
73 struct xlog *log,
83 struct xlog *log);
93 * However, this padding does not get written into the log, and hence we have to
94 * track the space used by the log vectors separately to prevent log space hangs
95 * due to inaccurate accounting (i.e. a leak) of the used log space through the
99 * log. This prepends the data region we return to the caller to copy their data
146 struct xlog *log, in xlog_grant_sub_space() argument
160 space += log->l_logsize; in xlog_grant_sub_space()
172 struct xlog *log, in xlog_grant_add_space() argument
185 tmp = log->l_logsize - space; in xlog_grant_add_space()
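The two arithmetic fragments above (xlog_grant_sub_space/xlog_grant_add_space) are the wrap handling for the grant heads: each head packs a cycle number and a byte offset into a single 64-bit value, and crossing either end of the log adjusts the offset by l_logsize while stepping the cycle. A minimal user-space sketch of that packing and wrap logic, with helper names and layout assumed rather than copied from the kernel:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's packed grant head (cycle | space). */
static int64_t pack_grant(int cycle, int space)
{
	return ((int64_t)cycle << 32) | (uint32_t)space;
}

static void crack_grant(int64_t head, int *cycle, int *space)
{
	*cycle = head >> 32;
	*space = (int)(head & 0xffffffff);
}

/* Take bytes off the head; wrap backwards over the log size if it goes negative. */
static int64_t grant_sub_space(int64_t head, int bytes, int logsize)
{
	int cycle, space;

	crack_grant(head, &cycle, &space);
	space -= bytes;
	if (space < 0) {
		space += logsize;		/* the "space += log->l_logsize" case */
		cycle--;
	}
	return pack_grant(cycle, space);
}

/* Add bytes to the head; wrap forwards over the log size if it overflows. */
static int64_t grant_add_space(int64_t head, int bytes, int logsize)
{
	int cycle, space;

	crack_grant(head, &cycle, &space);
	if (bytes > logsize - space) {		/* the "tmp = log->l_logsize - space" case */
		space = bytes - (logsize - space);
		cycle++;
	} else {
		space += bytes;
	}
	return pack_grant(cycle, space);
}

int main(void)
{
	int logsize = 1 << 20;			/* pretend 1 MiB log */
	int64_t head = pack_grant(3, logsize - 4096);
	int cycle, space;

	head = grant_add_space(head, 16384, logsize);
	crack_grant(head, &cycle, &space);
	printf("after add: cycle %d, space %d\n", cycle, space);	/* cycle 4, space 12288 */

	head = grant_sub_space(head, 16384, logsize);
	crack_grant(head, &cycle, &space);
	printf("after sub: cycle %d, space %d\n", cycle, space);	/* back to cycle 3 */
	return 0;
}

In the kernel the update is done with an atomic compare-and-exchange loop so concurrent reservations do not lose updates; the sketch leaves that out.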
222 struct xlog *log, in xlog_ticket_reservation() argument
226 if (head == &log->l_write_head) { in xlog_ticket_reservation()
239 struct xlog *log, in xlog_grant_head_wake() argument
252 * limiting the target to the log head (l_last_sync_lsn) at the in xlog_grant_head_wake()
253 * time. This may not reflect where the log head is now as the in xlog_grant_head_wake()
257 * log that has moved rather than the tail. As the tail didn't in xlog_grant_head_wake()
260 * pushed to the target defined by the old log head location, we in xlog_grant_head_wake()
266 * target reflects both the current log tail and log head in xlog_grant_head_wake()
270 need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_wake()
273 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wake()
278 trace_xfs_log_grant_wake_up(log, tic); in xlog_grant_head_wake()
288 struct xlog *log, in xlog_grant_head_wait() argument
297 if (xlog_is_shutdown(log)) in xlog_grant_head_wait()
299 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wait()
304 XFS_STATS_INC(log->l_mp, xs_sleep_logspace); in xlog_grant_head_wait()
306 trace_xfs_log_grant_sleep(log, tic); in xlog_grant_head_wait()
308 trace_xfs_log_grant_wake(log, tic); in xlog_grant_head_wait()
311 if (xlog_is_shutdown(log)) in xlog_grant_head_wait()
313 } while (xlog_space_left(log, &head->grant) < need_bytes); in xlog_grant_head_wait()
323 * Atomically get the log space required for a log ticket.
341 struct xlog *log, in xlog_grant_head_check() argument
349 ASSERT(!xlog_in_recovery(log)); in xlog_grant_head_check()
357 *need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_check()
358 free_bytes = xlog_space_left(log, &head->grant); in xlog_grant_head_check()
361 if (!xlog_grant_head_wake(log, head, &free_bytes) || in xlog_grant_head_check()
363 error = xlog_grant_head_wait(log, head, tic, in xlog_grant_head_check()
369 error = xlog_grant_head_wait(log, head, tic, *need_bytes); in xlog_grant_head_check()
381 * Do not write to the log on norecovery mounts, if the data or log in xfs_log_writable()
383 * mounts allow internal writes for log recovery and unmount purposes, in xfs_log_writable()
405 struct xlog *log = mp->m_log; in xfs_log_regrant() local
409 if (xlog_is_shutdown(log)) in xfs_log_regrant()
417 * the log. Just add one to the existing tid so that we can see chains in xfs_log_regrant()
418 * of rolling transactions in the log easily. in xfs_log_regrant()
422 xlog_grant_push_ail(log, tic->t_unit_res); in xfs_log_regrant()
428 trace_xfs_log_regrant(log, tic); in xfs_log_regrant()
430 error = xlog_grant_head_check(log, &log->l_write_head, tic, in xfs_log_regrant()
435 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_regrant()
436 trace_xfs_log_regrant_exit(log, tic); in xfs_log_regrant()
437 xlog_verify_grant_tail(log); in xfs_log_regrant()
452 * Reserve log space and return a ticket corresponding to the reservation.
454 * Each reservation is going to reserve extra space for a log record header.
455 * When writes happen to the on-disk log, we don't subtract the length of the
456 * log record header from any reservation. By wasting space in each
467 struct xlog *log = mp->m_log; in xfs_log_reserve() local
472 if (xlog_is_shutdown(log)) in xfs_log_reserve()
478 tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent); in xfs_log_reserve()
481 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt in xfs_log_reserve()
484 trace_xfs_log_reserve(log, tic); in xfs_log_reserve()
486 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, in xfs_log_reserve()
491 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); in xfs_log_reserve()
492 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_reserve()
493 trace_xfs_log_reserve_exit(log, tic); in xfs_log_reserve()
494 xlog_verify_grant_tail(log); in xfs_log_reserve()
509 * Run all the pending iclog callbacks and wake log force waiters and iclog
511 * don't care what order we process callbacks here because the log is shut down
524 struct xlog *log) in xlog_state_shutdown_callbacks() argument
529 iclog = log->l_iclog; in xlog_state_shutdown_callbacks()
536 spin_unlock(&log->l_icloglock); in xlog_state_shutdown_callbacks()
540 spin_lock(&log->l_icloglock); in xlog_state_shutdown_callbacks()
543 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_shutdown_callbacks()
545 wake_up_all(&log->l_flush_wait); in xlog_state_shutdown_callbacks()
553 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
555 * within the iclog. We need to ensure that the log tail does not move beyond
570 struct xlog *log, in xlog_state_release_iclog() argument
577 lockdep_assert_held(&log->l_icloglock); in xlog_state_release_iclog()
581 * Grabbing the current log tail needs to be atomic w.r.t. the writing in xlog_state_release_iclog()
582 * of the tail LSN into the iclog so we guarantee that the log tail does in xlog_state_release_iclog()
589 tail_lsn = xlog_assign_tail_lsn(log->l_mp); in xlog_state_release_iclog()
595 if (xlog_is_shutdown(log)) { in xlog_state_release_iclog()
602 xlog_state_shutdown_callbacks(log); in xlog_state_release_iclog()
615 xlog_verify_tail_lsn(log, iclog); in xlog_state_release_iclog()
618 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
619 xlog_sync(log, iclog, ticket); in xlog_state_release_iclog()
620 spin_lock(&log->l_icloglock); in xlog_state_release_iclog()
625 * Mount a log filesystem
628 * log_target - buftarg of on-disk log device
630 * num_bblocks - Number of BBSIZE blocks in on-disk log
641 struct xlog *log; in xfs_log_mount() local
656 log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); in xfs_log_mount()
657 if (IS_ERR(log)) { in xfs_log_mount()
658 error = PTR_ERR(log); in xfs_log_mount()
661 mp->m_log = log; in xfs_log_mount()
664 * Validate the given log space and drop a critical message via syslog in xfs_log_mount()
665 * if the log size is so small that it would lead to unexpected in xfs_log_mount()
666 * situations in the transaction log space reservation stage. in xfs_log_mount()
670 * remedy the situation as there is no way to grow the log (short of in xfs_log_mount()
675 * filesystem with a log that is too small. in xfs_log_mount()
681 "Log size %d blocks too small, minimum size is %d blocks", in xfs_log_mount()
686 "Log size %d blocks too large, maximum size is %lld blocks", in xfs_log_mount()
691 "log size %lld bytes too large, maximum size is %lld bytes", in xfs_log_mount()
698 "log stripe unit %u bytes must be a multiple of block size", in xfs_log_mount()
705 * Log check errors are always fatal on v5; or whenever bad in xfs_log_mount()
709 xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!"); in xfs_log_mount()
713 xfs_crit(mp, "Log size out of supported range."); in xfs_log_mount()
715 "Continuing onwards, but if log hangs are experienced then please report this message in the bug re… in xfs_log_mount()
719 * Initialize the AIL now we have a log. in xfs_log_mount()
726 log->l_ailp = mp->m_ail; in xfs_log_mount()
729 * skip log recovery on a norecovery mount. pretend it all in xfs_log_mount()
734 * log recovery ignores readonly state and so we need to clear in xfs_log_mount()
739 error = xlog_recover(log); in xfs_log_mount()
743 xfs_warn(mp, "log mount/recovery failed: error %d", in xfs_log_mount()
745 xlog_recover_cancel(log); in xfs_log_mount()
750 error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj, in xfs_log_mount()
751 "log"); in xfs_log_mount()
756 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); in xfs_log_mount()
759 * Now the log has been fully initialised and we know where our in xfs_log_mount()
763 xlog_cil_init_post_recovery(log); in xfs_log_mount()
770 xlog_dealloc_log(log); in xfs_log_mount()
781 * If we finish recovery successfully, start the background log work. If we are
789 struct xlog *log = mp->m_log; in xfs_log_mount_finish() local
799 * log recovery ignores readonly state and so we need to clear in xfs_log_mount_finish()
805 * During the second phase of log recovery, we need iget and in xfs_log_mount_finish()
808 * of inodes before we're done replaying log items on those in xfs_log_mount_finish()
817 * in log recovery failure. We have to evict the unreferenced in xfs_log_mount_finish()
826 if (xlog_recovery_needed(log)) in xfs_log_mount_finish()
827 error = xlog_recover_finish(log); in xfs_log_mount_finish()
832 * Drain the buffer LRU after log recovery. This is required for v4 in xfs_log_mount_finish()
840 if (xlog_recovery_needed(log)) { in xfs_log_mount_finish()
852 clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); in xfs_log_mount_finish()
856 /* Make sure the log is dead if we're returning failure. */ in xfs_log_mount_finish()
857 ASSERT(!error || xlog_is_shutdown(log)); in xfs_log_mount_finish()
864 * the log.
891 * log force state machine. Waiting on ic_force_wait ensures iclog completions
900 struct xlog *log = iclog->ic_log; in xlog_wait_on_iclog() local
903 if (!xlog_is_shutdown(log) && in xlog_wait_on_iclog()
906 XFS_STATS_INC(log->l_mp, xs_log_force_sleep); in xlog_wait_on_iclog()
907 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xlog_wait_on_iclog()
909 spin_unlock(&log->l_icloglock); in xlog_wait_on_iclog()
912 if (xlog_is_shutdown(log)) in xlog_wait_on_iclog()
924 struct xlog *log, in xlog_write_unmount_record() argument
959 return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len); in xlog_write_unmount_record()
964 * log.
968 struct xlog *log) in xlog_unmount_write() argument
970 struct xfs_mount *mp = log->l_mp; in xlog_unmount_write()
979 error = xlog_write_unmount_record(log, tic); in xlog_unmount_write()
982 * transitioning log state to shutdown. Just continue... in xlog_unmount_write()
988 spin_lock(&log->l_icloglock); in xlog_unmount_write()
989 iclog = log->l_iclog; in xlog_unmount_write()
994 trace_xfs_log_umount_write(log, tic); in xlog_unmount_write()
995 xfs_log_ticket_ungrant(log, tic); in xlog_unmount_write()
1001 struct xlog *log) in xfs_log_unmount_verify_iclog() argument
1003 struct xlog_in_core *iclog = log->l_iclog; in xfs_log_unmount_verify_iclog()
1008 } while ((iclog = iclog->ic_next) != log->l_iclog); in xfs_log_unmount_verify_iclog()
1022 struct xlog *log = mp->m_log; in xfs_log_unmount_write() local
1029 if (xlog_is_shutdown(log)) in xfs_log_unmount_write()
1034 * record to force log recovery at next mount, after which the summary in xfs_log_unmount_write()
1045 xfs_log_unmount_verify_iclog(log); in xfs_log_unmount_write()
1046 xlog_unmount_write(log); in xfs_log_unmount_write()
1050 * Empty the log for unmount/freeze.
1052 * To do this, we first need to shut down the background log work so it is not
1053 * trying to cover the log as we clean up. We then need to unpin all objects in
1054 * the log so we can then flush them out. Once they have completed their IO and
1055 * run the callbacks removing themselves from the AIL, we can cover the log.
1062 * Clear log incompat features since we're quiescing the log. Report in xfs_log_quiesce()
1063 * failures, though it's not fatal to have a higher log feature in xfs_log_quiesce()
1064 * protection level than the log contents actually require. in xfs_log_quiesce()
1072 "Failed to clear log incompat features on quiesce"); in xfs_log_quiesce()
1102 * Shut down and release the AIL and Log.
1105 * from the AIL so that the log is empty before we write the unmount record to
1106 * the log. Once this is done, we can tear down the AIL and the log.
1143 * Wake up processes waiting for log space after we have moved the log tail.
1149 struct xlog *log = mp->m_log; in xfs_log_space_wake() local
1152 if (xlog_is_shutdown(log)) in xfs_log_space_wake()
1155 if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
1156 ASSERT(!xlog_in_recovery(log)); in xfs_log_space_wake()
1158 spin_lock(&log->l_write_head.lock); in xfs_log_space_wake()
1159 free_bytes = xlog_space_left(log, &log->l_write_head.grant); in xfs_log_space_wake()
1160 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); in xfs_log_space_wake()
1161 spin_unlock(&log->l_write_head.lock); in xfs_log_space_wake()
1164 if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()
1165 ASSERT(!xlog_in_recovery(log)); in xfs_log_space_wake()
1167 spin_lock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1168 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xfs_log_space_wake()
1169 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); in xfs_log_space_wake()
1170 spin_unlock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1176 * covered. To begin the transition to the idle state firstly the log needs to
1178 * we start attempting to cover the log.
1181 * informed that dummy transactions are required to move the log into the idle
1185 * cover the log as we may be in a situation where there isn't log space
1187 * tail of the log is pinned by an item that is modified in the CIL. Hence
1189 * can't start trying to idle the log until both the CIL and AIL are empty.
1195 struct xlog *log = mp->m_log; in xfs_log_need_covered() local
1198 if (!xlog_cil_empty(log)) in xfs_log_need_covered()
1201 spin_lock(&log->l_icloglock); in xfs_log_need_covered()
1202 switch (log->l_covered_state) { in xfs_log_need_covered()
1209 if (xfs_ail_min_lsn(log->l_ailp)) in xfs_log_need_covered()
1211 if (!xlog_iclogs_empty(log)) in xfs_log_need_covered()
1215 if (log->l_covered_state == XLOG_STATE_COVER_NEED) in xfs_log_need_covered()
1216 log->l_covered_state = XLOG_STATE_COVER_DONE; in xfs_log_need_covered()
1218 log->l_covered_state = XLOG_STATE_COVER_DONE2; in xfs_log_need_covered()
1224 spin_unlock(&log->l_icloglock); in xfs_log_need_covered()
1229 * Explicitly cover the log. This is similar to background log covering but
1231 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1250 * state machine if the log requires covering. Therefore, we must call in xfs_log_cover()
1254 * Fall into the covering sequence if the log needs covering or the in xfs_log_cover()
1264 * To cover the log, commit the superblock twice (at most) in in xfs_log_cover()
1269 * covering the log. Push the AIL one more time to leave it empty, as in xfs_log_cover()
1283 * We may be holding the log iclog lock upon entering this routine.
1289 struct xlog *log = mp->m_log; in xlog_assign_tail_lsn_locked() local
1296 * To make sure we always have a valid LSN for the log tail we keep in xlog_assign_tail_lsn_locked()
1297 * track of the last LSN which was committed in log->l_last_sync_lsn, in xlog_assign_tail_lsn_locked()
1304 tail_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_assign_tail_lsn_locked()
1305 trace_xfs_log_assign_tail_lsn(log, tail_lsn); in xlog_assign_tail_lsn_locked()
1306 atomic64_set(&log->l_tail_lsn, tail_lsn); in xlog_assign_tail_lsn_locked()
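The tail-LSN fragments above pick either the minimum LSN in the AIL or l_last_sync_lsn when the AIL is empty. An LSN packs the cycle number into the upper 32 bits and the block number into the lower 32 bits, so for non-negative values "newer than" is plain integer ordering. A small sketch of that packing (helper names are illustrative, not the kernel macros):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t lsn_t;

/* cycle in the top half, block number in the bottom half */
static lsn_t assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((lsn_t)cycle << 32) | block;
}

static uint32_t lsn_cycle(lsn_t lsn) { return (uint32_t)(lsn >> 32); }
static uint32_t lsn_block(lsn_t lsn) { return (uint32_t)lsn; }

int main(void)
{
	lsn_t tail = assign_lsn(1, 0);		/* freshly initialised tail: cycle 1, block 0 */
	lsn_t last_sync = assign_lsn(2, 4096);	/* last checkpoint known to be on disk */

	/* With an empty AIL the tail moves up to the last sync LSN. */
	if (last_sync > tail)
		tail = last_sync;

	printf("tail: cycle %u, block %u\n", lsn_cycle(tail), lsn_block(tail));
	return 0;
}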
1324 * Return the space in the log between the tail and the head. The head
1328 * in the log. This works for all places where this function is called
1334 * but then treat it as if the log is empty.
1336 * If the log is shut down, the head and tail may be invalid or out of whack, so
1342 struct xlog *log, in xlog_space_left() argument
1351 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); in xlog_space_left()
1354 return log->l_logsize - (head_bytes - tail_bytes); in xlog_space_left()
1359 if (xlog_is_shutdown(log)) in xlog_space_left()
1360 return log->l_logsize; in xlog_space_left()
1369 * return the size of the log as the amount of space left. in xlog_space_left()
1371 xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); in xlog_space_left()
1372 xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d", in xlog_space_left()
1374 xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d", in xlog_space_left()
1377 return log->l_logsize; in xlog_space_left()
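The xlog_space_left() fragments cover three cases: head and tail in the same cycle, head wrapped one cycle past the tail, and the "head behind tail" condition that is reported and treated as a full log (the same value returned on shutdown). A sketch of that calculation under those assumptions, using bytes-into-cycle positions rather than the packed kernel representation:

#include <stdio.h>

/*
 * Illustrative version of the free-space cases described above.
 * head/tail are given as (cycle, bytes into that cycle); logsize is in bytes.
 */
static int space_left(int logsize, int head_cycle, int head_bytes,
		      int tail_cycle, int tail_bytes)
{
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		return logsize - (head_bytes - tail_bytes);

	if (tail_cycle + 1 == head_cycle && head_bytes <= tail_bytes)
		return tail_bytes - head_bytes;

	/* Head behind tail: complain and return the size of the log. */
	fprintf(stderr, "head behind tail\n");
	return logsize;
}

int main(void)
{
	/* Same cycle: 1 MiB log, head 64 KiB ahead of the tail -> 983040 free. */
	printf("%d\n", space_left(1 << 20, 7, 196608, 7, 131072));

	/* Head wrapped into the next cycle, 16 KiB behind the tail -> 16384 free. */
	printf("%d\n", space_left(1 << 20, 8, 114688, 7, 131072));
	return 0;
}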
1387 struct xlog *log = iclog->ic_log; in xlog_ioend_work() local
1400 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) { in xlog_ioend_work()
1401 xfs_alert(log->l_mp, "log I/O error %d", error); in xlog_ioend_work()
1402 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_ioend_work()
1418 * Return size of each in-core log record buffer.
1428 struct xlog *log) in xlog_get_iclog_buffer_size() argument
1435 log->l_iclog_bufs = mp->m_logbufs; in xlog_get_iclog_buffer_size()
1436 log->l_iclog_size = mp->m_logbsize; in xlog_get_iclog_buffer_size()
1441 log->l_iclog_heads = in xlog_get_iclog_buffer_size()
1443 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT; in xlog_get_iclog_buffer_size()
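The two assignments above size the in-core log record header: one 512-byte header sector is needed for roughly every 32 KiB of record data so each basic block of the payload can have its cycle number stamped (see the xlog_pack_data() fragments further down), and the header bytes come off the usable space of the buffer. A worked example, assuming a 256 KiB log buffer and the usual 32 KiB-per-header-sector figure:

#include <stdio.h>

#define BBSHIFT			9		/* 512-byte basic blocks */
#define HEADER_CYCLE_SIZE	(32 * 1024)	/* payload covered per header sector (assumed) */

int main(void)
{
	int logbsize = 256 * 1024;		/* assumed in-core buffer size */
	int heads = (logbsize + HEADER_CYCLE_SIZE - 1) / HEADER_CYCLE_SIZE;
	int hsize = heads << BBSHIFT;

	/* 256 KiB buffer -> 8 header sectors -> 4096 bytes of header space */
	printf("iclog_heads = %d, iclog_hsize = %d bytes\n", heads, hsize);
	printf("payload per iclog = %d bytes\n", logbsize - hsize);
	return 0;
}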
1455 * Clear the log incompat flags if we have the opportunity.
1457 * This only happens if we're about to log the second dummy transaction as part
1458 * of covering the log and we can get the log incompat feature usage lock.
1462 struct xlog *log) in xlog_clear_incompat() argument
1464 struct xfs_mount *mp = log->l_mp; in xlog_clear_incompat()
1470 if (log->l_covered_state != XLOG_STATE_COVER_DONE2) in xlog_clear_incompat()
1473 if (!down_write_trylock(&log->l_incompat_users)) in xlog_clear_incompat()
1477 up_write(&log->l_incompat_users); in xlog_clear_incompat()
1482 * disk. If there is nothing dirty, then we might need to cover the log to
1489 struct xlog *log = container_of(to_delayed_work(work), in xfs_log_worker() local
1491 struct xfs_mount *mp = log->l_mp; in xfs_log_worker()
1496 * Dump a transaction into the log that contains no real change. in xfs_log_worker()
1497 * This is needed to stamp the current tail LSN into the log in xfs_log_worker()
1502 * will prevent log covering from making progress. Hence we in xfs_log_worker()
1503 * synchronously log the superblock instead to ensure the in xfs_log_worker()
1506 xlog_clear_incompat(log); in xfs_log_worker()
1519 * This routine initializes some of the log structure for a given mount point.
1530 struct xlog *log; in xlog_alloc_log() local
1538 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); in xlog_alloc_log()
1539 if (!log) { in xlog_alloc_log()
1540 xfs_warn(mp, "Log allocation failed: No memory!"); in xlog_alloc_log()
1544 log->l_mp = mp; in xlog_alloc_log()
1545 log->l_targ = log_target; in xlog_alloc_log()
1546 log->l_logsize = BBTOB(num_bblks); in xlog_alloc_log()
1547 log->l_logBBstart = blk_offset; in xlog_alloc_log()
1548 log->l_logBBsize = num_bblks; in xlog_alloc_log()
1549 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_alloc_log()
1550 set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); in xlog_alloc_log()
1551 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); in xlog_alloc_log()
1553 log->l_prev_block = -1; in xlog_alloc_log()
1554 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ in xlog_alloc_log()
1555 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); in xlog_alloc_log()
1556 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); in xlog_alloc_log()
1557 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ in xlog_alloc_log()
1560 log->l_iclog_roundoff = mp->m_sb.sb_logsunit; in xlog_alloc_log()
1562 log->l_iclog_roundoff = BBSIZE; in xlog_alloc_log()
1564 xlog_grant_head_init(&log->l_reserve_head); in xlog_alloc_log()
1565 xlog_grant_head_init(&log->l_write_head); in xlog_alloc_log()
1571 xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)", in xlog_alloc_log()
1578 xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)", in xlog_alloc_log()
1583 /* for larger sector sizes, must have v2 or external log */ in xlog_alloc_log()
1584 if (log2_size && log->l_logBBstart > 0 && in xlog_alloc_log()
1587 "log sector size (0x%x) invalid for configuration.", in xlog_alloc_log()
1592 log->l_sectBBsize = 1 << log2_size; in xlog_alloc_log()
1594 init_rwsem(&log->l_incompat_users); in xlog_alloc_log()
1596 xlog_get_iclog_buffer_size(mp, log); in xlog_alloc_log()
1598 spin_lock_init(&log->l_icloglock); in xlog_alloc_log()
1599 init_waitqueue_head(&log->l_flush_wait); in xlog_alloc_log()
1601 iclogp = &log->l_iclog; in xlog_alloc_log()
1609 ASSERT(log->l_iclog_size >= 4096); in xlog_alloc_log()
1610 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_alloc_log()
1611 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) * in xlog_alloc_log()
1622 iclog->ic_data = kvzalloc(log->l_iclog_size, in xlog_alloc_log()
1630 xfs_has_logv2(log->l_mp) ? 2 : 1); in xlog_alloc_log()
1631 head->h_size = cpu_to_be32(log->l_iclog_size); in xlog_alloc_log()
1636 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; in xlog_alloc_log()
1638 iclog->ic_log = log; in xlog_alloc_log()
1641 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1650 *iclogp = log->l_iclog; /* complete ring */ in xlog_alloc_log()
1651 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ in xlog_alloc_log()
1653 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s", in xlog_alloc_log()
1657 if (!log->l_ioend_workqueue) in xlog_alloc_log()
1660 error = xlog_cil_init(log); in xlog_alloc_log()
1663 return log; in xlog_alloc_log()
1666 destroy_workqueue(log->l_ioend_workqueue); in xlog_alloc_log()
1668 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1672 if (prev_iclog == log->l_iclog) in xlog_alloc_log()
1676 kmem_free(log); in xlog_alloc_log()
1682 * Compute the LSN that we'd need to push the log tail towards in order to have
1683 * (a) enough on-disk log space to log the number of bytes specified, (b) at
1684 * least 25% of the log space free, and (c) at least 256 blocks free. If the
1685 * log free space already meets all three thresholds, this function returns
1690 struct xlog *log, in xlog_grant_push_threshold() argument
1701 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); in xlog_grant_push_threshold()
1703 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xlog_grant_push_threshold()
1708 * log to the maximum of what the caller needs, one quarter of the in xlog_grant_push_threshold()
1709 * log, and 256 blocks. in xlog_grant_push_threshold()
1712 free_threshold = max(free_threshold, (log->l_logBBsize >> 2)); in xlog_grant_push_threshold()
1717 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, in xlog_grant_push_threshold()
1720 if (threshold_block >= log->l_logBBsize) { in xlog_grant_push_threshold()
1721 threshold_block -= log->l_logBBsize; in xlog_grant_push_threshold()
1728 * log record known to be on disk. Use a snapshot of the last sync lsn in xlog_grant_push_threshold()
1731 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_grant_push_threshold()
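The fragments above compute where to push the AIL: the free-space target is the largest of the bytes requested, a quarter of the log, and 256 blocks, and the resulting threshold is the current tail plus that many blocks, wrapped into the next cycle if it runs off the end of the log. A sketch of that block arithmetic (positions expressed as cycle/block pairs, constants taken from the comment above):

#include <stdio.h>

#define BBSHIFT		9
#define BTOBB(bytes)	(((bytes) + (1 << BBSHIFT) - 1) >> BBSHIFT)

/* Free-space target and push threshold, in 512-byte blocks. */
static void push_threshold(int log_blocks, int need_bytes,
			   int tail_cycle, int tail_block,
			   int *thr_cycle, int *thr_block)
{
	int free_threshold = BTOBB(need_bytes);

	if (free_threshold < log_blocks >> 2)		/* at least 25% of the log */
		free_threshold = log_blocks >> 2;
	if (free_threshold < 256)			/* and at least 256 blocks */
		free_threshold = 256;

	*thr_cycle = tail_cycle;
	*thr_block = tail_block + free_threshold;
	if (*thr_block >= log_blocks) {			/* wrap past the physical end */
		*thr_block -= log_blocks;
		*thr_cycle += 1;
	}
}

int main(void)
{
	int cycle, block;

	/* 64 MiB log (131072 blocks), 8 KiB request, tail near the end of the cycle. */
	push_threshold(131072, 8192, 5, 120000, &cycle, &block);
	printf("push AIL towards cycle %d, block %d\n", cycle, block);	/* cycle 6, block 21696 */
	return 0;
}

The xlog_grant_push_ail() fragments that follow only issue the push when the computed threshold is valid and the log is not shut down.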
1739 * Push the tail of the log if we need to do so to maintain the free log space
1741 * policy which pushes on an lsn which is further along in the log once we
1747 struct xlog *log, in xlog_grant_push_ail() argument
1752 threshold_lsn = xlog_grant_push_threshold(log, need_bytes); in xlog_grant_push_ail()
1753 if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log)) in xlog_grant_push_ail()
1761 xfs_ail_push(log->l_ailp, threshold_lsn); in xlog_grant_push_ail()
1769 struct xlog *log, in xlog_pack_data() argument
1789 if (xfs_has_logv2(log->l_mp)) { in xlog_pack_data()
1800 for (i = 1; i < log->l_iclog_heads; i++) in xlog_pack_data()
1806 * Calculate the checksum for a log buffer.
1813 struct xlog *log, in xlog_cksum() argument
1826 if (xfs_has_logv2(log->l_mp)) { in xlog_cksum()
1878 struct xlog *log, in xlog_write_iclog() argument
1883 ASSERT(bno < log->l_logBBsize); in xlog_write_iclog()
1892 * across the log IO to achieve that. in xlog_write_iclog()
1895 if (xlog_is_shutdown(log)) { in xlog_write_iclog()
1898 * the log state machine to propagate I/O errors instead of in xlog_write_iclog()
1911 * writeback throttle from throttling log writes behind background in xlog_write_iclog()
1914 bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec, in xlog_write_iclog()
1917 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; in xlog_write_iclog()
1924 * For external log devices, we also need to flush the data in xlog_write_iclog()
1927 * but it *must* complete before we issue the external log IO. in xlog_write_iclog()
1930 * writeback from the log succeeded. Repeating the flush is in xlog_write_iclog()
1931 * not possible, hence we must shut down with log IO error to in xlog_write_iclog()
1934 if (log->l_targ != log->l_mp->m_ddev_targp && in xlog_write_iclog()
1935 blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) { in xlog_write_iclog()
1936 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_write_iclog()
1946 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_write_iclog()
1953 * If this log buffer would straddle the end of the log we will have in xlog_write_iclog()
1956 if (bno + BTOBB(count) > log->l_logBBsize) { in xlog_write_iclog()
1959 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno, in xlog_write_iclog()
1965 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart; in xlog_write_iclog()
1973 * written to the start of the log. Watch out for the header magic
1978 struct xlog *log, in xlog_split_iclog() argument
1983 unsigned int split_offset = BBTOB(log->l_logBBsize - bno); in xlog_split_iclog()
1997 struct xlog *log, in xlog_calc_iclog_size() argument
2004 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_calc_iclog_size()
2005 count = roundup(count_init, log->l_iclog_roundoff); in xlog_calc_iclog_size()
2010 ASSERT(*roundoff < log->l_iclog_roundoff); in xlog_calc_iclog_size()
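xlog_calc_iclog_size() above rounds the header plus the bytes used in the iclog up to l_iclog_roundoff (the stripe unit on v2 logs, otherwise one sector) and reports the padding added. A worked example with assumed figures; the xlog_sync() fragments below then credit the same roundoff back to both grant heads so the reservation accounting stays exact:

#include <stdio.h>

static int roundup_to(int value, int unit)
{
	return ((value + unit - 1) / unit) * unit;
}

int main(void)
{
	/* Assumed figures: 4 KiB iclog header, 13000 bytes used, 4 KiB stripe unit. */
	int hsize = 4096, offset = 13000, roundoff_unit = 4096;

	int count_init = hsize + offset;			/* 17096 */
	int count = roundup_to(count_init, roundoff_unit);	/* 20480 */
	int roundoff = count - count_init;			/* 3384 bytes of padding */

	printf("write %d bytes, %d of them padding\n", count, roundoff);
	return 0;
}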
2015 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
2017 * ptr in the log to point to the next available iclog. This allows further
2019 * Before an in-core log can be written out, the data section must be scanned
2031 * log will require grabbing the lock though.
2033 * The entire log manager uses a logical block numbering scheme. Only
2034 * xlog_write_iclog knows about the fact that the log may not start with
2039 struct xlog *log, in xlog_sync() argument
2051 count = xlog_calc_iclog_size(log, iclog, &roundoff); in xlog_sync()
2061 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); in xlog_sync()
2062 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); in xlog_sync()
2066 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
2070 if (xfs_has_logv2(log->l_mp)) in xlog_sync()
2074 XFS_STATS_INC(log->l_mp, xs_log_writes); in xlog_sync()
2075 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); in xlog_sync()
2080 if (bno + BTOBB(count) > log->l_logBBsize) in xlog_sync()
2081 xlog_split_iclog(log, &iclog->ic_header, bno, count); in xlog_sync()
2084 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
2087 * Intentionally corrupt the log record CRC based on the error injection in xlog_sync()
2088 * frequency, if defined. This facilitates testing log recovery in the in xlog_sync()
2089 * event of torn writes. Hence, set the IOABORT state to abort the log in xlog_sync()
2094 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { in xlog_sync()
2097 xfs_warn(log->l_mp, in xlog_sync()
2098 "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.", in xlog_sync()
2102 xlog_verify_iclog(log, iclog, count); in xlog_sync()
2103 xlog_write_iclog(log, iclog, bno, count); in xlog_sync()
2107 * Deallocate a log structure
2111 struct xlog *log) in xlog_dealloc_log() argument
2117 * Cycle all the iclogbuf locks to make sure all log IO completion in xlog_dealloc_log()
2120 iclog = log->l_iclog; in xlog_dealloc_log()
2121 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
2129 * iclog EIO error will try to shut down the log, which accesses the in xlog_dealloc_log()
2132 xlog_cil_destroy(log); in xlog_dealloc_log()
2134 iclog = log->l_iclog; in xlog_dealloc_log()
2135 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
2142 log->l_mp->m_log = NULL; in xlog_dealloc_log()
2143 destroy_workqueue(log->l_ioend_workqueue); in xlog_dealloc_log()
2144 kmem_free(log); in xlog_dealloc_log()
2152 struct xlog *log, in xlog_state_finish_copy() argument
2157 lockdep_assert_held(&log->l_icloglock); in xlog_state_finish_copy()
2191 xfs_warn(mp, " log res = %d", tp->t_log_res); in xlog_print_trans()
2192 xfs_warn(mp, " log count = %d", tp->t_log_count); in xlog_print_trans()
2197 /* dump each log item */ in xlog_print_trans()
2203 xfs_warn(mp, "log item: "); in xlog_print_trans()
2213 /* dump each iovec for the log item */ in xlog_print_trans()
2251 * Write log vectors into a single iclog which is guaranteed by the caller
2252 * to have enough space to write the entire log vector into.
2270 * Ordered log vectors have no regions to write so this in xlog_write_full()
2293 struct xlog *log = iclog->ic_log; in xlog_write_get_more_iclog_space() local
2296 spin_lock(&log->l_icloglock); in xlog_write_get_more_iclog_space()
2298 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_get_more_iclog_space()
2299 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_write_get_more_iclog_space()
2300 spin_unlock(&log->l_icloglock); in xlog_write_get_more_iclog_space()
2304 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write_get_more_iclog_space()
2315 * Write log vectors into a single iclog which is smaller than the current chain
2317 * and then stop. We return the log vector that is to be written that cannot
2343 * length otherwise log recovery will just skip over it and in xlog_write_partial()
2390 * space for log transaction opheaders left in the current in xlog_write_partial()
2444 * No more iovecs remain in this logvec so return the next log vec to in xlog_write_partial()
2452 * Write some region out to in-core log
2466 * 2. Write log operation header (header per region)
2473 * 5. Release iclog for potential flush to on-disk log.
2483 * on all log operation writes which don't contain the end of the
2484 * region. The XLOG_END_TRANS bit is used for the in-core log
2493 struct xlog *log, in xlog_write() argument
2508 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_write()
2510 xlog_print_tic_res(log->l_mp, ticket); in xlog_write()
2511 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_write()
2514 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2531 * If the entire log vec does not fit in the iclog, punt it to in xlog_write()
2559 spin_lock(&log->l_icloglock); in xlog_write()
2560 xlog_state_finish_copy(log, iclog, record_cnt, 0); in xlog_write()
2561 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_write()
2562 spin_unlock(&log->l_icloglock); in xlog_write()
2607 struct xlog *log, in xlog_state_activate_iclogs() argument
2610 struct xlog_in_core *iclog = log->l_iclog; in xlog_state_activate_iclogs()
2621 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_activate_iclogs()
2660 struct xlog *log, in xlog_state_clean_iclog() argument
2669 xlog_state_activate_iclogs(log, &iclogs_changed); in xlog_state_clean_iclog()
2673 log->l_covered_state = xlog_covered_state(log->l_covered_state, in xlog_state_clean_iclog()
2680 struct xlog *log) in xlog_get_lowest_lsn() argument
2682 struct xlog_in_core *iclog = log->l_iclog; in xlog_get_lowest_lsn()
2693 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_get_lowest_lsn()
2701 * tail of the log half way through a transaction as this may be the only
2702 * transaction in the log and moving the tail to point to the middle of it
2713 * amount of log space bound up in this committing transaction then the
2715 * freeing space in the log. Hence once we've updated the last_sync_lsn we
2717 * no longer bound by the old log head location and can move forwards and make
2722 struct xlog *log, in xlog_state_set_callback() argument
2729 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), in xlog_state_set_callback()
2735 atomic64_set(&log->l_last_sync_lsn, header_lsn); in xlog_state_set_callback()
2736 xlog_grant_push_ail(log, 0); in xlog_state_set_callback()
2746 struct xlog *log, in xlog_state_iodone_process_iclog() argument
2767 lowest_lsn = xlog_get_lowest_lsn(log); in xlog_state_iodone_process_iclog()
2770 xlog_state_set_callback(log, iclog, header_lsn); in xlog_state_iodone_process_iclog()
2791 struct xlog *log) in xlog_state_do_iclog_callbacks() argument
2792 __releases(&log->l_icloglock) in xlog_state_do_iclog_callbacks()
2793 __acquires(&log->l_icloglock) in xlog_state_do_iclog_callbacks()
2795 struct xlog_in_core *first_iclog = log->l_iclog; in xlog_state_do_iclog_callbacks()
2802 if (xlog_state_iodone_process_iclog(log, iclog)) in xlog_state_do_iclog_callbacks()
2809 spin_unlock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2816 spin_lock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2817 xlog_state_clean_iclog(log, iclog); in xlog_state_do_iclog_callbacks()
2831 struct xlog *log) in xlog_state_do_callback() argument
2836 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2837 while (xlog_state_do_iclog_callbacks(log)) { in xlog_state_do_callback()
2838 if (xlog_is_shutdown(log)) in xlog_state_do_callback()
2844 xfs_warn(log->l_mp, in xlog_state_do_callback()
2850 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE) in xlog_state_do_callback()
2851 wake_up_all(&log->l_flush_wait); in xlog_state_do_callback()
2853 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2861 * global state machine log lock.
2867 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing() local
2869 spin_lock(&log->l_icloglock); in xlog_state_done_syncing()
2875 * split log writes, on the second, we shut down the file system and in xlog_state_done_syncing()
2878 if (!xlog_is_shutdown(log)) { in xlog_state_done_syncing()
2889 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2890 xlog_state_do_callback(log); in xlog_state_done_syncing()
2894 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2904 * log's data space.
2905 * * in-core log pointer to which xlog_write() should write.
2906 * * boolean indicating this is a continued write to an in-core log.
2907 * If this is the last write, then the in-core log's offset field
2913 struct xlog *log, in xlog_state_get_iclog_space() argument
2924 spin_lock(&log->l_icloglock); in xlog_state_get_iclog_space()
2925 if (xlog_is_shutdown(log)) { in xlog_state_get_iclog_space()
2926 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2930 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2932 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); in xlog_state_get_iclog_space()
2934 /* Wait for log writes to have flushed */ in xlog_state_get_iclog_space()
2935 xlog_wait(&log->l_flush_wait, &log->l_icloglock); in xlog_state_get_iclog_space()
2952 ticket->t_curr_res -= log->l_iclog_hsize; in xlog_state_get_iclog_space()
2953 head->h_cycle = cpu_to_be32(log->l_curr_cycle); in xlog_state_get_iclog_space()
2955 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); in xlog_state_get_iclog_space()
2956 ASSERT(log->l_curr_block >= 0); in xlog_state_get_iclog_space()
2971 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2981 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_state_get_iclog_space()
2982 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2997 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
3001 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3016 struct xlog *log, in xfs_log_ticket_regrant() argument
3019 trace_xfs_log_ticket_regrant(log, ticket); in xfs_log_ticket_regrant()
3024 xlog_grant_sub_space(log, &log->l_reserve_head.grant, in xfs_log_ticket_regrant()
3026 xlog_grant_sub_space(log, &log->l_write_head.grant, in xfs_log_ticket_regrant()
3030 trace_xfs_log_ticket_regrant_sub(log, ticket); in xfs_log_ticket_regrant()
3034 xlog_grant_add_space(log, &log->l_reserve_head.grant, in xfs_log_ticket_regrant()
3036 trace_xfs_log_ticket_regrant_exit(log, ticket); in xfs_log_ticket_regrant()
3060 struct xlog *log, in xfs_log_ticket_ungrant() argument
3065 trace_xfs_log_ticket_ungrant(log, ticket); in xfs_log_ticket_ungrant()
3070 trace_xfs_log_ticket_ungrant_sub(log, ticket); in xfs_log_ticket_ungrant()
3082 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); in xfs_log_ticket_ungrant()
3083 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); in xfs_log_ticket_ungrant()
3085 trace_xfs_log_ticket_ungrant_exit(log, ticket); in xfs_log_ticket_ungrant()
3087 xfs_log_space_wake(log->l_mp); in xfs_log_ticket_ungrant()
3097 struct xlog *log, in xlog_state_switch_iclogs() argument
3102 assert_spin_locked(&log->l_icloglock); in xlog_state_switch_iclogs()
3108 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3109 log->l_prev_block = log->l_curr_block; in xlog_state_switch_iclogs()
3110 log->l_prev_cycle = log->l_curr_cycle; in xlog_state_switch_iclogs()
3112 /* roll log?: ic_offset changed later */ in xlog_state_switch_iclogs()
3113 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); in xlog_state_switch_iclogs()
3115 /* Round up to next log-sunit */ in xlog_state_switch_iclogs()
3116 if (log->l_iclog_roundoff > BBSIZE) { in xlog_state_switch_iclogs()
3117 uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff); in xlog_state_switch_iclogs()
3118 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); in xlog_state_switch_iclogs()
3121 if (log->l_curr_block >= log->l_logBBsize) { in xlog_state_switch_iclogs()
3125 * when the log wraps to the next cycle. This is to support the in xlog_state_switch_iclogs()
3129 log->l_curr_block -= log->l_logBBsize; in xlog_state_switch_iclogs()
3130 ASSERT(log->l_curr_block >= 0); in xlog_state_switch_iclogs()
3132 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3133 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) in xlog_state_switch_iclogs()
3134 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3136 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3137 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
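xlog_state_switch_iclogs() above rolls the head of the log forward: the current block advances by the iclog payload plus its header, is rounded up to the stripe unit, and wraps back to block zero with a cycle bump when it passes the physical end of the log, skipping the cycle value that matches the header magic number. A sketch of that rolling arithmetic (the magic-number constant and the example figures are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

#define BBSHIFT			9
#define BTOBB(bytes)		(((bytes) + (1 << BBSHIFT) - 1) >> BBSHIFT)
#define HEADER_MAGIC_NUM	0xFEEDbabeU	/* cycle value the head must skip (assumed) */

/* Advance the head position; all positions are in 512-byte blocks. */
static void advance_head(uint32_t *curr_cycle, int *curr_block,
			 int log_blocks, int sunit_blocks,
			 int data_bytes, int hsize_bytes)
{
	*curr_block += BTOBB(data_bytes) + BTOBB(hsize_bytes);

	if (sunit_blocks > 1)		/* round up to the next log stripe unit */
		*curr_block = ((*curr_block + sunit_blocks - 1) / sunit_blocks)
				* sunit_blocks;

	if (*curr_block >= log_blocks) {
		*curr_block -= log_blocks;
		(*curr_cycle)++;
		if (*curr_cycle == HEADER_MAGIC_NUM)
			(*curr_cycle)++;
	}
}

int main(void)
{
	uint32_t cycle = 9;
	int block = 131008;		/* near the end of a 131072-block log */

	advance_head(&cycle, &block, 131072, 8, 28672, 4096);
	printf("new head: cycle %u, block %d\n", cycle, block);	/* cycle 10, block 0 */
	return 0;
}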
3170 * Write out all data in the in-core log as of this exact moment in time.
3172 * Data may be written to the in-core log during this call. However,
3201 struct xlog *log = mp->m_log; in xfs_log_force() local
3207 xlog_cil_force(log); in xfs_log_force()
3209 spin_lock(&log->l_icloglock); in xfs_log_force()
3210 if (xlog_is_shutdown(log)) in xfs_log_force()
3213 iclog = log->l_iclog; in xfs_log_force()
3244 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3260 spin_unlock(&log->l_icloglock); in xfs_log_force()
3263 spin_unlock(&log->l_icloglock); in xfs_log_force()
3268 * Force the log to a specific LSN.
3272 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3278 * specific in-core log. When given in-core log finally completes its write
3283 struct xlog *log, in xlog_force_lsn() argument
3292 spin_lock(&log->l_icloglock); in xlog_force_lsn()
3293 if (xlog_is_shutdown(log)) in xlog_force_lsn()
3296 iclog = log->l_iclog; in xlog_force_lsn()
3300 if (iclog == log->l_iclog) in xlog_force_lsn()
3315 * refcnt so we can release the log (which drops the ref count). in xlog_force_lsn()
3325 &log->l_icloglock); in xlog_force_lsn()
3358 spin_unlock(&log->l_icloglock); in xlog_force_lsn()
3361 spin_unlock(&log->l_icloglock); in xlog_force_lsn()
3366 * Force the log to a specific checkpoint sequence.
3371 * a synchronous log force, we will wait on the iclog with the LSN returned by
3381 struct xlog *log = mp->m_log; in xfs_log_force_seq() local
3389 lsn = xlog_cil_force_seq(log, seq); in xfs_log_force_seq()
3393 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false); in xfs_log_force_seq()
3396 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true); in xfs_log_force_seq()
3423 * Figure out the total log space unit (in bytes) that would be
3424 * required for a log ticket.
3428 struct xlog *log, in xlog_calc_unit_res() argument
3436 * Permanent reservations have up to 'cnt'-1 active log operations in xlog_calc_unit_res()
3437 * in the log. A unit in this case is the amount of space for one in xlog_calc_unit_res()
3438 * of these log operations. Normal reservations have a cnt of 1 in xlog_calc_unit_res()
3442 * which occupy space in the on-disk log. in xlog_calc_unit_res()
3457 * Therefore the commit record is in its own Log Record. in xlog_calc_unit_res()
3479 * increase the space required enough to require more log and op in xlog_calc_unit_res()
3487 * Fundamentally, this means we must pass the entire log vector to in xlog_calc_unit_res()
3490 iclog_space = log->l_iclog_size - log->l_iclog_hsize; in xlog_calc_unit_res()
3502 unit_bytes += log->l_iclog_hsize * num_headers; in xlog_calc_unit_res()
3505 unit_bytes += log->l_iclog_hsize; in xlog_calc_unit_res()
3508 unit_bytes += 2 * log->l_iclog_roundoff; in xlog_calc_unit_res()
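The unit-reservation fragments above list what a single reservation unit has to cover beyond the caller's payload: operation headers, one record header for every iclog the payload may cross, a separate record for the commit, and stripe-unit padding for both the data and the commit record. A rough reconstruction of that sizing; the structure sizes and the simplified header count are assumptions, not the kernel's exact formula:

#include <stdio.h>

/* Rough sketch of the reservation sizing described above (illustrative only). */
static int calc_unit_res(int unit_bytes, int iclog_size, int iclog_hsize,
			 int roundoff)
{
	int op_header = 12;			/* assumed per-op header size */
	int iclog_space = iclog_size - iclog_hsize;
	int num_headers;

	unit_bytes += 2 * op_header;		/* start record + transaction header */

	/* one record header for every iclog the payload may cross */
	num_headers = (unit_bytes + iclog_space - 1) / iclog_space;
	unit_bytes += num_headers * iclog_hsize;

	/* the commit record goes into its own log record */
	unit_bytes += iclog_hsize;

	/* roundoff padding for the data record and for the commit record */
	unit_bytes += 2 * roundoff;

	return unit_bytes;
}

int main(void)
{
	/* 100 KiB payload, 32 KiB iclogs with 512-byte headers, 4 KiB stripe unit. */
	printf("unit reservation = %d bytes\n",
	       calc_unit_res(100 * 1024, 32768, 512, 4096));
	return 0;
}

For a permanent reservation the ticket then carries this unit multiplied by the number of transaction rolls it expects to make, as described in the fragments above.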
3524 * Allocate and initialise a new log ticket.
3528 struct xlog *log, in xlog_ticket_alloc() argument
3538 unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs); in xlog_ticket_alloc()
3568 struct xlog *log) in xlog_verify_grant_tail() argument
3573 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); in xlog_verify_grant_tail()
3574 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); in xlog_verify_grant_tail()
3577 !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) { in xlog_verify_grant_tail()
3578 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3583 !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) { in xlog_verify_grant_tail()
3584 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3593 struct xlog *log, in xlog_verify_tail_lsn() argument
3599 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { in xlog_verify_tail_lsn()
3601 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); in xlog_verify_tail_lsn()
3602 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3603 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3605 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); in xlog_verify_tail_lsn()
3607 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) in xlog_verify_tail_lsn()
3608 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); in xlog_verify_tail_lsn()
3610 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; in xlog_verify_tail_lsn()
3612 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3622 * 4. Check fields of each log operation header for:
3625 * C. Length in log record header is correct according to the
3628 * log, check the preceding blocks of the physical log to make sure all
3633 struct xlog *log, in xlog_verify_iclog() argument
3647 spin_lock(&log->l_icloglock); in xlog_verify_iclog()
3648 icptr = log->l_iclog; in xlog_verify_iclog()
3649 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) in xlog_verify_iclog()
3652 if (icptr != log->l_iclog) in xlog_verify_iclog()
3653 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); in xlog_verify_iclog()
3654 spin_unlock(&log->l_icloglock); in xlog_verify_iclog()
3656 /* check log magic numbers */ in xlog_verify_iclog()
3658 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); in xlog_verify_iclog()
3664 xfs_emerg(log->l_mp, "%s: unexpected magic num", in xlog_verify_iclog()
3694 xfs_warn(log->l_mp, in xlog_verify_iclog()
3721 * Perform a forced shutdown on the log.
3723 * This can be called from low level log code to trigger a shutdown, or from the
3727 * a. if the shutdown was not due to a log IO error, flush the logs to
3729 * b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3731 * c. Tasks sleeping on log reservations, pinned objects and
3733 * d. The mount is also marked as shut down so that log triggered shutdowns
3736 * Return true if the shutdown cause was a log IO error and we actually shut the
3737 * log down.
3741 struct xlog *log, in xlog_force_shutdown() argument
3746 if (!log) in xlog_force_shutdown()
3750 * Flush all the completed transactions to disk before marking the log in xlog_force_shutdown()
3751 * being shut down. We need to do this first as shutting down the log in xlog_force_shutdown()
3752 * before the force will prevent the log force from flushing the iclogs in xlog_force_shutdown()
3756 * we don't want to touch the log because we don't want to perturb the in xlog_force_shutdown()
3758 * avoid a log force in this case. in xlog_force_shutdown()
3760 * If we are shutting down due to a log IO error, then we must avoid in xlog_force_shutdown()
3761 * trying to write the log as that may just result in more IO errors and in xlog_force_shutdown()
3764 if (!log_error && !xlog_in_recovery(log)) in xlog_force_shutdown()
3765 xfs_log_force(log->l_mp, XFS_LOG_SYNC); in xlog_force_shutdown()
3773 * Much of the log state machine transitions assume that shutdown state in xlog_force_shutdown()
3774 * cannot change once they hold the log->l_icloglock. Hence we need to in xlog_force_shutdown()
3778 spin_lock(&log->l_icloglock); in xlog_force_shutdown()
3779 if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) { in xlog_force_shutdown()
3780 spin_unlock(&log->l_icloglock); in xlog_force_shutdown()
3783 spin_unlock(&log->l_icloglock); in xlog_force_shutdown()
3786 * If this log shutdown also sets the mount shutdown state, issue a in xlog_force_shutdown()
3789 if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) { in xlog_force_shutdown()
3790 xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR, in xlog_force_shutdown()
3791 "Filesystem has been shut down due to log error (0x%x).", in xlog_force_shutdown()
3793 xfs_alert(log->l_mp, in xlog_force_shutdown()
3800 * We don't want anybody waiting for log reservations after this. That in xlog_force_shutdown()
3806 xlog_grant_head_wake_all(&log->l_reserve_head); in xlog_force_shutdown()
3807 xlog_grant_head_wake_all(&log->l_write_head); in xlog_force_shutdown()
3811 * as if the log writes were completed. The abort handling in the log in xlog_force_shutdown()
3815 spin_lock(&log->l_cilp->xc_push_lock); in xlog_force_shutdown()
3816 wake_up_all(&log->l_cilp->xc_start_wait); in xlog_force_shutdown()
3817 wake_up_all(&log->l_cilp->xc_commit_wait); in xlog_force_shutdown()
3818 spin_unlock(&log->l_cilp->xc_push_lock); in xlog_force_shutdown()
3820 spin_lock(&log->l_icloglock); in xlog_force_shutdown()
3821 xlog_state_shutdown_callbacks(log); in xlog_force_shutdown()
3822 spin_unlock(&log->l_icloglock); in xlog_force_shutdown()
3824 wake_up_var(&log->l_opstate); in xlog_force_shutdown()
3830 struct xlog *log) in xlog_iclogs_empty() argument
3834 iclog = log->l_iclog; in xlog_iclogs_empty()
3842 } while (iclog != log->l_iclog); in xlog_iclogs_empty()
3855 struct xlog *log = mp->m_log; in xfs_log_check_lsn() local
3859 * norecovery mode skips mount-time log processing and unconditionally in xfs_log_check_lsn()
3877 spin_lock(&log->l_icloglock); in xfs_log_check_lsn()
3882 log->l_curr_cycle, log->l_curr_block); in xfs_log_check_lsn()
3883 spin_unlock(&log->l_icloglock); in xfs_log_check_lsn()
3890 * Notify the log that we're about to start using a feature that is protected
3891 * by a log incompat feature flag. This will prevent log covering from
3896 struct xlog *log) in xlog_use_incompat_feat() argument
3898 down_read(&log->l_incompat_users); in xlog_use_incompat_feat()
3901 /* Notify the log that we've finished using log incompat features. */
3904 struct xlog *log) in xlog_drop_incompat_feat() argument
3906 up_read(&log->l_incompat_users); in xlog_drop_incompat_feat()