Lines matching refs:log (fs/xfs/xfs_log.c)
32 struct xlog *log,
45 struct xlog *log,
49 struct xlog *log,
53 struct xlog *log);
59 struct xlog *log,
64 struct xlog *log,
72 struct xlog *log,
76 struct xlog *log,
81 struct xlog *log,
86 struct xlog *log,
90 struct xlog *log,
94 struct xlog *log,
100 struct xlog *log,
104 struct xlog *log);
107 struct xlog *log,
113 struct xlog *log,
125 struct xlog *log);
129 struct xlog *log, in xlog_grant_sub_space() argument
143 space += log->l_logsize; in xlog_grant_sub_space()
155 struct xlog *log, in xlog_grant_add_space() argument
168 tmp = log->l_logsize - space; in xlog_grant_add_space()
205 struct xlog *log, in xlog_ticket_reservation() argument
209 if (head == &log->l_write_head) { in xlog_ticket_reservation()
222 struct xlog *log, in xlog_grant_head_wake() argument
230 need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_wake()
235 trace_xfs_log_grant_wake_up(log, tic); in xlog_grant_head_wake()
244 struct xlog *log, in xlog_grant_head_wait() argument
253 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
255 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wait()
260 XFS_STATS_INC(log->l_mp, xs_sleep_logspace); in xlog_grant_head_wait()
262 trace_xfs_log_grant_sleep(log, tic); in xlog_grant_head_wait()
264 trace_xfs_log_grant_wake(log, tic); in xlog_grant_head_wait()
267 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
269 } while (xlog_space_left(log, &head->grant) < need_bytes); in xlog_grant_head_wait()
297 struct xlog *log, in xlog_grant_head_check() argument
305 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xlog_grant_head_check()
313 *need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_check()
314 free_bytes = xlog_space_left(log, &head->grant); in xlog_grant_head_check()
317 if (!xlog_grant_head_wake(log, head, &free_bytes) || in xlog_grant_head_check()
319 error = xlog_grant_head_wait(log, head, tic, in xlog_grant_head_check()
325 error = xlog_grant_head_wait(log, head, tic, *need_bytes); in xlog_grant_head_check()
364 struct xlog *log = mp->m_log; in xfs_log_regrant() local
368 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_regrant()
381 xlog_grant_push_ail(log, tic->t_unit_res); in xfs_log_regrant()
389 trace_xfs_log_regrant(log, tic); in xfs_log_regrant()
391 error = xlog_grant_head_check(log, &log->l_write_head, tic, in xfs_log_regrant()
396 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_regrant()
397 trace_xfs_log_regrant_exit(log, tic); in xfs_log_regrant()
398 xlog_verify_grant_tail(log); in xfs_log_regrant()
429 struct xlog *log = mp->m_log; in xfs_log_reserve() local
436 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_reserve()
442 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, in xfs_log_reserve()
449 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt in xfs_log_reserve()
452 trace_xfs_log_reserve(log, tic); in xfs_log_reserve()
454 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, in xfs_log_reserve()
459 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); in xfs_log_reserve()
460 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_reserve()
461 trace_xfs_log_reserve_exit(log, tic); in xfs_log_reserve()
462 xlog_verify_grant_tail(log); in xfs_log_reserve()
505 struct xlog *log = mp->m_log; in xfs_log_done() local
508 if (XLOG_FORCED_SHUTDOWN(log) || in xfs_log_done()
514 (xlog_commit_record(log, ticket, iclog, &lsn)))) { in xfs_log_done()
521 trace_xfs_log_done_nonperm(log, ticket); in xfs_log_done()
527 xlog_ungrant_log_space(log, ticket); in xfs_log_done()
529 trace_xfs_log_done_perm(log, ticket); in xfs_log_done()
531 xlog_regrant_reserve_log_space(log, ticket); in xfs_log_done()
847 struct xlog *log = mp->m_log; in xfs_log_write_unmount_record() local
874 error = xlog_write(log, &vec, tic, &lsn, NULL, flags); in xfs_log_write_unmount_record()
883 spin_lock(&log->l_icloglock); in xfs_log_write_unmount_record()
884 iclog = log->l_iclog; in xfs_log_write_unmount_record()
886 xlog_state_want_sync(log, iclog); in xfs_log_write_unmount_record()
887 spin_unlock(&log->l_icloglock); in xfs_log_write_unmount_record()
888 error = xlog_state_release_iclog(log, iclog); in xfs_log_write_unmount_record()
890 spin_lock(&log->l_icloglock); in xfs_log_write_unmount_record()
893 if (!XLOG_FORCED_SHUTDOWN(log)) { in xfs_log_write_unmount_record()
894 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xfs_log_write_unmount_record()
900 spin_unlock(&log->l_icloglock); in xfs_log_write_unmount_record()
905 trace_xfs_log_umount_write(log, tic); in xfs_log_write_unmount_record()
906 xlog_ungrant_log_space(log, tic); in xfs_log_write_unmount_record()
922 struct xlog *log = mp->m_log; in xfs_log_unmount_write() local
934 xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) { in xfs_log_unmount_write()
940 ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log))); in xfs_log_unmount_write()
943 first_iclog = iclog = log->l_iclog; in xfs_log_unmount_write()
952 if (! (XLOG_FORCED_SHUTDOWN(log))) { in xfs_log_unmount_write()
968 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
969 iclog = log->l_iclog; in xfs_log_unmount_write()
972 xlog_state_want_sync(log, iclog); in xfs_log_unmount_write()
973 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
974 error = xlog_state_release_iclog(log, iclog); in xfs_log_unmount_write()
976 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
983 &log->l_icloglock); in xfs_log_unmount_write()
985 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
1069 struct xlog *log = mp->m_log; in xfs_log_space_wake() local
1072 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_space_wake()
1075 if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
1076 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1078 spin_lock(&log->l_write_head.lock); in xfs_log_space_wake()
1079 free_bytes = xlog_space_left(log, &log->l_write_head.grant); in xfs_log_space_wake()
1080 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); in xfs_log_space_wake()
1081 spin_unlock(&log->l_write_head.lock); in xfs_log_space_wake()
1084 if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()
1085 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1087 spin_lock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1088 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xfs_log_space_wake()
1089 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); in xfs_log_space_wake()
1090 spin_unlock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1114 struct xlog *log = mp->m_log; in xfs_log_need_covered() local
1120 if (!xlog_cil_empty(log)) in xfs_log_need_covered()
1123 spin_lock(&log->l_icloglock); in xfs_log_need_covered()
1124 switch (log->l_covered_state) { in xfs_log_need_covered()
1131 if (xfs_ail_min_lsn(log->l_ailp)) in xfs_log_need_covered()
1133 if (!xlog_iclogs_empty(log)) in xfs_log_need_covered()
1137 if (log->l_covered_state == XLOG_STATE_COVER_NEED) in xfs_log_need_covered()
1138 log->l_covered_state = XLOG_STATE_COVER_DONE; in xfs_log_need_covered()
1140 log->l_covered_state = XLOG_STATE_COVER_DONE2; in xfs_log_need_covered()
1146 spin_unlock(&log->l_icloglock); in xfs_log_need_covered()
1157 struct xlog *log = mp->m_log; in xlog_assign_tail_lsn_locked() local
1172 tail_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_assign_tail_lsn_locked()
1173 trace_xfs_log_assign_tail_lsn(log, tail_lsn); in xlog_assign_tail_lsn_locked()
1174 atomic64_set(&log->l_tail_lsn, tail_lsn); in xlog_assign_tail_lsn_locked()
1207 struct xlog *log, in xlog_space_left() argument
1217 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); in xlog_space_left()
1220 free_bytes = log->l_logsize - (head_bytes - tail_bytes); in xlog_space_left()
1232 xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); in xlog_space_left()
1233 xfs_alert(log->l_mp, in xlog_space_left()
1236 xfs_alert(log->l_mp, in xlog_space_left()
1240 free_bytes = log->l_logsize; in xlog_space_left()
1307 struct xlog *log) in xlog_get_iclog_buffer_size() argument
1313 log->l_iclog_bufs = XLOG_MAX_ICLOGS; in xlog_get_iclog_buffer_size()
1315 log->l_iclog_bufs = mp->m_logbufs; in xlog_get_iclog_buffer_size()
1321 size = log->l_iclog_size = mp->m_logbsize; in xlog_get_iclog_buffer_size()
1322 log->l_iclog_size_log = 0; in xlog_get_iclog_buffer_size()
1324 log->l_iclog_size_log++; in xlog_get_iclog_buffer_size()
1336 log->l_iclog_hsize = xhdrs << BBSHIFT; in xlog_get_iclog_buffer_size()
1337 log->l_iclog_heads = xhdrs; in xlog_get_iclog_buffer_size()
1340 log->l_iclog_hsize = BBSIZE; in xlog_get_iclog_buffer_size()
1341 log->l_iclog_heads = 1; in xlog_get_iclog_buffer_size()
1347 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; in xlog_get_iclog_buffer_size()
1348 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; in xlog_get_iclog_buffer_size()
1351 log->l_iclog_hsize = BBSIZE; in xlog_get_iclog_buffer_size()
1352 log->l_iclog_heads = 1; in xlog_get_iclog_buffer_size()
1357 mp->m_logbufs = log->l_iclog_bufs; in xlog_get_iclog_buffer_size()
1359 mp->m_logbsize = log->l_iclog_size; in xlog_get_iclog_buffer_size()
1380 struct xlog *log = container_of(to_delayed_work(work), in xfs_log_worker() local
1382 struct xfs_mount *mp = log->l_mp; in xfs_log_worker()
1420 struct xlog *log; in xlog_alloc_log() local
1429 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); in xlog_alloc_log()
1430 if (!log) { in xlog_alloc_log()
1435 log->l_mp = mp; in xlog_alloc_log()
1436 log->l_targ = log_target; in xlog_alloc_log()
1437 log->l_logsize = BBTOB(num_bblks); in xlog_alloc_log()
1438 log->l_logBBstart = blk_offset; in xlog_alloc_log()
1439 log->l_logBBsize = num_bblks; in xlog_alloc_log()
1440 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_alloc_log()
1441 log->l_flags |= XLOG_ACTIVE_RECOVERY; in xlog_alloc_log()
1442 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); in xlog_alloc_log()
1444 log->l_prev_block = -1; in xlog_alloc_log()
1446 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); in xlog_alloc_log()
1447 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); in xlog_alloc_log()
1448 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ in xlog_alloc_log()
1450 xlog_grant_head_init(&log->l_reserve_head); in xlog_alloc_log()
1451 xlog_grant_head_init(&log->l_write_head); in xlog_alloc_log()
1470 if (log2_size && log->l_logBBstart > 0 && in xlog_alloc_log()
1478 log->l_sectBBsize = 1 << log2_size; in xlog_alloc_log()
1480 xlog_get_iclog_buffer_size(mp, log); in xlog_alloc_log()
1489 BTOBB(log->l_iclog_size), XBF_NO_IOACCT); in xlog_alloc_log()
1504 log->l_xbuf = bp; in xlog_alloc_log()
1506 spin_lock_init(&log->l_icloglock); in xlog_alloc_log()
1507 init_waitqueue_head(&log->l_flush_wait); in xlog_alloc_log()
1509 iclogp = &log->l_iclog; in xlog_alloc_log()
1517 ASSERT(log->l_iclog_size >= 4096); in xlog_alloc_log()
1518 for (i=0; i < log->l_iclog_bufs; i++) { in xlog_alloc_log()
1528 BTOBB(log->l_iclog_size), in xlog_alloc_log()
1542 log->l_iclog_bak[i] = &iclog->ic_header; in xlog_alloc_log()
1548 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); in xlog_alloc_log()
1549 head->h_size = cpu_to_be32(log->l_iclog_size); in xlog_alloc_log()
1554 iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize; in xlog_alloc_log()
1556 iclog->ic_log = log; in xlog_alloc_log()
1560 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1567 *iclogp = log->l_iclog; /* complete ring */ in xlog_alloc_log()
1568 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ in xlog_alloc_log()
1570 error = xlog_cil_init(log); in xlog_alloc_log()
1573 return log; in xlog_alloc_log()
1576 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1582 spinlock_destroy(&log->l_icloglock); in xlog_alloc_log()
1583 xfs_buf_free(log->l_xbuf); in xlog_alloc_log()
1585 kmem_free(log); in xlog_alloc_log()
1597 struct xlog *log, in xlog_commit_record() argument
1602 struct xfs_mount *mp = log->l_mp; in xlog_commit_record()
1615 error = xlog_write(log, &vec, ticket, commitlsnp, iclog, in xlog_commit_record()
1631 struct xlog *log, in xlog_grant_push_ail() argument
1642 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); in xlog_grant_push_ail()
1644 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xlog_grant_push_ail()
1653 free_threshold = max(free_threshold, (log->l_logBBsize >> 2)); in xlog_grant_push_ail()
1658 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, in xlog_grant_push_ail()
1661 if (threshold_block >= log->l_logBBsize) { in xlog_grant_push_ail()
1662 threshold_block -= log->l_logBBsize; in xlog_grant_push_ail()
1672 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_grant_push_ail()
1681 if (!XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_push_ail()
1682 xfs_ail_push(log->l_ailp, threshold_lsn); in xlog_grant_push_ail()
1690 struct xlog *log, in xlog_pack_data() argument
1710 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_pack_data()
1721 for (i = 1; i < log->l_iclog_heads; i++) in xlog_pack_data()
1734 struct xlog *log, in xlog_cksum() argument
1747 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_cksum()
1833 struct xlog *log, in xlog_sync() argument
1843 int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb); in xlog_sync()
1846 XFS_STATS_INC(log->l_mp, xs_log_writes); in xlog_sync()
1850 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_sync()
1853 if (v2 && log->l_mp->m_sb.sb_logsunit > 1) { in xlog_sync()
1855 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init)); in xlog_sync()
1861 ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && in xlog_sync()
1862 roundoff < log->l_mp->m_sb.sb_logsunit) in xlog_sync()
1864 (log->l_mp->m_sb.sb_logsunit <= 1 && in xlog_sync()
1868 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); in xlog_sync()
1869 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); in xlog_sync()
1872 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
1883 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); in xlog_sync()
1886 if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) { in xlog_sync()
1889 split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp))); in xlog_sync()
1890 count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)); in xlog_sync()
1914 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
1923 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { in xlog_sync()
1926 xfs_warn(log->l_mp, in xlog_sync()
1944 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) in xlog_sync()
1945 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); in xlog_sync()
1949 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); in xlog_sync()
1950 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); in xlog_sync()
1952 xlog_verify_iclog(log, iclog, count, true); in xlog_sync()
1955 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); in xlog_sync()
1975 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); in xlog_sync()
1976 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); in xlog_sync()
1979 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); in xlog_sync()
1994 struct xlog *log) in xlog_dealloc_log() argument
1999 xlog_cil_destroy(log); in xlog_dealloc_log()
2005 iclog = log->l_iclog; in xlog_dealloc_log()
2006 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
2017 xfs_buf_lock(log->l_xbuf); in xlog_dealloc_log()
2018 xfs_buf_unlock(log->l_xbuf); in xlog_dealloc_log()
2019 xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); in xlog_dealloc_log()
2020 xfs_buf_free(log->l_xbuf); in xlog_dealloc_log()
2022 iclog = log->l_iclog; in xlog_dealloc_log()
2023 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
2029 spinlock_destroy(&log->l_icloglock); in xlog_dealloc_log()
2031 log->l_mp->m_log = NULL; in xlog_dealloc_log()
2032 kmem_free(log); in xlog_dealloc_log()
2041 struct xlog *log, in xlog_state_finish_copy() argument
2046 spin_lock(&log->l_icloglock); in xlog_state_finish_copy()
2051 spin_unlock(&log->l_icloglock); in xlog_state_finish_copy()
2232 struct xlog *log, in xlog_write_setup_ophdr() argument
2255 xfs_warn(log->l_mp, in xlog_write_setup_ophdr()
2315 struct xlog *log, in xlog_write_copy_finish() argument
2330 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2333 return xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2341 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2345 spin_lock(&log->l_icloglock); in xlog_write_copy_finish()
2346 xlog_state_want_sync(log, iclog); in xlog_write_copy_finish()
2347 spin_unlock(&log->l_icloglock); in xlog_write_copy_finish()
2350 return xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2400 struct xlog *log, in xlog_write() argument
2439 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_write()
2441 xlog_print_tic_res(log->l_mp, ticket); in xlog_write()
2442 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_write()
2452 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2494 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); in xlog_write()
2507 xlog_verify_dest_ptr(log, ptr); in xlog_write()
2527 error = xlog_write_copy_finish(log, iclog, flags, in xlog_write()
2568 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); in xlog_write()
2570 return xlog_state_release_iclog(log, iclog); in xlog_write()
2595 struct xlog *log) in xlog_state_clean_log() argument
2600 iclog = log->l_iclog; in xlog_state_clean_log()
2635 } while (iclog != log->l_iclog); in xlog_state_clean_log()
2646 switch (log->l_covered_state) { in xlog_state_clean_log()
2650 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2655 log->l_covered_state = XLOG_STATE_COVER_NEED2; in xlog_state_clean_log()
2657 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2662 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_state_clean_log()
2664 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2675 struct xlog *log) in xlog_get_lowest_lsn() argument
2680 lsn_log = log->l_iclog; in xlog_get_lowest_lsn()
2691 } while (lsn_log != log->l_iclog); in xlog_get_lowest_lsn()
2698 struct xlog *log, in xlog_state_do_callback() argument
2715 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2716 first_iclog = iclog = log->l_iclog; in xlog_state_do_callback()
2730 first_iclog = log->l_iclog; in xlog_state_do_callback()
2731 iclog = log->l_iclog; in xlog_state_do_callback()
2785 lowest_lsn = xlog_get_lowest_lsn(log); in xlog_state_do_callback()
2814 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), in xlog_state_do_callback()
2817 atomic64_set(&log->l_last_sync_lsn, in xlog_state_do_callback()
2823 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2851 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2861 xlog_state_clean_log(log); in xlog_state_do_callback()
2872 xfs_warn(log->l_mp, in xlog_state_do_callback()
2892 first_iclog = iclog = log->l_iclog; in xlog_state_do_callback()
2914 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) in xlog_state_do_callback()
2916 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2919 wake_up_all(&log->l_flush_wait); in xlog_state_do_callback()
2941 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing() local
2943 spin_lock(&log->l_icloglock); in xlog_state_done_syncing()
2959 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2971 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2972 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ in xlog_state_done_syncing()
2996 struct xlog *log, in xlog_state_get_iclog_space() argument
3009 spin_lock(&log->l_icloglock); in xlog_state_get_iclog_space()
3010 if (XLOG_FORCED_SHUTDOWN(log)) { in xlog_state_get_iclog_space()
3011 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3015 iclog = log->l_iclog; in xlog_state_get_iclog_space()
3017 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); in xlog_state_get_iclog_space()
3020 xlog_wait(&log->l_flush_wait, &log->l_icloglock); in xlog_state_get_iclog_space()
3035 ticket->t_curr_res -= log->l_iclog_hsize; in xlog_state_get_iclog_space()
3037 log->l_iclog_hsize, in xlog_state_get_iclog_space()
3039 head->h_cycle = cpu_to_be32(log->l_curr_cycle); in xlog_state_get_iclog_space()
3041 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); in xlog_state_get_iclog_space()
3042 ASSERT(log->l_curr_block >= 0); in xlog_state_get_iclog_space()
3055 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
3066 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3067 error = xlog_state_release_iclog(log, iclog); in xlog_state_get_iclog_space()
3071 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3087 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
3092 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3107 struct xlog *log, in xlog_regrant_reserve_log_space() argument
3110 trace_xfs_log_regrant_reserve_enter(log, ticket); in xlog_regrant_reserve_log_space()
3115 xlog_grant_sub_space(log, &log->l_reserve_head.grant, in xlog_regrant_reserve_log_space()
3117 xlog_grant_sub_space(log, &log->l_write_head.grant, in xlog_regrant_reserve_log_space()
3122 trace_xfs_log_regrant_reserve_sub(log, ticket); in xlog_regrant_reserve_log_space()
3128 xlog_grant_add_space(log, &log->l_reserve_head.grant, in xlog_regrant_reserve_log_space()
3131 trace_xfs_log_regrant_reserve_exit(log, ticket); in xlog_regrant_reserve_log_space()
3154 struct xlog *log, in xlog_ungrant_log_space() argument
3162 trace_xfs_log_ungrant_enter(log, ticket); in xlog_ungrant_log_space()
3163 trace_xfs_log_ungrant_sub(log, ticket); in xlog_ungrant_log_space()
3175 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); in xlog_ungrant_log_space()
3176 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); in xlog_ungrant_log_space()
3178 trace_xfs_log_ungrant_exit(log, ticket); in xlog_ungrant_log_space()
3180 xfs_log_space_wake(log->l_mp); in xlog_ungrant_log_space()
3194 struct xlog *log, in xlog_state_release_iclog() argument
3203 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) in xlog_state_release_iclog()
3207 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
3215 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); in xlog_state_release_iclog()
3219 xlog_verify_tail_lsn(log, iclog, tail_lsn); in xlog_state_release_iclog()
3222 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
3232 return xlog_sync(log, iclog); in xlog_state_release_iclog()
3246 struct xlog *log, in xlog_state_switch_iclogs() argument
3254 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3255 log->l_prev_block = log->l_curr_block; in xlog_state_switch_iclogs()
3256 log->l_prev_cycle = log->l_curr_cycle; in xlog_state_switch_iclogs()
3259 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); in xlog_state_switch_iclogs()
3262 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && in xlog_state_switch_iclogs()
3263 log->l_mp->m_sb.sb_logsunit > 1) { in xlog_state_switch_iclogs()
3264 uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); in xlog_state_switch_iclogs()
3265 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); in xlog_state_switch_iclogs()
3268 if (log->l_curr_block >= log->l_logBBsize) { in xlog_state_switch_iclogs()
3276 log->l_curr_block -= log->l_logBBsize; in xlog_state_switch_iclogs()
3277 ASSERT(log->l_curr_block >= 0); in xlog_state_switch_iclogs()
3279 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3280 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) in xlog_state_switch_iclogs()
3281 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3283 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3284 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
3319 struct xlog *log = mp->m_log; in xfs_log_force() local
3326 xlog_cil_force(log); in xfs_log_force()
3328 spin_lock(&log->l_icloglock); in xfs_log_force()
3329 iclog = log->l_iclog; in xfs_log_force()
3359 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3360 spin_unlock(&log->l_icloglock); in xfs_log_force()
3362 if (xlog_state_release_iclog(log, iclog)) in xfs_log_force()
3365 spin_lock(&log->l_icloglock); in xfs_log_force()
3377 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3393 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xfs_log_force()
3399 spin_unlock(&log->l_icloglock); in xfs_log_force()
3402 spin_unlock(&log->l_icloglock); in xfs_log_force()
3414 struct xlog *log = mp->m_log; in __xfs_log_force_lsn() local
3417 spin_lock(&log->l_icloglock); in __xfs_log_force_lsn()
3418 iclog = log->l_iclog; in __xfs_log_force_lsn()
3424 if (iclog == log->l_iclog) in __xfs_log_force_lsn()
3455 &log->l_icloglock); in __xfs_log_force_lsn()
3459 xlog_state_switch_iclogs(log, iclog, 0); in __xfs_log_force_lsn()
3460 spin_unlock(&log->l_icloglock); in __xfs_log_force_lsn()
3461 if (xlog_state_release_iclog(log, iclog)) in __xfs_log_force_lsn()
3465 spin_lock(&log->l_icloglock); in __xfs_log_force_lsn()
3476 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in __xfs_log_force_lsn()
3482 spin_unlock(&log->l_icloglock); in __xfs_log_force_lsn()
3485 spin_unlock(&log->l_icloglock); in __xfs_log_force_lsn()
3532 struct xlog *log, in xlog_state_want_sync() argument
3535 assert_spin_locked(&log->l_icloglock); in xlog_state_want_sync()
3538 xlog_state_switch_iclogs(log, iclog, 0); in xlog_state_want_sync()
3583 struct xlog *log = mp->m_log; in xfs_log_calc_unit_res() local
3642 iclog_space = log->l_iclog_size - log->l_iclog_hsize; in xfs_log_calc_unit_res()
3654 unit_bytes += log->l_iclog_hsize * num_headers; in xfs_log_calc_unit_res()
3657 unit_bytes += log->l_iclog_hsize; in xfs_log_calc_unit_res()
3676 struct xlog *log, in xlog_ticket_alloc() argument
3690 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); in xlog_ticket_alloc()
3725 struct xlog *log, in xlog_verify_dest_ptr() argument
3731 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_verify_dest_ptr()
3732 if (ptr >= log->l_iclog_bak[i] && in xlog_verify_dest_ptr()
3733 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) in xlog_verify_dest_ptr()
3738 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); in xlog_verify_dest_ptr()
3754 struct xlog *log) in xlog_verify_grant_tail() argument
3759 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); in xlog_verify_grant_tail()
3760 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); in xlog_verify_grant_tail()
3763 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3764 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3766 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3770 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3771 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3773 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3781 struct xlog *log, in xlog_verify_tail_lsn() argument
3787 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { in xlog_verify_tail_lsn()
3789 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); in xlog_verify_tail_lsn()
3790 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3791 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3793 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); in xlog_verify_tail_lsn()
3795 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) in xlog_verify_tail_lsn()
3796 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); in xlog_verify_tail_lsn()
3798 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; in xlog_verify_tail_lsn()
3800 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3821 struct xlog *log, in xlog_verify_iclog() argument
3836 spin_lock(&log->l_icloglock); in xlog_verify_iclog()
3837 icptr = log->l_iclog; in xlog_verify_iclog()
3838 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) in xlog_verify_iclog()
3841 if (icptr != log->l_iclog) in xlog_verify_iclog()
3842 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); in xlog_verify_iclog()
3843 spin_unlock(&log->l_icloglock); in xlog_verify_iclog()
3847 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); in xlog_verify_iclog()
3853 xfs_emerg(log->l_mp, "%s: unexpected magic num", in xlog_verify_iclog()
3883 xfs_warn(log->l_mp, in xlog_verify_iclog()
3914 struct xlog *log) in xlog_state_ioerror() argument
3918 iclog = log->l_iclog; in xlog_state_ioerror()
3958 struct xlog *log; in xfs_log_force_umount() local
3961 log = mp->m_log; in xfs_log_force_umount()
3967 if (!log || in xfs_log_force_umount()
3968 log->l_flags & XLOG_ACTIVE_RECOVERY) { in xfs_log_force_umount()
3979 if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { in xfs_log_force_umount()
3980 ASSERT(XLOG_FORCED_SHUTDOWN(log)); in xfs_log_force_umount()
3998 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
4007 log->l_flags |= XLOG_IO_ERROR; in xfs_log_force_umount()
4008 retval = xlog_state_ioerror(log); in xfs_log_force_umount()
4009 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
4018 xlog_grant_head_wake_all(&log->l_reserve_head); in xfs_log_force_umount()
4019 xlog_grant_head_wake_all(&log->l_write_head); in xfs_log_force_umount()
4027 wake_up_all(&log->l_cilp->xc_commit_wait); in xfs_log_force_umount()
4028 xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); in xfs_log_force_umount()
4034 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
4035 iclog = log->l_iclog; in xfs_log_force_umount()
4039 } while (iclog != log->l_iclog); in xfs_log_force_umount()
4040 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
4049 struct xlog *log) in xlog_iclogs_empty() argument
4053 iclog = log->l_iclog; in xlog_iclogs_empty()
4061 } while (iclog != log->l_iclog); in xlog_iclogs_empty()
4074 struct xlog *log = mp->m_log; in xfs_log_check_lsn() local
4096 spin_lock(&log->l_icloglock); in xfs_log_check_lsn()
4101 log->l_curr_cycle, log->l_curr_block); in xfs_log_check_lsn()
4102 spin_unlock(&log->l_icloglock); in xfs_log_check_lsn()
4112 struct xlog *log = mp->m_log; in xfs_log_in_recovery() local
4114 return log->l_flags & XLOG_ACTIVE_RECOVERY; in xfs_log_in_recovery()
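The hits at source lines 129-168 (xlog_grant_sub_space() / xlog_grant_add_space()) show the wrap-around arithmetic used on the reserve and write grant heads: subtracting space adds l_logsize back when the offset goes negative, and adding space subtracts the remaining "tmp = l_logsize - space" when the head crosses the end of the log. Below is a minimal, standalone sketch of that accounting; the struct and function names are hypothetical, and it deliberately keeps cycle and space as plain fields rather than the packed atomic64 grant head the kernel actually updates.

/*
 * Hypothetical model of the grant-head wrap arithmetic seen in the
 * xlog_grant_add_space()/xlog_grant_sub_space() hits above. Not kernel code.
 */
#include <stdio.h>

struct fake_log {
	int	l_logsize;	/* usable log size in bytes */
};

struct fake_grant_head {
	int	cycle;		/* how many times the head has wrapped */
	int	space;		/* byte offset of the head within the log */
};

/* Return reservation space: move the head backwards, wrapping if needed. */
static void grant_sub_space(struct fake_log *log,
			    struct fake_grant_head *head, int bytes)
{
	int space = head->space - bytes;

	if (space < 0) {
		space += log->l_logsize;	/* mirrors "space += log->l_logsize" */
		head->cycle--;
	}
	head->space = space;
}

/* Consume reservation space: move the head forwards, wrapping if needed. */
static void grant_add_space(struct fake_log *log,
			    struct fake_grant_head *head, int bytes)
{
	int tmp = log->l_logsize - head->space;	/* mirrors "tmp = log->l_logsize - space" */

	if (tmp > bytes) {
		head->space += bytes;
	} else {
		head->cycle++;
		head->space = bytes - tmp;
	}
}

int main(void)
{
	struct fake_log log = { .l_logsize = 1024 };
	struct fake_grant_head head = { .cycle = 1, .space = 1000 };

	grant_add_space(&log, &head, 100);	/* crosses the end: cycle 2, space 76 */
	printf("after add: cycle=%d space=%d\n", head.cycle, head.space);

	grant_sub_space(&log, &head, 100);	/* wraps back: cycle 1, space 1000 */
	printf("after sub: cycle=%d space=%d\n", head.cycle, head.space);
	return 0;
}

In the kernel of this era the cycle and byte offset are packed into a single atomic64_t per grant head (cracked and reassembled with xlog_crack_grant_head()/xlog_assign_grant_head()) so the update can be done locklessly; the sketch above only illustrates the wrap logic itself.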