Lines matching refs: iclog

43 	struct xlog_in_core	*iclog);
50 struct xlog_in_core **iclog,
60 struct xlog_in_core *iclog,
69 struct xlog_in_core *iclog,
74 struct xlog_in_core *iclog);
526 struct xlog_in_core *iclog; in xlog_state_shutdown_callbacks() local
529 iclog = log->l_iclog; in xlog_state_shutdown_callbacks()
531 if (atomic_read(&iclog->ic_refcnt)) { in xlog_state_shutdown_callbacks()
535 list_splice_init(&iclog->ic_callbacks, &cb_list); in xlog_state_shutdown_callbacks()
541 wake_up_all(&iclog->ic_write_wait); in xlog_state_shutdown_callbacks()
542 wake_up_all(&iclog->ic_force_wait); in xlog_state_shutdown_callbacks()
543 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_shutdown_callbacks()
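The iclogs hang off log->l_iclog as a circular list linked through ic_next, so xlog_state_shutdown_callbacks() above can visit every buffer exactly once with a do/while that stops when the walk comes back around to the head. Below is a minimal userspace sketch of that ring-walk idiom; the struct and field names are simplified stand-ins for the kernel's struct xlog_in_core, not the real definitions.

#include <stdio.h>

/* Simplified stand-in for struct xlog_in_core: only the ring link. */
struct iclog {
    int          id;
    struct iclog *ic_next;
};

/* Visit every iclog exactly once, starting and stopping at the head.
 * This mirrors the do { ... } while ((iclog = iclog->ic_next) != head)
 * idiom used throughout xfs_log.c. */
static void walk_ring(struct iclog *head)
{
    struct iclog *iclog = head;

    do {
        printf("visiting iclog %d\n", iclog->id);
    } while ((iclog = iclog->ic_next) != head);
}

int main(void)
{
    struct iclog a = { 0 }, b = { 1 }, c = { 2 };

    a.ic_next = &b; b.ic_next = &c; c.ic_next = &a;  /* close the ring */
    walk_ring(&a);
    return 0;
}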
571 struct xlog_in_core *iclog, in xlog_state_release_iclog() argument
579 trace_xlog_iclog_release(iclog, _RET_IP_); in xlog_state_release_iclog()
586 if ((iclog->ic_state == XLOG_STATE_WANT_SYNC || in xlog_state_release_iclog()
587 (iclog->ic_flags & XLOG_ICL_NEED_FUA)) && in xlog_state_release_iclog()
588 !iclog->ic_header.h_tail_lsn) { in xlog_state_release_iclog()
590 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); in xlog_state_release_iclog()
593 last_ref = atomic_dec_and_test(&iclog->ic_refcnt); in xlog_state_release_iclog()
609 if (iclog->ic_state != XLOG_STATE_WANT_SYNC) { in xlog_state_release_iclog()
610 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); in xlog_state_release_iclog()
614 iclog->ic_state = XLOG_STATE_SYNCING; in xlog_state_release_iclog()
615 xlog_verify_tail_lsn(log, iclog); in xlog_state_release_iclog()
616 trace_xlog_iclog_syncing(iclog, _RET_IP_); in xlog_state_release_iclog()
619 xlog_sync(log, iclog, ticket); in xlog_state_release_iclog()
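xlog_state_release_iclog() drops one reference to the iclog, and only the caller that drops the last one (atomic_dec_and_test() returning true) moves the iclog from WANT_SYNC to SYNCING and issues the write via xlog_sync(). A sketch of that dec-and-test pattern with C11 atomics follows, assuming that atomic_fetch_sub() returning the prior value is the moral equivalent of the kernel helper:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int ic_refcnt = 2;  /* two holders, for illustration */

/* Returns true only for the caller that dropped the last reference,
 * mirroring atomic_dec_and_test(&iclog->ic_refcnt). */
static bool release_ref(void)
{
    return atomic_fetch_sub(&ic_refcnt, 1) == 1;
}

int main(void)
{
    printf("first release: last=%d\n", release_ref());   /* 0 */
    printf("second release: last=%d\n", release_ref());  /* 1: sync now */
    return 0;
}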
880 struct xlog_in_core *iclog) in xlog_force_iclog() argument
882 atomic_inc(&iclog->ic_refcnt); in xlog_force_iclog()
883 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; in xlog_force_iclog()
884 if (iclog->ic_state == XLOG_STATE_ACTIVE) in xlog_force_iclog()
885 xlog_state_switch_iclogs(iclog->ic_log, iclog, 0); in xlog_force_iclog()
886 return xlog_state_release_iclog(iclog->ic_log, iclog, NULL); in xlog_force_iclog()
897 struct xlog_in_core *iclog) in xlog_wait_on_iclog() argument
898 __releases(iclog->ic_log->l_icloglock) in xlog_wait_on_iclog()
900 struct xlog *log = iclog->ic_log; in xlog_wait_on_iclog()
902 trace_xlog_iclog_wait_on(iclog, _RET_IP_); in xlog_wait_on_iclog()
904 iclog->ic_state != XLOG_STATE_ACTIVE && in xlog_wait_on_iclog()
905 iclog->ic_state != XLOG_STATE_DIRTY) { in xlog_wait_on_iclog()
907 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xlog_wait_on_iclog()
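xlog_wait_on_iclog() sleeps on ic_force_wait, dropping l_icloglock while asleep, until the iclog cycles back to ACTIVE or DIRTY, i.e. until its write has completed and the completion path has issued the wake_up_all(). Here is a rough pthread rendering of that sleep/wake shape, with hypothetical state names and a condvar loop where the kernel uses a wait queue and a single xlog_wait():

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum { STATE_ACTIVE, STATE_SYNCING, STATE_DIRTY };  /* simplified */

static pthread_mutex_t icloglock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ic_force_wait = PTHREAD_COND_INITIALIZER;
static int ic_state = STATE_SYNCING;

/* Wait until the iclog has gone back to ACTIVE or DIRTY, i.e. its I/O
 * has completed; the lock is dropped while sleeping, as xlog_wait()
 * drops l_icloglock. */
static void wait_on_iclog(void)
{
    pthread_mutex_lock(&icloglock);
    while (ic_state != STATE_ACTIVE && ic_state != STATE_DIRTY)
        pthread_cond_wait(&ic_force_wait, &icloglock);
    pthread_mutex_unlock(&icloglock);
}

static void *completer(void *arg)
{
    (void)arg;
    sleep(1);
    pthread_mutex_lock(&icloglock);
    ic_state = STATE_DIRTY;                  /* I/O done */
    pthread_cond_broadcast(&ic_force_wait);  /* wake_up_all() */
    pthread_mutex_unlock(&icloglock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, completer, NULL);
    wait_on_iclog();
    puts("iclog force complete");
    pthread_join(t, NULL);
    return 0;
}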
971 struct xlog_in_core *iclog; in xlog_unmount_write() local
989 iclog = log->l_iclog; in xlog_unmount_write()
990 error = xlog_force_iclog(iclog); in xlog_unmount_write()
991 xlog_wait_on_iclog(iclog); in xlog_unmount_write()
1003 struct xlog_in_core *iclog = log->l_iclog; in xfs_log_unmount_verify_iclog() local
1006 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); in xfs_log_unmount_verify_iclog()
1007 ASSERT(iclog->ic_offset == 0); in xfs_log_unmount_verify_iclog()
1008 } while ((iclog = iclog->ic_next) != log->l_iclog); in xfs_log_unmount_verify_iclog()
1385 struct xlog_in_core *iclog = in xlog_ioend_work() local
1387 struct xlog *log = iclog->ic_log; in xlog_ioend_work()
1390 error = blk_status_to_errno(iclog->ic_bio.bi_status); in xlog_ioend_work()
1393 if (iclog->ic_fail_crc) in xlog_ioend_work()
1405 xlog_state_done_syncing(iclog); in xlog_ioend_work()
1406 bio_uninit(&iclog->ic_bio); in xlog_ioend_work()
1414 up(&iclog->ic_sema); in xlog_ioend_work()
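xlog_ioend_work() receives only the work item; the truncated initializer on line 1385 recovers the owning iclog from the embedded ic_end_io_work member, which in the kernel is done with container_of(). A self-contained sketch of that recovery follows; the struct layouts here are invented for illustration:

#include <stddef.h>
#include <stdio.h>

/* Userspace rendering of the kernel's container_of(): recover the
 * enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct iclog {
    int               id;
    struct work_struct ic_end_io_work;
};

/* The workqueue hands us only the work item; recover the iclog,
 * as xlog_ioend_work() does. */
static void ioend_work(struct work_struct *work)
{
    struct iclog *iclog = container_of(work, struct iclog, ic_end_io_work);

    printf("completing iclog %d\n", iclog->id);
}

int main(void)
{
    struct iclog ic = { .id = 7 };

    ioend_work(&ic.ic_end_io_work);
    return 0;
}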
1533 xlog_in_core_t *iclog, *prev_iclog=NULL; in xlog_alloc_log() local
1614 iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL); in xlog_alloc_log()
1615 if (!iclog) in xlog_alloc_log()
1618 *iclogp = iclog; in xlog_alloc_log()
1619 iclog->ic_prev = prev_iclog; in xlog_alloc_log()
1620 prev_iclog = iclog; in xlog_alloc_log()
1622 iclog->ic_data = kvzalloc(log->l_iclog_size, in xlog_alloc_log()
1624 if (!iclog->ic_data) in xlog_alloc_log()
1626 head = &iclog->ic_header; in xlog_alloc_log()
1636 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; in xlog_alloc_log()
1637 iclog->ic_state = XLOG_STATE_ACTIVE; in xlog_alloc_log()
1638 iclog->ic_log = log; in xlog_alloc_log()
1639 atomic_set(&iclog->ic_refcnt, 0); in xlog_alloc_log()
1640 INIT_LIST_HEAD(&iclog->ic_callbacks); in xlog_alloc_log()
1641 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1643 init_waitqueue_head(&iclog->ic_force_wait); in xlog_alloc_log()
1644 init_waitqueue_head(&iclog->ic_write_wait); in xlog_alloc_log()
1645 INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work); in xlog_alloc_log()
1646 sema_init(&iclog->ic_sema, 1); in xlog_alloc_log()
1648 iclogp = &iclog->ic_next; in xlog_alloc_log()
1668 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1669 prev_iclog = iclog->ic_next; in xlog_alloc_log()
1670 kmem_free(iclog->ic_data); in xlog_alloc_log()
1671 kmem_free(iclog); in xlog_alloc_log()
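xlog_alloc_log() builds the ring by chaining freshly allocated iclogs through a pointer-to-pointer tail (iclogp = &iclog->ic_next) and then closing the circle, while the failure path at lines 1668-1671 walks the partial chain freeing each node. A userspace sketch of both halves, assuming a fixed ring size and simplified node contents:

#include <stdlib.h>
#include <stdio.h>

struct iclog {
    int          id;
    struct iclog *ic_prev;
    struct iclog *ic_next;
};

/* Build a ring of 'count' iclogs the way xlog_alloc_log() does: chain
 * nodes through a pointer-to-pointer tail, then close the ring. On
 * allocation failure, unwind the partial chain as the error label
 * does. Returns the head or NULL. */
static struct iclog *alloc_ring(int count)
{
    struct iclog *head = NULL, *iclog, *prev_iclog = NULL;
    struct iclog **iclogp = &head;
    int i;

    if (count < 1)
        return NULL;
    for (i = 0; i < count; i++) {
        iclog = calloc(1, sizeof(*iclog));
        if (!iclog)
            goto out_free;
        iclog->id = i;
        *iclogp = iclog;
        iclog->ic_prev = prev_iclog;
        prev_iclog = iclog;
        iclogp = &iclog->ic_next;
    }
    head->ic_prev = prev_iclog;   /* close the ring both ways */
    prev_iclog->ic_next = head;
    return head;

out_free:
    /* Partial chain: last node's ic_next is still NULL, so this
     * terminates; same shape as the loop at lines 1668-1671. */
    for (iclog = head; iclog; iclog = prev_iclog) {
        prev_iclog = iclog->ic_next;
        free(iclog);
    }
    return NULL;
}

int main(void)
{
    struct iclog *head = alloc_ring(8), *ic = head;

    do {
        printf("%d ", ic->id);
    } while ((ic = ic->ic_next) != head);
    putchar('\n');
    return 0;
}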
1770 struct xlog_in_core *iclog, in xlog_pack_data() argument
1774 int size = iclog->ic_offset + roundoff; in xlog_pack_data()
1778 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn); in xlog_pack_data()
1780 dp = iclog->ic_datap; in xlog_pack_data()
1784 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp; in xlog_pack_data()
1790 xlog_in_core_2_t *xhdr = iclog->ic_data; in xlog_pack_data()
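xlog_pack_data() saves the first big-endian word of every 512-byte basic block of iclog data into the header's h_cycle_data[] and overwrites it with the cycle number, so log recovery can detect a torn write from a cycle mismatch and put the saved words back. Below is a sketch of the stamping step, using htonl() as a userspace stand-in for cpu_to_be32() and memcpy() instead of the kernel's direct __be32 stores:

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htonl() as a stand-in for cpu_to_be32() */

#define BBSIZE     512
#define NUM_BLOCKS 4

/* Stand-in for the header's h_cycle_data[] save area. */
static uint32_t cycle_data[NUM_BLOCKS];

/* Stamp the cycle number into the first word of each basic block,
 * saving the overwritten words, as xlog_pack_data() does. */
static void pack_data(char *dp, int nblocks, uint32_t cycle_lsn)
{
    int i;

    for (i = 0; i < nblocks; i++) {
        memcpy(&cycle_data[i], dp, sizeof(uint32_t));  /* save */
        memcpy(dp, &cycle_lsn, sizeof(uint32_t));      /* stamp */
        dp += BBSIZE;
    }
}

int main(void)
{
    static char buf[NUM_BLOCKS * BBSIZE] = "payload";
    uint32_t cycle = htonl(42);

    pack_data(buf, NUM_BLOCKS, cycle);
    printf("saved first word of block 0: 0x%x\n", ntohl(cycle_data[0]));
    return 0;
}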
1849 struct xlog_in_core *iclog = bio->bi_private; in xlog_bio_end_io() local
1851 queue_work(iclog->ic_log->l_ioend_workqueue, in xlog_bio_end_io()
1852 &iclog->ic_end_io_work); in xlog_bio_end_io()
1879 struct xlog_in_core *iclog, in xlog_write_iclog() argument
1884 trace_xlog_iclog_write(iclog, _RET_IP_); in xlog_write_iclog()
1894 down(&iclog->ic_sema); in xlog_write_iclog()
1903 xlog_state_done_syncing(iclog); in xlog_write_iclog()
1904 up(&iclog->ic_sema); in xlog_write_iclog()
1914 bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec, in xlog_write_iclog()
1917 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; in xlog_write_iclog()
1918 iclog->ic_bio.bi_end_io = xlog_bio_end_io; in xlog_write_iclog()
1919 iclog->ic_bio.bi_private = iclog; in xlog_write_iclog()
1921 if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) { in xlog_write_iclog()
1922 iclog->ic_bio.bi_opf |= REQ_PREFLUSH; in xlog_write_iclog()
1940 if (iclog->ic_flags & XLOG_ICL_NEED_FUA) in xlog_write_iclog()
1941 iclog->ic_bio.bi_opf |= REQ_FUA; in xlog_write_iclog()
1943 iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); in xlog_write_iclog()
1945 if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) { in xlog_write_iclog()
1949 if (is_vmalloc_addr(iclog->ic_data)) in xlog_write_iclog()
1950 flush_kernel_vmap_range(iclog->ic_data, count); in xlog_write_iclog()
1959 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno, in xlog_write_iclog()
1961 bio_chain(split, &iclog->ic_bio); in xlog_write_iclog()
1965 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart; in xlog_write_iclog()
1968 submit_bio(&iclog->ic_bio); in xlog_write_iclog()
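xlog_write_iclog() translates the iclog flags into REQ_PREFLUSH/REQ_FUA on the bio, and when the target range runs past the physical end of the log it splits the bio and chains the remainder back to sector l_logBBstart. The arithmetic behind that split, sketched without any block-layer machinery (offsets in 512-byte basic blocks, printf standing in for the two submissions):

#include <stdio.h>

/* Sketch of the wraparound handled in xlog_write_iclog(): a write
 * that would run past the physical end of the log is split, and the
 * tail lands back at the start of the log. The real code splits and
 * chains a struct bio instead of issuing two writes by hand. */
static void write_wrapped(long bno, long nbblks, long logBBsize)
{
    if (bno + nbblks > logBBsize) {
        long head = logBBsize - bno;

        printf("write %ld blocks at %ld\n", head, bno);
        printf("write %ld blocks at 0 (wrapped)\n", nbblks - head);
    } else {
        printf("write %ld blocks at %ld\n", nbblks, bno);
    }
}

int main(void)
{
    write_wrapped(1000, 48, 1024);  /* 24 blocks wrap to the start */
    return 0;
}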
1998 struct xlog_in_core *iclog, in xlog_calc_iclog_size() argument
2004 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_calc_iclog_size()
2040 struct xlog_in_core *iclog, in xlog_sync() argument
2048 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); in xlog_sync()
2049 trace_xlog_iclog_sync(iclog, _RET_IP_); in xlog_sync()
2051 count = xlog_calc_iclog_size(log, iclog, &roundoff); in xlog_sync()
2066 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
2069 size = iclog->ic_offset; in xlog_sync()
2072 iclog->ic_header.h_len = cpu_to_be32(size); in xlog_sync()
2077 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)); in xlog_sync()
2081 xlog_split_iclog(log, &iclog->ic_header, bno, count); in xlog_sync()
2084 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
2085 iclog->ic_datap, size); in xlog_sync()
2095 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA); in xlog_sync()
2096 iclog->ic_fail_crc = true; in xlog_sync()
2099 be64_to_cpu(iclog->ic_header.h_lsn)); in xlog_sync()
2102 xlog_verify_iclog(log, iclog, count); in xlog_sync()
2103 xlog_write_iclog(log, iclog, bno, count); in xlog_sync()
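xlog_sync() packs the data, then computes h_crc over the record via xlog_cksum(); the lines at 2095-2096 are error injection that deliberately corrupts the CRC and remembers the fact in ic_fail_crc so xlog_ioend_work() can complain about it later. Here is a loose userspace sketch of the checksum step, assuming CRC32c (the algorithm v5 XFS logs use) and a toy header layout; the real xlog_cksum() also folds in the extended cycle data separately:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Bitwise CRC32c (Castagnoli); the kernel uses its crc32c() library. */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
    const uint8_t *p = buf;
    int i;

    crc = ~crc;
    while (len--) {
        crc ^= *p++;
        for (i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
    }
    return ~crc;
}

struct hdr { uint32_t h_crc; char rest[60]; };  /* toy record header */

/* Checksum the header (with h_crc zeroed) and then the data region,
 * loosely mirroring what xlog_cksum() does. */
static uint32_t cksum(struct hdr *h, const void *data, size_t size)
{
    uint32_t saved = h->h_crc, crc;

    h->h_crc = 0;
    crc = crc32c(0, h, sizeof(*h));
    crc = crc32c(crc, data, size);
    h->h_crc = saved;
    return crc;
}

int main(void)
{
    struct hdr h = { 0 };
    char data[] = "log records";

    printf("crc = 0x%08x\n", cksum(&h, data, sizeof(data)));
    return 0;
}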
2113 xlog_in_core_t *iclog, *next_iclog; in xlog_dealloc_log() local
2120 iclog = log->l_iclog; in xlog_dealloc_log()
2122 down(&iclog->ic_sema); in xlog_dealloc_log()
2123 up(&iclog->ic_sema); in xlog_dealloc_log()
2124 iclog = iclog->ic_next; in xlog_dealloc_log()
2134 iclog = log->l_iclog; in xlog_dealloc_log()
2136 next_iclog = iclog->ic_next; in xlog_dealloc_log()
2137 kmem_free(iclog->ic_data); in xlog_dealloc_log()
2138 kmem_free(iclog); in xlog_dealloc_log()
2139 iclog = next_iclog; in xlog_dealloc_log()
2153 struct xlog_in_core *iclog, in xlog_state_finish_copy() argument
2159 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); in xlog_state_finish_copy()
2160 iclog->ic_offset += copy_bytes; in xlog_state_finish_copy()
2231 struct xlog_in_core *iclog, in xlog_write_iovec() argument
2239 ASSERT(*log_offset < iclog->ic_log->l_iclog_size); in xlog_write_iovec()
2243 memcpy(iclog->ic_datap + *log_offset, data, write_len); in xlog_write_iovec()
2258 struct xlog_in_core *iclog, in xlog_write_full() argument
2266 ASSERT(*log_offset + *len <= iclog->ic_size || in xlog_write_full()
2267 iclog->ic_state == XLOG_STATE_WANT_SYNC); in xlog_write_full()
2278 xlog_write_iovec(iclog, log_offset, reg->i_addr, in xlog_write_full()
2292 struct xlog_in_core *iclog = *iclogp; in xlog_write_get_more_iclog_space() local
2293 struct xlog *log = iclog->ic_log; in xlog_write_get_more_iclog_space()
2297 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC); in xlog_write_get_more_iclog_space()
2298 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_get_more_iclog_space()
2299 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_write_get_more_iclog_space()
2304 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write_get_more_iclog_space()
2310 *iclogp = iclog; in xlog_write_get_more_iclog_space()
2330 struct xlog_in_core *iclog = *iclogp; in xlog_write_partial() local
2352 if (iclog->ic_size - *log_offset <= in xlog_write_partial()
2355 &iclog, log_offset, *len, record_cnt, in xlog_write_partial()
2362 rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset); in xlog_write_partial()
2369 xlog_write_iovec(iclog, log_offset, reg->i_addr, in xlog_write_partial()
2406 &iclog, log_offset, in xlog_write_partial()
2412 ophdr = iclog->ic_datap + *log_offset; in xlog_write_partial()
2428 if (rlen <= iclog->ic_size - *log_offset) in xlog_write_partial()
2433 rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset); in xlog_write_partial()
2436 xlog_write_iovec(iclog, log_offset, in xlog_write_partial()
2447 *iclogp = iclog; in xlog_write_partial()
2500 struct xlog_in_core *iclog = NULL; in xlog_write() local
2514 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2519 ASSERT(log_offset <= iclog->ic_size - 1); in xlog_write()
2527 xlog_cil_set_ctx_write_state(ctx, iclog); in xlog_write()
2535 lv->lv_bytes > iclog->ic_size - log_offset) { in xlog_write()
2536 error = xlog_write_partial(lv, ticket, &iclog, in xlog_write()
2547 xlog_write_full(lv, ticket, iclog, &log_offset, in xlog_write()
2560 xlog_state_finish_copy(log, iclog, record_cnt, 0); in xlog_write()
2561 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_write()
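xlog_write() copies each log vector's regions into the current iclog through xlog_write_iovec(); a vector that does not fit goes through xlog_write_partial(), which fills the remaining space, releases the full iclog and continues in a fresh one obtained via xlog_write_get_more_iclog_space(). The spill loop below is a heavily simplified shape of that flow: no tickets, no op headers or continuation flags, just the clamp-copy-advance-spill cycle:

#include <stdio.h>
#include <string.h>

#define ICLOG_SIZE 16  /* toy in-core log buffer size */

static char iclog_data[ICLOG_SIZE];
static int log_offset;

/* Start a fresh (toy) iclog, standing in for releasing the full one
 * and getting new space via xlog_state_get_iclog_space(). */
static void new_iclog(void)
{
    printf("iclog full: [%.*s] -> disk\n", log_offset, iclog_data);
    log_offset = 0;
}

/* Copy one region, spilling across iclogs when it doesn't fit; the
 * clamp mirrors the min_t() against iclog->ic_size - *log_offset. */
static void write_region(const char *data, int len)
{
    while (len) {
        int rlen = len;

        if (rlen > ICLOG_SIZE - log_offset)
            rlen = ICLOG_SIZE - log_offset;
        memcpy(iclog_data + log_offset, data, rlen);
        log_offset += rlen;
        data += rlen;
        len -= rlen;
        if (log_offset == ICLOG_SIZE)
            new_iclog();
    }
}

int main(void)
{
    write_region("transaction-one;", 16);
    write_region("a-very-long-second-region", 25);
    new_iclog();  /* flush the remainder */
    return 0;
}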
2569 struct xlog_in_core *iclog, in xlog_state_activate_iclog() argument
2572 ASSERT(list_empty_careful(&iclog->ic_callbacks)); in xlog_state_activate_iclog()
2573 trace_xlog_iclog_activate(iclog, _RET_IP_); in xlog_state_activate_iclog()
2582 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) { in xlog_state_activate_iclog()
2592 iclog->ic_state = XLOG_STATE_ACTIVE; in xlog_state_activate_iclog()
2593 iclog->ic_offset = 0; in xlog_state_activate_iclog()
2594 iclog->ic_header.h_num_logops = 0; in xlog_state_activate_iclog()
2595 memset(iclog->ic_header.h_cycle_data, 0, in xlog_state_activate_iclog()
2596 sizeof(iclog->ic_header.h_cycle_data)); in xlog_state_activate_iclog()
2597 iclog->ic_header.h_lsn = 0; in xlog_state_activate_iclog()
2598 iclog->ic_header.h_tail_lsn = 0; in xlog_state_activate_iclog()
2610 struct xlog_in_core *iclog = log->l_iclog; in xlog_state_activate_iclogs() local
2613 if (iclog->ic_state == XLOG_STATE_DIRTY) in xlog_state_activate_iclogs()
2614 xlog_state_activate_iclog(iclog, iclogs_changed); in xlog_state_activate_iclogs()
2619 else if (iclog->ic_state != XLOG_STATE_ACTIVE) in xlog_state_activate_iclogs()
2621 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_activate_iclogs()
2682 struct xlog_in_core *iclog = log->l_iclog; in xlog_get_lowest_lsn() local
2686 if (iclog->ic_state == XLOG_STATE_ACTIVE || in xlog_get_lowest_lsn()
2687 iclog->ic_state == XLOG_STATE_DIRTY) in xlog_get_lowest_lsn()
2690 lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xlog_get_lowest_lsn()
2693 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_get_lowest_lsn()
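xlog_get_lowest_lsn() takes the minimum h_lsn over the iclogs still in flight, skipping ACTIVE and DIRTY ones; that works because an LSN orders first by cycle, then by block, packed into one 64-bit value. The macros below match the shape of XFS's CYCLE_LSN()/BLOCK_LSN(); make_lsn() is a hypothetical helper for the demo:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t xfs_lsn_t;

/* An LSN packs the log cycle number in the high 32 bits and the
 * basic-block number in the low 32 bits. */
#define CYCLE_LSN(lsn)  ((uint32_t)((lsn) >> 32))
#define BLOCK_LSN(lsn)  ((uint32_t)(lsn))

static xfs_lsn_t make_lsn(uint32_t cycle, uint32_t block)
{
    return ((xfs_lsn_t)cycle << 32) | block;
}

int main(void)
{
    xfs_lsn_t lsn = make_lsn(3, 1000);

    printf("cycle=%u block=%u\n", CYCLE_LSN(lsn), BLOCK_LSN(lsn));
    return 0;
}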
2723 struct xlog_in_core *iclog, in xlog_state_set_callback() argument
2726 trace_xlog_iclog_callback(iclog, _RET_IP_); in xlog_state_set_callback()
2727 iclog->ic_state = XLOG_STATE_CALLBACK; in xlog_state_set_callback()
2732 if (list_empty_careful(&iclog->ic_callbacks)) in xlog_state_set_callback()
2747 struct xlog_in_core *iclog) in xlog_state_iodone_process_iclog() argument
2752 switch (iclog->ic_state) { in xlog_state_iodone_process_iclog()
2766 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xlog_state_iodone_process_iclog()
2770 xlog_state_set_callback(log, iclog, header_lsn); in xlog_state_iodone_process_iclog()
2796 struct xlog_in_core *iclog = first_iclog; in xlog_state_do_iclog_callbacks() local
2802 if (xlog_state_iodone_process_iclog(log, iclog)) in xlog_state_do_iclog_callbacks()
2804 if (iclog->ic_state != XLOG_STATE_CALLBACK) { in xlog_state_do_iclog_callbacks()
2805 iclog = iclog->ic_next; in xlog_state_do_iclog_callbacks()
2808 list_splice_init(&iclog->ic_callbacks, &cb_list); in xlog_state_do_iclog_callbacks()
2811 trace_xlog_iclog_callbacks_start(iclog, _RET_IP_); in xlog_state_do_iclog_callbacks()
2813 trace_xlog_iclog_callbacks_done(iclog, _RET_IP_); in xlog_state_do_iclog_callbacks()
2817 xlog_state_clean_iclog(log, iclog); in xlog_state_do_iclog_callbacks()
2818 iclog = iclog->ic_next; in xlog_state_do_iclog_callbacks()
2819 } while (iclog != first_iclog); in xlog_state_do_iclog_callbacks()
2865 struct xlog_in_core *iclog) in xlog_state_done_syncing() argument
2867 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing()
2870 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); in xlog_state_done_syncing()
2871 trace_xlog_iclog_sync_done(iclog, _RET_IP_); in xlog_state_done_syncing()
2879 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING); in xlog_state_done_syncing()
2880 iclog->ic_state = XLOG_STATE_DONE_SYNC; in xlog_state_done_syncing()
2888 wake_up_all(&iclog->ic_write_wait); in xlog_state_done_syncing()
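xlog_state_do_iclog_callbacks() detaches each CALLBACK-state iclog's ic_callbacks list with list_splice_init() while holding l_icloglock, then runs the callbacks with the lock dropped, so they are free to take other locks. A pthread sketch of that splice-then-run shape, with a hand-rolled singly linked list in place of the kernel's list_head:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct callback {
    void (*fn)(void);
    struct callback *next;
};

static pthread_mutex_t icloglock = PTHREAD_MUTEX_INITIALIZER;
static struct callback *ic_callbacks;  /* per-iclog list, lock-protected */

/* Detach the whole callback list under the lock, then run the
 * callbacks with the lock dropped -- the shape of
 * xlog_state_do_iclog_callbacks(), which uses list_splice_init(). */
static void run_callbacks(void)
{
    struct callback *cb_list, *cb;

    pthread_mutex_lock(&icloglock);
    cb_list = ic_callbacks;
    ic_callbacks = NULL;       /* splice + re-init */
    pthread_mutex_unlock(&icloglock);

    while ((cb = cb_list)) {
        cb_list = cb->next;
        cb->fn();              /* may take other locks safely */
        free(cb);
    }
}

static void hello(void) { puts("callback ran"); }

int main(void)
{
    struct callback *cb = malloc(sizeof(*cb));

    cb->fn = hello;
    cb->next = NULL;
    pthread_mutex_lock(&icloglock);
    ic_callbacks = cb;
    pthread_mutex_unlock(&icloglock);
    run_callbacks();
    return 0;
}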
2921 xlog_in_core_t *iclog; in xlog_state_get_iclog_space() local
2930 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2931 if (iclog->ic_state != XLOG_STATE_ACTIVE) { in xlog_state_get_iclog_space()
2939 head = &iclog->ic_header; in xlog_state_get_iclog_space()
2941 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ in xlog_state_get_iclog_space()
2942 log_offset = iclog->ic_offset; in xlog_state_get_iclog_space()
2944 trace_xlog_iclog_get_space(iclog, _RET_IP_); in xlog_state_get_iclog_space()
2968 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { in xlog_state_get_iclog_space()
2971 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2980 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) in xlog_state_get_iclog_space()
2981 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_state_get_iclog_space()
2994 if (len <= iclog->ic_size - iclog->ic_offset) in xlog_state_get_iclog_space()
2995 iclog->ic_offset += len; in xlog_state_get_iclog_space()
2997 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2998 *iclogp = iclog; in xlog_state_get_iclog_space()
3000 ASSERT(iclog->ic_offset <= iclog->ic_size); in xlog_state_get_iclog_space()
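The release in xlog_state_get_iclog_space() at line 2980 uses atomic_add_unless(): drop the reference only if it is not the last one, and fall back to the full xlog_state_release_iclog() slow path when it is. A C11 rendering of that helper and the resulting fast/slow split:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add 'a' to *v unless *v == u; returns true if the add happened.
 * Userspace rendering of the kernel's atomic_add_unless(). */
static bool atomic_add_unless(atomic_int *v, int a, int u)
{
    int c = atomic_load(v);

    do {
        if (c == u)
            return false;
    } while (!atomic_compare_exchange_weak(v, &c, c + a));
    return true;
}

int main(void)
{
    atomic_int ic_refcnt = 2;

    /* Drops 2 -> 1 without further work... */
    printf("fast drop: %d\n", atomic_add_unless(&ic_refcnt, -1, 1));
    /* ...but refuses to drop the last reference: the caller must
     * take the slow path (xlog_state_release_iclog()). */
    printf("fast drop: %d\n", atomic_add_unless(&ic_refcnt, -1, 1));
    return 0;
}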
3098 struct xlog_in_core *iclog, in xlog_state_switch_iclogs() argument
3101 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); in xlog_state_switch_iclogs()
3103 trace_xlog_iclog_switch(iclog, _RET_IP_); in xlog_state_switch_iclogs()
3106 eventual_size = iclog->ic_offset; in xlog_state_switch_iclogs()
3107 iclog->ic_state = XLOG_STATE_WANT_SYNC; in xlog_state_switch_iclogs()
3108 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3136 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3137 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
3149 struct xlog_in_core *iclog, in xlog_force_and_check_iclog() argument
3152 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xlog_force_and_check_iclog()
3156 error = xlog_force_iclog(iclog); in xlog_force_and_check_iclog()
3164 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) in xlog_force_and_check_iclog()
3202 struct xlog_in_core *iclog; in xfs_log_force() local
3213 iclog = log->l_iclog; in xfs_log_force()
3214 trace_xlog_iclog_force(iclog, _RET_IP_); in xfs_log_force()
3216 if (iclog->ic_state == XLOG_STATE_DIRTY || in xfs_log_force()
3217 (iclog->ic_state == XLOG_STATE_ACTIVE && in xfs_log_force()
3218 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) { in xfs_log_force()
3227 iclog = iclog->ic_prev; in xfs_log_force()
3228 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) { in xfs_log_force()
3229 if (atomic_read(&iclog->ic_refcnt) == 0) { in xfs_log_force()
3233 if (xlog_force_and_check_iclog(iclog, &completed)) in xfs_log_force()
3244 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3254 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) in xfs_log_force()
3255 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; in xfs_log_force()
3258 return xlog_wait_on_iclog(iclog); in xfs_log_force()
3289 struct xlog_in_core *iclog; in xlog_force_lsn() local
3296 iclog = log->l_iclog; in xlog_force_lsn()
3297 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { in xlog_force_lsn()
3298 trace_xlog_iclog_force_lsn(iclog, _RET_IP_); in xlog_force_lsn()
3299 iclog = iclog->ic_next; in xlog_force_lsn()
3300 if (iclog == log->l_iclog) in xlog_force_lsn()
3304 switch (iclog->ic_state) { in xlog_force_lsn()
3322 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC || in xlog_force_lsn()
3323 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) { in xlog_force_lsn()
3324 xlog_wait(&iclog->ic_prev->ic_write_wait, in xlog_force_lsn()
3328 if (xlog_force_and_check_iclog(iclog, &completed)) in xlog_force_lsn()
3343 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; in xlog_force_lsn()
3356 return xlog_wait_on_iclog(iclog); in xlog_force_lsn()
3594 struct xlog_in_core *iclog) in xlog_verify_tail_lsn() argument
3596 xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn); in xlog_verify_tail_lsn()
3602 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3611 if (blocks < BTOBB(iclog->ic_offset) + 1) in xlog_verify_tail_lsn()
3634 struct xlog_in_core *iclog, in xlog_verify_iclog() argument
3657 if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) in xlog_verify_iclog()
3660 base_ptr = ptr = &iclog->ic_header; in xlog_verify_iclog()
3661 p = &iclog->ic_header; in xlog_verify_iclog()
3669 len = be32_to_cpu(iclog->ic_header.h_num_logops); in xlog_verify_iclog()
3670 base_ptr = ptr = iclog->ic_datap; in xlog_verify_iclog()
3672 xhdr = iclog->ic_data; in xlog_verify_iclog()
3682 idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap); in xlog_verify_iclog()
3690 iclog->ic_header.h_cycle_data[idx]); in xlog_verify_iclog()
3706 idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap); in xlog_verify_iclog()
3712 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]); in xlog_verify_iclog()
3832 xlog_in_core_t *iclog; in xlog_iclogs_empty() local
3834 iclog = log->l_iclog; in xlog_iclogs_empty()
3839 if (iclog->ic_header.h_num_logops) in xlog_iclogs_empty()
3841 iclog = iclog->ic_next; in xlog_iclogs_empty()
3842 } while (iclog != log->l_iclog); in xlog_iclogs_empty()