Lines Matching full:log
18 #include "raid5-log.h"
28 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
30 * In write through mode, the reclaim runs every log->max_free_space.
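The min(1/4 disk size, 10G) rule quoted above is easy to model in isolation. A minimal userspace sketch, assuming the kernel's RECLAIM_MAX_FREE_SPACE constants (the 10 GiB figure expressed in 512-byte sectors); the real computation appears later in r5l_load_log():

    #include <stdint.h>

    typedef uint64_t sector_t;                               /* 512-byte sectors */

    #define RECLAIM_MAX_FREE_SPACE_SHIFT 2                   /* 1/4 of the device */
    #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2ULL) /* ~10 GiB in sectors */

    /* log->max_free_space is min(1/4 disk size, 10G reclaimable space) */
    static sector_t max_free_space(sector_t device_size)
    {
            sector_t s = device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;

            return s < RECLAIM_MAX_FREE_SPACE ? s : RECLAIM_MAX_FREE_SPACE;
    }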
70 * writes are committed from the log device. Therefore, a stripe in
72 * - write to log device
87 sector_t device_size; /* log device size, round to
92 sector_t last_checkpoint; /* log tail. where recovery scan
94 u64 last_cp_seq; /* log tail sequence */
96 sector_t log_start; /* log head. where new data appends */
97 u64 seq; /* log head sequence */
107 * written to the log */
109 * written to the log but not yet written
111 struct list_head flushing_ios; /* io_units which are waiting for log
113 struct list_head finished_ios; /* io_units which settle down in log disk */
133 struct list_head no_space_stripes; /* pending stripes, log has no space */
205 * unit is written to log disk with normal write, as we always flush log disk
210 struct r5l_log *log; member
221 struct list_head log_sibling; /* log->running_ios */
244 IO_UNIT_IO_START = 1, /* io_unit bio start writing to log,
246 IO_UNIT_IO_END = 2, /* io_unit bio finish writing to log */
250 bool r5c_is_writeback(struct r5l_log *log) in r5c_is_writeback() argument
252 return (log != NULL && in r5c_is_writeback()
253 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK); in r5c_is_writeback()
256 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc) in r5l_ring_add() argument
259 if (start >= log->device_size) in r5l_ring_add()
260 start = start - log->device_size; in r5l_ring_add()
264 static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start, in r5l_ring_distance() argument
270 return end + log->device_size - start; in r5l_ring_distance()
273 static bool r5l_has_free_space(struct r5l_log *log, sector_t size) in r5l_has_free_space() argument
277 used_size = r5l_ring_distance(log, log->last_checkpoint, in r5l_has_free_space()
278 log->log_start); in r5l_has_free_space()
280 return log->device_size > used_size + size; in r5l_has_free_space()
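The three helpers above implement the journal's circular-buffer arithmetic. A self-contained sketch of the same logic (passing device_size explicitly is illustrative; the kernel reads it from struct r5l_log):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Advance a position around the ring; assumes inc < device_size. */
    static sector_t ring_add(sector_t device_size, sector_t start, sector_t inc)
    {
            start += inc;
            if (start >= device_size)
                    start -= device_size;
            return start;
    }

    /* Distance from start to end, walking forward around the ring. */
    static sector_t ring_distance(sector_t device_size, sector_t start, sector_t end)
    {
            return end >= start ? end - start : end + device_size - start;
    }

    /* Used space is the ring distance from the log tail to the log head. */
    static bool has_free_space(sector_t device_size, sector_t last_checkpoint,
                               sector_t log_start, sector_t size)
    {
            sector_t used = ring_distance(device_size, last_checkpoint, log_start);

            return device_size > used + size;
    }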
324 void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
331 if (!r5c_is_writeback(conf->log)) in r5c_check_stripe_cache_usage()
347 r5l_wake_reclaim(conf->log, 0); in r5c_check_stripe_cache_usage()
356 if (!r5c_is_writeback(conf->log)) in r5c_check_cached_full_stripe()
366 r5l_wake_reclaim(conf->log, 0); in r5c_check_cached_full_stripe()
370 * Total log space (in sectors) needed to flush all data in cache
372 * To avoid deadlock due to log space, it is necessary to reserve log
373 * space to flush critical stripes (stripes occupying log space near
374 * last_checkpoint). This function helps check how much log space is
377 * To reduce log space requirements, two mechanisms are used to give cache
399 struct r5l_log *log = conf->log; in r5c_log_required_to_flush_cache() local
401 if (!r5c_is_writeback(log)) in r5c_log_required_to_flush_cache()
405 ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) + in r5c_log_required_to_flush_cache()
410 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
412 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
413 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
416 static inline void r5c_update_log_state(struct r5l_log *log) in r5c_update_log_state() argument
418 struct r5conf *conf = log->rdev->mddev->private; in r5c_update_log_state()
423 if (!r5c_is_writeback(log)) in r5c_update_log_state()
426 free_space = r5l_ring_distance(log, log->log_start, in r5c_update_log_state()
427 log->last_checkpoint); in r5c_update_log_state()
442 r5l_wake_reclaim(log, 0); in r5c_update_log_state()
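Per the comment above, the state update reduces to two thresholds on free space. A hedged sketch (the kernel tracks R5C_LOG_TIGHT and R5C_LOG_CRITICAL as independent bits in conf->cache_state rather than a single enum):

    #include <stdint.h>

    typedef uint64_t sector_t;

    enum log_state { LOG_OK, LOG_TIGHT, LOG_CRITICAL };

    /* CRITICAL below 2x reclaim_required_space, TIGHT below 3x. */
    static enum log_state classify_log_space(sector_t free_space,
                                             sector_t reclaim_required)
    {
            if (free_space < 2 * reclaim_required)
                    return LOG_CRITICAL;
            if (free_space < 3 * reclaim_required)
                    return LOG_TIGHT;
            return LOG_OK;
    }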
452 struct r5l_log *log = conf->log; in r5c_make_stripe_write_out() local
454 BUG_ON(!r5c_is_writeback(log)); in r5c_make_stripe_write_out()
490 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
494 struct r5l_log *log = sh->raid_conf->log; in r5c_finish_cache_stripe() local
496 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { in r5c_finish_cache_stripe()
527 static void r5l_log_run_stripes(struct r5l_log *log) in r5l_log_run_stripes() argument
531 lockdep_assert_held(&log->io_list_lock); in r5l_log_run_stripes()
533 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) { in r5l_log_run_stripes()
538 list_move_tail(&io->log_sibling, &log->finished_ios); in r5l_log_run_stripes()
543 static void r5l_move_to_end_ios(struct r5l_log *log) in r5l_move_to_end_ios() argument
547 lockdep_assert_held(&log->io_list_lock); in r5l_move_to_end_ios()
549 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) { in r5l_move_to_end_ios()
553 list_move_tail(&io->log_sibling, &log->io_end_ios); in r5l_move_to_end_ios()
562 struct r5l_log *log = io->log; in r5l_log_endio() local
568 md_error(log->rdev->mddev, log->rdev); in r5l_log_endio()
571 mempool_free(io->meta_page, &log->meta_pool); in r5l_log_endio()
573 spin_lock_irqsave(&log->io_list_lock, flags); in r5l_log_endio()
585 if (log->need_cache_flush && !list_empty(&io->stripe_list)) in r5l_log_endio()
586 r5l_move_to_end_ios(log); in r5l_log_endio()
588 r5l_log_run_stripes(log); in r5l_log_endio()
589 if (!list_empty(&log->running_ios)) { in r5l_log_endio()
594 io_deferred = list_first_entry(&log->running_ios, in r5l_log_endio()
597 schedule_work(&log->deferred_io_work); in r5l_log_endio()
600 spin_unlock_irqrestore(&log->io_list_lock, flags); in r5l_log_endio()
602 if (log->need_cache_flush) in r5l_log_endio()
603 md_wakeup_thread(log->rdev->mddev->thread); in r5l_log_endio()
624 static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io) in r5l_do_submit_io() argument
628 spin_lock_irqsave(&log->io_list_lock, flags); in r5l_do_submit_io()
630 spin_unlock_irqrestore(&log->io_list_lock, flags); in r5l_do_submit_io()
661 struct r5l_log *log = container_of(work, struct r5l_log, in r5l_submit_io_async() local
666 spin_lock_irqsave(&log->io_list_lock, flags); in r5l_submit_io_async()
667 if (!list_empty(&log->running_ios)) { in r5l_submit_io_async()
668 io = list_first_entry(&log->running_ios, struct r5l_io_unit, in r5l_submit_io_async()
675 spin_unlock_irqrestore(&log->io_list_lock, flags); in r5l_submit_io_async()
677 r5l_do_submit_io(log, io); in r5l_submit_io_async()
682 struct r5l_log *log = container_of(work, struct r5l_log, in r5c_disable_writeback_async() local
684 struct mddev *mddev = log->rdev->mddev; in r5c_disable_writeback_async()
688 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) in r5c_disable_writeback_async()
695 conf->log == NULL || in r5c_disable_writeback_async()
700 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; in r5c_disable_writeback_async()
706 static void r5l_submit_current_io(struct r5l_log *log) in r5l_submit_current_io() argument
708 struct r5l_io_unit *io = log->current_io; in r5l_submit_current_io()
719 crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE); in r5l_submit_current_io()
722 log->current_io = NULL; in r5l_submit_current_io()
723 spin_lock_irqsave(&log->io_list_lock, flags); in r5l_submit_current_io()
725 if (io != list_first_entry(&log->running_ios, in r5l_submit_current_io()
731 spin_unlock_irqrestore(&log->io_list_lock, flags); in r5l_submit_current_io()
733 r5l_do_submit_io(log, io); in r5l_submit_current_io()
736 static struct bio *r5l_bio_alloc(struct r5l_log *log) in r5l_bio_alloc() argument
738 struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS, in r5l_bio_alloc()
739 REQ_OP_WRITE, GFP_NOIO, &log->bs); in r5l_bio_alloc()
741 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; in r5l_bio_alloc()
746 static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io) in r5_reserve_log_entry() argument
748 log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS); in r5_reserve_log_entry()
750 r5c_update_log_state(log); in r5_reserve_log_entry()
752 * If we filled up the log device, start from the beginning again, in r5_reserve_log_entry()
755 * Note: for this to work properly the log size needs to be a multiple in r5_reserve_log_entry()
758 if (log->log_start == 0) in r5_reserve_log_entry()
761 io->log_end = log->log_start; in r5_reserve_log_entry()
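Reservation only ever advances the head by one block, and a wrap back to sector 0 forces a new bio (the kernel records this in the io_unit as io->need_split_bio). A toy model, assuming device_size is a multiple of BLOCK_SECTORS as the comment requires:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    #define BLOCK_SECTORS 8 /* one 4 KiB block in 512-byte sectors */

    struct ring {
            sector_t device_size; /* multiple of BLOCK_SECTORS */
            sector_t log_start;   /* log head */
    };

    /* Reserve one block; returns true when the head wrapped and the
     * caller must start a fresh bio for subsequent pages. */
    static bool reserve_log_block(struct ring *r)
    {
            r->log_start += BLOCK_SECTORS;
            if (r->log_start >= r->device_size)
                    r->log_start -= r->device_size;
            return r->log_start == 0;
    }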
764 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) in r5l_new_meta() argument
769 io = mempool_alloc(&log->io_pool, GFP_ATOMIC); in r5l_new_meta()
774 io->log = log; in r5l_new_meta()
780 io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO); in r5l_new_meta()
785 block->seq = cpu_to_le64(log->seq); in r5l_new_meta()
786 block->position = cpu_to_le64(log->log_start); in r5l_new_meta()
788 io->log_start = log->log_start; in r5l_new_meta()
790 io->seq = log->seq++; in r5l_new_meta()
792 io->current_bio = r5l_bio_alloc(log); in r5l_new_meta()
797 r5_reserve_log_entry(log, io); in r5l_new_meta()
799 spin_lock_irq(&log->io_list_lock); in r5l_new_meta()
800 list_add_tail(&io->log_sibling, &log->running_ios); in r5l_new_meta()
801 spin_unlock_irq(&log->io_list_lock); in r5l_new_meta()
806 static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size) in r5l_get_meta() argument
808 if (log->current_io && in r5l_get_meta()
809 log->current_io->meta_offset + payload_size > PAGE_SIZE) in r5l_get_meta()
810 r5l_submit_current_io(log); in r5l_get_meta()
812 if (!log->current_io) { in r5l_get_meta()
813 log->current_io = r5l_new_meta(log); in r5l_get_meta()
814 if (!log->current_io) in r5l_get_meta()
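The helper above encodes a simple packing rule: payload headers accumulate in the current 4 KiB meta page, and the io_unit is submitted as soon as the next payload would overflow it. A sketch with function pointers standing in for r5l_submit_current_io() and r5l_new_meta():

    #include <stddef.h>

    #define PAGE_SIZE 4096

    struct io_unit {
            size_t meta_offset; /* bytes of the meta page already used */
    };

    static int get_meta(struct io_unit **cur, size_t payload_size,
                        void (*submit)(struct io_unit *),
                        struct io_unit *(*new_meta)(void))
    {
            if (*cur && (*cur)->meta_offset + payload_size > PAGE_SIZE) {
                    submit(*cur);
                    *cur = NULL;
            }
            if (!*cur) {
                    *cur = new_meta();
                    if (!*cur)
                            return -1; /* kernel: -ENOMEM */
            }
            return 0;
    }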
821 static void r5l_append_payload_meta(struct r5l_log *log, u16 type, in r5l_append_payload_meta() argument
826 struct r5l_io_unit *io = log->current_io; in r5l_append_payload_meta()
843 static void r5l_append_payload_page(struct r5l_log *log, struct page *page) in r5l_append_payload_page() argument
845 struct r5l_io_unit *io = log->current_io; in r5l_append_payload_page()
850 io->current_bio = r5l_bio_alloc(log); in r5l_append_payload_page()
858 r5_reserve_log_entry(log, io); in r5l_append_payload_page()
861 static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect) in r5l_append_flush_payload() argument
863 struct mddev *mddev = log->rdev->mddev; in r5l_append_flush_payload()
877 mutex_lock(&log->io_mutex); in r5l_append_flush_payload()
880 if (r5l_get_meta(log, meta_size)) { in r5l_append_flush_payload()
881 mutex_unlock(&log->io_mutex); in r5l_append_flush_payload()
886 io = log->current_io; in r5l_append_flush_payload()
898 mutex_unlock(&log->io_mutex); in r5l_append_flush_payload()
901 static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, in r5l_log_stripe() argument
915 ret = r5l_get_meta(log, meta_size); in r5l_log_stripe()
919 io = log->current_io; in r5l_log_stripe()
931 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) { in r5l_log_stripe()
939 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA, in r5l_log_stripe()
942 r5l_append_payload_page(log, sh->dev[i].page); in r5l_log_stripe()
946 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, in r5l_log_stripe()
949 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); in r5l_log_stripe()
950 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page); in r5l_log_stripe()
952 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, in r5l_log_stripe()
955 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); in r5l_log_stripe()
963 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) in r5l_log_stripe()
969 spin_lock_irq(&log->stripe_in_journal_lock); in r5l_log_stripe()
971 &log->stripe_in_journal_list); in r5l_log_stripe()
972 spin_unlock_irq(&log->stripe_in_journal_lock); in r5l_log_stripe()
973 atomic_inc(&log->stripe_in_journal_count); in r5l_log_stripe()
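Pieced together, the append calls above lay a stripe out as one meta page followed by its pages: r5l_append_payload_meta() packs a header into the meta page while r5l_append_payload_page() puts the page itself onto the current bio, in the same order. A sketch of the ordering only (header fields, checksums, and padding omitted):

    meta block page: [ DATA hdr ]...[ DATA hdr ][ PARITY hdr ]
    following pages: [ data page ]...[ data page ][ P page ]([ Q page ] if qd_idx >= 0)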
979 static inline void r5l_add_no_space_stripe(struct r5l_log *log, in r5l_add_no_space_stripe() argument
982 spin_lock(&log->no_space_stripes_lock); in r5l_add_no_space_stripe()
983 list_add_tail(&sh->log_list, &log->no_space_stripes); in r5l_add_no_space_stripe()
984 spin_unlock(&log->no_space_stripes_lock); in r5l_add_no_space_stripe()
989 * data from log to raid disks), so we shouldn't wait for reclaim here
991 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) in r5l_write_stripe() argument
1001 if (!log) in r5l_write_stripe()
1006 /* the stripe is written to log, we start writing it to raid */ in r5l_write_stripe()
1025 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, in r5l_write_stripe()
1040 mutex_lock(&log->io_mutex); in r5l_write_stripe()
1044 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { in r5l_write_stripe()
1045 if (!r5l_has_free_space(log, reserve)) { in r5l_write_stripe()
1046 r5l_add_no_space_stripe(log, sh); in r5l_write_stripe()
1049 ret = r5l_log_stripe(log, sh, data_pages, parity_pages); in r5l_write_stripe()
1051 spin_lock_irq(&log->io_list_lock); in r5l_write_stripe()
1053 &log->no_mem_stripes); in r5l_write_stripe()
1054 spin_unlock_irq(&log->io_list_lock); in r5l_write_stripe()
1059 * log space critical, do not process stripes that are in r5l_write_stripe()
1064 r5l_add_no_space_stripe(log, sh); in r5l_write_stripe()
1067 } else if (!r5l_has_free_space(log, reserve)) { in r5l_write_stripe()
1068 if (sh->log_start == log->last_checkpoint) in r5l_write_stripe()
1071 r5l_add_no_space_stripe(log, sh); in r5l_write_stripe()
1073 ret = r5l_log_stripe(log, sh, data_pages, parity_pages); in r5l_write_stripe()
1075 spin_lock_irq(&log->io_list_lock); in r5l_write_stripe()
1077 &log->no_mem_stripes); in r5l_write_stripe()
1078 spin_unlock_irq(&log->io_list_lock); in r5l_write_stripe()
1083 mutex_unlock(&log->io_mutex); in r5l_write_stripe()
1085 r5l_wake_reclaim(log, reserve); in r5l_write_stripe()
1089 void r5l_write_stripe_run(struct r5l_log *log) in r5l_write_stripe_run() argument
1091 if (!log) in r5l_write_stripe_run()
1093 mutex_lock(&log->io_mutex); in r5l_write_stripe_run()
1094 r5l_submit_current_io(log); in r5l_write_stripe_run()
1095 mutex_unlock(&log->io_mutex); in r5l_write_stripe_run()
1098 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) in r5l_handle_flush_request() argument
1100 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { in r5l_handle_flush_request()
1103 * we flush log disk cache first, then write stripe data to in r5l_handle_flush_request()
1104 * raid disks. So if bio is finished, the log disk cache is in r5l_handle_flush_request()
1106 * the bio from log disk, so we don't need to flush again in r5l_handle_flush_request()
1116 mutex_lock(&log->io_mutex); in r5l_handle_flush_request()
1117 r5l_get_meta(log, 0); in r5l_handle_flush_request()
1118 bio_list_add(&log->current_io->flush_barriers, bio); in r5l_handle_flush_request()
1119 log->current_io->has_flush = 1; in r5l_handle_flush_request()
1120 log->current_io->has_null_flush = 1; in r5l_handle_flush_request()
1121 atomic_inc(&log->current_io->pending_stripe); in r5l_handle_flush_request()
1122 r5l_submit_current_io(log); in r5l_handle_flush_request()
1123 mutex_unlock(&log->io_mutex); in r5l_handle_flush_request()
1130 /* This will run after log space is reclaimed */
1131 static void r5l_run_no_space_stripes(struct r5l_log *log) in r5l_run_no_space_stripes() argument
1135 spin_lock(&log->no_space_stripes_lock); in r5l_run_no_space_stripes()
1136 while (!list_empty(&log->no_space_stripes)) { in r5l_run_no_space_stripes()
1137 sh = list_first_entry(&log->no_space_stripes, in r5l_run_no_space_stripes()
1143 spin_unlock(&log->no_space_stripes_lock); in r5l_run_no_space_stripes()
1148 * for write through mode, returns log->next_checkpoint
1154 struct r5l_log *log = conf->log; in r5c_calculate_new_cp() local
1158 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) in r5c_calculate_new_cp()
1159 return log->next_checkpoint; in r5c_calculate_new_cp()
1161 spin_lock_irqsave(&log->stripe_in_journal_lock, flags); in r5c_calculate_new_cp()
1162 if (list_empty(&conf->log->stripe_in_journal_list)) { in r5c_calculate_new_cp()
1164 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); in r5c_calculate_new_cp()
1165 return log->next_checkpoint; in r5c_calculate_new_cp()
1167 sh = list_first_entry(&conf->log->stripe_in_journal_list, in r5c_calculate_new_cp()
1170 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); in r5c_calculate_new_cp()
1174 static sector_t r5l_reclaimable_space(struct r5l_log *log) in r5l_reclaimable_space() argument
1176 struct r5conf *conf = log->rdev->mddev->private; in r5l_reclaimable_space()
1178 return r5l_ring_distance(log, log->last_checkpoint, in r5l_reclaimable_space()
1182 static void r5l_run_no_mem_stripe(struct r5l_log *log) in r5l_run_no_mem_stripe() argument
1186 lockdep_assert_held(&log->io_list_lock); in r5l_run_no_mem_stripe()
1188 if (!list_empty(&log->no_mem_stripes)) { in r5l_run_no_mem_stripe()
1189 sh = list_first_entry(&log->no_mem_stripes, in r5l_run_no_mem_stripe()
1197 static bool r5l_complete_finished_ios(struct r5l_log *log) in r5l_complete_finished_ios() argument
1202 lockdep_assert_held(&log->io_list_lock); in r5l_complete_finished_ios()
1204 list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) { in r5l_complete_finished_ios()
1209 log->next_checkpoint = io->log_start; in r5l_complete_finished_ios()
1212 mempool_free(io, &log->io_pool); in r5l_complete_finished_ios()
1213 r5l_run_no_mem_stripe(log); in r5l_complete_finished_ios()
1223 struct r5l_log *log = io->log; in __r5l_stripe_write_finished() local
1224 struct r5conf *conf = log->rdev->mddev->private; in __r5l_stripe_write_finished()
1227 spin_lock_irqsave(&log->io_list_lock, flags); in __r5l_stripe_write_finished()
1230 if (!r5l_complete_finished_ios(log)) { in __r5l_stripe_write_finished()
1231 spin_unlock_irqrestore(&log->io_list_lock, flags); in __r5l_stripe_write_finished()
1235 if (r5l_reclaimable_space(log) > log->max_free_space || in __r5l_stripe_write_finished()
1237 r5l_wake_reclaim(log, 0); in __r5l_stripe_write_finished()
1239 spin_unlock_irqrestore(&log->io_list_lock, flags); in __r5l_stripe_write_finished()
1240 wake_up(&log->iounit_wait); in __r5l_stripe_write_finished()
1256 struct r5l_log *log = container_of(bio, struct r5l_log, in r5l_log_flush_endio() local
1262 md_error(log->rdev->mddev, log->rdev); in r5l_log_flush_endio()
1265 spin_lock_irqsave(&log->io_list_lock, flags); in r5l_log_flush_endio()
1266 list_for_each_entry(io, &log->flushing_ios, log_sibling) in r5l_log_flush_endio()
1268 list_splice_tail_init(&log->flushing_ios, &log->finished_ios); in r5l_log_flush_endio()
1269 spin_unlock_irqrestore(&log->io_list_lock, flags); in r5l_log_flush_endio()
1274 * io_unit(meta) is persistent in the log. There is one situation we want to avoid: a
1275 * broken meta block in the middle of the log prevents recovery from finding the meta at the
1276 * head of the log. If an operation requires the meta at the head to be persistent in the log, we
1277 * must make sure the meta blocks before it are persistent in the log too. A case is:
1279 * stripe data/parity is in the log, we start writing the stripe to raid disks. stripe
1280 * data/parity must be persistent in log before we do the write to raid disks.
1284 * one whose data/parity is in log.
1286 void r5l_flush_stripe_to_raid(struct r5l_log *log) in r5l_flush_stripe_to_raid() argument
1290 if (!log || !log->need_cache_flush) in r5l_flush_stripe_to_raid()
1293 spin_lock_irq(&log->io_list_lock); in r5l_flush_stripe_to_raid()
1295 if (!list_empty(&log->flushing_ios)) { in r5l_flush_stripe_to_raid()
1296 spin_unlock_irq(&log->io_list_lock); in r5l_flush_stripe_to_raid()
1299 list_splice_tail_init(&log->io_end_ios, &log->flushing_ios); in r5l_flush_stripe_to_raid()
1300 do_flush = !list_empty(&log->flushing_ios); in r5l_flush_stripe_to_raid()
1301 spin_unlock_irq(&log->io_list_lock); in r5l_flush_stripe_to_raid()
1305 bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0, in r5l_flush_stripe_to_raid()
1307 log->flush_bio.bi_end_io = r5l_log_flush_endio; in r5l_flush_stripe_to_raid()
1308 submit_bio(&log->flush_bio); in r5l_flush_stripe_to_raid()
1311 static void r5l_write_super(struct r5l_log *log, sector_t cp);
1312 static void r5l_write_super_and_discard_space(struct r5l_log *log, in r5l_write_super_and_discard_space() argument
1315 struct block_device *bdev = log->rdev->bdev; in r5l_write_super_and_discard_space()
1318 r5l_write_super(log, end); in r5l_write_super_and_discard_space()
1323 mddev = log->rdev->mddev; in r5l_write_super_and_discard_space()
1326 * superblock is updated to new log tail. Updating superblock (either in r5l_write_super_and_discard_space()
1343 if (log->last_checkpoint < end) { in r5l_write_super_and_discard_space()
1345 log->last_checkpoint + log->rdev->data_offset, in r5l_write_super_and_discard_space()
1346 end - log->last_checkpoint, GFP_NOIO); in r5l_write_super_and_discard_space()
1349 log->last_checkpoint + log->rdev->data_offset, in r5l_write_super_and_discard_space()
1350 log->device_size - log->last_checkpoint, in r5l_write_super_and_discard_space()
1352 blkdev_issue_discard(bdev, log->rdev->data_offset, end, in r5l_write_super_and_discard_space()
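The two blkdev_issue_discard() arms above are just a wrap-aware range split. In isolation (data_offset dropped, discard_range a hypothetical callback):

    #include <stdint.h>

    typedef uint64_t sector_t;

    static void discard_reclaimed(sector_t last_checkpoint, sector_t end,
                                  sector_t device_size,
                                  void (*discard_range)(sector_t start, sector_t len))
    {
            if (last_checkpoint < end) {
                    /* contiguous: tail..head in one range */
                    discard_range(last_checkpoint, end - last_checkpoint);
            } else {
                    /* wrapped: tail..device end, then 0..head */
                    discard_range(last_checkpoint, device_size - last_checkpoint);
                    discard_range(0, end);
            }
    }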
1402 if (!conf->log) in r5c_flush_cache()
1423 struct r5l_log *log = conf->log; in r5c_do_reclaim() local
1431 if (!r5c_is_writeback(log)) in r5c_do_reclaim()
1465 /* if log space is tight, flush stripes on stripe_in_journal_list */ in r5c_do_reclaim()
1467 spin_lock_irqsave(&log->stripe_in_journal_lock, flags); in r5c_do_reclaim()
1469 list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) { in r5c_do_reclaim()
1487 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); in r5c_do_reclaim()
1491 r5l_run_no_space_stripes(log); in r5c_do_reclaim()
1496 static void r5l_do_reclaim(struct r5l_log *log) in r5l_do_reclaim() argument
1498 struct r5conf *conf = log->rdev->mddev->private; in r5l_do_reclaim()
1499 sector_t reclaim_target = xchg(&log->reclaim_target, 0); in r5l_do_reclaim()
1504 spin_lock_irq(&log->io_list_lock); in r5l_do_reclaim()
1505 write_super = r5l_reclaimable_space(log) > log->max_free_space || in r5l_do_reclaim()
1506 reclaim_target != 0 || !list_empty(&log->no_space_stripes); in r5l_do_reclaim()
1513 reclaimable = r5l_reclaimable_space(log); in r5l_do_reclaim()
1515 (list_empty(&log->running_ios) && in r5l_do_reclaim()
1516 list_empty(&log->io_end_ios) && in r5l_do_reclaim()
1517 list_empty(&log->flushing_ios) && in r5l_do_reclaim()
1518 list_empty(&log->finished_ios))) in r5l_do_reclaim()
1521 md_wakeup_thread(log->rdev->mddev->thread); in r5l_do_reclaim()
1522 wait_event_lock_irq(log->iounit_wait, in r5l_do_reclaim()
1523 r5l_reclaimable_space(log) > reclaimable, in r5l_do_reclaim()
1524 log->io_list_lock); in r5l_do_reclaim()
1528 spin_unlock_irq(&log->io_list_lock); in r5l_do_reclaim()
1535 * here, because the log area might be reused soon and we don't want to in r5l_do_reclaim()
1538 r5l_write_super_and_discard_space(log, next_checkpoint); in r5l_do_reclaim()
1540 mutex_lock(&log->io_mutex); in r5l_do_reclaim()
1541 log->last_checkpoint = next_checkpoint; in r5l_do_reclaim()
1542 r5c_update_log_state(log); in r5l_do_reclaim()
1543 mutex_unlock(&log->io_mutex); in r5l_do_reclaim()
1545 r5l_run_no_space_stripes(log); in r5l_do_reclaim()
1552 struct r5l_log *log = conf->log; in r5l_reclaim_thread() local
1554 if (!log) in r5l_reclaim_thread()
1557 r5l_do_reclaim(log); in r5l_reclaim_thread()
1560 void r5l_wake_reclaim(struct r5l_log *log, sector_t space) in r5l_wake_reclaim() argument
1565 if (!log) in r5l_wake_reclaim()
1568 target = READ_ONCE(log->reclaim_target); in r5l_wake_reclaim()
1572 } while (!try_cmpxchg(&log->reclaim_target, &target, new)); in r5l_wake_reclaim()
1573 md_wakeup_thread(log->reclaim_thread); in r5l_wake_reclaim()
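The loop above is a lock-free "raise the target" update: reclaim_target only grows, and a racing caller asking for less simply returns. The same pattern with C11 atomics (atomic_compare_exchange_weak reloads cur with the observed value on failure, just as try_cmpxchg does):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool raise_reclaim_target(_Atomic uint64_t *target, uint64_t space)
    {
            uint64_t cur = atomic_load(target);

            do {
                    if (space < cur)
                            return false; /* a larger request is already pending */
            } while (!atomic_compare_exchange_weak(target, &cur, space));
            return true; /* caller should wake the reclaim thread */
    }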
1576 void r5l_quiesce(struct r5l_log *log, int quiesce) in r5l_quiesce() argument
1578 struct mddev *mddev = log->rdev->mddev; in r5l_quiesce()
1580 log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex)); in r5l_quiesce()
1586 r5l_wake_reclaim(log, MaxSector); in r5l_quiesce()
1587 r5l_do_reclaim(log); in r5l_quiesce()
1594 struct r5l_log *log = conf->log; in r5l_log_disk_error() local
1597 if (!log) in r5l_log_disk_error()
1600 return test_bit(Faulty, &log->rdev->flags); in r5l_log_disk_error()
1616 * in recovery, the log is read sequentially. It is not efficient to
1618 * reads multiple pages with one IO, so further log reads can
1628 static int r5l_recovery_allocate_ra_pool(struct r5l_log *log, in r5l_recovery_allocate_ra_pool() argument
1651 static void r5l_recovery_free_ra_pool(struct r5l_log *log, in r5l_recovery_free_ra_pool() argument
1666 static int r5l_recovery_fetch_ra_pool(struct r5l_log *log, in r5l_recovery_fetch_ra_pool() argument
1673 bio_init(&bio, log->rdev->bdev, ctx->ra_bvec, in r5l_recovery_fetch_ra_pool()
1675 bio.bi_iter.bi_sector = log->rdev->data_offset + offset; in r5l_recovery_fetch_ra_pool()
1685 offset = r5l_ring_add(log, offset, BLOCK_SECTORS); in r5l_recovery_fetch_ra_pool()
1700 static int r5l_recovery_read_page(struct r5l_log *log, in r5l_recovery_read_page() argument
1709 ret = r5l_recovery_fetch_ra_pool(log, ctx, offset); in r5l_recovery_read_page()
1724 static int r5l_recovery_read_meta_block(struct r5l_log *log, in r5l_recovery_read_meta_block() argument
1732 ret = r5l_recovery_read_page(log, ctx, page, ctx->pos); in r5l_recovery_read_meta_block()
1746 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); in r5l_recovery_read_meta_block()
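The read-ahead pool turns the sequential recovery scan into batched IO: one large read fills a window of pages, and per-page reads are served from it until the requested offset leaves the window. A toy model (the fetch callback is assumed to refill the window and reset start/valid; ring wrap-around is ignored here for brevity, while the kernel advances with r5l_ring_add()):

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE     4096
    #define BLOCK_SECTORS 8
    #define RA_POOL_PAGES 256 /* illustrative window size */

    typedef uint64_t sector_t;

    struct ra_pool {
            char     pages[RA_POOL_PAGES][PAGE_SIZE];
            sector_t start;  /* log offset cached at pages[0] */
            unsigned valid;  /* pages currently cached */
    };

    static int ra_read_page(struct ra_pool *ra, sector_t offset, void *dst,
                            int (*fetch)(struct ra_pool *ra, sector_t offset))
    {
            if (offset < ra->start ||
                offset >= ra->start + (sector_t)ra->valid * BLOCK_SECTORS) {
                    if (fetch(ra, offset)) /* refill the window at 'offset' */
                            return -1;
            }
            memcpy(dst, ra->pages[(offset - ra->start) / BLOCK_SECTORS], PAGE_SIZE);
            return 0;
    }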
1759 r5l_recovery_create_empty_meta_block(struct r5l_log *log, in r5l_recovery_create_empty_meta_block() argument
1774 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, in r5l_log_write_empty_meta_block() argument
1783 r5l_recovery_create_empty_meta_block(log, page, pos, seq); in r5l_log_write_empty_meta_block()
1785 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, in r5l_log_write_empty_meta_block()
1787 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE | in r5l_log_write_empty_meta_block()
1803 static void r5l_recovery_load_data(struct r5l_log *log, in r5l_recovery_load_data() argument
1809 struct mddev *mddev = log->rdev->mddev; in r5l_recovery_load_data()
1816 r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset); in r5l_recovery_load_data()
1825 static void r5l_recovery_load_parity(struct r5l_log *log, in r5l_recovery_load_parity() argument
1831 struct mddev *mddev = log->rdev->mddev; in r5l_recovery_load_parity()
1835 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset); in r5l_recovery_load_parity()
1842 log, ctx, sh->dev[sh->qd_idx].page, in r5l_recovery_load_parity()
1843 r5l_ring_add(log, log_offset, BLOCK_SECTORS)); in r5l_recovery_load_parity()
1977 r5l_recovery_verify_data_checksum(struct r5l_log *log, in r5l_recovery_verify_data_checksum() argument
1985 r5l_recovery_read_page(log, ctx, page, log_offset); in r5l_recovery_verify_data_checksum()
1987 checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE); in r5l_recovery_verify_data_checksum()
1997 r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log, in r5l_recovery_verify_data_checksum_for_mb() argument
2000 struct mddev *mddev = log->rdev->mddev; in r5l_recovery_verify_data_checksum_for_mb()
2004 sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); in r5l_recovery_verify_data_checksum_for_mb()
2019 log, ctx, page, log_offset, in r5l_recovery_verify_data_checksum_for_mb()
2024 log, ctx, page, log_offset, in r5l_recovery_verify_data_checksum_for_mb()
2029 log, ctx, page, in r5l_recovery_verify_data_checksum_for_mb()
2030 r5l_ring_add(log, log_offset, in r5l_recovery_verify_data_checksum_for_mb()
2044 log_offset = r5l_ring_add(log, log_offset, in r5l_recovery_verify_data_checksum_for_mb()
2070 r5c_recovery_analyze_meta_block(struct r5l_log *log, in r5c_recovery_analyze_meta_block() argument
2074 struct mddev *mddev = log->rdev->mddev; in r5c_recovery_analyze_meta_block()
2090 ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx); in r5c_recovery_analyze_meta_block()
2098 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); in r5c_recovery_analyze_meta_block()
2181 r5l_recovery_load_data(log, sh, ctx, payload, in r5c_recovery_analyze_meta_block()
2184 r5l_recovery_load_parity(log, sh, ctx, payload, in r5c_recovery_analyze_meta_block()
2189 log_offset = r5l_ring_add(log, log_offset, in r5c_recovery_analyze_meta_block()
2204 static void r5c_recovery_load_one_stripe(struct r5l_log *log, in r5c_recovery_load_one_stripe() argument
2220 * Scan through the log for all to-be-flushed data
2235 static int r5c_recovery_flush_log(struct r5l_log *log, in r5c_recovery_flush_log() argument
2241 /* scan through the log */ in r5c_recovery_flush_log()
2243 if (r5l_recovery_read_meta_block(log, ctx)) in r5c_recovery_flush_log()
2246 ret = r5c_recovery_analyze_meta_block(log, ctx, in r5c_recovery_flush_log()
2255 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks); in r5c_recovery_flush_log()
2269 r5c_recovery_load_one_stripe(log, sh); in r5c_recovery_flush_log()
2278 * log will start here. But we can't let the superblock point to the last valid
2279 * meta block. The log might look like:
2291 * Before recovery, the log looks like the following
2294 * | valid log | invalid log |
2297 * |- log->last_checkpoint
2298 * |- log->last_cp_seq
2300 * Now we scan through the log until we see invalid entry
2303 * | valid log | invalid log |
2306 * |- log->last_checkpoint |- ctx->pos
2307 * |- log->last_cp_seq |- ctx->seq
2313 * | valid log | invalid log |
2316 * |- log->last_checkpoint |- ctx->pos+1
2317 * |- log->last_cp_seq |- ctx->seq+10001
2324 * | valid log | data only stripes | invalid log |
2327 * |- log->last_checkpoint |- ctx->pos+n
2328 * |- log->last_cp_seq |- ctx->seq+10000+n
2331 * again from log->last_checkpoint.
2336 * | old log | data only stripes | invalid log |
2339 * |- log->last_checkpoint |- ctx->pos+n
2340 * |- log->last_cp_seq |- ctx->seq+10000+n
2343 * point on, the recovery will start from new log->last_checkpoint.
2346 r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, in r5c_recovery_rewrite_data_only_stripes() argument
2350 struct mddev *mddev = log->rdev->mddev; in r5c_recovery_rewrite_data_only_stripes()
2370 r5l_recovery_create_empty_meta_block(log, page, in r5c_recovery_rewrite_data_only_stripes()
2374 write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); in r5c_recovery_rewrite_data_only_stripes()
2390 crc32c_le(log->uuid_checksum, addr, in r5c_recovery_rewrite_data_only_stripes()
2393 sync_page_io(log->rdev, write_pos, PAGE_SIZE, in r5c_recovery_rewrite_data_only_stripes()
2395 write_pos = r5l_ring_add(log, write_pos, in r5c_recovery_rewrite_data_only_stripes()
2403 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, in r5c_recovery_rewrite_data_only_stripes()
2405 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, in r5c_recovery_rewrite_data_only_stripes()
2408 list_add_tail(&sh->r5c, &log->stripe_in_journal_list); in r5c_recovery_rewrite_data_only_stripes()
2409 atomic_inc(&log->stripe_in_journal_count); in r5c_recovery_rewrite_data_only_stripes()
2414 log->next_checkpoint = next_checkpoint; in r5c_recovery_rewrite_data_only_stripes()
2419 static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log, in r5c_recovery_flush_data_only_stripes() argument
2422 struct mddev *mddev = log->rdev->mddev; in r5c_recovery_flush_data_only_stripes()
2434 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK; in r5c_recovery_flush_data_only_stripes()
2447 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; in r5c_recovery_flush_data_only_stripes()
2452 static int r5l_recovery_log(struct r5l_log *log) in r5l_recovery_log() argument
2454 struct mddev *mddev = log->rdev->mddev; in r5l_recovery_log()
2463 ctx->pos = log->last_checkpoint; in r5l_recovery_log()
2464 ctx->seq = log->last_cp_seq; in r5l_recovery_log()
2473 if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) { in r5l_recovery_log()
2478 ret = r5c_recovery_flush_log(log, ctx); in r5l_recovery_log()
2495 log->next_checkpoint = ctx->pos; in r5l_recovery_log()
2496 r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++); in r5l_recovery_log()
2497 ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); in r5l_recovery_log()
2498 } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) { in r5l_recovery_log()
2505 log->log_start = ctx->pos; in r5l_recovery_log()
2506 log->seq = ctx->seq; in r5l_recovery_log()
2507 log->last_checkpoint = pos; in r5l_recovery_log()
2508 r5l_write_super(log, pos); in r5l_recovery_log()
2510 r5c_recovery_flush_data_only_stripes(log, ctx); in r5l_recovery_log()
2513 r5l_recovery_free_ra_pool(log, ctx); in r5l_recovery_log()
2521 static void r5l_write_super(struct r5l_log *log, sector_t cp) in r5l_write_super() argument
2523 struct mddev *mddev = log->rdev->mddev; in r5l_write_super()
2525 log->rdev->journal_tail = cp; in r5l_write_super()
2539 if (!conf || !conf->log) in r5c_journal_mode_show()
2542 switch (conf->log->r5c_journal_mode) { in r5c_journal_mode_show()
2579 if (!conf || !conf->log) in r5c_journal_mode_set()
2587 conf->log->r5c_journal_mode = mode; in r5c_journal_mode_set()
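As a usage note: these show/set handlers back an md sysfs attribute (named journal_mode, if memory serves), so the mode is normally switched from userspace with something like echo "write-back" > /sys/block/md0/md/journal_mode, the device name being illustrative; the show side lists both modes with the active one marked.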
2638 struct r5l_log *log = conf->log; in r5c_try_caching_write() local
2647 BUG_ON(!r5c_is_writeback(log)); in r5c_try_caching_write()
2697 spin_lock(&log->tree_lock); in r5c_try_caching_write()
2698 pslot = radix_tree_lookup_slot(&log->big_stripe_tree, in r5c_try_caching_write()
2702 pslot, &log->tree_lock) >> in r5c_try_caching_write()
2705 &log->big_stripe_tree, pslot, in r5c_try_caching_write()
2713 &log->big_stripe_tree, tree_index, in r5c_try_caching_write()
2716 spin_unlock(&log->tree_lock); in r5c_try_caching_write()
2721 spin_unlock(&log->tree_lock); in r5c_try_caching_write()
2805 struct r5l_log *log = conf->log; in r5c_finish_stripe_write_out() local
2812 if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)) in r5c_finish_stripe_write_out()
2818 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) in r5c_finish_stripe_write_out()
2840 spin_lock_irq(&log->stripe_in_journal_lock); in r5c_finish_stripe_write_out()
2842 spin_unlock_irq(&log->stripe_in_journal_lock); in r5c_finish_stripe_write_out()
2845 atomic_dec(&log->stripe_in_journal_count); in r5c_finish_stripe_write_out()
2846 r5c_update_log_state(log); in r5c_finish_stripe_write_out()
2852 spin_lock(&log->tree_lock); in r5c_finish_stripe_write_out()
2853 pslot = radix_tree_lookup_slot(&log->big_stripe_tree, in r5c_finish_stripe_write_out()
2857 pslot, &log->tree_lock) >> in r5c_finish_stripe_write_out()
2860 radix_tree_delete(&log->big_stripe_tree, tree_index); in r5c_finish_stripe_write_out()
2863 &log->big_stripe_tree, pslot, in r5c_finish_stripe_write_out()
2865 spin_unlock(&log->tree_lock); in r5c_finish_stripe_write_out()
2880 r5l_append_flush_payload(log, sh->sector); in r5c_finish_stripe_write_out()
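A note on the radix-tree usage visible above and in r5c_try_caching_write(): big_stripe_tree slots hold a counter rather than a pointer, left-shifted (by R5C_RADIX_COUNT_SHIFT in the kernel) so the tree's low tag bits stay clear. A first writer inserts the entry, later writers increment it, and write-out decrements it, deleting the entry when the count reaches zero. In miniature (the shift value is an assumption):

    #include <stdint.h>

    #define RADIX_COUNT_SHIFT 2 /* keep low bits free for the tree's own tags */

    /* Slot value <-> reference count for one big stripe. */
    static inline uintptr_t slot_refs(uintptr_t slot)
    {
            return slot >> RADIX_COUNT_SHIFT;
    }

    static inline uintptr_t slot_inc(uintptr_t slot)
    {
            return slot + ((uintptr_t)1 << RADIX_COUNT_SHIFT);
    }

    static inline uintptr_t slot_dec(uintptr_t slot)
    {
            return slot - ((uintptr_t)1 << RADIX_COUNT_SHIFT); /* delete entry at 0 */
    }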
2886 int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) in r5c_cache_data() argument
2894 BUG_ON(!log); in r5c_cache_data()
2902 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, in r5c_cache_data()
2916 mutex_lock(&log->io_mutex); in r5c_cache_data()
2922 r5l_add_no_space_stripe(log, sh); in r5c_cache_data()
2923 else if (!r5l_has_free_space(log, reserve)) { in r5c_cache_data()
2924 if (sh->log_start == log->last_checkpoint) in r5c_cache_data()
2927 r5l_add_no_space_stripe(log, sh); in r5c_cache_data()
2929 ret = r5l_log_stripe(log, sh, pages, 0); in r5c_cache_data()
2931 spin_lock_irq(&log->io_list_lock); in r5c_cache_data()
2932 list_add_tail(&sh->log_list, &log->no_mem_stripes); in r5c_cache_data()
2933 spin_unlock_irq(&log->io_list_lock); in r5c_cache_data()
2937 mutex_unlock(&log->io_mutex); in r5c_cache_data()
2944 struct r5l_log *log = conf->log; in r5c_big_stripe_cached() local
2948 if (!log) in r5c_big_stripe_cached()
2953 slot = radix_tree_lookup(&log->big_stripe_tree, tree_index); in r5c_big_stripe_cached()
2957 static int r5l_load_log(struct r5l_log *log) in r5l_load_log() argument
2959 struct md_rdev *rdev = log->rdev; in r5l_load_log()
2962 sector_t cp = log->rdev->journal_tail; in r5l_load_log()
2987 expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); in r5l_load_log()
2998 log->last_cp_seq = get_random_u32(); in r5l_load_log()
3000 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq); in r5l_load_log()
3002 * Make sure the super points to the correct address. The log might have in r5l_load_log()
3003 * data very soon. If the super doesn't have the correct log tail address, in r5l_load_log()
3004 * recovery can't find the log in r5l_load_log()
3006 r5l_write_super(log, cp); in r5l_load_log()
3008 log->last_cp_seq = le64_to_cpu(mb->seq); in r5l_load_log()
3010 log->device_size = round_down(rdev->sectors, BLOCK_SECTORS); in r5l_load_log()
3011 log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT; in r5l_load_log()
3012 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) in r5l_load_log()
3013 log->max_free_space = RECLAIM_MAX_FREE_SPACE; in r5l_load_log()
3014 log->last_checkpoint = cp; in r5l_load_log()
3019 log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS); in r5l_load_log()
3020 log->seq = log->last_cp_seq + 1; in r5l_load_log()
3021 log->next_checkpoint = cp; in r5l_load_log()
3023 ret = r5l_recovery_log(log); in r5l_load_log()
3025 r5c_update_log_state(log); in r5l_load_log()
3032 int r5l_start(struct r5l_log *log) in r5l_start() argument
3036 if (!log) in r5l_start()
3039 ret = r5l_load_log(log); in r5l_start()
3041 struct mddev *mddev = log->rdev->mddev; in r5l_start()
3052 struct r5l_log *log = conf->log; in r5c_update_on_rdev_error() local
3054 if (!log) in r5c_update_on_rdev_error()
3059 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) in r5c_update_on_rdev_error()
3060 schedule_work(&log->disable_writeback_work); in r5c_update_on_rdev_error()
3065 struct r5l_log *log; in r5l_init_log() local
3090 log = kzalloc(sizeof(*log), GFP_KERNEL); in r5l_init_log()
3091 if (!log) in r5l_init_log()
3093 log->rdev = rdev; in r5l_init_log()
3094 log->need_cache_flush = bdev_write_cache(rdev->bdev); in r5l_init_log()
3095 log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid, in r5l_init_log()
3098 mutex_init(&log->io_mutex); in r5l_init_log()
3100 spin_lock_init(&log->io_list_lock); in r5l_init_log()
3101 INIT_LIST_HEAD(&log->running_ios); in r5l_init_log()
3102 INIT_LIST_HEAD(&log->io_end_ios); in r5l_init_log()
3103 INIT_LIST_HEAD(&log->flushing_ios); in r5l_init_log()
3104 INIT_LIST_HEAD(&log->finished_ios); in r5l_init_log()
3106 log->io_kc = KMEM_CACHE(r5l_io_unit, 0); in r5l_init_log()
3107 if (!log->io_kc) in r5l_init_log()
3110 ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc); in r5l_init_log()
3114 ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS); in r5l_init_log()
3118 ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0); in r5l_init_log()
3122 spin_lock_init(&log->tree_lock); in r5l_init_log()
3123 INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN); in r5l_init_log()
3125 thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev, in r5l_init_log()
3131 rcu_assign_pointer(log->reclaim_thread, thread); in r5l_init_log()
3133 init_waitqueue_head(&log->iounit_wait); in r5l_init_log()
3135 INIT_LIST_HEAD(&log->no_mem_stripes); in r5l_init_log()
3137 INIT_LIST_HEAD(&log->no_space_stripes); in r5l_init_log()
3138 spin_lock_init(&log->no_space_stripes_lock); in r5l_init_log()
3140 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); in r5l_init_log()
3141 INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async); in r5l_init_log()
3143 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; in r5l_init_log()
3144 INIT_LIST_HEAD(&log->stripe_in_journal_list); in r5l_init_log()
3145 spin_lock_init(&log->stripe_in_journal_lock); in r5l_init_log()
3146 atomic_set(&log->stripe_in_journal_count, 0); in r5l_init_log()
3148 conf->log = log; in r5l_init_log()
3154 mempool_exit(&log->meta_pool); in r5l_init_log()
3156 bioset_exit(&log->bs); in r5l_init_log()
3158 mempool_exit(&log->io_pool); in r5l_init_log()
3160 kmem_cache_destroy(log->io_kc); in r5l_init_log()
3162 kfree(log); in r5l_init_log()
3168 struct r5l_log *log = conf->log; in r5l_exit_log() local
3170 md_unregister_thread(conf->mddev, &log->reclaim_thread); in r5l_exit_log()
3173 * 'reconfig_mutex' is held by the caller, set 'conf->log' to NULL to in r5l_exit_log()
3176 conf->log = NULL; in r5l_exit_log()
3178 flush_work(&log->disable_writeback_work); in r5l_exit_log()
3180 mempool_exit(&log->meta_pool); in r5l_exit_log()
3181 bioset_exit(&log->bs); in r5l_exit_log()
3182 mempool_exit(&log->io_pool); in r5l_exit_log()
3183 kmem_cache_destroy(log->io_kc); in r5l_exit_log()
3184 kfree(log); in r5l_exit_log()