Lines matching refs: wc (identifier cross-reference over drivers/md/dm-writecache.c)
104 #define WC_MODE_PMEM(wc) ((wc)->pmem_mode) argument
105 #define WC_MODE_FUA(wc) ((wc)->writeback_fua) argument
107 #define WC_MODE_PMEM(wc) false argument
108 #define WC_MODE_FUA(wc) false argument
110 #define WC_MODE_SORT_FREELIST(wc) (!WC_MODE_PMEM(wc)) argument
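
Lines 104-110 show both build flavours of the mode macros: with persistent-memory support compiled in they read fields of struct dm_writecache, and without it they collapse to the constant false so the compiler can discard every pmem-only branch; sorting the freelist is then simply the inverse of pmem mode. A minimal user-space sketch of the pattern, with HAS_PMEM and the struct as illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct writecache_model {
	bool pmem_mode;
	bool writeback_fua;
};

#ifdef HAS_PMEM
#define MODEL_MODE_PMEM(wc)	((wc)->pmem_mode)
#define MODEL_MODE_FUA(wc)	((wc)->writeback_fua)
#else
/* pmem compiled out: constant false makes every pmem branch dead code */
#define MODEL_MODE_PMEM(wc)	false
#define MODEL_MODE_FUA(wc)	false
#endif

/* SSD mode keeps the freelist sorted; pmem mode has no need to */
#define MODEL_MODE_SORT_FREELIST(wc)	(!MODEL_MODE_PMEM(wc))

int main(void)
{
	struct writecache_model wc = { .pmem_mode = false, .writeback_fua = false };

	(void)wc;	/* unused when pmem is compiled out */
	printf("sort freelist: %d\n", (int)MODEL_MODE_SORT_FREELIST(&wc));
	return 0;
}
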
228 struct dm_writecache *wc; member
237 struct dm_writecache *wc; member
246 static void wc_lock(struct dm_writecache *wc) in wc_lock() argument
248 mutex_lock(&wc->lock); in wc_lock()
251 static void wc_unlock(struct dm_writecache *wc) in wc_unlock() argument
253 mutex_unlock(&wc->lock); in wc_unlock()
257 static int persistent_memory_claim(struct dm_writecache *wc) in persistent_memory_claim() argument
267 wc->memory_vmapped = false; in persistent_memory_claim()
269 s = wc->memory_map_size; in persistent_memory_claim()
280 offset = get_start_sect(wc->ssd_dev->bdev); in persistent_memory_claim()
289 da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn); in persistent_memory_claim()
291 wc->memory_map = NULL; in persistent_memory_claim()
296 wc->memory_map = NULL; in persistent_memory_claim()
302 wc->memory_map = NULL; in persistent_memory_claim()
311 daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i, in persistent_memory_claim()
328 wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL); in persistent_memory_claim()
329 if (!wc->memory_map) { in persistent_memory_claim()
334 wc->memory_vmapped = true; in persistent_memory_claim()
339 wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT; in persistent_memory_claim()
340 wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT; in persistent_memory_claim()
351 static int persistent_memory_claim(struct dm_writecache *wc) in persistent_memory_claim() argument
357 static void persistent_memory_release(struct dm_writecache *wc) in persistent_memory_release() argument
359 if (wc->memory_vmapped) in persistent_memory_release()
360 vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT)); in persistent_memory_release()
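
persistent_memory_claim() maps the DAX region (vmap()ing individual pages when the region is not virtually contiguous) and then advances wc->memory_map past the reserved start area, so persistent_memory_release() must subtract the same offset back before vunmap(). A user-space sketch of that bookkeeping, with malloc()/free() standing in for vmap()/vunmap() and SECTOR_SHIFT assumed to be 9:

#include <stdlib.h>

#define SECTOR_SHIFT 9

struct map_model {
	char *memory_map;
	size_t memory_map_size;
	unsigned long start_sector;
};

static int claim(struct map_model *m, size_t size, unsigned long start_sector)
{
	m->memory_map = malloc(size);	/* stands in for vmap() */
	if (!m->memory_map)
		return -1;
	m->start_sector = start_sector;
	/* skip the reserved start area, as the end of persistent_memory_claim() does */
	m->memory_map += (size_t)start_sector << SECTOR_SHIFT;
	m->memory_map_size = size - ((size_t)start_sector << SECTOR_SHIFT);
	return 0;
}

static void release(struct map_model *m)
{
	/* undo the claim-time offset before freeing, as persistent_memory_release() does */
	free(m->memory_map - ((size_t)m->start_sector << SECTOR_SHIFT));
}
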
388 static struct wc_memory_superblock *sb(struct dm_writecache *wc) in sb() argument
390 return wc->memory_map; in sb()
393 static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e) in memory_entry() argument
395 return &sb(wc)->entries[e->index]; in memory_entry()
398 static void *memory_data(struct dm_writecache *wc, struct wc_entry *e) in memory_data() argument
400 return (char *)wc->block_start + (e->index << wc->block_size_bits); in memory_data()
403 static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e) in cache_sector() argument
405 return wc->start_sector + wc->metadata_sectors + in cache_sector()
406 ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT)); in cache_sector()
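
memory_data() and cache_sector() are pure index arithmetic: the same entry index selects both the block's byte offset within the mapped metadata device and its sector on the cache device past the superblock and metadata area. A runnable sketch, assuming 4 KiB blocks (block_size_bits = 12) and an illustrative metadata_sectors value:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	unsigned block_size_bits = 12;		/* 4 KiB blocks */
	uint64_t start_sector = 0;
	uint64_t metadata_sectors = 264;	/* illustrative value */
	uint64_t index = 3;

	/* byte offset of the cached data, as in memory_data() */
	size_t data_offset = (size_t)index << block_size_bits;

	/* sector on the cache device, as in cache_sector() */
	uint64_t sector = start_sector + metadata_sectors +
			  (index << (block_size_bits - SECTOR_SHIFT));

	printf("entry %llu: data offset %zu, cache sector %llu\n",
	       (unsigned long long)index, data_offset,
	       (unsigned long long)sector);
	return 0;
}
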
409 static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e) in read_original_sector() argument
414 return le64_to_cpu(memory_entry(wc, e)->original_sector); in read_original_sector()
418 static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e) in read_seq_count() argument
423 return le64_to_cpu(memory_entry(wc, e)->seq_count); in read_seq_count()
427 static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e) in clear_seq_count() argument
432 pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1)); in clear_seq_count()
435 static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e, in write_original_sector_seq_count() argument
445 pmem_assign(*memory_entry(wc, e), me); in write_original_sector_seq_count()
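
The on-media wc_memory_entry stores both fields little-endian, and write_original_sector_seq_count() fills a complete local copy before assigning it through pmem_assign(), which also flushes the stores to persistence. A user-space sketch of the accessors using glibc's <endian.h>; a plain assignment here does not model the cacheline flush:

#include <endian.h>
#include <stdint.h>

struct wc_memory_entry_model {
	uint64_t original_sector;	/* little-endian on media */
	uint64_t seq_count;		/* little-endian on media */
};

static void write_entry(struct wc_memory_entry_model *me,
			uint64_t original_sector, uint64_t seq_count)
{
	/* build the whole entry locally, then assign it as one unit */
	struct wc_memory_entry_model tmp = {
		.original_sector = htole64(original_sector),
		.seq_count = htole64(seq_count),
	};
	*me = tmp;
}

static uint64_t read_seq(const struct wc_memory_entry_model *me)
{
	return le64toh(me->seq_count);		/* as read_seq_count() */
}

static void clear_seq(struct wc_memory_entry_model *me)
{
	me->seq_count = htole64((uint64_t)-1);	/* -1 marks the entry free */
}
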
448 #define writecache_error(wc, err, msg, arg...) \ argument
450 if (!cmpxchg(&(wc)->error, 0, err)) \
452 wake_up(&(wc)->freelist_wait); \
455 #define writecache_has_error(wc) (unlikely(READ_ONCE((wc)->error))) argument
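
writecache_error() latches only the first failure: cmpxchg() stores err only while wc->error is still 0, so later errors cannot overwrite the original cause, and writecache_has_error() is a single READ_ONCE(). A minimal sketch with C11 atomics standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int wc_error;

static void record_error(int err, const char *msg)
{
	int expected = 0;

	/* store only the first error, as cmpxchg(&wc->error, 0, err) does */
	if (atomic_compare_exchange_strong(&wc_error, &expected, err))
		fprintf(stderr, "writecache: %s\n", msg);
	/* the driver also wakes freelist waiters so they can bail out */
}

static int has_error(void)
{
	return atomic_load(&wc_error);	/* READ_ONCE() analogue */
}
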
457 static void writecache_flush_all_metadata(struct dm_writecache *wc) in writecache_flush_all_metadata() argument
459 if (!WC_MODE_PMEM(wc)) in writecache_flush_all_metadata()
460 memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size); in writecache_flush_all_metadata()
463 static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size) in writecache_flush_region() argument
465 if (!WC_MODE_PMEM(wc)) in writecache_flush_region()
466 __set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY, in writecache_flush_region()
467 wc->dirty_bitmap); in writecache_flush_region()
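
In SSD mode there is no CPU cache flush to do, so writecache_flush_region() just records which metadata granule was touched: the byte offset of the modified structure divided by BITMAP_GRANULARITY selects one bit in wc->dirty_bitmap (entries are small and aligned, so one bit per call suffices). A sketch assuming an illustrative 4096-byte granule:

#include <limits.h>
#include <stddef.h>

#define GRANULARITY	4096	/* illustrative; the driver uses BITMAP_GRANULARITY */
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

static void mark_dirty(unsigned long *dirty_bitmap,
		       const char *memory_map, const void *ptr)
{
	size_t bit = (size_t)((const char *)ptr - memory_map) / GRANULARITY;

	/* __set_bit() in the kernel; commit later writes out each dirty run */
	dirty_bitmap[bit / BITS_PER_WORD] |= 1UL << (bit % BITS_PER_WORD);
}
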
470 static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);
473 struct dm_writecache *wc; member
483 writecache_error(endio->wc, -EIO, "error writing metadata"); in writecache_notify_io()
489 static void writecache_wait_for_ios(struct dm_writecache *wc, int direction) in writecache_wait_for_ios() argument
491 wait_event(wc->bio_in_progress_wait[direction], in writecache_wait_for_ios()
492 !atomic_read(&wc->bio_in_progress[direction])); in writecache_wait_for_ios()
495 static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) in ssd_commit_flushed() argument
500 wc, in ssd_commit_flushed()
504 unsigned bitmap_bits = wc->dirty_bitmap_size * 8; in ssd_commit_flushed()
509 i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i); in ssd_commit_flushed()
512 j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i); in ssd_commit_flushed()
514 region.bdev = wc->ssd_dev->bdev; in ssd_commit_flushed()
518 if (unlikely(region.sector >= wc->metadata_sectors)) in ssd_commit_flushed()
520 if (unlikely(region.sector + region.count > wc->metadata_sectors)) in ssd_commit_flushed()
521 region.count = wc->metadata_sectors - region.sector; in ssd_commit_flushed()
523 region.sector += wc->start_sector; in ssd_commit_flushed()
528 req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY; in ssd_commit_flushed()
529 req.client = wc->dm_io; in ssd_commit_flushed()
542 writecache_wait_for_ios(wc, WRITE); in ssd_commit_flushed()
544 writecache_disk_flush(wc, wc->ssd_dev); in ssd_commit_flushed()
546 memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size); in ssd_commit_flushed()
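
ssd_commit_flushed() turns the dirty bitmap back into I/O: find_next_bit() locates the start of a dirty run, find_next_zero_bit() its end, and the whole run is issued as one dm-io write (clamped to the metadata area) before the bitmap is cleared. A runnable user-space sketch of the coalescing walk, with naive helpers standing in for the kernel bit functions:

#include <stdbool.h>
#include <stdio.h>

static bool test_bit(const unsigned char *bm, unsigned i)
{
	return bm[i / 8] >> (i % 8) & 1;
}

static unsigned find_next(const unsigned char *bm, unsigned nbits,
			  unsigned start, bool want)
{
	while (start < nbits && test_bit(bm, start) != want)
		start++;
	return start;
}

int main(void)
{
	unsigned char bitmap[] = { 0x0e, 0x30 };	/* bits 1-3 and 12-13 dirty */
	unsigned nbits = 16, i = 0;

	while ((i = find_next(bitmap, nbits, i, true)) < nbits) {
		unsigned j = find_next(bitmap, nbits, i, false);

		printf("write granules [%u, %u)\n", i, j);	/* one dm-io request */
		i = j;
	}
	return 0;
}
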
549 static void ssd_commit_superblock(struct dm_writecache *wc) in ssd_commit_superblock() argument
555 region.bdev = wc->ssd_dev->bdev; in ssd_commit_superblock()
557 region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT; in ssd_commit_superblock()
559 if (unlikely(region.sector + region.count > wc->metadata_sectors)) in ssd_commit_superblock()
560 region.count = wc->metadata_sectors - region.sector; in ssd_commit_superblock()
562 region.sector += wc->start_sector; in ssd_commit_superblock()
567 req.mem.ptr.vma = (char *)wc->memory_map; in ssd_commit_superblock()
568 req.client = wc->dm_io; in ssd_commit_superblock()
574 writecache_error(wc, r, "error writing superblock"); in ssd_commit_superblock()
577 static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) in writecache_commit_flushed() argument
579 if (WC_MODE_PMEM(wc)) in writecache_commit_flushed()
582 ssd_commit_flushed(wc, wait_for_ios); in writecache_commit_flushed()
585 static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev) in writecache_disk_flush() argument
598 req.client = wc->dm_io; in writecache_disk_flush()
603 writecache_error(wc, r, "error flushing metadata: %d", r); in writecache_disk_flush()
609 static struct wc_entry *writecache_find_entry(struct dm_writecache *wc, in writecache_find_entry() argument
613 struct rb_node *node = wc->tree.rb_node; in writecache_find_entry()
620 if (read_original_sector(wc, e) == block) in writecache_find_entry()
623 node = (read_original_sector(wc, e) >= block ? in writecache_find_entry()
628 if (read_original_sector(wc, e) >= block) { in writecache_find_entry()
649 if (read_original_sector(wc, e2) != block) in writecache_find_entry()
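
writecache_find_entry() is an rbtree lookup keyed by original (origin-device) sector: an exact match returns immediately, and with WFE_RETURN_FOLLOWING a miss returns the entry with the lowest original sector above the requested block; the tail of the function additionally walks neighbouring duplicates so WFE_LOWEST_SEQ can pick the oldest entry for the same block. A user-space sketch of the following-entry contract, with a sorted array standing in for the tree:

#include <stddef.h>
#include <stdio.h>

static long find_following(const unsigned long *sectors, size_t n,
			   unsigned long block)
{
	for (size_t i = 0; i < n; i++)	/* rbtree walk in the driver */
		if (sectors[i] >= block)
			return (long)i;
	return -1;			/* NULL in the driver */
}

int main(void)
{
	unsigned long sectors[] = { 8, 16, 40 };

	printf("%ld\n", find_following(sectors, 3, 16));	/* 1: exact match */
	printf("%ld\n", find_following(sectors, 3, 17));	/* 2: next above  */
	return 0;
}
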
655 static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins) in writecache_insert_entry() argument
658 struct rb_node **node = &wc->tree.rb_node, *parent = NULL; in writecache_insert_entry()
663 if (read_original_sector(wc, e) > read_original_sector(wc, ins)) in writecache_insert_entry()
669 rb_insert_color(&ins->rb_node, &wc->tree); in writecache_insert_entry()
670 list_add(&ins->lru, &wc->lru); in writecache_insert_entry()
674 static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e) in writecache_unlink() argument
677 rb_erase(&e->rb_node, &wc->tree); in writecache_unlink()
680 static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e) in writecache_add_to_freelist() argument
682 if (WC_MODE_SORT_FREELIST(wc)) { in writecache_add_to_freelist()
683 struct rb_node **node = &wc->freetree.rb_node, *parent = NULL; in writecache_add_to_freelist()
685 wc->current_free = e; in writecache_add_to_freelist()
694 rb_insert_color(&e->rb_node, &wc->freetree); in writecache_add_to_freelist()
696 list_add_tail(&e->lru, &wc->freelist); in writecache_add_to_freelist()
698 wc->freelist_size++; in writecache_add_to_freelist()
701 static inline void writecache_verify_watermark(struct dm_writecache *wc) in writecache_verify_watermark() argument
703 if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark)) in writecache_verify_watermark()
704 queue_work(wc->writeback_wq, &wc->writeback_work); in writecache_verify_watermark()
709 struct dm_writecache *wc = from_timer(wc, t, max_age_timer); in writecache_max_age_timer() local
711 if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) { in writecache_max_age_timer()
712 queue_work(wc->writeback_wq, &wc->writeback_work); in writecache_max_age_timer()
713 mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV); in writecache_max_age_timer()
717 static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector) in writecache_pop_from_freelist() argument
721 if (WC_MODE_SORT_FREELIST(wc)) { in writecache_pop_from_freelist()
723 if (unlikely(!wc->current_free)) in writecache_pop_from_freelist()
725 e = wc->current_free; in writecache_pop_from_freelist()
726 if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector)) in writecache_pop_from_freelist()
729 rb_erase(&e->rb_node, &wc->freetree); in writecache_pop_from_freelist()
731 next = rb_first(&wc->freetree); in writecache_pop_from_freelist()
732 wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL; in writecache_pop_from_freelist()
734 if (unlikely(list_empty(&wc->freelist))) in writecache_pop_from_freelist()
736 e = container_of(wc->freelist.next, struct wc_entry, lru); in writecache_pop_from_freelist()
737 if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector)) in writecache_pop_from_freelist()
741 wc->freelist_size--; in writecache_pop_from_freelist()
743 writecache_verify_watermark(wc); in writecache_pop_from_freelist()
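
writecache_pop_from_freelist() takes an expected_sector hint: when the SSD write path is extending a contiguous run it asks for the exact next cache sector, and the pop returns NULL rather than hand back a non-adjacent block (the sorted freetree keeps a current_free cursor for this; the plain list just offers its head). A sketch of that contract:

#include <stddef.h>

struct free_entry_model {
	unsigned long cache_sector;
	struct free_entry_model *next;
};

static struct free_entry_model *
pop_from_freelist(struct free_entry_model **head, unsigned long expected_sector)
{
	struct free_entry_model *e = *head;

	if (!e)
		return NULL;
	/* refuse a block that would break the contiguous run being built */
	if (expected_sector != (unsigned long)-1 &&
	    e->cache_sector != expected_sector)
		return NULL;
	*head = e->next;
	return e;
}
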
748 static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e) in writecache_free_entry() argument
750 writecache_unlink(wc, e); in writecache_free_entry()
751 writecache_add_to_freelist(wc, e); in writecache_free_entry()
752 clear_seq_count(wc, e); in writecache_free_entry()
753 writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry)); in writecache_free_entry()
754 if (unlikely(waitqueue_active(&wc->freelist_wait))) in writecache_free_entry()
755 wake_up(&wc->freelist_wait); in writecache_free_entry()
758 static void writecache_wait_on_freelist(struct dm_writecache *wc) in writecache_wait_on_freelist() argument
762 prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE); in writecache_wait_on_freelist()
763 wc_unlock(wc); in writecache_wait_on_freelist()
765 finish_wait(&wc->freelist_wait, &wait); in writecache_wait_on_freelist()
766 wc_lock(wc); in writecache_wait_on_freelist()
769 static void writecache_poison_lists(struct dm_writecache *wc) in writecache_poison_lists() argument
774 memset(&wc->tree, -1, sizeof wc->tree); in writecache_poison_lists()
775 wc->lru.next = LIST_POISON1; in writecache_poison_lists()
776 wc->lru.prev = LIST_POISON2; in writecache_poison_lists()
777 wc->freelist.next = LIST_POISON1; in writecache_poison_lists()
778 wc->freelist.prev = LIST_POISON2; in writecache_poison_lists()
781 static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e) in writecache_flush_entry() argument
783 writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry)); in writecache_flush_entry()
784 if (WC_MODE_PMEM(wc)) in writecache_flush_entry()
785 writecache_flush_region(wc, memory_data(wc, e), wc->block_size); in writecache_flush_entry()
788 static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e) in writecache_entry_is_committed() argument
790 return read_seq_count(wc, e) < wc->seq_count; in writecache_entry_is_committed()
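
writecache_entry_is_committed() is a single comparison: an entry is committed once its recorded seq_count is strictly below the superblock's current seq_count, because writecache_flush() increments the latter only after everything written under the old value is durable. A runnable illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sb_seq_count = 5;
	uint64_t entry_seq = 4;		/* written during an earlier flush */

	printf("committed: %d\n", entry_seq < sb_seq_count);	/* 1 */

	entry_seq = 5;			/* written after the last flush */
	printf("committed: %d\n", entry_seq < sb_seq_count);	/* 0 */
	return 0;
}
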
793 static void writecache_flush(struct dm_writecache *wc) in writecache_flush() argument
798 wc->uncommitted_blocks = 0; in writecache_flush()
799 del_timer(&wc->autocommit_timer); in writecache_flush()
801 if (list_empty(&wc->lru)) in writecache_flush()
804 e = container_of(wc->lru.next, struct wc_entry, lru); in writecache_flush()
805 if (writecache_entry_is_committed(wc, e)) { in writecache_flush()
806 if (wc->overwrote_committed) { in writecache_flush()
807 writecache_wait_for_ios(wc, WRITE); in writecache_flush()
808 writecache_disk_flush(wc, wc->ssd_dev); in writecache_flush()
809 wc->overwrote_committed = false; in writecache_flush()
814 writecache_flush_entry(wc, e); in writecache_flush()
815 if (unlikely(e->lru.next == &wc->lru)) in writecache_flush()
818 if (writecache_entry_is_committed(wc, e2)) in writecache_flush()
823 writecache_commit_flushed(wc, true); in writecache_flush()
825 wc->seq_count++; in writecache_flush()
826 pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count)); in writecache_flush()
827 if (WC_MODE_PMEM(wc)) in writecache_flush()
828 writecache_commit_flushed(wc, false); in writecache_flush()
830 ssd_commit_superblock(wc); in writecache_flush()
832 wc->overwrote_committed = false; in writecache_flush()
841 if (read_original_sector(wc, e2) == read_original_sector(wc, e) && in writecache_flush()
843 writecache_free_entry(wc, e2); in writecache_flush()
847 if (unlikely(e->lru.prev == &wc->lru)) in writecache_flush()
854 writecache_commit_flushed(wc, false); in writecache_flush()
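
writecache_flush() is where entries become committed, and the ordering is the point: entry metadata (and, in pmem mode, data) is flushed first, writecache_commit_flushed(wc, true) waits for in-flight bios as the durability barrier, and only then is seq_count incremented and persisted, via a pmem flush or ssd_commit_superblock(). A compilable ordering sketch with empty stand-in helpers:

#include <stdint.h>

static uint64_t seq_count_model;

static void flush_uncommitted_entries(void) { /* entry metadata + pmem data */ }
static void commit_flushed_barrier(void)    { /* wait for in-flight writes */ }
static void persist_superblock(void)        { /* pmem flush or superblock write */ }

static void commit_protocol_model(void)
{
	flush_uncommitted_entries();
	commit_flushed_barrier();
	/* everything written under the old value is now < seq_count_model,
	 * i.e. committed per writecache_entry_is_committed() */
	seq_count_model++;
	persist_superblock();
}
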
859 struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work); in writecache_flush_work() local
861 wc_lock(wc); in writecache_flush_work()
862 writecache_flush(wc); in writecache_flush_work()
863 wc_unlock(wc); in writecache_flush_work()
868 struct dm_writecache *wc = from_timer(wc, t, autocommit_timer); in writecache_autocommit_timer() local
869 if (!writecache_has_error(wc)) in writecache_autocommit_timer()
870 queue_work(wc->writeback_wq, &wc->flush_work); in writecache_autocommit_timer()
873 static void writecache_schedule_autocommit(struct dm_writecache *wc) in writecache_schedule_autocommit() argument
875 if (!timer_pending(&wc->autocommit_timer)) in writecache_schedule_autocommit()
876 mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies); in writecache_schedule_autocommit()
879 static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end) in writecache_discard() argument
884 e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ); in writecache_discard()
888 while (read_original_sector(wc, e) < end) { in writecache_discard()
893 if (!WC_MODE_PMEM(wc)) { in writecache_discard()
894 writecache_wait_for_ios(wc, READ); in writecache_discard()
895 writecache_wait_for_ios(wc, WRITE); in writecache_discard()
899 if (!writecache_entry_is_committed(wc, e)) in writecache_discard()
900 wc->uncommitted_blocks--; in writecache_discard()
901 writecache_free_entry(wc, e); in writecache_discard()
911 writecache_commit_flushed(wc, false); in writecache_discard()
914 static bool writecache_wait_for_writeback(struct dm_writecache *wc) in writecache_wait_for_writeback() argument
916 if (wc->writeback_size) { in writecache_wait_for_writeback()
917 writecache_wait_on_freelist(wc); in writecache_wait_for_writeback()
925 struct dm_writecache *wc = ti->private; in writecache_suspend() local
928 del_timer_sync(&wc->autocommit_timer); in writecache_suspend()
929 del_timer_sync(&wc->max_age_timer); in writecache_suspend()
931 wc_lock(wc); in writecache_suspend()
932 writecache_flush(wc); in writecache_suspend()
933 flush_on_suspend = wc->flush_on_suspend; in writecache_suspend()
935 wc->flush_on_suspend = false; in writecache_suspend()
936 wc->writeback_all++; in writecache_suspend()
937 queue_work(wc->writeback_wq, &wc->writeback_work); in writecache_suspend()
939 wc_unlock(wc); in writecache_suspend()
941 drain_workqueue(wc->writeback_wq); in writecache_suspend()
943 wc_lock(wc); in writecache_suspend()
945 wc->writeback_all--; in writecache_suspend()
946 while (writecache_wait_for_writeback(wc)); in writecache_suspend()
948 if (WC_MODE_PMEM(wc)) in writecache_suspend()
949 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size); in writecache_suspend()
951 writecache_poison_lists(wc); in writecache_suspend()
953 wc_unlock(wc); in writecache_suspend()
956 static int writecache_alloc_entries(struct dm_writecache *wc) in writecache_alloc_entries() argument
960 if (wc->entries) in writecache_alloc_entries()
962 wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks)); in writecache_alloc_entries()
963 if (!wc->entries) in writecache_alloc_entries()
965 for (b = 0; b < wc->n_blocks; b++) { in writecache_alloc_entries()
966 struct wc_entry *e = &wc->entries[b]; in writecache_alloc_entries()
975 static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors) in writecache_read_metadata() argument
980 region.bdev = wc->ssd_dev->bdev; in writecache_read_metadata()
981 region.sector = wc->start_sector; in writecache_read_metadata()
986 req.mem.ptr.vma = (char *)wc->memory_map; in writecache_read_metadata()
987 req.client = wc->dm_io; in writecache_read_metadata()
995 struct dm_writecache *wc = ti->private; in writecache_resume() local
1001 wc_lock(wc); in writecache_resume()
1003 wc->data_device_sectors = bdev_nr_sectors(wc->dev->bdev); in writecache_resume()
1005 if (WC_MODE_PMEM(wc)) { in writecache_resume()
1006 persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size); in writecache_resume()
1008 r = writecache_read_metadata(wc, wc->metadata_sectors); in writecache_resume()
1011 writecache_error(wc, r, "unable to read metadata: %d", r); in writecache_resume()
1013 memset((char *)wc->memory_map + sb_entries_offset, -1, in writecache_resume()
1014 (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset); in writecache_resume()
1018 wc->tree = RB_ROOT; in writecache_resume()
1019 INIT_LIST_HEAD(&wc->lru); in writecache_resume()
1020 if (WC_MODE_SORT_FREELIST(wc)) { in writecache_resume()
1021 wc->freetree = RB_ROOT; in writecache_resume()
1022 wc->current_free = NULL; in writecache_resume()
1024 INIT_LIST_HEAD(&wc->freelist); in writecache_resume()
1026 wc->freelist_size = 0; in writecache_resume()
1028 r = copy_mc_to_kernel(&sb_seq_count, &sb(wc)->seq_count, in writecache_resume()
1031 writecache_error(wc, r, "hardware memory error when reading superblock: %d", r); in writecache_resume()
1034 wc->seq_count = le64_to_cpu(sb_seq_count); in writecache_resume()
1037 for (b = 0; b < wc->n_blocks; b++) { in writecache_resume()
1038 struct wc_entry *e = &wc->entries[b]; in writecache_resume()
1040 if (writecache_has_error(wc)) { in writecache_resume()
1045 r = copy_mc_to_kernel(&wme, memory_entry(wc, e), in writecache_resume()
1048 writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d", in writecache_resume()
1059 for (b = 0; b < wc->n_blocks; b++) { in writecache_resume()
1060 struct wc_entry *e = &wc->entries[b]; in writecache_resume()
1061 if (!writecache_entry_is_committed(wc, e)) { in writecache_resume()
1062 if (read_seq_count(wc, e) != -1) { in writecache_resume()
1064 clear_seq_count(wc, e); in writecache_resume()
1067 writecache_add_to_freelist(wc, e); in writecache_resume()
1071 old = writecache_find_entry(wc, read_original_sector(wc, e), 0); in writecache_resume()
1073 writecache_insert_entry(wc, e); in writecache_resume()
1075 if (read_seq_count(wc, old) == read_seq_count(wc, e)) { in writecache_resume()
1076 writecache_error(wc, -EINVAL, in writecache_resume()
1078 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e), in writecache_resume()
1079 (unsigned long long)read_seq_count(wc, e)); in writecache_resume()
1081 if (read_seq_count(wc, old) > read_seq_count(wc, e)) { in writecache_resume()
1084 writecache_free_entry(wc, old); in writecache_resume()
1085 writecache_insert_entry(wc, e); in writecache_resume()
1094 writecache_flush_all_metadata(wc); in writecache_resume()
1095 writecache_commit_flushed(wc, false); in writecache_resume()
1098 writecache_verify_watermark(wc); in writecache_resume()
1100 if (wc->max_age != MAX_AGE_UNSPECIFIED) in writecache_resume()
1101 mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV); in writecache_resume()
1103 wc_unlock(wc); in writecache_resume()
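
Recovery in writecache_resume() reads every on-media entry back, rebuilds the rbtree, and resolves collisions: when two entries map the same original sector, the one with the higher seq_count was written by a later flush generation and wins, while equal seq_counts cannot occur in a consistent image and raise -EINVAL. A sketch of that rule:

#include <stdbool.h>
#include <stdint.h>

/* Returns true if 'e' should replace 'old' in the tree; *corrupt is set
 * when the image is inconsistent (the driver calls writecache_error()). */
static bool newer_entry_wins(uint64_t old_seq, uint64_t e_seq, bool *corrupt)
{
	*corrupt = (old_seq == e_seq);
	return e_seq > old_seq;
}
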
1106 static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc) in process_flush_mesg() argument
1111 wc_lock(wc); in process_flush_mesg()
1112 if (dm_suspended(wc->ti)) { in process_flush_mesg()
1113 wc_unlock(wc); in process_flush_mesg()
1116 if (writecache_has_error(wc)) { in process_flush_mesg()
1117 wc_unlock(wc); in process_flush_mesg()
1121 writecache_flush(wc); in process_flush_mesg()
1122 wc->writeback_all++; in process_flush_mesg()
1123 queue_work(wc->writeback_wq, &wc->writeback_work); in process_flush_mesg()
1124 wc_unlock(wc); in process_flush_mesg()
1126 flush_workqueue(wc->writeback_wq); in process_flush_mesg()
1128 wc_lock(wc); in process_flush_mesg()
1129 wc->writeback_all--; in process_flush_mesg()
1130 if (writecache_has_error(wc)) { in process_flush_mesg()
1131 wc_unlock(wc); in process_flush_mesg()
1134 wc_unlock(wc); in process_flush_mesg()
1139 static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc) in process_flush_on_suspend_mesg() argument
1144 wc_lock(wc); in process_flush_on_suspend_mesg()
1145 wc->flush_on_suspend = true; in process_flush_on_suspend_mesg()
1146 wc_unlock(wc); in process_flush_on_suspend_mesg()
1151 static void activate_cleaner(struct dm_writecache *wc) in activate_cleaner() argument
1153 wc->flush_on_suspend = true; in activate_cleaner()
1154 wc->cleaner = true; in activate_cleaner()
1155 wc->freelist_high_watermark = wc->n_blocks; in activate_cleaner()
1156 wc->freelist_low_watermark = wc->n_blocks; in activate_cleaner()
1159 static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc) in process_cleaner_mesg() argument
1164 wc_lock(wc); in process_cleaner_mesg()
1165 activate_cleaner(wc); in process_cleaner_mesg()
1166 if (!dm_suspended(wc->ti)) in process_cleaner_mesg()
1167 writecache_verify_watermark(wc); in process_cleaner_mesg()
1168 wc_unlock(wc); in process_cleaner_mesg()
1173 static int process_clear_stats_mesg(unsigned argc, char **argv, struct dm_writecache *wc) in process_clear_stats_mesg() argument
1178 wc_lock(wc); in process_clear_stats_mesg()
1179 memset(&wc->stats, 0, sizeof wc->stats); in process_clear_stats_mesg()
1180 wc_unlock(wc); in process_clear_stats_mesg()
1189 struct dm_writecache *wc = ti->private; in writecache_message() local
1192 r = process_flush_mesg(argc, argv, wc); in writecache_message()
1194 r = process_flush_on_suspend_mesg(argc, argv, wc); in writecache_message()
1196 r = process_cleaner_mesg(argc, argv, wc); in writecache_message()
1198 r = process_clear_stats_mesg(argc, argv, wc); in writecache_message()
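
writecache_message() dispatches the four userspace messages, which arrive via dmsetup message <device> 0 <msg>: flush commits and writes back everything, flush_on_suspend arms a full flush for the next suspend, cleaner puts the target into drain mode, and clear_stats zeroes the counters reported by status. A sketch of the dispatch; the driver likewise matches names with strcasecmp():

#include <errno.h>
#include <strings.h>

enum wc_msg { MSG_FLUSH, MSG_FLUSH_ON_SUSPEND, MSG_CLEANER, MSG_CLEAR_STATS };

static int parse_message(const char *arg)
{
	if (!strcasecmp(arg, "flush"))
		return MSG_FLUSH;
	if (!strcasecmp(arg, "flush_on_suspend"))
		return MSG_FLUSH_ON_SUSPEND;
	if (!strcasecmp(arg, "cleaner"))
		return MSG_CLEANER;
	if (!strcasecmp(arg, "clear_stats"))
		return MSG_CLEAR_STATS;
	return -EINVAL;	/* unknown message */
}
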
1241 static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data) in bio_copy_block() argument
1246 unsigned remaining_size = wc->block_size; in bio_copy_block()
1260 writecache_error(wc, r, "hardware memory error when reading data: %d", r); in bio_copy_block()
1278 struct dm_writecache *wc = data; in writecache_flush_thread() local
1283 wc_lock(wc); in writecache_flush_thread()
1284 bio = bio_list_pop(&wc->flush_list); in writecache_flush_thread()
1287 wc_unlock(wc); in writecache_flush_thread()
1299 writecache_discard(wc, bio->bi_iter.bi_sector, in writecache_flush_thread()
1301 wc_unlock(wc); in writecache_flush_thread()
1302 bio_set_dev(bio, wc->dev->bdev); in writecache_flush_thread()
1305 writecache_flush(wc); in writecache_flush_thread()
1306 wc_unlock(wc); in writecache_flush_thread()
1307 if (writecache_has_error(wc)) in writecache_flush_thread()
1316 static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio) in writecache_offload_bio() argument
1318 if (bio_list_empty(&wc->flush_list)) in writecache_offload_bio()
1319 wake_up_process(wc->flush_thread); in writecache_offload_bio()
1320 bio_list_add(&wc->flush_list, bio); in writecache_offload_bio()
1331 static enum wc_map_op writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio, in writecache_map_remap_origin() argument
1336 read_original_sector(wc, e) - bio->bi_iter.bi_sector; in writecache_map_remap_origin()
1344 static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio) in writecache_map_read() argument
1350 wc->stats.reads++; in writecache_map_read()
1351 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING); in writecache_map_read()
1352 if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) { in writecache_map_read()
1353 wc->stats.read_hits++; in writecache_map_read()
1354 if (WC_MODE_PMEM(wc)) { in writecache_map_read()
1355 bio_copy_block(wc, bio, memory_data(wc, e)); in writecache_map_read()
1360 dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT); in writecache_map_read()
1361 bio_set_dev(bio, wc->ssd_dev->bdev); in writecache_map_read()
1362 bio->bi_iter.bi_sector = cache_sector(wc, e); in writecache_map_read()
1363 if (!writecache_entry_is_committed(wc, e)) in writecache_map_read()
1364 writecache_wait_for_ios(wc, WRITE); in writecache_map_read()
1368 map_op = writecache_map_remap_origin(wc, bio, e); in writecache_map_read()
1374 static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio, in writecache_bio_copy_ssd() argument
1377 unsigned bio_size = wc->block_size; in writecache_bio_copy_ssd()
1378 sector_t start_cache_sec = cache_sector(wc, e); in writecache_bio_copy_ssd()
1383 struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec); in writecache_bio_copy_ssd()
1386 write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector + in writecache_bio_copy_ssd()
1387 (bio_size >> SECTOR_SHIFT), wc->seq_count); in writecache_bio_copy_ssd()
1388 writecache_insert_entry(wc, f); in writecache_bio_copy_ssd()
1389 wc->uncommitted_blocks++; in writecache_bio_copy_ssd()
1398 if (read_original_sector(wc, f) != in writecache_bio_copy_ssd()
1399 read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT)) in writecache_bio_copy_ssd()
1403 if (writecache_entry_is_committed(wc, f)) in writecache_bio_copy_ssd()
1404 wc->overwrote_committed = true; in writecache_bio_copy_ssd()
1407 bio_size += wc->block_size; in writecache_bio_copy_ssd()
1408 current_cache_sec += wc->block_size >> SECTOR_SHIFT; in writecache_bio_copy_ssd()
1411 bio_set_dev(bio, wc->ssd_dev->bdev); in writecache_bio_copy_ssd()
1415 if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) { in writecache_bio_copy_ssd()
1416 wc->uncommitted_blocks = 0; in writecache_bio_copy_ssd()
1417 queue_work(wc->writeback_wq, &wc->flush_work); in writecache_bio_copy_ssd()
1419 writecache_schedule_autocommit(wc); in writecache_bio_copy_ssd()
1425 static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio) in writecache_map_write() argument
1432 wc->stats.writes++; in writecache_map_write()
1433 if (writecache_has_error(wc)) in writecache_map_write()
1435 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0); in writecache_map_write()
1437 if (!writecache_entry_is_committed(wc, e)) { in writecache_map_write()
1438 wc->stats.write_hits_uncommitted++; in writecache_map_write()
1442 wc->stats.write_hits_committed++; in writecache_map_write()
1443 if (!WC_MODE_PMEM(wc) && !e->write_in_progress) { in writecache_map_write()
1444 wc->overwrote_committed = true; in writecache_map_write()
1450 if (unlikely(wc->cleaner) || in writecache_map_write()
1451 (wc->metadata_only && !(bio->bi_opf & REQ_META))) in writecache_map_write()
1454 e = writecache_pop_from_freelist(wc, (sector_t)-1); in writecache_map_write()
1456 if (!WC_MODE_PMEM(wc) && !found_entry) { in writecache_map_write()
1458 wc->stats.writes_around++; in writecache_map_write()
1459 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING); in writecache_map_write()
1460 return writecache_map_remap_origin(wc, bio, e); in writecache_map_write()
1462 wc->stats.writes_blocked_on_freelist++; in writecache_map_write()
1463 writecache_wait_on_freelist(wc); in writecache_map_write()
1466 write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count); in writecache_map_write()
1467 writecache_insert_entry(wc, e); in writecache_map_write()
1468 wc->uncommitted_blocks++; in writecache_map_write()
1469 wc->stats.writes_allocate++; in writecache_map_write()
1471 if (WC_MODE_PMEM(wc)) in writecache_map_write()
1472 bio_copy_block(wc, bio, memory_data(wc, e)); in writecache_map_write()
1474 return writecache_bio_copy_ssd(wc, bio, e, search_used); in writecache_map_write()
1477 if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks)) in writecache_map_write()
1478 writecache_flush(wc); in writecache_map_write()
1480 writecache_schedule_autocommit(wc); in writecache_map_write()
1485 static enum wc_map_op writecache_map_flush(struct dm_writecache *wc, struct bio *bio) in writecache_map_flush() argument
1487 if (writecache_has_error(wc)) in writecache_map_flush()
1490 if (WC_MODE_PMEM(wc)) { in writecache_map_flush()
1491 wc->stats.flushes++; in writecache_map_flush()
1492 writecache_flush(wc); in writecache_map_flush()
1493 if (writecache_has_error(wc)) in writecache_map_flush()
1495 else if (unlikely(wc->cleaner) || unlikely(wc->metadata_only)) in writecache_map_flush()
1502 wc->stats.flushes++; in writecache_map_flush()
1503 writecache_offload_bio(wc, bio); in writecache_map_flush()
1507 static enum wc_map_op writecache_map_discard(struct dm_writecache *wc, struct bio *bio) in writecache_map_discard() argument
1509 wc->stats.discards++; in writecache_map_discard()
1511 if (writecache_has_error(wc)) in writecache_map_discard()
1514 if (WC_MODE_PMEM(wc)) { in writecache_map_discard()
1515 writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio)); in writecache_map_discard()
1519 writecache_offload_bio(wc, bio); in writecache_map_discard()
1525 struct dm_writecache *wc = ti->private; in writecache_map() local
1530 wc_lock(wc); in writecache_map()
1533 map_op = writecache_map_flush(wc, bio); in writecache_map()
1540 (wc->block_size / 512 - 1)) != 0)) { in writecache_map()
1543 bio->bi_iter.bi_size, wc->block_size); in writecache_map()
1549 map_op = writecache_map_discard(wc, bio); in writecache_map()
1554 map_op = writecache_map_read(wc, bio); in writecache_map()
1556 map_op = writecache_map_write(wc, bio); in writecache_map()
1560 if (likely(wc->pause != 0)) { in writecache_map()
1562 dm_iot_io_begin(&wc->iot, 1); in writecache_map()
1566 bio_set_dev(bio, wc->dev->bdev); in writecache_map()
1567 wc_unlock(wc); in writecache_map()
1573 atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]); in writecache_map()
1574 wc_unlock(wc); in writecache_map()
1578 wc_unlock(wc); in writecache_map()
1583 wc_unlock(wc); in writecache_map()
1587 wc_unlock(wc); in writecache_map()
1599 struct dm_writecache *wc = ti->private; in writecache_end_io() local
1603 if (atomic_dec_and_test(&wc->bio_in_progress[dir])) in writecache_end_io()
1604 if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir]))) in writecache_end_io()
1605 wake_up(&wc->bio_in_progress_wait[dir]); in writecache_end_io()
1607 dm_iot_io_end(&wc->iot, 1); in writecache_end_io()
1615 struct dm_writecache *wc = ti->private; in writecache_iterate_devices() local
1617 return fn(ti, wc->dev, 0, ti->len, data); in writecache_iterate_devices()
1622 struct dm_writecache *wc = ti->private; in writecache_io_hints() local
1624 if (limits->logical_block_size < wc->block_size) in writecache_io_hints()
1625 limits->logical_block_size = wc->block_size; in writecache_io_hints()
1627 if (limits->physical_block_size < wc->block_size) in writecache_io_hints()
1628 limits->physical_block_size = wc->block_size; in writecache_io_hints()
1630 if (limits->io_min < wc->block_size) in writecache_io_hints()
1631 limits->io_min = wc->block_size; in writecache_io_hints()
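
writecache_io_hints() raises every exposed queue limit to at least the cache block size, so the stack never sends this target an I/O smaller or less aligned than one cache block. A sketch of the clamping:

#include <stdint.h>

struct limits_model {
	uint32_t logical_block_size;
	uint32_t physical_block_size;
	uint32_t io_min;
};

static void clamp_limits(struct limits_model *l, uint32_t block_size)
{
	if (l->logical_block_size < block_size)
		l->logical_block_size = block_size;
	if (l->physical_block_size < block_size)
		l->physical_block_size = block_size;
	if (l->io_min < block_size)
		l->io_min = block_size;
}
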
1638 struct dm_writecache *wc = wb->wc; in writecache_writeback_endio() local
1641 raw_spin_lock_irqsave(&wc->endio_list_lock, flags); in writecache_writeback_endio()
1642 if (unlikely(list_empty(&wc->endio_list))) in writecache_writeback_endio()
1643 wake_up_process(wc->endio_thread); in writecache_writeback_endio()
1644 list_add_tail(&wb->endio_entry, &wc->endio_list); in writecache_writeback_endio()
1645 raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags); in writecache_writeback_endio()
1651 struct dm_writecache *wc = c->wc; in writecache_copy_endio() local
1655 raw_spin_lock_irq(&wc->endio_list_lock); in writecache_copy_endio()
1656 if (unlikely(list_empty(&wc->endio_list))) in writecache_copy_endio()
1657 wake_up_process(wc->endio_thread); in writecache_copy_endio()
1658 list_add_tail(&c->endio_entry, &wc->endio_list); in writecache_copy_endio()
1659 raw_spin_unlock_irq(&wc->endio_list_lock); in writecache_copy_endio()
1662 static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list) in __writecache_endio_pmem() argument
1674 writecache_error(wc, blk_status_to_errno(wb->bio.bi_status), in __writecache_endio_pmem()
1682 if (!writecache_has_error(wc)) in __writecache_endio_pmem()
1683 writecache_free_entry(wc, e); in __writecache_endio_pmem()
1684 BUG_ON(!wc->writeback_size); in __writecache_endio_pmem()
1685 wc->writeback_size--; in __writecache_endio_pmem()
1688 writecache_commit_flushed(wc, false); in __writecache_endio_pmem()
1689 wc_unlock(wc); in __writecache_endio_pmem()
1690 wc_lock(wc); in __writecache_endio_pmem()
1701 static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list) in __writecache_endio_ssd() argument
1711 writecache_error(wc, c->error, "copy error"); in __writecache_endio_ssd()
1718 if (!writecache_has_error(wc)) in __writecache_endio_ssd()
1719 writecache_free_entry(wc, e); in __writecache_endio_ssd()
1721 BUG_ON(!wc->writeback_size); in __writecache_endio_ssd()
1722 wc->writeback_size--; in __writecache_endio_ssd()
1725 mempool_free(c, &wc->copy_pool); in __writecache_endio_ssd()
1731 struct dm_writecache *wc = data; in writecache_endio_thread() local
1736 raw_spin_lock_irq(&wc->endio_list_lock); in writecache_endio_thread()
1737 if (!list_empty(&wc->endio_list)) in writecache_endio_thread()
1740 raw_spin_unlock_irq(&wc->endio_list_lock); in writecache_endio_thread()
1752 list = wc->endio_list; in writecache_endio_thread()
1754 INIT_LIST_HEAD(&wc->endio_list); in writecache_endio_thread()
1755 raw_spin_unlock_irq(&wc->endio_list_lock); in writecache_endio_thread()
1757 if (!WC_MODE_FUA(wc)) in writecache_endio_thread()
1758 writecache_disk_flush(wc, wc->dev); in writecache_endio_thread()
1760 wc_lock(wc); in writecache_endio_thread()
1762 if (WC_MODE_PMEM(wc)) { in writecache_endio_thread()
1763 __writecache_endio_pmem(wc, &list); in writecache_endio_thread()
1765 __writecache_endio_ssd(wc, &list); in writecache_endio_thread()
1766 writecache_wait_for_ios(wc, READ); in writecache_endio_thread()
1769 writecache_commit_flushed(wc, false); in writecache_endio_thread()
1771 wc_unlock(wc); in writecache_endio_thread()
1779 struct dm_writecache *wc = wb->wc; in wc_add_block() local
1780 unsigned block_size = wc->block_size; in wc_add_block()
1781 void *address = memory_data(wc, e); in wc_add_block()
1785 if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors)) in wc_add_block()
1797 static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl) in __writeback_throttle() argument
1799 if (unlikely(wc->max_writeback_jobs)) { in __writeback_throttle()
1800 if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) { in __writeback_throttle()
1801 wc_lock(wc); in __writeback_throttle()
1802 while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs) in __writeback_throttle()
1803 writecache_wait_on_freelist(wc); in __writeback_throttle()
1804 wc_unlock(wc); in __writeback_throttle()
1810 static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl) in __writecache_writeback_pmem() argument
1824 bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set); in __writecache_writeback_pmem()
1826 wb->wc = wc; in __writecache_writeback_pmem()
1828 bio_set_dev(bio, wc->dev->bdev); in __writecache_writeback_pmem()
1829 bio->bi_iter.bi_sector = read_original_sector(wc, e); in __writecache_writeback_pmem()
1845 if (read_original_sector(wc, f) != in __writecache_writeback_pmem()
1846 read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT)) in __writecache_writeback_pmem()
1855 bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA); in __writecache_writeback_pmem()
1856 if (writecache_has_error(wc)) { in __writecache_writeback_pmem()
1866 __writeback_throttle(wc, wbl); in __writecache_writeback_pmem()
1870 static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl) in __writecache_writeback_ssd() argument
1883 n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT); in __writecache_writeback_ssd()
1885 from.bdev = wc->ssd_dev->bdev; in __writecache_writeback_ssd()
1886 from.sector = cache_sector(wc, e); in __writecache_writeback_ssd()
1888 to.bdev = wc->dev->bdev; in __writecache_writeback_ssd()
1889 to.sector = read_original_sector(wc, e); in __writecache_writeback_ssd()
1892 c = mempool_alloc(&wc->copy_pool, GFP_NOIO); in __writecache_writeback_ssd()
1893 c->wc = wc; in __writecache_writeback_ssd()
1897 while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) { in __writecache_writeback_ssd()
1905 if (unlikely(to.sector + to.count > wc->data_device_sectors)) { in __writecache_writeback_ssd()
1906 if (to.sector >= wc->data_device_sectors) { in __writecache_writeback_ssd()
1910 from.count = to.count = wc->data_device_sectors - to.sector; in __writecache_writeback_ssd()
1913 dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c); in __writecache_writeback_ssd()
1915 __writeback_throttle(wc, wbl); in __writecache_writeback_ssd()
1921 struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work); in writecache_writeback() local
1929 if (!WC_MODE_PMEM(wc)) { in writecache_writeback()
1931 dm_kcopyd_client_flush(wc->dm_kcopyd); in writecache_writeback()
1934 if (likely(wc->pause != 0)) { in writecache_writeback()
1937 if (unlikely(wc->cleaner) || unlikely(wc->writeback_all) || in writecache_writeback()
1938 unlikely(dm_suspended(wc->ti))) in writecache_writeback()
1940 idle = dm_iot_idle_time(&wc->iot); in writecache_writeback()
1941 if (idle >= wc->pause) in writecache_writeback()
1943 idle = wc->pause - idle; in writecache_writeback()
1950 wc_lock(wc); in writecache_writeback()
1952 if (writecache_has_error(wc)) { in writecache_writeback()
1953 wc_unlock(wc); in writecache_writeback()
1957 if (unlikely(wc->writeback_all)) { in writecache_writeback()
1958 if (writecache_wait_for_writeback(wc)) in writecache_writeback()
1962 if (wc->overwrote_committed) { in writecache_writeback()
1963 writecache_wait_for_ios(wc, WRITE); in writecache_writeback()
1970 while (!list_empty(&wc->lru) && in writecache_writeback()
1971 (wc->writeback_all || in writecache_writeback()
1972 wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark || in writecache_writeback()
1973 (jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >= in writecache_writeback()
1974 wc->max_age - wc->max_age / MAX_AGE_DIV))) { in writecache_writeback()
1978 likely(!wc->writeback_all)) { in writecache_writeback()
1979 if (likely(!dm_suspended(wc->ti))) in writecache_writeback()
1980 queue_work(wc->writeback_wq, &wc->writeback_work); in writecache_writeback()
1984 if (unlikely(wc->writeback_all)) { in writecache_writeback()
1986 writecache_flush(wc); in writecache_writeback()
1987 e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node); in writecache_writeback()
1991 e = container_of(wc->lru.prev, struct wc_entry, lru); in writecache_writeback()
1993 if (unlikely(!writecache_entry_is_committed(wc, e))) { in writecache_writeback()
1994 writecache_flush(wc); in writecache_writeback()
1999 if (unlikely(read_original_sector(wc, f) == in writecache_writeback()
2000 read_original_sector(wc, e))) { in writecache_writeback()
2007 wc->writeback_size++; in writecache_writeback()
2020 if (unlikely(read_original_sector(wc, g) == in writecache_writeback()
2021 read_original_sector(wc, f))) { in writecache_writeback()
2025 if (read_original_sector(wc, g) != in writecache_writeback()
2026 read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT)) in writecache_writeback()
2030 if (unlikely(!writecache_entry_is_committed(wc, g))) in writecache_writeback()
2033 if (!WC_MODE_PMEM(wc)) { in writecache_writeback()
2042 wc->writeback_size++; in writecache_writeback()
2050 if (unlikely(wc->writeback_all)) { in writecache_writeback()
2062 list_splice_tail(&skipped, &wc->lru); in writecache_writeback()
2068 writecache_wait_for_writeback(wc); in writecache_writeback()
2071 wc_unlock(wc); in writecache_writeback()
2075 if (WC_MODE_PMEM(wc)) in writecache_writeback()
2076 __writecache_writeback_pmem(wc, &wbl); in writecache_writeback()
2078 __writecache_writeback_ssd(wc, &wbl); in writecache_writeback()
2082 if (unlikely(wc->writeback_all)) { in writecache_writeback()
2083 wc_lock(wc); in writecache_writeback()
2084 while (writecache_wait_for_writeback(wc)); in writecache_writeback()
2085 wc_unlock(wc); in writecache_writeback()
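
The head of the writeback loop (lines 1970-1974) decides whether to keep going: always while writeback_all is draining the cache, when free plus in-flight blocks are at or below the low watermark, or when the oldest LRU entry has aged past max_age minus one timer granule (MAX_AGE_DIV slices the period so the timer fires early enough). A runnable sketch of that predicate, with the age passed in as elapsed jiffies:

#include <stdbool.h>
#include <stdio.h>

#define MAX_AGE_DIV 16

static bool should_write_back(bool writeback_all, unsigned long freelist_size,
			      unsigned long writeback_size,
			      unsigned long low_watermark,
			      unsigned long oldest_age_jiffies,
			      unsigned long max_age)
{
	return writeback_all ||
	       freelist_size + writeback_size <= low_watermark ||
	       oldest_age_jiffies >= max_age - max_age / MAX_AGE_DIV;
}

int main(void)
{
	/* 10 free + 2 in flight <= low watermark 12, so write back */
	printf("%d\n", should_write_back(false, 10, 2, 12, 0, 1000));	/* 1 */
	return 0;
}
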
2124 static int init_memory(struct dm_writecache *wc) in init_memory() argument
2129 r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL); in init_memory()
2133 r = writecache_alloc_entries(wc); in init_memory()
2137 for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++) in init_memory()
2138 pmem_assign(sb(wc)->padding[b], cpu_to_le64(0)); in init_memory()
2139 pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION)); in init_memory()
2140 pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size)); in init_memory()
2141 pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks)); in init_memory()
2142 pmem_assign(sb(wc)->seq_count, cpu_to_le64(0)); in init_memory()
2144 for (b = 0; b < wc->n_blocks; b++) { in init_memory()
2145 write_original_sector_seq_count(wc, &wc->entries[b], -1, -1); in init_memory()
2149 writecache_flush_all_metadata(wc); in init_memory()
2150 writecache_commit_flushed(wc, false); in init_memory()
2151 pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC)); in init_memory()
2152 writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic); in init_memory()
2153 writecache_commit_flushed(wc, false); in init_memory()
2160 struct dm_writecache *wc = ti->private; in writecache_dtr() local
2162 if (!wc) in writecache_dtr()
2165 if (wc->endio_thread) in writecache_dtr()
2166 kthread_stop(wc->endio_thread); in writecache_dtr()
2168 if (wc->flush_thread) in writecache_dtr()
2169 kthread_stop(wc->flush_thread); in writecache_dtr()
2171 bioset_exit(&wc->bio_set); in writecache_dtr()
2173 mempool_exit(&wc->copy_pool); in writecache_dtr()
2175 if (wc->writeback_wq) in writecache_dtr()
2176 destroy_workqueue(wc->writeback_wq); in writecache_dtr()
2178 if (wc->dev) in writecache_dtr()
2179 dm_put_device(ti, wc->dev); in writecache_dtr()
2181 if (wc->ssd_dev) in writecache_dtr()
2182 dm_put_device(ti, wc->ssd_dev); in writecache_dtr()
2184 vfree(wc->entries); in writecache_dtr()
2186 if (wc->memory_map) { in writecache_dtr()
2187 if (WC_MODE_PMEM(wc)) in writecache_dtr()
2188 persistent_memory_release(wc); in writecache_dtr()
2190 vfree(wc->memory_map); in writecache_dtr()
2193 if (wc->dm_kcopyd) in writecache_dtr()
2194 dm_kcopyd_client_destroy(wc->dm_kcopyd); in writecache_dtr()
2196 if (wc->dm_io) in writecache_dtr()
2197 dm_io_client_destroy(wc->dm_io); in writecache_dtr()
2199 vfree(wc->dirty_bitmap); in writecache_dtr()
2201 kfree(wc); in writecache_dtr()
2206 struct dm_writecache *wc; in writecache_ctr() local
2225 wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL); in writecache_ctr()
2226 if (!wc) { in writecache_ctr()
2231 ti->private = wc; in writecache_ctr()
2232 wc->ti = ti; in writecache_ctr()
2234 mutex_init(&wc->lock); in writecache_ctr()
2235 wc->max_age = MAX_AGE_UNSPECIFIED; in writecache_ctr()
2236 writecache_poison_lists(wc); in writecache_ctr()
2237 init_waitqueue_head(&wc->freelist_wait); in writecache_ctr()
2238 timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0); in writecache_ctr()
2239 timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0); in writecache_ctr()
2242 atomic_set(&wc->bio_in_progress[i], 0); in writecache_ctr()
2243 init_waitqueue_head(&wc->bio_in_progress_wait[i]); in writecache_ctr()
2246 wc->dm_io = dm_io_client_create(); in writecache_ctr()
2247 if (IS_ERR(wc->dm_io)) { in writecache_ctr()
2248 r = PTR_ERR(wc->dm_io); in writecache_ctr()
2250 wc->dm_io = NULL; in writecache_ctr()
2254 wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1); in writecache_ctr()
2255 if (!wc->writeback_wq) { in writecache_ctr()
2260 INIT_WORK(&wc->writeback_work, writecache_writeback); in writecache_ctr()
2261 INIT_WORK(&wc->flush_work, writecache_flush_work); in writecache_ctr()
2263 dm_iot_init(&wc->iot); in writecache_ctr()
2265 raw_spin_lock_init(&wc->endio_list_lock); in writecache_ctr()
2266 INIT_LIST_HEAD(&wc->endio_list); in writecache_ctr()
2267 wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio"); in writecache_ctr()
2268 if (IS_ERR(wc->endio_thread)) { in writecache_ctr()
2269 r = PTR_ERR(wc->endio_thread); in writecache_ctr()
2270 wc->endio_thread = NULL; in writecache_ctr()
2274 wake_up_process(wc->endio_thread); in writecache_ctr()
2284 wc->pmem_mode = false; in writecache_ctr()
2287 wc->pmem_mode = true; in writecache_ctr()
2288 wc->writeback_fua = true; in writecache_ctr()
2303 if (WC_MODE_PMEM(wc)) { in writecache_ctr()
2304 r = bioset_init(&wc->bio_set, BIO_POOL_SIZE, in writecache_ctr()
2312 wc->pause = PAUSE_WRITEBACK; in writecache_ctr()
2313 r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct)); in writecache_ctr()
2326 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev); in writecache_ctr()
2339 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev); in writecache_ctr()
2344 wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode); in writecache_ctr()
2352 if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 || in writecache_ctr()
2353 wc->block_size < 512 || wc->block_size > PAGE_SIZE || in writecache_ctr()
2354 (wc->block_size & (wc->block_size - 1))) { in writecache_ctr()
2359 if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) || in writecache_ctr()
2360 wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) { in writecache_ctr()
2365 wc->block_size_bits = __ffs(wc->block_size); in writecache_ctr()
2367 wc->max_writeback_jobs = MAX_WRITEBACK_JOBS; in writecache_ctr()
2368 wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM; in writecache_ctr()
2369 wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC); in writecache_ctr()
2385 wc->start_sector = start_sector; in writecache_ctr()
2386 wc->start_sector_set = true; in writecache_ctr()
2387 if (wc->start_sector != start_sector || in writecache_ctr()
2388 wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT) in writecache_ctr()
2396 wc->high_wm_percent_value = high_wm_percent; in writecache_ctr()
2397 wc->high_wm_percent_set = true; in writecache_ctr()
2404 wc->low_wm_percent_value = low_wm_percent; in writecache_ctr()
2405 wc->low_wm_percent_set = true; in writecache_ctr()
2408 if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1) in writecache_ctr()
2410 wc->max_writeback_jobs_set = true; in writecache_ctr()
2413 if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1) in writecache_ctr()
2415 wc->autocommit_blocks_set = true; in writecache_ctr()
2423 wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs); in writecache_ctr()
2424 wc->autocommit_time_value = autocommit_msecs; in writecache_ctr()
2425 wc->autocommit_time_set = true; in writecache_ctr()
2433 wc->max_age = msecs_to_jiffies(max_age_msecs); in writecache_ctr()
2434 wc->max_age_set = true; in writecache_ctr()
2435 wc->max_age_value = max_age_msecs; in writecache_ctr()
2437 wc->cleaner_set = true; in writecache_ctr()
2438 wc->cleaner = true; in writecache_ctr()
2440 if (WC_MODE_PMEM(wc)) { in writecache_ctr()
2441 wc->writeback_fua = true; in writecache_ctr()
2442 wc->writeback_fua_set = true; in writecache_ctr()
2445 if (WC_MODE_PMEM(wc)) { in writecache_ctr()
2446 wc->writeback_fua = false; in writecache_ctr()
2447 wc->writeback_fua_set = true; in writecache_ctr()
2450 wc->metadata_only = true; in writecache_ctr()
2453 if (WC_MODE_PMEM(wc)) in writecache_ctr()
2460 wc->pause = msecs_to_jiffies(pause_msecs); in writecache_ctr()
2461 wc->pause_set = true; in writecache_ctr()
2462 wc->pause_value = pause_msecs; in writecache_ctr()
2477 if (WC_MODE_PMEM(wc)) { in writecache_ctr()
2478 if (!dax_synchronous(wc->ssd_dev->dax_dev)) { in writecache_ctr()
2484 r = persistent_memory_claim(wc); in writecache_ctr()
2493 wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT; in writecache_ctr()
2495 bio_list_init(&wc->flush_list); in writecache_ctr()
2496 wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush"); in writecache_ctr()
2497 if (IS_ERR(wc->flush_thread)) { in writecache_ctr()
2498 r = PTR_ERR(wc->flush_thread); in writecache_ctr()
2499 wc->flush_thread = NULL; in writecache_ctr()
2503 wake_up_process(wc->flush_thread); in writecache_ctr()
2505 r = calculate_memory_size(wc->memory_map_size, wc->block_size, in writecache_ctr()
2512 n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) + in writecache_ctr()
2521 wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits); in writecache_ctr()
2522 if (!wc->memory_map) { in writecache_ctr()
2528 wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle); in writecache_ctr()
2529 if (IS_ERR(wc->dm_kcopyd)) { in writecache_ctr()
2530 r = PTR_ERR(wc->dm_kcopyd); in writecache_ctr()
2532 wc->dm_kcopyd = NULL; in writecache_ctr()
2536 wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT); in writecache_ctr()
2537 wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) / in writecache_ctr()
2539 wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size); in writecache_ctr()
2540 if (!wc->dirty_bitmap) { in writecache_ctr()
2546 r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT); in writecache_ctr()
2553 r = copy_mc_to_kernel(&s, sb(wc), sizeof(struct wc_memory_superblock)); in writecache_ctr()
2559 r = init_memory(wc); in writecache_ctr()
2564 r = copy_mc_to_kernel(&s, sb(wc), in writecache_ctr()
2584 if (le32_to_cpu(s.block_size) != wc->block_size) { in writecache_ctr()
2590 wc->n_blocks = le64_to_cpu(s.n_blocks); in writecache_ctr()
2592 offset = wc->n_blocks * sizeof(struct wc_memory_entry); in writecache_ctr()
2593 if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) { in writecache_ctr()
2602 offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1); in writecache_ctr()
2603 data_size = wc->n_blocks * (size_t)wc->block_size; in writecache_ctr()
2604 if (!offset || (data_size / wc->block_size != wc->n_blocks) || in writecache_ctr()
2607 if (offset + data_size > wc->memory_map_size) { in writecache_ctr()
2613 wc->metadata_sectors = offset >> SECTOR_SHIFT; in writecache_ctr()
2614 wc->block_start = (char *)sb(wc) + offset; in writecache_ctr()
2616 x = (uint64_t)wc->n_blocks * (100 - high_wm_percent); in writecache_ctr()
2619 wc->freelist_high_watermark = x; in writecache_ctr()
2620 x = (uint64_t)wc->n_blocks * (100 - low_wm_percent); in writecache_ctr()
2623 wc->freelist_low_watermark = x; in writecache_ctr()
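
At the end of the constructor the percent watermarks from the table line (for example: writecache s /dev/origin /dev/fast 4096 4 high_watermark 50 low_watermark 45) become free-block counts: writeback is queued once free plus in-flight blocks fall to freelist_high_watermark and keeps running until they exceed freelist_low_watermark. A sketch of the conversion; the exact rounding here is illustrative, the driver divides with do_div():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t n_blocks = 100000;
	unsigned high_wm_percent = 50, low_wm_percent = 45;

	/* percent of used blocks -> count of free blocks */
	uint64_t high = n_blocks * (100 - high_wm_percent) / 100;	/* 50000 */
	uint64_t low = n_blocks * (100 - low_wm_percent) / 100;		/* 55000 */

	printf("queue writeback at <= %llu free, stop above %llu free\n",
	       (unsigned long long)high, (unsigned long long)low);
	return 0;
}
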
2625 if (wc->cleaner) in writecache_ctr()
2626 activate_cleaner(wc); in writecache_ctr()
2628 r = writecache_alloc_entries(wc); in writecache_ctr()
2634 ti->num_flush_bios = WC_MODE_PMEM(wc) ? 1 : 2; in writecache_ctr()
2638 if (WC_MODE_PMEM(wc)) in writecache_ctr()
2639 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size); in writecache_ctr()
2654 struct dm_writecache *wc = ti->private; in writecache_status() local
2661 writecache_has_error(wc), in writecache_status()
2662 (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size, in writecache_status()
2663 (unsigned long long)wc->writeback_size, in writecache_status()
2664 wc->stats.reads, in writecache_status()
2665 wc->stats.read_hits, in writecache_status()
2666 wc->stats.writes, in writecache_status()
2667 wc->stats.write_hits_uncommitted, in writecache_status()
2668 wc->stats.write_hits_committed, in writecache_status()
2669 wc->stats.writes_around, in writecache_status()
2670 wc->stats.writes_allocate, in writecache_status()
2671 wc->stats.writes_blocked_on_freelist, in writecache_status()
2672 wc->stats.flushes, in writecache_status()
2673 wc->stats.discards); in writecache_status()
2676 DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's', in writecache_status()
2677 wc->dev->name, wc->ssd_dev->name, wc->block_size); in writecache_status()
2679 if (wc->start_sector_set) in writecache_status()
2681 if (wc->high_wm_percent_set) in writecache_status()
2683 if (wc->low_wm_percent_set) in writecache_status()
2685 if (wc->max_writeback_jobs_set) in writecache_status()
2687 if (wc->autocommit_blocks_set) in writecache_status()
2689 if (wc->autocommit_time_set) in writecache_status()
2691 if (wc->max_age_set) in writecache_status()
2693 if (wc->cleaner_set) in writecache_status()
2695 if (wc->writeback_fua_set) in writecache_status()
2697 if (wc->metadata_only) in writecache_status()
2699 if (wc->pause_set) in writecache_status()
2703 if (wc->start_sector_set) in writecache_status()
2704 DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector); in writecache_status()
2705 if (wc->high_wm_percent_set) in writecache_status()
2706 DMEMIT(" high_watermark %u", wc->high_wm_percent_value); in writecache_status()
2707 if (wc->low_wm_percent_set) in writecache_status()
2708 DMEMIT(" low_watermark %u", wc->low_wm_percent_value); in writecache_status()
2709 if (wc->max_writeback_jobs_set) in writecache_status()
2710 DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs); in writecache_status()
2711 if (wc->autocommit_blocks_set) in writecache_status()
2712 DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks); in writecache_status()
2713 if (wc->autocommit_time_set) in writecache_status()
2714 DMEMIT(" autocommit_time %u", wc->autocommit_time_value); in writecache_status()
2715 if (wc->max_age_set) in writecache_status()
2716 DMEMIT(" max_age %u", wc->max_age_value); in writecache_status()
2717 if (wc->cleaner_set) in writecache_status()
2719 if (wc->writeback_fua_set) in writecache_status()
2720 DMEMIT(" %sfua", wc->writeback_fua ? "" : "no"); in writecache_status()
2721 if (wc->metadata_only) in writecache_status()
2723 if (wc->pause_set) in writecache_status()
2724 DMEMIT(" pause_writeback %u", wc->pause_value); in writecache_status()