Lines matching references to zmd (struct dmz_metadata *) in the Linux kernel dm-zoned metadata code, drivers/md/dm-zoned-metadata.c
187 unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_id() argument
189 return ((unsigned int)(zone - zmd->zones)); in dmz_id()
192 sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_start_sect() argument
194 return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift; in dmz_start_sect()
197 sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_start_block() argument
199 return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift; in dmz_start_block()
202 unsigned int dmz_nr_chunks(struct dmz_metadata *zmd) in dmz_nr_chunks() argument
204 return zmd->nr_chunks; in dmz_nr_chunks()
207 unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd) in dmz_nr_rnd_zones() argument
209 return zmd->nr_rnd; in dmz_nr_rnd_zones()
212 unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd) in dmz_nr_unmap_rnd_zones() argument
214 return atomic_read(&zmd->unmap_nr_rnd); in dmz_nr_unmap_rnd_zones()
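The accessors above derive a zone's id from its index in the zmd->zones array and turn that id into a start sector or start block with the per-device shift values. A minimal sketch of that relationship, written in the style of the same file and using only the helpers and fields visible in the matches (illustrative, not actual kernel source):

/*
 * Illustrative sketch only: a zone's id is its index in the zmd->zones
 * array, and its start LBA/block is that id shifted by the per-device
 * zone-size shifts, as the accessors above show.
 */
static void dmz_example_zone_geometry(struct dmz_metadata *zmd,
                                      struct dm_zone *zone)
{
        unsigned int id = dmz_id(zmd, zone);            /* zone - zmd->zones */
        sector_t sect = dmz_start_sect(zmd, zone);      /* id << zone_nr_sectors_shift */
        sector_t blk = dmz_start_block(zmd, zone);      /* id << zone_nr_blocks_shift */

        /* Consistency expected from the definitions above. */
        WARN_ON(sect != ((sector_t)id << zmd->dev->zone_nr_sectors_shift));
        WARN_ON(blk != ((sector_t)id << zmd->dev->zone_nr_blocks_shift));
}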
221 void dmz_lock_map(struct dmz_metadata *zmd) in dmz_lock_map() argument
223 mutex_lock(&zmd->map_lock); in dmz_lock_map()
226 void dmz_unlock_map(struct dmz_metadata *zmd) in dmz_unlock_map() argument
228 mutex_unlock(&zmd->map_lock); in dmz_unlock_map()
238 void dmz_lock_metadata(struct dmz_metadata *zmd) in dmz_lock_metadata() argument
240 down_read(&zmd->mblk_sem); in dmz_lock_metadata()
243 void dmz_unlock_metadata(struct dmz_metadata *zmd) in dmz_unlock_metadata() argument
245 up_read(&zmd->mblk_sem); in dmz_unlock_metadata()
253 void dmz_lock_flush(struct dmz_metadata *zmd) in dmz_lock_flush() argument
255 mutex_lock(&zmd->mblk_flush_lock); in dmz_lock_flush()
258 void dmz_unlock_flush(struct dmz_metadata *zmd) in dmz_unlock_flush() argument
260 mutex_unlock(&zmd->mblk_flush_lock); in dmz_unlock_flush()
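The lock helpers above wrap three separate locks: the chunk-map mutex (map_lock), the read side of the metadata rw-semaphore (mblk_sem), and the flush mutex (mblk_flush_lock). A hedged sketch of the caller pattern implied by dmz_get_chunk_mapping() and dmz_wait_for_free_zones() further down, which take and drop the metadata and map locks in this nested order:

/*
 * Hedged sketch, not kernel code: callers take the metadata lock (read
 * side of mblk_sem) and then the map lock before touching chunk mappings,
 * mirroring the lock/unlock order in dmz_wait_for_free_zones() below.
 */
static void dmz_example_locked_map_access(struct dmz_metadata *zmd)
{
        dmz_lock_metadata(zmd);         /* down_read(&zmd->mblk_sem) */
        dmz_lock_map(zmd);              /* mutex_lock(&zmd->map_lock) */

        /* ... inspect or update the chunk map here ... */

        dmz_unlock_map(zmd);
        dmz_unlock_metadata(zmd);       /* up_read(&zmd->mblk_sem) */
}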
266 static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd, in dmz_alloc_mblock() argument
272 if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) { in dmz_alloc_mblock()
273 spin_lock(&zmd->mblk_lock); in dmz_alloc_mblock()
274 mblk = list_first_entry_or_null(&zmd->mblk_lru_list, in dmz_alloc_mblock()
278 rb_erase(&mblk->node, &zmd->mblk_rbtree); in dmz_alloc_mblock()
281 spin_unlock(&zmd->mblk_lock); in dmz_alloc_mblock()
304 atomic_inc(&zmd->nr_mblks); in dmz_alloc_mblock()
312 static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) in dmz_free_mblock() argument
317 atomic_dec(&zmd->nr_mblks); in dmz_free_mblock()
323 static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) in dmz_insert_mblock() argument
325 struct rb_root *root = &zmd->mblk_rbtree; in dmz_insert_mblock()
344 static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd, in dmz_lookup_mblock() argument
347 struct rb_root *root = &zmd->mblk_rbtree; in dmz_lookup_mblock()
387 static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd, in dmz_fetch_mblock() argument
391 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; in dmz_fetch_mblock()
395 mblk = dmz_alloc_mblock(zmd, mblk_no); in dmz_fetch_mblock()
399 spin_lock(&zmd->mblk_lock); in dmz_fetch_mblock()
402 dmz_insert_mblock(zmd, mblk); in dmz_fetch_mblock()
403 spin_unlock(&zmd->mblk_lock); in dmz_fetch_mblock()
407 dmz_free_mblock(zmd, mblk); in dmz_fetch_mblock()
412 bio_set_dev(bio, zmd->dev->bdev); in dmz_fetch_mblock()
425 static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd, in dmz_shrink_mblock_cache() argument
431 if (!zmd->max_nr_mblks) in dmz_shrink_mblock_cache()
434 while (!list_empty(&zmd->mblk_lru_list) && in dmz_shrink_mblock_cache()
435 atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks && in dmz_shrink_mblock_cache()
437 mblk = list_first_entry(&zmd->mblk_lru_list, in dmz_shrink_mblock_cache()
440 rb_erase(&mblk->node, &zmd->mblk_rbtree); in dmz_shrink_mblock_cache()
441 dmz_free_mblock(zmd, mblk); in dmz_shrink_mblock_cache()
454 struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker); in dmz_mblock_shrinker_count() local
456 return atomic_read(&zmd->nr_mblks); in dmz_mblock_shrinker_count()
465 struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker); in dmz_mblock_shrinker_scan() local
468 spin_lock(&zmd->mblk_lock); in dmz_mblock_shrinker_scan()
469 count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan); in dmz_mblock_shrinker_scan()
470 spin_unlock(&zmd->mblk_lock); in dmz_mblock_shrinker_scan()
478 static void dmz_release_mblock(struct dmz_metadata *zmd, in dmz_release_mblock() argument
485 spin_lock(&zmd->mblk_lock); in dmz_release_mblock()
489 rb_erase(&mblk->node, &zmd->mblk_rbtree); in dmz_release_mblock()
490 dmz_free_mblock(zmd, mblk); in dmz_release_mblock()
492 list_add_tail(&mblk->link, &zmd->mblk_lru_list); in dmz_release_mblock()
493 dmz_shrink_mblock_cache(zmd, 1); in dmz_release_mblock()
497 spin_unlock(&zmd->mblk_lock); in dmz_release_mblock()
504 static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, in dmz_get_mblock() argument
510 spin_lock(&zmd->mblk_lock); in dmz_get_mblock()
511 mblk = dmz_lookup_mblock(zmd, mblk_no); in dmz_get_mblock()
518 spin_unlock(&zmd->mblk_lock); in dmz_get_mblock()
522 mblk = dmz_fetch_mblock(zmd, mblk_no); in dmz_get_mblock()
531 dmz_release_mblock(zmd, mblk); in dmz_get_mblock()
541 static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) in dmz_dirty_mblock() argument
543 spin_lock(&zmd->mblk_lock); in dmz_dirty_mblock()
545 list_add_tail(&mblk->link, &zmd->mblk_dirty_list); in dmz_dirty_mblock()
546 spin_unlock(&zmd->mblk_lock); in dmz_dirty_mblock()
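The helpers above implement a small cache of on-disk metadata blocks: dmz_get_mblock() returns a referenced block (reading it from disk on a cache miss), dmz_dirty_mblock() queues it for the next flush, and dmz_release_mblock() drops the reference so the LRU list and shrinker can reclaim it. A sketch of that lifecycle, assuming dmz_get_mblock() reports failures with ERR_PTR() as its callers below suggest:

/*
 * Sketch of the metadata-block cache lifecycle suggested by the helpers
 * above: look the block up (or read it) with dmz_get_mblock(), modify it,
 * queue it on the dirty list, and release the reference. The byte write
 * is purely illustrative.
 */
static int dmz_example_touch_mblock(struct dmz_metadata *zmd,
                                    sector_t mblk_no)
{
        struct dmz_mblock *mblk;
        u8 *data;

        mblk = dmz_get_mblock(zmd, mblk_no);
        if (IS_ERR(mblk))
                return PTR_ERR(mblk);

        data = mblk->data;                      /* block payload */
        data[0] = 0;                            /* illustrative modification */

        dmz_dirty_mblock(zmd, mblk);            /* schedule for the next flush */
        dmz_release_mblock(zmd, mblk);          /* allow LRU/shrinker reclaim */

        return 0;
}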
552 static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, in dmz_write_mblock() argument
555 sector_t block = zmd->sb[set].block + mblk->no; in dmz_write_mblock()
567 bio_set_dev(bio, zmd->dev->bdev); in dmz_write_mblock()
578 static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block, in dmz_rdwr_block() argument
589 bio_set_dev(bio, zmd->dev->bdev); in dmz_rdwr_block()
601 static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set) in dmz_write_sb() argument
603 sector_t block = zmd->sb[set].block; in dmz_write_sb()
604 struct dmz_mblock *mblk = zmd->sb[set].mblk; in dmz_write_sb()
605 struct dmz_super *sb = zmd->sb[set].sb; in dmz_write_sb()
606 u64 sb_gen = zmd->sb_gen + 1; in dmz_write_sb()
615 sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks); in dmz_write_sb()
616 sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq); in dmz_write_sb()
617 sb->nr_chunks = cpu_to_le32(zmd->nr_chunks); in dmz_write_sb()
619 sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks); in dmz_write_sb()
620 sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks); in dmz_write_sb()
625 ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page); in dmz_write_sb()
627 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); in dmz_write_sb()
635 static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, in dmz_write_dirty_mblocks() argument
646 dmz_write_mblock(zmd, mblk, set); in dmz_write_dirty_mblocks()
661 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); in dmz_write_dirty_mblocks()
669 static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd, in dmz_log_dirty_mblocks() argument
672 unsigned int log_set = zmd->mblk_primary ^ 0x1; in dmz_log_dirty_mblocks()
676 ret = dmz_write_dirty_mblocks(zmd, write_list, log_set); in dmz_log_dirty_mblocks()
684 ret = dmz_write_sb(zmd, log_set); in dmz_log_dirty_mblocks()
694 int dmz_flush_metadata(struct dmz_metadata *zmd) in dmz_flush_metadata() argument
700 if (WARN_ON(!zmd)) in dmz_flush_metadata()
710 down_write(&zmd->mblk_sem); in dmz_flush_metadata()
716 dmz_lock_flush(zmd); in dmz_flush_metadata()
719 spin_lock(&zmd->mblk_lock); in dmz_flush_metadata()
720 list_splice_init(&zmd->mblk_dirty_list, &write_list); in dmz_flush_metadata()
721 spin_unlock(&zmd->mblk_lock); in dmz_flush_metadata()
725 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); in dmz_flush_metadata()
734 ret = dmz_log_dirty_mblocks(zmd, &write_list); in dmz_flush_metadata()
742 ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary); in dmz_flush_metadata()
746 ret = dmz_write_sb(zmd, zmd->mblk_primary); in dmz_flush_metadata()
754 spin_lock(&zmd->mblk_lock); in dmz_flush_metadata()
757 list_add_tail(&mblk->link, &zmd->mblk_lru_list); in dmz_flush_metadata()
758 spin_unlock(&zmd->mblk_lock); in dmz_flush_metadata()
761 zmd->sb_gen++; in dmz_flush_metadata()
764 spin_lock(&zmd->mblk_lock); in dmz_flush_metadata()
765 list_splice(&write_list, &zmd->mblk_dirty_list); in dmz_flush_metadata()
766 spin_unlock(&zmd->mblk_lock); in dmz_flush_metadata()
769 dmz_unlock_flush(zmd); in dmz_flush_metadata()
770 up_write(&zmd->mblk_sem); in dmz_flush_metadata()
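dmz_flush_metadata() above makes updates transactional: it drains the dirty list, writes those blocks and a new super block to the secondary (log) set, then repeats the writes for the primary set before bumping sb_gen. From a caller's point of view the contract is simply dirty-then-flush; a minimal sketch:

/*
 * Sketch of how metadata updates become durable, based on the
 * dmz_flush_metadata() matches above: dirty blocks sit on mblk_dirty_list
 * until the flush commits them to both metadata sets.
 */
static int dmz_example_commit_update(struct dmz_metadata *zmd,
                                     struct dmz_mblock *mblk)
{
        dmz_dirty_mblock(zmd, mblk);    /* queue on zmd->mblk_dirty_list */

        /* Write dirty blocks + super blocks to both sets, bump sb_gen. */
        return dmz_flush_metadata(zmd);
}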
778 static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb) in dmz_check_sb() argument
781 struct dmz_dev *dev = zmd->dev; in dmz_check_sb()
810 nr_meta_zones >= zmd->nr_rnd_zones) { in dmz_check_sb()
816 le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) { in dmz_check_sb()
821 nr_data_zones = zmd->nr_useable_zones - in dmz_check_sb()
830 zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks); in dmz_check_sb()
831 zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq); in dmz_check_sb()
832 zmd->nr_chunks = le32_to_cpu(sb->nr_chunks); in dmz_check_sb()
833 zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks); in dmz_check_sb()
834 zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks); in dmz_check_sb()
835 zmd->nr_meta_zones = nr_meta_zones; in dmz_check_sb()
836 zmd->nr_data_zones = nr_data_zones; in dmz_check_sb()
844 static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set) in dmz_read_sb() argument
846 return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block, in dmz_read_sb()
847 zmd->sb[set].mblk->page); in dmz_read_sb()
855 static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd) in dmz_lookup_secondary_sb() argument
857 unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks; in dmz_lookup_secondary_sb()
862 mblk = dmz_alloc_mblock(zmd, 0); in dmz_lookup_secondary_sb()
866 zmd->sb[1].mblk = mblk; in dmz_lookup_secondary_sb()
867 zmd->sb[1].sb = mblk->data; in dmz_lookup_secondary_sb()
870 zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks; in dmz_lookup_secondary_sb()
871 for (i = 0; i < zmd->nr_rnd_zones - 1; i++) { in dmz_lookup_secondary_sb()
872 if (dmz_read_sb(zmd, 1) != 0) in dmz_lookup_secondary_sb()
874 if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC) in dmz_lookup_secondary_sb()
876 zmd->sb[1].block += zone_nr_blocks; in dmz_lookup_secondary_sb()
879 dmz_free_mblock(zmd, mblk); in dmz_lookup_secondary_sb()
880 zmd->sb[1].mblk = NULL; in dmz_lookup_secondary_sb()
888 static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set) in dmz_get_sb() argument
894 mblk = dmz_alloc_mblock(zmd, 0); in dmz_get_sb()
898 zmd->sb[set].mblk = mblk; in dmz_get_sb()
899 zmd->sb[set].sb = mblk->data; in dmz_get_sb()
902 ret = dmz_read_sb(zmd, set); in dmz_get_sb()
904 dmz_free_mblock(zmd, mblk); in dmz_get_sb()
905 zmd->sb[set].mblk = NULL; in dmz_get_sb()
915 static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set) in dmz_recover_mblocks() argument
921 dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set); in dmz_recover_mblocks()
924 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone); in dmz_recover_mblocks()
926 zmd->sb[1].block = zmd->sb[0].block + in dmz_recover_mblocks()
927 (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift); in dmz_recover_mblocks()
935 for (i = 1; i < zmd->nr_meta_blocks; i++) { in dmz_recover_mblocks()
936 ret = dmz_rdwr_block(zmd, REQ_OP_READ, in dmz_recover_mblocks()
937 zmd->sb[src_set].block + i, page); in dmz_recover_mblocks()
940 ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, in dmz_recover_mblocks()
941 zmd->sb[dst_set].block + i, page); in dmz_recover_mblocks()
947 if (!zmd->sb[dst_set].mblk) { in dmz_recover_mblocks()
948 zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0); in dmz_recover_mblocks()
949 if (!zmd->sb[dst_set].mblk) { in dmz_recover_mblocks()
953 zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data; in dmz_recover_mblocks()
956 ret = dmz_write_sb(zmd, dst_set); in dmz_recover_mblocks()
966 static int dmz_load_sb(struct dmz_metadata *zmd) in dmz_load_sb() argument
973 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone); in dmz_load_sb()
974 ret = dmz_get_sb(zmd, 0); in dmz_load_sb()
976 dmz_dev_err(zmd->dev, "Read primary super block failed"); in dmz_load_sb()
980 ret = dmz_check_sb(zmd, zmd->sb[0].sb); in dmz_load_sb()
985 zmd->sb[1].block = zmd->sb[0].block + in dmz_load_sb()
986 (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift); in dmz_load_sb()
987 ret = dmz_get_sb(zmd, 1); in dmz_load_sb()
989 ret = dmz_lookup_secondary_sb(zmd); in dmz_load_sb()
992 dmz_dev_err(zmd->dev, "Read secondary super block failed"); in dmz_load_sb()
996 ret = dmz_check_sb(zmd, zmd->sb[1].sb); in dmz_load_sb()
1002 dmz_dev_err(zmd->dev, "No valid super block found"); in dmz_load_sb()
1007 sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen); in dmz_load_sb()
1009 ret = dmz_recover_mblocks(zmd, 0); in dmz_load_sb()
1012 sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen); in dmz_load_sb()
1014 ret = dmz_recover_mblocks(zmd, 1); in dmz_load_sb()
1017 dmz_dev_err(zmd->dev, "Recovery failed"); in dmz_load_sb()
1022 zmd->sb_gen = sb_gen[0]; in dmz_load_sb()
1023 zmd->mblk_primary = 0; in dmz_load_sb()
1025 zmd->sb_gen = sb_gen[1]; in dmz_load_sb()
1026 zmd->mblk_primary = 1; in dmz_load_sb()
1029 dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)", in dmz_load_sb()
1030 zmd->mblk_primary, zmd->sb_gen); in dmz_load_sb()
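dmz_load_sb() reads both super block copies, validates them with dmz_check_sb(), keeps the copy with the newer generation as the primary set, and rewrites the stale copy with dmz_recover_mblocks(). The block offsets of the two copies, as implied by the matches above (illustrative helper, not kernel code):

/*
 * Illustrative only: each metadata set starts with a super block, and the
 * secondary set begins nr_meta_zones zones after the primary one, per
 * dmz_load_sb() and dmz_recover_mblocks() above.
 */
static sector_t dmz_example_secondary_sb_block(struct dmz_metadata *zmd)
{
        sector_t primary = dmz_start_block(zmd, zmd->sb_zone);

        return primary +
                ((sector_t)zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
}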
1038 static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_init_zone() argument
1041 struct dmz_dev *dev = zmd->dev; in dmz_init_zone()
1056 zmd->nr_rnd_zones++; in dmz_init_zone()
1074 zmd->nr_useable_zones++; in dmz_init_zone()
1076 zmd->nr_rnd_zones++; in dmz_init_zone()
1077 if (!zmd->sb_zone) { in dmz_init_zone()
1079 zmd->sb_zone = zone; in dmz_init_zone()
1090 static void dmz_drop_zones(struct dmz_metadata *zmd) in dmz_drop_zones() argument
1092 kfree(zmd->zones); in dmz_drop_zones()
1093 zmd->zones = NULL; in dmz_drop_zones()
1106 static int dmz_init_zones(struct dmz_metadata *zmd) in dmz_init_zones() argument
1108 struct dmz_dev *dev = zmd->dev; in dmz_init_zones()
1116 zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3; in dmz_init_zones()
1117 zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT; in dmz_init_zones()
1120 zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL); in dmz_init_zones()
1121 if (!zmd->zones) in dmz_init_zones()
1141 zone = zmd->zones; in dmz_init_zones()
1154 ret = dmz_init_zone(zmd, zone, &blkz[i]); in dmz_init_zones()
1170 dmz_drop_zones(zmd); in dmz_init_zones()
1178 static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_update_zone() argument
1185 ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), in dmz_update_zone()
1188 dmz_dev_err(zmd->dev, "Get zone %u report failed", in dmz_update_zone()
1189 dmz_id(zmd, zone)); in dmz_update_zone()
1212 static int dmz_handle_seq_write_err(struct dmz_metadata *zmd, in dmz_handle_seq_write_err() argument
1219 ret = dmz_update_zone(zmd, zone); in dmz_handle_seq_write_err()
1223 dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)", in dmz_handle_seq_write_err()
1224 dmz_id(zmd, zone), zone->wp_block, wp); in dmz_handle_seq_write_err()
1227 dmz_invalidate_blocks(zmd, zone, zone->wp_block, in dmz_handle_seq_write_err()
1234 static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id) in dmz_get() argument
1236 return &zmd->zones[zone_id]; in dmz_get()
1242 static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_reset_zone() argument
1256 struct dmz_dev *dev = zmd->dev; in dmz_reset_zone()
1259 dmz_start_sect(zmd, zone), in dmz_reset_zone()
1263 dmz_id(zmd, zone), ret); in dmz_reset_zone()
1275 static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
1280 static int dmz_load_mapping(struct dmz_metadata *zmd) in dmz_load_mapping() argument
1282 struct dmz_dev *dev = zmd->dev; in dmz_load_mapping()
1291 zmd->map_mblk = kcalloc(zmd->nr_map_blocks, in dmz_load_mapping()
1293 if (!zmd->map_mblk) in dmz_load_mapping()
1297 while (chunk < zmd->nr_chunks) { in dmz_load_mapping()
1300 dmap_mblk = dmz_get_mblock(zmd, i + 1); in dmz_load_mapping()
1303 zmd->map_mblk[i] = dmap_mblk; in dmz_load_mapping()
1320 dzone = dmz_get(zmd, dzone_id); in dmz_load_mapping()
1323 dmz_get_zone_weight(zmd, dzone); in dmz_load_mapping()
1326 list_add_tail(&dzone->link, &zmd->map_rnd_list); in dmz_load_mapping()
1328 list_add_tail(&dzone->link, &zmd->map_seq_list); in dmz_load_mapping()
1341 bzone = dmz_get(zmd, bzone_id); in dmz_load_mapping()
1353 dmz_get_zone_weight(zmd, bzone); in dmz_load_mapping()
1354 list_add_tail(&bzone->link, &zmd->map_rnd_list); in dmz_load_mapping()
1368 dzone = dmz_get(zmd, i); in dmz_load_mapping()
1373 zmd->nr_rnd++; in dmz_load_mapping()
1375 zmd->nr_seq++; in dmz_load_mapping()
1386 list_add_tail(&dzone->link, &zmd->unmap_rnd_list); in dmz_load_mapping()
1387 atomic_inc(&zmd->unmap_nr_rnd); in dmz_load_mapping()
1388 } else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) { in dmz_load_mapping()
1389 list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list); in dmz_load_mapping()
1390 atomic_inc(&zmd->nr_reserved_seq_zones); in dmz_load_mapping()
1391 zmd->nr_seq--; in dmz_load_mapping()
1393 list_add_tail(&dzone->link, &zmd->unmap_seq_list); in dmz_load_mapping()
1394 atomic_inc(&zmd->unmap_nr_seq); in dmz_load_mapping()
1404 static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, in dmz_set_chunk_mapping() argument
1407 struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT]; in dmz_set_chunk_mapping()
1413 dmz_dirty_mblock(zmd, dmap_mblk); in dmz_set_chunk_mapping()
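dmz_set_chunk_mapping() above locates a chunk's mapping entry by indexing map_mblk[] with chunk >> DMZ_MAP_ENTRIES_SHIFT. A hedged sketch of the corresponding lookup; struct dmz_map (little-endian dzone_id/bzone_id fields) and DMZ_MAP_ENTRIES_MASK do not appear in the matches and are assumed here:

/*
 * Hedged sketch of a chunk-mapping lookup based on dmz_get_chunk_mapping()
 * and dmz_set_chunk_mapping() above. struct dmz_map and
 * DMZ_MAP_ENTRIES_MASK are assumed; only DMZ_MAP_ENTRIES_SHIFT and
 * DMZ_MAP_UNMAPPED appear in the matches.
 */
static struct dm_zone *dmz_example_chunk_to_dzone(struct dmz_metadata *zmd,
                                                  unsigned int chunk)
{
        struct dmz_mblock *dmap_mblk =
                zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
        struct dmz_map *dmap = dmap_mblk->data;
        unsigned int dzone_id =
                le32_to_cpu(dmap[chunk & DMZ_MAP_ENTRIES_MASK].dzone_id);

        if (dzone_id == DMZ_MAP_UNMAPPED)
                return NULL;

        return dmz_get(zmd, dzone_id);  /* &zmd->zones[dzone_id] */
}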
1420 static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in __dmz_lru_zone() argument
1428 list_add_tail(&zone->link, &zmd->map_seq_list); in __dmz_lru_zone()
1431 list_add_tail(&zone->link, &zmd->map_rnd_list); in __dmz_lru_zone()
1439 static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_lru_zone() argument
1441 __dmz_lru_zone(zmd, zone); in dmz_lru_zone()
1443 __dmz_lru_zone(zmd, zone->bzone); in dmz_lru_zone()
1449 static void dmz_wait_for_free_zones(struct dmz_metadata *zmd) in dmz_wait_for_free_zones() argument
1453 prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE); in dmz_wait_for_free_zones()
1454 dmz_unlock_map(zmd); in dmz_wait_for_free_zones()
1455 dmz_unlock_metadata(zmd); in dmz_wait_for_free_zones()
1459 dmz_lock_metadata(zmd); in dmz_wait_for_free_zones()
1460 dmz_lock_map(zmd); in dmz_wait_for_free_zones()
1461 finish_wait(&zmd->free_wq, &wait); in dmz_wait_for_free_zones()
1494 static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_wait_for_reclaim() argument
1496 dmz_unlock_map(zmd); in dmz_wait_for_reclaim()
1497 dmz_unlock_metadata(zmd); in dmz_wait_for_reclaim()
1499 dmz_lock_metadata(zmd); in dmz_wait_for_reclaim()
1500 dmz_lock_map(zmd); in dmz_wait_for_reclaim()
1506 static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) in dmz_get_rnd_zone_for_reclaim() argument
1511 if (list_empty(&zmd->map_rnd_list)) in dmz_get_rnd_zone_for_reclaim()
1514 list_for_each_entry(zone, &zmd->map_rnd_list, link) { in dmz_get_rnd_zone_for_reclaim()
1529 static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) in dmz_get_seq_zone_for_reclaim() argument
1533 if (list_empty(&zmd->map_seq_list)) in dmz_get_seq_zone_for_reclaim()
1536 list_for_each_entry(zone, &zmd->map_seq_list, link) { in dmz_get_seq_zone_for_reclaim()
1549 struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd) in dmz_get_zone_for_reclaim() argument
1561 dmz_lock_map(zmd); in dmz_get_zone_for_reclaim()
1562 if (list_empty(&zmd->reserved_seq_zones_list)) in dmz_get_zone_for_reclaim()
1563 zone = dmz_get_seq_zone_for_reclaim(zmd); in dmz_get_zone_for_reclaim()
1565 zone = dmz_get_rnd_zone_for_reclaim(zmd); in dmz_get_zone_for_reclaim()
1566 dmz_unlock_map(zmd); in dmz_get_zone_for_reclaim()
1601 struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op) in dmz_get_chunk_mapping() argument
1603 struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT]; in dmz_get_chunk_mapping()
1610 dmz_lock_map(zmd); in dmz_get_chunk_mapping()
1623 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); in dmz_get_chunk_mapping()
1625 dmz_wait_for_free_zones(zmd); in dmz_get_chunk_mapping()
1629 dmz_map_zone(zmd, dzone, chunk); in dmz_get_chunk_mapping()
1633 dzone = dmz_get(zmd, dzone_id); in dmz_get_chunk_mapping()
1641 ret = dmz_handle_seq_write_err(zmd, dzone); in dmz_get_chunk_mapping()
1656 dmz_wait_for_reclaim(zmd, dzone); in dmz_get_chunk_mapping()
1660 dmz_lru_zone(zmd, dzone); in dmz_get_chunk_mapping()
1662 dmz_unlock_map(zmd); in dmz_get_chunk_mapping()
1673 void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone) in dmz_put_chunk_mapping() argument
1677 dmz_lock_map(zmd); in dmz_put_chunk_mapping()
1682 dmz_lru_zone(zmd, bzone); in dmz_put_chunk_mapping()
1685 dmz_unmap_zone(zmd, bzone); in dmz_put_chunk_mapping()
1686 dmz_free_zone(zmd, bzone); in dmz_put_chunk_mapping()
1694 dmz_lru_zone(zmd, dzone); in dmz_put_chunk_mapping()
1697 dmz_unmap_zone(zmd, dzone); in dmz_put_chunk_mapping()
1698 dmz_free_zone(zmd, dzone); in dmz_put_chunk_mapping()
1701 dmz_unlock_map(zmd); in dmz_put_chunk_mapping()
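dmz_get_chunk_mapping() and dmz_put_chunk_mapping() above bracket a chunk's data zone for the duration of an I/O; releasing the mapping is what lets empty zones be unmapped and freed. A hedged sketch of the calling pattern, assuming the ERR_PTR()/NULL return convention suggested by the error and unmapped-read paths:

/*
 * Hedged sketch of the data-zone lookup lifecycle: map the chunk for the
 * I/O, submit, then put the mapping so unused zones can be reclaimed.
 */
static int dmz_example_map_chunk_for_io(struct dmz_metadata *zmd,
                                        unsigned int chunk, int op)
{
        struct dm_zone *dzone;

        dzone = dmz_get_chunk_mapping(zmd, chunk, op);
        if (IS_ERR(dzone))
                return PTR_ERR(dzone);
        if (!dzone)
                return 0;       /* assumed: unmapped chunk on a read */

        /* ... submit the chunk I/O to dzone (or its buffer zone) ... */

        dmz_put_chunk_mapping(zmd, dzone);
        return 0;
}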
1708 struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd, in dmz_get_chunk_buffer() argument
1713 dmz_lock_map(zmd); in dmz_get_chunk_buffer()
1720 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); in dmz_get_chunk_buffer()
1722 dmz_wait_for_free_zones(zmd); in dmz_get_chunk_buffer()
1727 dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone), in dmz_get_chunk_buffer()
1728 dmz_id(zmd, bzone)); in dmz_get_chunk_buffer()
1734 list_add_tail(&bzone->link, &zmd->map_rnd_list); in dmz_get_chunk_buffer()
1736 dmz_unlock_map(zmd); in dmz_get_chunk_buffer()
1745 struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags) in dmz_alloc_zone() argument
1751 list = &zmd->unmap_rnd_list; in dmz_alloc_zone()
1753 list = &zmd->unmap_seq_list; in dmz_alloc_zone()
1761 list_empty(&zmd->reserved_seq_zones_list)) in dmz_alloc_zone()
1764 zone = list_first_entry(&zmd->reserved_seq_zones_list, in dmz_alloc_zone()
1767 atomic_dec(&zmd->nr_reserved_seq_zones); in dmz_alloc_zone()
1775 atomic_dec(&zmd->unmap_nr_rnd); in dmz_alloc_zone()
1777 atomic_dec(&zmd->unmap_nr_seq); in dmz_alloc_zone()
1780 dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone)); in dmz_alloc_zone()
1792 void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_free_zone() argument
1796 dmz_reset_zone(zmd, zone); in dmz_free_zone()
1800 list_add_tail(&zone->link, &zmd->unmap_rnd_list); in dmz_free_zone()
1801 atomic_inc(&zmd->unmap_nr_rnd); in dmz_free_zone()
1802 } else if (atomic_read(&zmd->nr_reserved_seq_zones) < in dmz_free_zone()
1803 zmd->nr_reserved_seq) { in dmz_free_zone()
1804 list_add_tail(&zone->link, &zmd->reserved_seq_zones_list); in dmz_free_zone()
1805 atomic_inc(&zmd->nr_reserved_seq_zones); in dmz_free_zone()
1807 list_add_tail(&zone->link, &zmd->unmap_seq_list); in dmz_free_zone()
1808 atomic_inc(&zmd->unmap_nr_seq); in dmz_free_zone()
1811 wake_up_all(&zmd->free_wq); in dmz_free_zone()
1818 void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone, in dmz_map_zone() argument
1822 dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone), in dmz_map_zone()
1826 list_add_tail(&dzone->link, &zmd->map_rnd_list); in dmz_map_zone()
1828 list_add_tail(&dzone->link, &zmd->map_seq_list); in dmz_map_zone()
1835 void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_unmap_zone() argument
1850 dzone_id = dmz_id(zmd, zone->bzone); in dmz_unmap_zone()
1866 dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED); in dmz_unmap_zone()
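dmz_alloc_zone(), dmz_map_zone(), dmz_unmap_zone() and dmz_free_zone() above form the allocate/map and unmap/free pairs; the unmap/free side is visible in dmz_put_chunk_mapping() earlier. A sketch of the allocation side under the map lock, assuming dmz_alloc_zone() returns NULL when no zone of the requested type is available:

/*
 * Sketch of the allocate/map pairing: grab an unmapped random zone and
 * record it as the data zone for a chunk, all under the map lock as in
 * dmz_get_chunk_mapping() above.
 */
static struct dm_zone *dmz_example_map_new_zone(struct dmz_metadata *zmd,
                                                unsigned int chunk)
{
        struct dm_zone *dzone;

        dmz_lock_map(zmd);
        dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);     /* NULL if none free (assumed) */
        if (dzone)
                dmz_map_zone(zmd, dzone, chunk);        /* updates the chunk map */
        dmz_unlock_map(zmd);

        return dzone;
}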
1907 static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd, in dmz_get_bitmap() argument
1911 sector_t bitmap_block = 1 + zmd->nr_map_blocks + in dmz_get_bitmap()
1912 (sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) + in dmz_get_bitmap()
1915 return dmz_get_mblock(zmd, bitmap_block); in dmz_get_bitmap()
1921 int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone, in dmz_copy_valid_blocks() argument
1928 while (chunk_block < zmd->dev->zone_nr_blocks) { in dmz_copy_valid_blocks()
1929 from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block); in dmz_copy_valid_blocks()
1932 to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block); in dmz_copy_valid_blocks()
1934 dmz_release_mblock(zmd, from_mblk); in dmz_copy_valid_blocks()
1939 dmz_dirty_mblock(zmd, to_mblk); in dmz_copy_valid_blocks()
1941 dmz_release_mblock(zmd, to_mblk); in dmz_copy_valid_blocks()
1942 dmz_release_mblock(zmd, from_mblk); in dmz_copy_valid_blocks()
1956 int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone, in dmz_merge_valid_blocks() argument
1963 while (chunk_block < zmd->dev->zone_nr_blocks) { in dmz_merge_valid_blocks()
1965 ret = dmz_first_valid_block(zmd, from_zone, &chunk_block); in dmz_merge_valid_blocks()
1970 ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks); in dmz_merge_valid_blocks()
1983 int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_validate_blocks() argument
1987 unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks; in dmz_validate_blocks()
1991 dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks", in dmz_validate_blocks()
1992 dmz_id(zmd, zone), (unsigned long long)chunk_block, in dmz_validate_blocks()
1999 mblk = dmz_get_bitmap(zmd, zone, chunk_block); in dmz_validate_blocks()
2009 dmz_dirty_mblock(zmd, mblk); in dmz_validate_blocks()
2012 dmz_release_mblock(zmd, mblk); in dmz_validate_blocks()
2021 dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u", in dmz_validate_blocks()
2022 dmz_id(zmd, zone), zone->weight, in dmz_validate_blocks()
2064 int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_invalidate_blocks() argument
2071 dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks", in dmz_invalidate_blocks()
2072 dmz_id(zmd, zone), (u64)chunk_block, nr_blocks); in dmz_invalidate_blocks()
2074 WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks); in dmz_invalidate_blocks()
2078 mblk = dmz_get_bitmap(zmd, zone, chunk_block); in dmz_invalidate_blocks()
2089 dmz_dirty_mblock(zmd, mblk); in dmz_invalidate_blocks()
2092 dmz_release_mblock(zmd, mblk); in dmz_invalidate_blocks()
2101 dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u", in dmz_invalidate_blocks()
2102 dmz_id(zmd, zone), zone->weight, n); in dmz_invalidate_blocks()
2112 static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_test_block() argument
2118 WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks); in dmz_test_block()
2121 mblk = dmz_get_bitmap(zmd, zone, chunk_block); in dmz_test_block()
2129 dmz_release_mblock(zmd, mblk); in dmz_test_block()
2138 static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_to_next_set_block() argument
2147 WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks); in dmz_to_next_set_block()
2151 mblk = dmz_get_bitmap(zmd, zone, chunk_block); in dmz_to_next_set_block()
2163 dmz_release_mblock(zmd, mblk); in dmz_to_next_set_block()
2180 int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_block_valid() argument
2185 valid = dmz_test_block(zmd, zone, chunk_block); in dmz_block_valid()
2190 return dmz_to_next_set_block(zmd, zone, chunk_block, in dmz_block_valid()
2191 zmd->dev->zone_nr_blocks - chunk_block, 0); in dmz_block_valid()
2200 int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_first_valid_block() argument
2206 ret = dmz_to_next_set_block(zmd, zone, start_block, in dmz_first_valid_block()
2207 zmd->dev->zone_nr_blocks - start_block, 1); in dmz_first_valid_block()
2214 return dmz_to_next_set_block(zmd, zone, start_block, in dmz_first_valid_block()
2215 zmd->dev->zone_nr_blocks - start_block, 0); in dmz_first_valid_block()
2249 static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_get_zone_weight() argument
2254 unsigned int nr_blocks = zmd->dev->zone_nr_blocks; in dmz_get_zone_weight()
2260 mblk = dmz_get_bitmap(zmd, zone, chunk_block); in dmz_get_zone_weight()
2272 dmz_release_mblock(zmd, mblk); in dmz_get_zone_weight()
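The bitmap helpers above track per-block validity inside each zone: a completed write validates the written blocks in the zone that received the data and invalidates the now-stale copies in the paired zone, and dmz_block_valid() tells a read which copy is current. A hedged sketch of the write-side bookkeeping:

/*
 * Hedged sketch of validity-bitmap bookkeeping after a write, using only
 * dmz_validate_blocks() and dmz_invalidate_blocks() from the matches above.
 */
static int dmz_example_mark_written(struct dmz_metadata *zmd,
                                    struct dm_zone *written_zone,
                                    struct dm_zone *other_zone,
                                    sector_t chunk_block, unsigned int nr_blocks)
{
        int ret;

        ret = dmz_validate_blocks(zmd, written_zone, chunk_block, nr_blocks);
        if (ret)
                return ret;

        if (other_zone)
                ret = dmz_invalidate_blocks(zmd, other_zone,
                                            chunk_block, nr_blocks);
        return ret;
}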
2284 static void dmz_cleanup_metadata(struct dmz_metadata *zmd) in dmz_cleanup_metadata() argument
2291 if (zmd->map_mblk) { in dmz_cleanup_metadata()
2292 for (i = 0; i < zmd->nr_map_blocks; i++) in dmz_cleanup_metadata()
2293 dmz_release_mblock(zmd, zmd->map_mblk[i]); in dmz_cleanup_metadata()
2294 kfree(zmd->map_mblk); in dmz_cleanup_metadata()
2295 zmd->map_mblk = NULL; in dmz_cleanup_metadata()
2300 if (zmd->sb[i].mblk) { in dmz_cleanup_metadata()
2301 dmz_free_mblock(zmd, zmd->sb[i].mblk); in dmz_cleanup_metadata()
2302 zmd->sb[i].mblk = NULL; in dmz_cleanup_metadata()
2307 while (!list_empty(&zmd->mblk_dirty_list)) { in dmz_cleanup_metadata()
2308 mblk = list_first_entry(&zmd->mblk_dirty_list, in dmz_cleanup_metadata()
2310 dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)", in dmz_cleanup_metadata()
2313 rb_erase(&mblk->node, &zmd->mblk_rbtree); in dmz_cleanup_metadata()
2314 dmz_free_mblock(zmd, mblk); in dmz_cleanup_metadata()
2317 while (!list_empty(&zmd->mblk_lru_list)) { in dmz_cleanup_metadata()
2318 mblk = list_first_entry(&zmd->mblk_lru_list, in dmz_cleanup_metadata()
2321 rb_erase(&mblk->node, &zmd->mblk_rbtree); in dmz_cleanup_metadata()
2322 dmz_free_mblock(zmd, mblk); in dmz_cleanup_metadata()
2326 root = &zmd->mblk_rbtree; in dmz_cleanup_metadata()
2328 dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree", in dmz_cleanup_metadata()
2331 dmz_free_mblock(zmd, mblk); in dmz_cleanup_metadata()
2335 dmz_drop_zones(zmd); in dmz_cleanup_metadata()
2337 mutex_destroy(&zmd->mblk_flush_lock); in dmz_cleanup_metadata()
2338 mutex_destroy(&zmd->map_lock); in dmz_cleanup_metadata()
2346 struct dmz_metadata *zmd; in dmz_ctr_metadata() local
2351 zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL); in dmz_ctr_metadata()
2352 if (!zmd) in dmz_ctr_metadata()
2355 zmd->dev = dev; in dmz_ctr_metadata()
2356 zmd->mblk_rbtree = RB_ROOT; in dmz_ctr_metadata()
2357 init_rwsem(&zmd->mblk_sem); in dmz_ctr_metadata()
2358 mutex_init(&zmd->mblk_flush_lock); in dmz_ctr_metadata()
2359 spin_lock_init(&zmd->mblk_lock); in dmz_ctr_metadata()
2360 INIT_LIST_HEAD(&zmd->mblk_lru_list); in dmz_ctr_metadata()
2361 INIT_LIST_HEAD(&zmd->mblk_dirty_list); in dmz_ctr_metadata()
2363 mutex_init(&zmd->map_lock); in dmz_ctr_metadata()
2364 atomic_set(&zmd->unmap_nr_rnd, 0); in dmz_ctr_metadata()
2365 INIT_LIST_HEAD(&zmd->unmap_rnd_list); in dmz_ctr_metadata()
2366 INIT_LIST_HEAD(&zmd->map_rnd_list); in dmz_ctr_metadata()
2368 atomic_set(&zmd->unmap_nr_seq, 0); in dmz_ctr_metadata()
2369 INIT_LIST_HEAD(&zmd->unmap_seq_list); in dmz_ctr_metadata()
2370 INIT_LIST_HEAD(&zmd->map_seq_list); in dmz_ctr_metadata()
2372 atomic_set(&zmd->nr_reserved_seq_zones, 0); in dmz_ctr_metadata()
2373 INIT_LIST_HEAD(&zmd->reserved_seq_zones_list); in dmz_ctr_metadata()
2375 init_waitqueue_head(&zmd->free_wq); in dmz_ctr_metadata()
2378 ret = dmz_init_zones(zmd); in dmz_ctr_metadata()
2383 ret = dmz_load_sb(zmd); in dmz_ctr_metadata()
2388 zid = dmz_id(zmd, zmd->sb_zone); in dmz_ctr_metadata()
2389 for (i = 0; i < zmd->nr_meta_zones << 1; i++) { in dmz_ctr_metadata()
2390 zone = dmz_get(zmd, zid + i); in dmz_ctr_metadata()
2397 ret = dmz_load_mapping(zmd); in dmz_ctr_metadata()
2407 zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16; in dmz_ctr_metadata()
2408 zmd->max_nr_mblks = zmd->min_nr_mblks + 512; in dmz_ctr_metadata()
2409 zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count; in dmz_ctr_metadata()
2410 zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan; in dmz_ctr_metadata()
2411 zmd->mblk_shrinker.seeks = DEFAULT_SEEKS; in dmz_ctr_metadata()
2414 ret = register_shrinker(&zmd->mblk_shrinker); in dmz_ctr_metadata()
2428 zmd->nr_meta_zones * 2); in dmz_ctr_metadata()
2430 zmd->nr_data_zones, zmd->nr_chunks); in dmz_ctr_metadata()
2432 zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd)); in dmz_ctr_metadata()
2434 zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq)); in dmz_ctr_metadata()
2436 zmd->nr_reserved_seq); in dmz_ctr_metadata()
2440 zmd->nr_meta_blocks, zmd->max_nr_mblks); in dmz_ctr_metadata()
2442 zmd->nr_map_blocks); in dmz_ctr_metadata()
2444 zmd->nr_bitmap_blocks); in dmz_ctr_metadata()
2446 *metadata = zmd; in dmz_ctr_metadata()
2450 dmz_cleanup_metadata(zmd); in dmz_ctr_metadata()
2451 kfree(zmd); in dmz_ctr_metadata()
2460 void dmz_dtr_metadata(struct dmz_metadata *zmd) in dmz_dtr_metadata() argument
2462 unregister_shrinker(&zmd->mblk_shrinker); in dmz_dtr_metadata()
2463 dmz_cleanup_metadata(zmd); in dmz_dtr_metadata()
2464 kfree(zmd); in dmz_dtr_metadata()
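dmz_ctr_metadata() and dmz_dtr_metadata() above bracket the lifetime of the whole metadata set: the constructor initializes the zone array, loads the super blocks and chunk mapping, and registers the shrinker; the destructor unregisters the shrinker and frees the caches. A sketch of the pairing; the constructor's parameter list is inferred from "zmd->dev = dev" and "*metadata = zmd" and is an assumption:

/*
 * Hedged sketch of the constructor/destructor pairing. The exact
 * dmz_ctr_metadata() signature is assumed from the matches above.
 */
static int dmz_example_setup_and_teardown(struct dmz_dev *dev)
{
        struct dmz_metadata *zmd;
        int ret;

        ret = dmz_ctr_metadata(dev, &zmd);      /* build zones, load sb + mapping */
        if (ret)
                return ret;

        /* ... use zmd via the accessors listed at the top of this section ... */

        dmz_dtr_metadata(zmd);                  /* unregister shrinker, free caches */
        return 0;
}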
2470 int dmz_resume_metadata(struct dmz_metadata *zmd) in dmz_resume_metadata() argument
2472 struct dmz_dev *dev = zmd->dev; in dmz_resume_metadata()
2480 zone = dmz_get(zmd, i); in dmz_resume_metadata()
2488 ret = dmz_update_zone(zmd, zone); in dmz_resume_metadata()
2506 dmz_invalidate_blocks(zmd, zone, zone->wp_block, in dmz_resume_metadata()