Lines Matching full:zones
32 * followed on disk by the mapping table of chunks to zones and the bitmap
38 * All metadata blocks are stored in conventional zones, starting from
57 /* The number of sequential zones reserved for reclaim */
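The matches above outline the on-disk metadata layout: the superblock, then the chunk-to-zone mapping table, then the block validity bitmaps, all stored in conventional zones, with a few sequential zones set aside for reclaim. A minimal sketch of that layout arithmetic follows; the entry fields, the demo_* names and the 4096-byte metadata block size are assumptions for illustration, not taken from the file.

#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_BLOCK_SIZE 4096 /* assumed metadata block size */

/* One chunk mapping entry: data zone id plus optional buffer zone id (assumed layout). */
struct demo_map_entry {
        __le32 dzone_id;
        __le32 bzone_id;
};

#define DEMO_MAP_ENTRIES (DEMO_BLOCK_SIZE / sizeof(struct demo_map_entry))

/* Block index of the first bitmap block: superblock, then the chunk map, then bitmaps. */
static sector_t demo_bitmap_start_block(sector_t sb_block, unsigned int nr_chunks)
{
        unsigned int nr_map_blocks = DIV_ROUND_UP(nr_chunks, DEMO_MAP_ENTRIES);

        return sb_block + 1 + nr_map_blocks;
}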
175 struct xarray zones; member
305 return xa_load(&zmd->zones, zone_id); in dmz_get()
316 if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) { in dmz_insert()
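dmz_get() and dmz_insert() above wrap the per-target xarray of zone descriptors. A minimal sketch of that lookup/insert pattern, assuming a hypothetical demo_zone structure; only the xa_load()/xa_insert() calls mirror the listing.

#include <linux/gfp.h>
#include <linux/xarray.h>

struct demo_zone {
        unsigned int id;        /* hypothetical zone descriptor */
};

/* Look a zone up by id; xa_load() returns NULL when no entry is stored at that index. */
static struct demo_zone *demo_get_zone(struct xarray *zones, unsigned int zone_id)
{
        return xa_load(zones, zone_id);
}

/* Insert a zone descriptor; xa_insert() returns -EBUSY if the index is already in use. */
static int demo_insert_zone(struct xarray *zones, struct demo_zone *zone)
{
        return xa_insert(zones, zone->id, zone, GFP_KERNEL);
}

One reason to keep zone descriptors in an xarray rather than a flat array is that each descriptor can be allocated and dropped individually.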
1089 dmz_dev_err(dev, "Invalid number of reserved sequential zones"); in dmz_check_sb()
1425 * Tertiary superblock zones are always at the in dmz_init_zone()
1461 * Free zones descriptors.
1468 struct dm_zone *zone = xa_load(&zmd->zones, idx); in dmz_drop_zones()
1471 xa_erase(&zmd->zones, idx); in dmz_drop_zones()
1473 xa_destroy(&zmd->zones); in dmz_drop_zones()
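The dmz_drop_zones() matches show the teardown side of the same xarray. A sketch, assuming each stored entry was allocated with kmalloc() and is therefore freed with kfree():

#include <linux/slab.h>
#include <linux/xarray.h>

static void demo_drop_zones(struct xarray *zones, unsigned int nr_zones)
{
        unsigned int idx;

        for (idx = 0; idx < nr_zones; idx++) {
                void *zone = xa_load(zones, idx);

                if (zone) {
                        xa_erase(zones, idx);
                        kfree(zone);
                }
        }
        xa_destroy(zones);
}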
1514 DMERR("(%s): No zones found", zmd->devname); in dmz_init_zones()
1517 xa_init(&zmd->zones); in dmz_init_zones()
1525 DMDEBUG("(%s): Failed to emulate zones, error %d", in dmz_init_zones()
1544 DMDEBUG("(%s): Failed to report zones, error %d", in dmz_init_zones()
1561 DMDEBUG("(%s): Failed to report zones, error %d", in dmz_init_zones()
1659 * Ignore offline zones, read only zones, in dmz_reset_zone()
1660 * and conventional zones. in dmz_reset_zone()
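dmz_reset_zone() only resets zones that can actually be reset. The real code uses dm-zoned's own per-zone flags; this sketch expresses the same filter with the generic struct blk_zone condition codes, which is an assumption about how one might rewrite it:

#include <linux/blkzoned.h>
#include <linux/types.h>

/* Conventional, offline and read-only zones must not be reset; empty ones need no reset. */
static bool demo_zone_needs_reset(const struct blk_zone *blkz)
{
        if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return false;
        if (blkz->cond == BLK_ZONE_COND_OFFLINE ||
            blkz->cond == BLK_ZONE_COND_READONLY)
                return false;
        return blkz->cond != BLK_ZONE_COND_EMPTY;
}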
1789 * At this point, only meta zones and mapped data zones were in dmz_load_mapping()
1790 * fully initialized. All remaining zones are unmapped data in dmz_load_mapping()
1791 * zones. Finish initializing those here. in dmz_load_mapping()
1855 * The list of mapped zones is maintained in LRU order.
1877 * The list of mapped random zones is maintained
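Both lists mentioned above are kept in LRU order so that reclaim can start from the least recently used zone. A minimal sketch of that bookkeeping with the kernel list API; the demo_zone structure and helper names are assumptions:

#include <linux/list.h>

struct demo_zone {
        struct list_head link;
};

/* A zone that was just used moves to the tail, so the head stays least recently used. */
static void demo_touch_zone(struct list_head *map_list, struct demo_zone *zone)
{
        list_move_tail(&zone->link, map_list);
}

/* Reclaim candidates are therefore picked from the head of the list. */
static struct demo_zone *demo_lru_candidate(struct list_head *map_list)
{
        return list_first_entry_or_null(map_list, struct demo_zone, link);
}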
1912 /* Active zones cannot be reclaimed */ in dmz_lock_zone_reclaim()
1956 /* If we have cache zones, select from the cache zone list */ in dmz_get_rnd_zone_for_reclaim()
1959 /* Try to reclaim random zones, too, when idle */ in dmz_get_rnd_zone_for_reclaim()
1987 * If we come here, none of the zones inspected could be locked for in dmz_get_rnd_zone_for_reclaim()
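The reclaim-selection matches above combine two rules: a zone with active I/O cannot be reclaimed, and candidates come from the cache zone list first, or the random zone list when idle. A sketch of the try-lock loop, assuming a refcount for in-flight BIOs and a reclaim flag bit; the field and flag names are illustrative:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/types.h>

#define DEMO_ZONE_RECLAIM 0     /* assumed flag bit */

struct demo_zone {
        struct list_head link;
        unsigned long flags;
        atomic_t refcount;      /* non-zero while BIOs target the zone */
};

/* An active zone (in-flight BIOs) cannot be reclaimed; otherwise try to take the flag. */
static bool demo_lock_zone_reclaim(struct demo_zone *zone)
{
        if (atomic_read(&zone->refcount))
                return false;
        return !test_and_set_bit(DEMO_ZONE_RECLAIM, &zone->flags);
}

/* Walk a zone list and return the first zone that could be locked, or NULL. */
static struct demo_zone *demo_pick_reclaim_zone(struct list_head *zone_list)
{
        struct demo_zone *zone;

        list_for_each_entry(zone, zone_list, link) {
                if (demo_lock_zone_reclaim(zone))
                        return zone;
        }
        return NULL;
}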
2033 * (1) There are no free sequential zones. Then a random data zone in dmz_get_zone_for_reclaim()
2131 * Write and discard change the block validity of data zones and their buffer
2132 * zones. Check here that valid blocks are still present. If all blocks are
2133 * invalid, the zones can be unmapped on the fly without waiting for reclaim
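The comment above says a data zone (and its buffer zone) can be unmapped immediately once no valid blocks remain. A sketch of that emptiness test over plain in-memory validity bitmaps; the real code reads the bitmap blocks from metadata, so this is a simplification:

#include <linux/bitmap.h>
#include <linux/types.h>

/* True if neither the data zone nor its optional buffer zone holds a valid block. */
static bool demo_can_unmap(const unsigned long *dzone_bitmap,
                           const unsigned long *bzone_bitmap,
                           unsigned int zone_nr_blocks)
{
        if (bitmap_weight(dzone_bitmap, zone_nr_blocks))
                return false;
        if (bzone_bitmap && bitmap_weight(bzone_bitmap, zone_nr_blocks))
                return false;
        return true;
}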
2222 /* Schedule reclaim to ensure free zones are available */ in dmz_alloc_zone()
2253 * Fall back to the reserved sequential zones in dmz_alloc_zone()
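dmz_alloc_zone() first kicks reclaim so free zones keep appearing, and falls back to the reserved sequential zones when the preferred unmapped list is empty. A sketch of that fallback with hypothetical list names; scheduling of the reclaim work is left out:

#include <linux/list.h>

struct demo_zone {
        struct list_head link;
};

/* Take an unmapped zone from the preferred list, else from the reserved sequential zones. */
static struct demo_zone *demo_alloc_zone(struct list_head *unmap_list,
                                         struct list_head *reserved_seq_list)
{
        struct demo_zone *zone;

        zone = list_first_entry_or_null(unmap_list, struct demo_zone, link);
        if (!zone)
                zone = list_first_entry_or_null(reserved_seq_list,
                                                struct demo_zone, link);
        if (zone)
                list_del_init(&zone->link);
        return zone;
}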
2430 /* Get the zones bitmap blocks */ in dmz_copy_valid_blocks()
2465 /* Get the zones bitmap blocks */ in dmz_merge_valid_blocks()
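dmz_copy_valid_blocks() and dmz_merge_valid_blocks() both operate on the per-zone bitmap blocks. Over plain in-memory bitmaps the two operations reduce to a copy and an OR, shown below as a simplified sketch (the real functions fetch the bitmap blocks from metadata first):

#include <linux/bitmap.h>

/* Replace the destination zone's validity bits with the source zone's. */
static void demo_copy_valid_blocks(unsigned long *to_bitmap,
                                   const unsigned long *from_bitmap,
                                   unsigned int zone_nr_blocks)
{
        bitmap_copy(to_bitmap, from_bitmap, zone_nr_blocks);
}

/* Merge the source zone's validity bits into the destination zone's. */
static void demo_merge_valid_blocks(unsigned long *to_bitmap,
                                    const unsigned long *from_bitmap,
                                    unsigned int zone_nr_blocks)
{
        bitmap_or(to_bitmap, to_bitmap, from_bitmap, zone_nr_blocks);
}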
2861 dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors (offset %llu)", in dmz_print_dev()
2867 dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors", in dmz_print_dev()
2919 /* Set metadata zones starting from sb_zone */ in dmz_ctr_metadata()
2944 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow in dmz_ctr_metadata()
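The dmz_ctr_metadata() matches describe sizing the metadata block cache: always keep the superblocks, the chunk map and enough bitmap blocks for roughly 16 zones when idle. A sketch of that kind of lower bound; the exact formula and constants here are assumptions, not read from the file:

/* Minimum cached metadata blocks: 2 superblocks + chunk map + bitmaps of 16 zones (assumed). */
static unsigned long demo_min_nr_mblks(unsigned long nr_map_blocks,
                                       unsigned long zone_nr_bitmap_blocks)
{
        return 2 + nr_map_blocks + zone_nr_bitmap_blocks * 16;
}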
2964 dmz_zmd_info(zmd, " %u zones of %llu 512-byte logical sectors", in dmz_ctr_metadata()
2966 dmz_zmd_debug(zmd, " %u metadata zones", in dmz_ctr_metadata()
2968 dmz_zmd_debug(zmd, " %u data zones for %u chunks", in dmz_ctr_metadata()
2970 dmz_zmd_debug(zmd, " %u cache zones (%u unmapped)", in dmz_ctr_metadata()
2973 dmz_zmd_debug(zmd, " %u random zones (%u unmapped)", in dmz_ctr_metadata()
2976 dmz_zmd_debug(zmd, " %u sequential zones (%u unmapped)", in dmz_ctr_metadata()
2980 dmz_zmd_debug(zmd, " %u reserved sequential data zones", in dmz_ctr_metadata()
3021 /* Check zones */ in dmz_resume_metadata()