Lines Matching full:zone

33  * blocks indicating zone block validity.
39 * the first conventional zone found on disk.
87 * and give the zone ID (dzone_id) mapping the chunk on disk.
88 * This zone may be sequential or random. If it is a sequential
89 * zone, a second zone (bzone_id) used as a write buffer may
90 * also be specified. This second zone will always be a randomly
91 * writeable zone.
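The chunk-mapping entry described above can be pictured as two 32-bit zone IDs per chunk. The struct below is a minimal userspace sketch of that layout; the field names follow the comment, while the integer type, sentinel value, and macro name are assumptions for illustration, not the kernel definition.

#include <stdint.h>

/* Assumed sentinel for "no zone mapped"; illustrative only. */
#define MAP_UNMAPPED UINT32_MAX

/* One entry per chunk, indexed by chunk number. */
struct chunk_map_entry {
	uint32_t dzone_id;	/* data zone holding the chunk (random or sequential) */
	uint32_t bzone_id;	/* optional write-buffer zone; always randomly writeable */
};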
137 struct dm_zone *zone; member
174 /* Zone information array */
192 /* Zone allocation management */
221 static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_dev_zone_id() argument
223 if (WARN_ON(!zone)) in dmz_dev_zone_id()
226 return zone->id - zone->dev->zone_offset; in dmz_dev_zone_id()
229 sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_start_sect() argument
231 unsigned int zone_id = dmz_dev_zone_id(zmd, zone); in dmz_start_sect()
236 sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_start_block() argument
238 unsigned int zone_id = dmz_dev_zone_id(zmd, zone); in dmz_start_block()
311 struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL); in dmz_insert() local
313 if (!zone) in dmz_insert()
316 if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) { in dmz_insert()
317 kfree(zone); in dmz_insert()
321 INIT_LIST_HEAD(&zone->link); in dmz_insert()
322 atomic_set(&zone->refcount, 0); in dmz_insert()
323 zone->id = zone_id; in dmz_insert()
324 zone->chunk = DMZ_MAP_UNMAPPED; in dmz_insert()
325 zone->dev = dev; in dmz_insert()
327 return zone; in dmz_insert()
359 * The map lock also protects all the zone lists.
375 * the map lock and zone state management (active and reclaim state are
807 sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift; in dmz_write_sb()
1030 if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) { in dmz_check_sb()
1034 (u64)dsb->zone->id << zmd->zone_nr_blocks_shift); in dmz_check_sb()
1134 unsigned int zone_id = zmd->sb[0].zone->id; in dmz_lookup_secondary_sb()
1147 zmd->sb[1].zone = dmz_get(zmd, zone_id + 1); in dmz_lookup_secondary_sb()
1155 zmd->sb[1].zone = dmz_get(zmd, zone_id + i); in dmz_lookup_secondary_sb()
1160 zmd->sb[1].zone = NULL; in dmz_lookup_secondary_sb()
1206 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone); in dmz_recover_mblocks()
1208 zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone); in dmz_recover_mblocks()
1252 if (!zmd->sb[0].zone) { in dmz_load_sb()
1253 dmz_zmd_err(zmd, "Primary super block zone not set"); in dmz_load_sb()
1258 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone); in dmz_load_sb()
1259 zmd->sb[0].dev = zmd->sb[0].zone->dev; in dmz_load_sb()
1271 if (!zmd->sb[1].zone) { in dmz_load_sb()
1273 zmd->sb[0].zone->id + zmd->nr_meta_zones; in dmz_load_sb()
1275 zmd->sb[1].zone = dmz_get(zmd, zone_id); in dmz_load_sb()
1277 zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone); in dmz_load_sb()
1342 sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset); in dmz_load_sb()
1344 if (!dmz_is_meta(sb->zone)) { in dmz_load_sb()
1346 "Tertiary super block zone %u not marked as metadata zone", in dmz_load_sb()
1347 sb->zone->id); in dmz_load_sb()
1370 * Initialize a zone descriptor.
1377 struct dm_zone *zone; in dmz_init_zone() local
1379 zone = dmz_insert(zmd, idx, dev); in dmz_init_zone()
1380 if (IS_ERR(zone)) in dmz_init_zone()
1381 return PTR_ERR(zone); in dmz_init_zone()
1385 /* Ignore the possible runt (smaller) zone */ in dmz_init_zone()
1386 set_bit(DMZ_OFFLINE, &zone->flags); in dmz_init_zone()
1395 set_bit(DMZ_RND, &zone->flags); in dmz_init_zone()
1399 set_bit(DMZ_SEQ, &zone->flags); in dmz_init_zone()
1405 if (dmz_is_rnd(zone)) in dmz_init_zone()
1406 zone->wp_block = 0; in dmz_init_zone()
1408 zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start); in dmz_init_zone()
1411 set_bit(DMZ_OFFLINE, &zone->flags); in dmz_init_zone()
1413 set_bit(DMZ_READ_ONLY, &zone->flags); in dmz_init_zone()
1416 if (dmz_is_rnd(zone)) { in dmz_init_zone()
1418 if (zmd->nr_devs == 1 && !zmd->sb[0].zone) { in dmz_init_zone()
1419 /* Primary super block zone */ in dmz_init_zone()
1420 zmd->sb[0].zone = zone; in dmz_init_zone()
1427 * as metadata zone. in dmz_init_zone()
1429 set_bit(DMZ_META, &zone->flags); in dmz_init_zone()
1441 struct dm_zone *zone; in dmz_emulate_zones() local
1443 zone = dmz_insert(zmd, idx, dev); in dmz_emulate_zones()
1444 if (IS_ERR(zone)) in dmz_emulate_zones()
1445 return PTR_ERR(zone); in dmz_emulate_zones()
1446 set_bit(DMZ_CACHE, &zone->flags); in dmz_emulate_zones()
1447 zone->wp_block = 0; in dmz_emulate_zones()
1451 /* Disable runt zone */ in dmz_emulate_zones()
1452 set_bit(DMZ_OFFLINE, &zone->flags); in dmz_emulate_zones()
1468 struct dm_zone *zone = xa_load(&zmd->zones, idx); in dmz_drop_zones() local
1470 kfree(zone); in dmz_drop_zones()
1477 * Allocate and initialize zone descriptors using the zone
1496 /* Allocate zone array */ in dmz_init_zones()
1519 DMDEBUG("(%s): Using %zu B for zone information", in dmz_init_zones()
1532 * Primary superblock zone is always at zone 0 when multiple in dmz_init_zones()
1535 zmd->sb[0].zone = dmz_get(zmd, 0); in dmz_init_zones()
1554 * Get zone information and initialize zone descriptors. At the same in dmz_init_zones()
1556 * first randomly writable zone. in dmz_init_zones()
1573 struct dm_zone *zone = data; in dmz_update_zone_cb() local
1575 clear_bit(DMZ_OFFLINE, &zone->flags); in dmz_update_zone_cb()
1576 clear_bit(DMZ_READ_ONLY, &zone->flags); in dmz_update_zone_cb()
1578 set_bit(DMZ_OFFLINE, &zone->flags); in dmz_update_zone_cb()
1580 set_bit(DMZ_READ_ONLY, &zone->flags); in dmz_update_zone_cb()
1582 if (dmz_is_seq(zone)) in dmz_update_zone_cb()
1583 zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start); in dmz_update_zone_cb()
1585 zone->wp_block = 0; in dmz_update_zone_cb()
1590 * Update a zone's information.
1592 static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_update_zone() argument
1594 struct dmz_dev *dev = zone->dev; in dmz_update_zone()
1602 * Get zone information from disk. Since blkdev_report_zones() uses in dmz_update_zone()
1608 ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1, in dmz_update_zone()
1609 dmz_update_zone_cb, zone); in dmz_update_zone()
1615 dmz_dev_err(dev, "Get zone %u report failed", in dmz_update_zone()
1616 zone->id); in dmz_update_zone()
1625 * Check a zone write pointer position when the zone is marked
1629 struct dm_zone *zone) in dmz_handle_seq_write_err() argument
1631 struct dmz_dev *dev = zone->dev; in dmz_handle_seq_write_err()
1635 wp = zone->wp_block; in dmz_handle_seq_write_err()
1636 ret = dmz_update_zone(zmd, zone); in dmz_handle_seq_write_err()
1640 dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)", in dmz_handle_seq_write_err()
1641 zone->id, zone->wp_block, wp); in dmz_handle_seq_write_err()
1643 if (zone->wp_block < wp) { in dmz_handle_seq_write_err()
1644 dmz_invalidate_blocks(zmd, zone, zone->wp_block, in dmz_handle_seq_write_err()
1645 wp - zone->wp_block); in dmz_handle_seq_write_err()
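The fragments above compare the write pointer cached by the target with the one freshly reported by the device. A minimal sketch of that check, with illustrative names and an invalidation callback standing in for dmz_invalidate_blocks():

/* If the device's write pointer moved backwards, the blocks between the
 * reported and the cached positions can no longer be trusted. */
static void handle_wp_regression(unsigned int cached_wp, unsigned int reported_wp,
				 void (*invalidate)(unsigned int start, unsigned int nr))
{
	if (reported_wp < cached_wp)
		invalidate(reported_wp, cached_wp - reported_wp);
}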
1652 * Reset a zone write pointer.
1654 static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_reset_zone() argument
1662 if (dmz_is_offline(zone) || in dmz_reset_zone()
1663 dmz_is_readonly(zone) || in dmz_reset_zone()
1664 dmz_is_rnd(zone)) in dmz_reset_zone()
1667 if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) { in dmz_reset_zone()
1668 struct dmz_dev *dev = zone->dev; in dmz_reset_zone()
1671 dmz_start_sect(zmd, zone), in dmz_reset_zone()
1674 dmz_dev_err(dev, "Reset zone %u failed %d", in dmz_reset_zone()
1675 zone->id, ret); in dmz_reset_zone()
1681 clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); in dmz_reset_zone()
1682 zone->wp_block = 0; in dmz_reset_zone()
1687 static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
1707 /* Get chunk mapping table blocks and initialize zone mapping */ in dmz_load_mapping()
1720 /* Check data zone */ in dmz_load_mapping()
1726 dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u", in dmz_load_mapping()
1733 dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present", in dmz_load_mapping()
1748 /* Check buffer zone */ in dmz_load_mapping()
1754 dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u", in dmz_load_mapping()
1761 dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present", in dmz_load_mapping()
1766 dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u", in dmz_load_mapping()
1814 /* Unmapped data zone */ in dmz_load_mapping()
1856 * This rotates a zone to the end of its map list.
1858 static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in __dmz_lru_zone() argument
1860 if (list_empty(&zone->link)) in __dmz_lru_zone()
1863 list_del_init(&zone->link); in __dmz_lru_zone()
1864 if (dmz_is_seq(zone)) { in __dmz_lru_zone()
1865 /* LRU rotate sequential zone */ in __dmz_lru_zone()
1866 list_add_tail(&zone->link, &zone->dev->map_seq_list); in __dmz_lru_zone()
1867 } else if (dmz_is_cache(zone)) { in __dmz_lru_zone()
1868 /* LRU rotate cache zone */ in __dmz_lru_zone()
1869 list_add_tail(&zone->link, &zmd->map_cache_list); in __dmz_lru_zone()
1871 /* LRU rotate random zone */ in __dmz_lru_zone()
1872 list_add_tail(&zone->link, &zone->dev->map_rnd_list); in __dmz_lru_zone()
1878 * in LRU order. This rotates a zone to the end of the list.
1880 static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_lru_zone() argument
1882 __dmz_lru_zone(zmd, zone); in dmz_lru_zone()
1883 if (zone->bzone) in dmz_lru_zone()
1884 __dmz_lru_zone(zmd, zone->bzone); in dmz_lru_zone()
1888 * Wait for any zone to be freed.
1906 * Lock a zone for reclaim (set the zone RECLAIM bit).
1907 * Returns false if the zone cannot be locked or if it is already locked
1910 int dmz_lock_zone_reclaim(struct dm_zone *zone) in dmz_lock_zone_reclaim() argument
1913 if (dmz_is_active(zone)) in dmz_lock_zone_reclaim()
1916 return !test_and_set_bit(DMZ_RECLAIM, &zone->flags); in dmz_lock_zone_reclaim()
1920 * Clear a zone reclaim flag.
1922 void dmz_unlock_zone_reclaim(struct dm_zone *zone) in dmz_unlock_zone_reclaim() argument
1924 WARN_ON(dmz_is_active(zone)); in dmz_unlock_zone_reclaim()
1925 WARN_ON(!dmz_in_reclaim(zone)); in dmz_unlock_zone_reclaim()
1927 clear_bit_unlock(DMZ_RECLAIM, &zone->flags); in dmz_unlock_zone_reclaim()
1929 wake_up_bit(&zone->flags, DMZ_RECLAIM); in dmz_unlock_zone_reclaim()
1933 * Wait for a zone reclaim to complete.
1935 static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_wait_for_reclaim() argument
1939 set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags); in dmz_wait_for_reclaim()
1940 wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ); in dmz_wait_for_reclaim()
1941 clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags); in dmz_wait_for_reclaim()
1947 * Select a cache or random write zone for reclaim.
1953 struct dm_zone *zone, *maxw_z = NULL; in dmz_get_rnd_zone_for_reclaim() local
1956 /* If we have cache zones select from the cache zone list */ in dmz_get_rnd_zone_for_reclaim()
1966 * Find the buffer zone with the heaviest weight or the first (oldest) in dmz_get_rnd_zone_for_reclaim()
1967 * data zone that can be reclaimed. in dmz_get_rnd_zone_for_reclaim()
1969 list_for_each_entry(zone, zone_list, link) { in dmz_get_rnd_zone_for_reclaim()
1970 if (dmz_is_buf(zone)) { in dmz_get_rnd_zone_for_reclaim()
1971 dzone = zone->bzone; in dmz_get_rnd_zone_for_reclaim()
1977 dzone = zone; in dmz_get_rnd_zone_for_reclaim()
1989 * first zone that can be reclaimed regardless of its weight. in dmz_get_rnd_zone_for_reclaim()
1991 list_for_each_entry(zone, zone_list, link) { in dmz_get_rnd_zone_for_reclaim()
1992 if (dmz_is_buf(zone)) { in dmz_get_rnd_zone_for_reclaim()
1993 dzone = zone->bzone; in dmz_get_rnd_zone_for_reclaim()
1997 dzone = zone; in dmz_get_rnd_zone_for_reclaim()
2006 * Select a buffered sequential zone for reclaim.
2011 struct dm_zone *zone; in dmz_get_seq_zone_for_reclaim() local
2013 list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) { in dmz_get_seq_zone_for_reclaim()
2014 if (!zone->bzone) in dmz_get_seq_zone_for_reclaim()
2016 if (dmz_lock_zone_reclaim(zone)) in dmz_get_seq_zone_for_reclaim()
2017 return zone; in dmz_get_seq_zone_for_reclaim()
2024 * Select a zone for reclaim.
2029 struct dm_zone *zone = NULL; in dmz_get_zone_for_reclaim() local
2032 * Search for a zone candidate to reclaim: 2 cases are possible. in dmz_get_zone_for_reclaim()
2033 * (1) There are no free sequential zones. Then a random data zone in dmz_get_zone_for_reclaim()
2034 * cannot be reclaimed. So choose a sequential zone to reclaim so in dmz_get_zone_for_reclaim()
2035 * that afterward a random zone can be reclaimed. in dmz_get_zone_for_reclaim()
2036 * (2) At least one free sequential zone is available, then choose in dmz_get_zone_for_reclaim()
2037 * the oldest random zone (data or buffer) that can be locked. in dmz_get_zone_for_reclaim()
2041 zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx); in dmz_get_zone_for_reclaim()
2042 if (!zone) in dmz_get_zone_for_reclaim()
2043 zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle); in dmz_get_zone_for_reclaim()
2046 return zone; in dmz_get_zone_for_reclaim()
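The two cases enumerated in the comment above reduce to a simple ordering. The helper below is an illustrative sketch of that policy, not the kernel code; the picker callbacks are assumed names standing in for the sequential and random/cache selection routines.

#include <stdbool.h>

struct zone;

/* Without free sequential zones, reclaim a buffered sequential zone first so
 * that a random/cache zone can be freed afterwards; otherwise go straight for
 * the oldest reclaimable random or cache zone. */
static struct zone *pick_reclaim_victim(unsigned int nr_free_seq, int dev_idx, bool idle,
					struct zone *(*pick_seq)(int dev_idx),
					struct zone *(*pick_rnd)(int dev_idx, bool idle))
{
	struct zone *victim = NULL;

	if (nr_free_seq == 0)			/* case (1) */
		victim = pick_seq(dev_idx);
	if (!victim)				/* case (2), or fallback */
		victim = pick_rnd(dev_idx, idle);
	return victim;
}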
2050 * Get the zone mapping a chunk, if the chunk is mapped already.
2051 * If no mapping exists and the operation is WRITE, a zone is
2053 * The zone returned will be set to the active state.
2077 /* Allocate a random zone */ in dmz_get_chunk_mapping()
2091 /* The chunk is already mapped: get the mapping zone */ in dmz_get_chunk_mapping()
2114 * If the zone is being reclaimed, the chunk mapping may change in dmz_get_chunk_mapping()
2115 * to a different zone. So wait for reclaim and retry. Otherwise, in dmz_get_chunk_mapping()
2116 * activate the zone (this will prevent reclaim from touching it). in dmz_get_chunk_mapping()
2147 /* Empty buffer zone: reclaim it */ in dmz_put_chunk_mapping()
2154 /* Deactivate the data zone */ in dmz_put_chunk_mapping()
2159 /* Unbuffered inactive empty data zone: reclaim it */ in dmz_put_chunk_mapping()
2168 * Allocate and map a random zone to buffer a chunk
2169 * already mapped to a sequential zone.
2183 /* Allocate a random zone */ in dmz_get_chunk_buffer()
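As the unmap fragments further down suggest, the buffer zone and the sequential data zone it buffers point at each other through their bzone fields. A sketch of that pairing with an illustrative structure (not the kernel's struct dm_zone):

struct ex_zone {
	struct ex_zone *bzone;	/* data zone -> its buffer; buffer -> its data zone */
};

/* Pair a freshly allocated random buffer zone with a sequential data zone. */
static void attach_buffer_zone(struct ex_zone *dzone, struct ex_zone *bzone)
{
	dzone->bzone = bzone;
	bzone->bzone = dzone;
}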
2212 * Get an unmapped (free) zone.
2219 struct dm_zone *zone; in dmz_alloc_zone() local
2239 * No free zone: return NULL if this is not for reclaim. in dmz_alloc_zone()
2255 zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list, in dmz_alloc_zone()
2257 if (zone) { in dmz_alloc_zone()
2258 list_del_init(&zone->link); in dmz_alloc_zone()
2261 return zone; in dmz_alloc_zone()
2264 zone = list_first_entry(list, struct dm_zone, link); in dmz_alloc_zone()
2265 list_del_init(&zone->link); in dmz_alloc_zone()
2267 if (dmz_is_cache(zone)) in dmz_alloc_zone()
2269 else if (dmz_is_rnd(zone)) in dmz_alloc_zone()
2270 atomic_dec(&zone->dev->unmap_nr_rnd); in dmz_alloc_zone()
2272 atomic_dec(&zone->dev->unmap_nr_seq); in dmz_alloc_zone()
2274 if (dmz_is_offline(zone)) { in dmz_alloc_zone()
2275 dmz_zmd_warn(zmd, "Zone %u is offline", zone->id); in dmz_alloc_zone()
2276 zone = NULL; in dmz_alloc_zone()
2279 if (dmz_is_meta(zone)) { in dmz_alloc_zone()
2280 dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id); in dmz_alloc_zone()
2281 zone = NULL; in dmz_alloc_zone()
2284 return zone; in dmz_alloc_zone()
2288 * Free a zone.
2291 void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_free_zone() argument
2293 /* If this is a sequential zone, reset it */ in dmz_free_zone()
2294 if (dmz_is_seq(zone)) in dmz_free_zone()
2295 dmz_reset_zone(zmd, zone); in dmz_free_zone()
2297 /* Return the zone to the unmap list for its type */ in dmz_free_zone()
2298 if (dmz_is_cache(zone)) { in dmz_free_zone()
2299 list_add_tail(&zone->link, &zmd->unmap_cache_list); in dmz_free_zone()
2301 } else if (dmz_is_rnd(zone)) { in dmz_free_zone()
2302 list_add_tail(&zone->link, &zone->dev->unmap_rnd_list); in dmz_free_zone()
2303 atomic_inc(&zone->dev->unmap_nr_rnd); in dmz_free_zone()
2304 } else if (dmz_is_reserved(zone)) { in dmz_free_zone()
2305 list_add_tail(&zone->link, &zmd->reserved_seq_zones_list); in dmz_free_zone()
2308 list_add_tail(&zone->link, &zone->dev->unmap_seq_list); in dmz_free_zone()
2309 atomic_inc(&zone->dev->unmap_nr_seq); in dmz_free_zone()
2316 * Map a chunk to a zone.
2335 * Unmap a zone.
2338 void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_unmap_zone() argument
2340 unsigned int chunk = zone->chunk; in dmz_unmap_zone()
2348 if (test_and_clear_bit(DMZ_BUF, &zone->flags)) { in dmz_unmap_zone()
2350 * Unmapping the chunk buffer zone: clear only in dmz_unmap_zone()
2353 dzone_id = zone->bzone->id; in dmz_unmap_zone()
2354 zone->bzone->bzone = NULL; in dmz_unmap_zone()
2355 zone->bzone = NULL; in dmz_unmap_zone()
2359 * Unmapping the chunk data zone: the zone must in dmz_unmap_zone()
2362 if (WARN_ON(zone->bzone)) { in dmz_unmap_zone()
2363 zone->bzone->bzone = NULL; in dmz_unmap_zone()
2364 zone->bzone = NULL; in dmz_unmap_zone()
2371 zone->chunk = DMZ_MAP_UNMAPPED; in dmz_unmap_zone()
2372 list_del_init(&zone->link); in dmz_unmap_zone()
2408 * Get the bitmap block storing the bit for chunk_block in zone.
2411 struct dm_zone *zone, in dmz_get_bitmap() argument
2415 (sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) + in dmz_get_bitmap()
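The fragment above is part of the bitmap-block index computation: each zone owns a fixed run of bitmap blocks, and the block covering a given chunk-relative block is found by indexing into that run. A sketch of the relative index, assuming 4 KB metadata blocks (32768 bits, hence a shift of 15); names and the constant are assumptions:

static unsigned long bitmap_block_index(unsigned int zone_id,
					unsigned int zone_nr_bitmap_blocks,
					unsigned long chunk_block)
{
	const unsigned int block_shift_bits = 15;	/* log2(4096 * 8) bits per block */

	return (unsigned long)zone_id * zone_nr_bitmap_blocks +
	       (chunk_block >> block_shift_bits);
}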
2467 /* Get a valid region from the source zone */ in dmz_merge_valid_blocks()
2486 int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_validate_blocks() argument
2494 dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks", in dmz_validate_blocks()
2495 zone->id, (unsigned long long)chunk_block, in dmz_validate_blocks()
2502 mblk = dmz_get_bitmap(zmd, zone, chunk_block); in dmz_validate_blocks()
2521 if (likely(zone->weight + n <= zone_nr_blocks)) in dmz_validate_blocks()
2522 zone->weight += n; in dmz_validate_blocks()
2524 dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u", in dmz_validate_blocks()
2525 zone->id, zone->weight, in dmz_validate_blocks()
2527 zone->weight = zone_nr_blocks; in dmz_validate_blocks()
2567 int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_invalidate_blocks() argument
2574 dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks", in dmz_invalidate_blocks()
2575 zone->id, (u64)chunk_block, nr_blocks); in dmz_invalidate_blocks()
2581 mblk = dmz_get_bitmap(zmd, zone, chunk_block); in dmz_invalidate_blocks()
2601 if (zone->weight >= n) in dmz_invalidate_blocks()
2602 zone->weight -= n; in dmz_invalidate_blocks()
2604 dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u", in dmz_invalidate_blocks()
2605 zone->id, zone->weight, n); in dmz_invalidate_blocks()
2606 zone->weight = 0; in dmz_invalidate_blocks()
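Both the validate and invalidate paths above adjust the zone weight (the count of valid blocks) and clamp it so an inconsistency between bitmap and counter cannot wrap it or push it past the zone size. A compact sketch of that bookkeeping, with illustrative names:

static unsigned int adjust_weight(unsigned int weight, long delta,
				  unsigned int zone_nr_blocks)
{
	long w = (long)weight + delta;

	if (w < 0)			/* invalidated more than was accounted */
		return 0;
	if (w > (long)zone_nr_blocks)	/* validated more than the zone holds */
		return zone_nr_blocks;
	return (unsigned int)w;
}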
2615 static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_test_block() argument
2624 mblk = dmz_get_bitmap(zmd, zone, chunk_block); in dmz_test_block()
2641 static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_to_next_set_block() argument
2655 mblk = dmz_get_bitmap(zmd, zone, chunk_block); in dmz_to_next_set_block()
2684 int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_block_valid() argument
2689 valid = dmz_test_block(zmd, zone, chunk_block); in dmz_block_valid()
2694 return dmz_to_next_set_block(zmd, zone, chunk_block, in dmz_block_valid()
2699 * Find the first valid block from @chunk_block in @zone.
2704 int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone, in dmz_first_valid_block() argument
2710 ret = dmz_to_next_set_block(zmd, zone, start_block, in dmz_first_valid_block()
2718 return dmz_to_next_set_block(zmd, zone, start_block, in dmz_first_valid_block()
2751 * Get a zone's weight.
2753 static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_get_zone_weight() argument
2764 mblk = dmz_get_bitmap(zmd, zone, chunk_block); in dmz_get_zone_weight()
2782 zone->weight = n; in dmz_get_zone_weight()
2794 /* Release zone mapping resources */ in dmz_cleanup_metadata()
2838 /* Free the zone descriptors */ in dmz_cleanup_metadata()
2881 struct dm_zone *zone; in dmz_ctr_metadata() local
2909 /* Initialize zone descriptors */ in dmz_ctr_metadata()
2921 zone = dmz_get(zmd, zmd->sb[0].zone->id + i); in dmz_ctr_metadata()
2922 if (!zone) { in dmz_ctr_metadata()
2924 "metadata zone %u not present", i); in dmz_ctr_metadata()
2928 if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) { in dmz_ctr_metadata()
2930 "metadata zone %d is not random", i); in dmz_ctr_metadata()
2934 set_bit(DMZ_META, &zone->flags); in dmz_ctr_metadata()
2985 dmz_zmd_debug(zmd, " %u data zone mapping blocks", in dmz_ctr_metadata()
3012 * Check zone information on resume.
3016 struct dm_zone *zone; in dmz_resume_metadata() local
3023 zone = dmz_get(zmd, i); in dmz_resume_metadata()
3024 if (!zone) { in dmz_resume_metadata()
3025 dmz_zmd_err(zmd, "Unable to get zone %u", i); in dmz_resume_metadata()
3028 wp_block = zone->wp_block; in dmz_resume_metadata()
3030 ret = dmz_update_zone(zmd, zone); in dmz_resume_metadata()
3032 dmz_zmd_err(zmd, "Broken zone %u", i); in dmz_resume_metadata()
3036 if (dmz_is_offline(zone)) { in dmz_resume_metadata()
3037 dmz_zmd_warn(zmd, "Zone %u is offline", i); in dmz_resume_metadata()
3042 if (!dmz_is_seq(zone)) in dmz_resume_metadata()
3043 zone->wp_block = 0; in dmz_resume_metadata()
3044 else if (zone->wp_block != wp_block) { in dmz_resume_metadata()
3045 dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)", in dmz_resume_metadata()
3046 i, (u64)zone->wp_block, (u64)wp_block); in dmz_resume_metadata()
3047 zone->wp_block = wp_block; in dmz_resume_metadata()
3048 dmz_invalidate_blocks(zmd, zone, zone->wp_block, in dmz_resume_metadata()
3049 zmd->zone_nr_blocks - zone->wp_block); in dmz_resume_metadata()
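On resume, the fragments above re-read each zone and compare the reported write pointer with the cached one; for a sequential zone a mismatch keeps the cached value and invalidates everything from that point to the end of the zone. A minimal sketch of that decision, with an invalidation callback standing in for dmz_invalidate_blocks():

static unsigned int resume_check_wp(unsigned int cached_wp, unsigned int reported_wp,
				    unsigned int zone_nr_blocks,
				    void (*invalidate)(unsigned int start, unsigned int nr))
{
	if (reported_wp != cached_wp)
		invalidate(cached_wp, zone_nr_blocks - cached_wp);
	return cached_wp;	/* the cached position is kept */
}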