Lines matching references to "t" in drivers/md/dm-table.c (Linux device-mapper table handling)
97 static inline sector_t *get_node(struct dm_table *t, in get_node() argument
100 return t->index[l] + (n * KEYS_PER_NODE); in get_node()
107 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) in high() argument
109 for (; l < t->depth - 1; l++) in high()
112 if (n >= t->counts[l]) in high()
115 return get_node(t, l, n)[KEYS_PER_NODE - 1]; in high()
122 static int setup_btree_index(unsigned int l, struct dm_table *t) in setup_btree_index() argument
127 for (n = 0U; n < t->counts[l]; n++) { in setup_btree_index()
128 node = get_node(t, l, n); in setup_btree_index()
131 node[k] = high(t, l + 1, get_child(n, k)); in setup_btree_index()
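
Taken together, get_node(), high() and setup_btree_index() describe dm-table's lookup structure: each level l is a flat array of counts[l] nodes holding KEYS_PER_NODE sector keys, and key k of an internal node caches the highest sector reachable through its k-th child one level down. Below is a minimal userspace model of that arithmetic, a sketch rather than kernel code: KEYS_PER_NODE is fixed at 8 on the assumption of 64-byte nodes of 8-byte keys (the kernel derives it from L1_CACHE_BYTES), and get_child() follows the kernel's n * CHILDREN_PER_NODE + k convention. A worked construction of the per-level sizes follows the dm_table_build_index() fragments further down.

/* Userspace model of dm-table's flattened btree; a sketch, not kernel code. */
#include <stdint.h>

typedef uint64_t sector_t;

#define KEYS_PER_NODE     8     /* assumed: 64-byte nodes of 8-byte keys */
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
#define MAX_DEPTH         16

struct table {
        unsigned depth;                 /* levels; leaves sit at depth - 1 */
        unsigned counts[MAX_DEPTH];     /* nodes per level */
        sector_t *index[MAX_DEPTH];     /* flat key array per level */
};

/* node n of level l: each preceding node contributes KEYS_PER_NODE keys */
static sector_t *get_node(struct table *t, unsigned l, unsigned n)
{
        return t->index[l] + n * KEYS_PER_NODE;
}

/* child k of node n lives at this position in the next level down */
static unsigned get_child(unsigned n, unsigned k)
{
        return n * CHILDREN_PER_NODE + k;
}

/* highest sector reachable through node n of level l: chase the right-most
 * child down to the leaf level, then take that leaf's last key */
static sector_t high(struct table *t, unsigned l, unsigned n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])          /* child doesn't exist: open-ended */
                return (sector_t) -1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/* fill internal level l entirely from the level below it */
static void setup_btree_index(unsigned l, struct table *t)
{
        unsigned n, k;
        sector_t *node;

        for (n = 0; n < t->counts[l]; n++) {
                node = get_node(t, l, n);
                for (k = 0; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }
}
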
159 static int alloc_targets(struct dm_table *t, unsigned int num) in alloc_targets() argument
175 vfree(t->highs); in alloc_targets()
177 t->num_allocated = num; in alloc_targets()
178 t->highs = n_highs; in alloc_targets()
179 t->targets = n_targets; in alloc_targets()
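
alloc_targets() appears here only by its tail. The surrounding shape, reconstructed below as a sketch (dm_vcalloc() is assumed to be device-mapper's zeroing vmalloc helper), makes the pairing of the two arrays explicit: a single allocation carries num sector_t high keys followed immediately by num struct dm_target slots, which is why one vfree(t->highs) releases both.

/* Sketch of alloc_targets(), reconstructed around the listed fragments. */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;

        /* one allocation: num high keys, then num target slots */
        n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
                                          sizeof(sector_t));
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        memset(n_highs, -1, sizeof(*n_highs) * num);    /* -1 marks unused */
        vfree(t->highs);                /* drop any previous allocation */

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}
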
187 struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); in dm_table_create() local
189 if (!t) in dm_table_create()
192 INIT_LIST_HEAD(&t->devices); in dm_table_create()
193 INIT_LIST_HEAD(&t->target_callbacks); in dm_table_create()
201 kfree(t); in dm_table_create()
205 if (alloc_targets(t, num_targets)) { in dm_table_create()
206 kfree(t); in dm_table_create()
210 t->type = DM_TYPE_NONE; in dm_table_create()
211 t->mode = mode; in dm_table_create()
212 t->md = md; in dm_table_create()
213 *result = t; in dm_table_create()
231 void dm_table_destroy(struct dm_table *t) in dm_table_destroy() argument
235 if (!t) in dm_table_destroy()
239 if (t->depth >= 2) in dm_table_destroy()
240 vfree(t->index[t->depth - 2]); in dm_table_destroy()
243 for (i = 0; i < t->num_targets; i++) { in dm_table_destroy()
244 struct dm_target *tgt = t->targets + i; in dm_table_destroy()
252 vfree(t->highs); in dm_table_destroy()
255 free_devices(&t->devices, t->md); in dm_table_destroy()
257 dm_free_md_mempools(t->mempools); in dm_table_destroy()
259 kfree(t); in dm_table_destroy()
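
The destroy path frees in the reverse order of construction: interior index levels first (the leaf level aliases t->highs), then each target through its destructor, then the combined highs/targets allocation, the device list, and the mempools. The per-target loop body elided between lines 244 and 252 looks roughly like this sketch, where dm_put_target_type() is the reference drop pairing with the type lookup in dm_table_add_target():

/* Sketch of the per-target teardown inside dm_table_destroy(). */
for (i = 0; i < t->num_targets; i++) {
        struct dm_target *tgt = t->targets + i;

        if (tgt->type->dtr)
                tgt->type->dtr(tgt);    /* target-specific cleanup */

        dm_put_target_type(tgt->type);  /* drop the module/type reference */
}
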
432 struct dm_table *t = ti->table; in dm_get_device() local
434 BUG_ON(!t); in dm_get_device()
440 dd = find_device(&t->devices, dev); in dm_get_device()
446 if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) { in dm_get_device()
452 list_add(&dd->list, &t->devices); in dm_get_device()
456 r = upgrade_mode(dd, mode, t->md); in dm_get_device()
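
dm_get_device() is what a target constructor calls to resolve and pin an underlying device; the fragments above show it reusing an existing entry on t->devices or adding a new one, then upgrading the open mode if needed. A sketch of the caller side, modeled on the dm-linear target but simplified (struct linear_c and the argv layout are illustrative):

/* Sketch of a target ctr consuming dm_get_device(); modeled on dm-linear. */
#include <linux/device-mapper.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct linear_c {
        struct dm_dev *dev;
        sector_t start;
};

static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct linear_c *lc;
        unsigned long long start;
        int ret;

        if (argc != 2) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        lc = kmalloc(sizeof(*lc), GFP_KERNEL);
        if (!lc) {
                ti->error = "Cannot allocate linear context";
                return -ENOMEM;
        }

        if (sscanf(argv[1], "%llu", &start) != 1) {
                ti->error = "Invalid device sector";
                ret = -EINVAL;
                goto bad;
        }
        lc->start = (sector_t) start;

        /* registers the device on ti->table->devices, as seen above */
        ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                            &lc->dev);
        if (ret) {
                ti->error = "Device lookup failed";
                goto bad;
        }

        ti->private = lc;
        return 0;

bad:
        kfree(lc);
        return ret;
}
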
708 int dm_table_add_target(struct dm_table *t, const char *type, in dm_table_add_target() argument
715 if (t->singleton) { in dm_table_add_target()
717 dm_device_name(t->md), t->targets->type->name); in dm_table_add_target()
721 BUG_ON(t->num_targets >= t->num_allocated); in dm_table_add_target()
723 tgt = t->targets + t->num_targets; in dm_table_add_target()
727 DMERR("%s: zero-length target", dm_device_name(t->md)); in dm_table_add_target()
733 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); in dm_table_add_target()
738 if (t->num_targets) { in dm_table_add_target()
742 t->singleton = true; in dm_table_add_target()
745 if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) { in dm_table_add_target()
750 if (t->immutable_target_type) { in dm_table_add_target()
751 if (t->immutable_target_type != tgt->type) { in dm_table_add_target()
756 if (t->num_targets) { in dm_table_add_target()
760 t->immutable_target_type = tgt->type; in dm_table_add_target()
764 t->integrity_added = 1; in dm_table_add_target()
766 tgt->table = t; in dm_table_add_target()
774 if (!adjoin(t, tgt)) { in dm_table_add_target()
790 t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; in dm_table_add_target()
794 dm_device_name(t->md), type); in dm_table_add_target()
799 DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error); in dm_table_add_target()
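
A table is built one target line at a time: dm_table_create() sizes the arrays, each dm_table_add_target() runs the named target type's constructor and records the target's end sector in t->highs, and dm_table_complete() (further down) finalizes everything. Roughly what the dm-ioctl table_load path does, sketched and heavily simplified:

/* Sketch of the in-kernel table build sequence (simplified; 'md' is an
 * existing mapped_device, and the linear target line is illustrative). */
static int build_example_table(struct mapped_device *md,
                               struct dm_table **result)
{
        /* dm_split_args() parses params destructively: keep them writable */
        char params[] = "/dev/sda 0";
        struct dm_table *t;
        int r;

        r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
        if (r)
                return r;

        /* one linear target mapping sectors 0..1023 of the table */
        r = dm_table_add_target(t, "linear", 0, 1024, params);
        if (r)
                goto err;

        /* builds the btree index, fixes the type, allocates mempools */
        r = dm_table_complete(t);
        if (r)
                goto err;

        *result = t;
        return 0;

err:
        dm_table_destroy(t);
        return r;
}
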
875 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) in dm_table_set_type() argument
877 t->type = type; in dm_table_set_type()
898 bool dm_table_supports_dax(struct dm_table *t, in dm_table_supports_dax() argument
905 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_dax()
906 ti = dm_table_get_target(t, i); in dm_table_supports_dax()
919 static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
940 static int dm_table_determine_type(struct dm_table *t) in dm_table_determine_type() argument
946 struct list_head *devices = dm_table_get_devices(t); in dm_table_determine_type()
947 enum dm_queue_mode live_md_type = dm_get_md_type(t->md); in dm_table_determine_type()
950 if (t->type != DM_TYPE_NONE) { in dm_table_determine_type()
952 if (t->type == DM_TYPE_BIO_BASED) { in dm_table_determine_type()
956 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); in dm_table_determine_type()
957 BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED); in dm_table_determine_type()
961 for (i = 0; i < t->num_targets; i++) { in dm_table_determine_type()
962 tgt = t->targets + i; in dm_table_determine_type()
992 t->type = DM_TYPE_BIO_BASED; in dm_table_determine_type()
993 if (dm_table_supports_dax(t, device_supports_dax, &page_size) || in dm_table_determine_type()
995 t->type = DM_TYPE_DAX_BIO_BASED; in dm_table_determine_type()
998 tgt = dm_table_get_immutable_target(t); in dm_table_determine_type()
999 if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) { in dm_table_determine_type()
1000 t->type = DM_TYPE_NVME_BIO_BASED; in dm_table_determine_type()
1003 t->type = DM_TYPE_NVME_BIO_BASED; in dm_table_determine_type()
1011 t->type = DM_TYPE_REQUEST_BASED; in dm_table_determine_type()
1020 if (t->num_targets > 1) { in dm_table_determine_type()
1022 t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based"); in dm_table_determine_type()
1028 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx); in dm_table_determine_type()
1032 t->type = live_table->type; in dm_table_determine_type()
1033 dm_put_live_table(t->md, srcu_idx); in dm_table_determine_type()
1037 tgt = dm_table_get_immutable_target(t); in dm_table_determine_type()
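
The heart of dm_table_determine_type() is a homogeneity check: every target must be bio-based or every target request-based, with "hybrid" targets able to follow either. A reconstruction of that core loop, sketched without the preset-type verification and the underlying-device checks that surround it in the real function:

/* Reconstructed core of dm_table_determine_type() (sketch). */
static int determine_type_core(struct dm_table *t)
{
        unsigned bio_based = 0, request_based = 0, hybrid = 0;
        unsigned i;

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (dm_target_hybrid(tgt))
                        hybrid = 1;
                else if (dm_target_request_based(tgt))
                        request_based = 1;
                else
                        bio_based = 1;

                /* mixing the two I/O models in one table is an error */
                if (bio_based && request_based)
                        return -EINVAL;
        }

        if (hybrid && !bio_based && !request_based)
                bio_based = 1;  /* the real code follows the live md's type */

        t->type = bio_based ? DM_TYPE_BIO_BASED : DM_TYPE_REQUEST_BASED;
        return 0;
}
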
1060 enum dm_queue_mode dm_table_get_type(struct dm_table *t) in dm_table_get_type() argument
1062 return t->type; in dm_table_get_type()
1065 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) in dm_table_get_immutable_target_type() argument
1067 return t->immutable_target_type; in dm_table_get_immutable_target_type()
1070 struct dm_target *dm_table_get_immutable_target(struct dm_table *t) in dm_table_get_immutable_target() argument
1073 if (t->num_targets > 1 || in dm_table_get_immutable_target()
1074 !dm_target_is_immutable(t->targets[0].type)) in dm_table_get_immutable_target()
1077 return t->targets; in dm_table_get_immutable_target()
1080 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) in dm_table_get_wildcard_target() argument
1085 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_get_wildcard_target()
1086 ti = dm_table_get_target(t, i); in dm_table_get_wildcard_target()
1094 bool dm_table_bio_based(struct dm_table *t) in dm_table_bio_based() argument
1096 return __table_type_bio_based(dm_table_get_type(t)); in dm_table_bio_based()
1099 bool dm_table_request_based(struct dm_table *t) in dm_table_request_based() argument
1101 return __table_type_request_based(dm_table_get_type(t)); in dm_table_request_based()
1104 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) in dm_table_alloc_md_mempools() argument
1106 enum dm_queue_mode type = dm_table_get_type(t); in dm_table_alloc_md_mempools()
1118 for (i = 0; i < t->num_targets; i++) { in dm_table_alloc_md_mempools()
1119 ti = t->targets + i; in dm_table_alloc_md_mempools()
1124 t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, in dm_table_alloc_md_mempools()
1126 if (!t->mempools) in dm_table_alloc_md_mempools()
1132 void dm_table_free_md_mempools(struct dm_table *t) in dm_table_free_md_mempools() argument
1134 dm_free_md_mempools(t->mempools); in dm_table_free_md_mempools()
1135 t->mempools = NULL; in dm_table_free_md_mempools()
1138 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) in dm_table_get_md_mempools() argument
1140 return t->mempools; in dm_table_get_md_mempools()
1143 static int setup_indexes(struct dm_table *t) in setup_indexes() argument
1150 for (i = t->depth - 2; i >= 0; i--) { in setup_indexes()
1151 t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE); in setup_indexes()
1152 total += t->counts[i]; in setup_indexes()
1160 for (i = t->depth - 2; i >= 0; i--) { in setup_indexes()
1161 t->index[i] = indexes; in setup_indexes()
1162 indexes += (KEYS_PER_NODE * t->counts[i]); in setup_indexes()
1163 setup_btree_index(i, t); in setup_indexes()
1172 static int dm_table_build_index(struct dm_table *t) in dm_table_build_index() argument
1178 leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); in dm_table_build_index()
1179 t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); in dm_table_build_index()
1182 t->counts[t->depth - 1] = leaf_nodes; in dm_table_build_index()
1183 t->index[t->depth - 1] = t->highs; in dm_table_build_index()
1185 if (t->depth >= 2) in dm_table_build_index()
1186 r = setup_indexes(t); in dm_table_build_index()
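
dm_table_build_index() sizes the tree from the target count: leaf_nodes = ceil(num_targets / KEYS_PER_NODE), depth = 1 + ceil(log base CHILDREN_PER_NODE of leaf_nodes), and setup_indexes() then gives each interior level ceil(counts[l+1] / CHILDREN_PER_NODE) nodes. A runnable userspace check of that arithmetic, again assuming KEYS_PER_NODE = 8:

/* Worked example of dm_table_build_index()'s sizing (userspace, runnable). */
#include <stdio.h>

#define KEYS_PER_NODE     8     /* assumed, as in the model above */
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

static unsigned div_up(unsigned n, unsigned d)
{
        return (n + d - 1) / d;         /* the kernel's dm_div_up() */
}

/* round-up divisions by 'base' needed to reach 1: the kernel's int_log() */
static unsigned int_log(unsigned n, unsigned base)
{
        unsigned result = 0;

        while (n > 1) {
                n = div_up(n, base);
                result++;
        }
        return result;
}

int main(void)
{
        unsigned num_targets = 1000;
        unsigned leaf_nodes = div_up(num_targets, KEYS_PER_NODE);    /* 125 */
        unsigned depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); /* 4 */
        unsigned counts[16];
        int l;

        counts[depth - 1] = leaf_nodes;
        for (l = (int)depth - 2; l >= 0; l--)
                counts[l] = div_up(counts[l + 1], CHILDREN_PER_NODE);

        printf("depth %u, counts:", depth);  /* depth 4, counts: 1 2 14 125 */
        for (l = 0; l < (int)depth; l++)
                printf(" %u", counts[l]);
        printf("\n");
        return 0;
}
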
1200 static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t) in dm_table_get_integrity_disk() argument
1202 struct list_head *devices = dm_table_get_devices(t); in dm_table_get_integrity_disk()
1207 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_get_integrity_disk()
1208 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_get_integrity_disk()
1228 dm_device_name(t->md), in dm_table_get_integrity_disk()
1244 static int dm_table_register_integrity(struct dm_table *t) in dm_table_register_integrity() argument
1246 struct mapped_device *md = t->md; in dm_table_register_integrity()
1250 if (t->integrity_added) in dm_table_register_integrity()
1253 template_disk = dm_table_get_integrity_disk(t); in dm_table_register_integrity()
1258 t->integrity_supported = true; in dm_table_register_integrity()
1275 dm_device_name(t->md), in dm_table_register_integrity()
1281 t->integrity_supported = true; in dm_table_register_integrity()
1289 int dm_table_complete(struct dm_table *t) in dm_table_complete() argument
1293 r = dm_table_determine_type(t); in dm_table_complete()
1299 r = dm_table_build_index(t); in dm_table_complete()
1305 r = dm_table_register_integrity(t); in dm_table_complete()
1311 r = dm_table_alloc_md_mempools(t, t->md); in dm_table_complete()
1319 void dm_table_event_callback(struct dm_table *t, in dm_table_event_callback() argument
1323 t->event_fn = fn; in dm_table_event_callback()
1324 t->event_context = context; in dm_table_event_callback()
1328 void dm_table_event(struct dm_table *t) in dm_table_event() argument
1337 if (t->event_fn) in dm_table_event()
1338 t->event_fn(t->event_context); in dm_table_event()
1343 inline sector_t dm_table_get_size(struct dm_table *t) in dm_table_get_size() argument
1345 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; in dm_table_get_size()
1349 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) in dm_table_get_target() argument
1351 if (index >= t->num_targets) in dm_table_get_target()
1354 return t->targets + index; in dm_table_get_target()
1363 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) in dm_table_find_target() argument
1368 if (unlikely(sector >= dm_table_get_size(t))) in dm_table_find_target()
1371 for (l = 0; l < t->depth; l++) { in dm_table_find_target()
1373 node = get_node(t, l, n); in dm_table_find_target()
1380 return &t->targets[(KEYS_PER_NODE * n) + k]; in dm_table_find_target()
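
The lookup visits exactly one node per level: n = get_child(n, k) descends from the slot chosen at the previous level, and a linear scan of the node's keys picks the first high key covering the sector; after the leaf level, KEYS_PER_NODE * n + k indexes t->targets[]. A sketch of that walk against the userspace model given after the setup_btree_index() fragments:

/* The walk of dm_table_find_target(), against the userspace model above.
 * The kernel first rejects sectors >= dm_table_get_size() (line 1368);
 * this sketch assumes the sector is in range. */
static unsigned find_target_index(struct table *t, sector_t sector)
{
        unsigned l, n = 0, k = 0;
        sector_t *node;

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);    /* descend from the previous hit */
                node = get_node(t, l, n);

                /* first key >= sector names the child (or leaf slot) */
                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return (KEYS_PER_NODE * n) + k; /* index into the target array */
}
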
1428 static bool dm_table_supports_zoned_model(struct dm_table *t, in dm_table_supports_zoned_model() argument
1434 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_zoned_model()
1435 ti = dm_table_get_target(t, i); in dm_table_supports_zoned_model()
1458 static bool dm_table_matches_zone_sectors(struct dm_table *t, in dm_table_matches_zone_sectors() argument
1464 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_matches_zone_sectors()
1465 ti = dm_table_get_target(t, i); in dm_table_matches_zone_sectors()
1606 static void dm_table_verify_integrity(struct dm_table *t) in dm_table_verify_integrity() argument
1610 if (t->integrity_added) in dm_table_verify_integrity()
1613 if (t->integrity_supported) { in dm_table_verify_integrity()
1618 template_disk = dm_table_get_integrity_disk(t); in dm_table_verify_integrity()
1620 blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) in dm_table_verify_integrity()
1624 if (integrity_profile_exists(dm_disk(t->md))) { in dm_table_verify_integrity()
1626 dm_device_name(t->md)); in dm_table_verify_integrity()
1627 blk_integrity_unregister(dm_disk(t->md)); in dm_table_verify_integrity()
1640 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) in dm_table_supports_flush() argument
1651 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_flush()
1652 ti = dm_table_get_target(t, i); in dm_table_supports_flush()
1682 static int dm_table_supports_dax_write_cache(struct dm_table *t) in dm_table_supports_dax_write_cache() argument
1687 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_dax_write_cache()
1688 ti = dm_table_get_target(t, i); in dm_table_supports_dax_write_cache()
1715 static bool dm_table_all_devices_attribute(struct dm_table *t, in dm_table_all_devices_attribute() argument
1721 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_all_devices_attribute()
1722 ti = dm_table_get_target(t, i); in dm_table_all_devices_attribute()
1741 static bool dm_table_does_not_support_partial_completion(struct dm_table *t) in dm_table_does_not_support_partial_completion() argument
1743 return dm_table_all_devices_attribute(t, device_no_partial_completion); in dm_table_does_not_support_partial_completion()
1754 static bool dm_table_supports_write_same(struct dm_table *t) in dm_table_supports_write_same() argument
1759 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_write_same()
1760 ti = dm_table_get_target(t, i); in dm_table_supports_write_same()
1781 static bool dm_table_supports_write_zeroes(struct dm_table *t) in dm_table_supports_write_zeroes() argument
1786 while (i < dm_table_get_num_targets(t)) { in dm_table_supports_write_zeroes()
1787 ti = dm_table_get_target(t, i++); in dm_table_supports_write_zeroes()
1808 static bool dm_table_supports_discards(struct dm_table *t) in dm_table_supports_discards() argument
1813 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_discards()
1814 ti = dm_table_get_target(t, i); in dm_table_supports_discards()
1842 static bool dm_table_supports_secure_erase(struct dm_table *t) in dm_table_supports_secure_erase() argument
1847 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_secure_erase()
1848 ti = dm_table_get_target(t, i); in dm_table_supports_secure_erase()
1875 static bool dm_table_requires_stable_pages(struct dm_table *t) in dm_table_requires_stable_pages() argument
1880 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_requires_stable_pages()
1881 ti = dm_table_get_target(t, i); in dm_table_requires_stable_pages()
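
Every dm_table_supports_*() helper above has the same shape: loop over the targets and let each one enumerate its underlying devices through ti->type->iterate_devices with a predicate callback. A sketch of that pattern; iterate_devices_callout_fn is the real callback signature from include/linux/device-mapper.h, while the predicate body here is illustrative:

/* The shared probe pattern behind the dm_table_supports_*() helpers. */
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
                            sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && blk_queue_nonrot(q);
}

static bool all_devices_nonrot(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                /* a target with no iterate_devices hook cannot vouch for
                 * its devices, so the table-wide claim fails */
                if (!ti->type->iterate_devices ||
                    !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
                        return false;
        }
        return true;
}
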
1891 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, in dm_table_set_restrictions() argument
1902 if (!dm_table_supports_discards(t)) { in dm_table_set_restrictions()
1913 if (dm_table_supports_secure_erase(t)) in dm_table_set_restrictions()
1916 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { in dm_table_set_restrictions()
1918 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA))) in dm_table_set_restrictions()
1923 if (dm_table_supports_dax(t, device_supports_dax, &page_size)) { in dm_table_set_restrictions()
1925 if (dm_table_supports_dax(t, device_dax_synchronous, NULL)) in dm_table_set_restrictions()
1926 set_dax_synchronous(t->md->dax_dev); in dm_table_set_restrictions()
1931 if (dm_table_supports_dax_write_cache(t)) in dm_table_set_restrictions()
1932 dax_write_cache(t->md->dax_dev, true); in dm_table_set_restrictions()
1935 if (dm_table_all_devices_attribute(t, device_is_nonrot)) in dm_table_set_restrictions()
1940 if (!dm_table_supports_write_same(t)) in dm_table_set_restrictions()
1942 if (!dm_table_supports_write_zeroes(t)) in dm_table_set_restrictions()
1945 dm_table_verify_integrity(t); in dm_table_set_restrictions()
1951 if (dm_table_requires_stable_pages(t)) in dm_table_set_restrictions()
1962 if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) in dm_table_set_restrictions()
1973 blk_revalidate_disk_zones(t->md->disk); in dm_table_set_restrictions()
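
Each probe above feeds one queue flag or limit; the listing elides the applications themselves. The non-rotational case at line 1935, reconstructed as a sketch with the standard block-layer helpers:

/* Sketch: only advertise non-rotational when every device qualifies. */
if (dm_table_all_devices_attribute(t, device_is_nonrot))
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
else
        blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
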
1979 unsigned int dm_table_get_num_targets(struct dm_table *t) in dm_table_get_num_targets() argument
1981 return t->num_targets; in dm_table_get_num_targets()
1984 struct list_head *dm_table_get_devices(struct dm_table *t) in dm_table_get_devices() argument
1986 return &t->devices; in dm_table_get_devices()
1989 fmode_t dm_table_get_mode(struct dm_table *t) in dm_table_get_mode() argument
1991 return t->mode; in dm_table_get_mode()
2001 static void suspend_targets(struct dm_table *t, enum suspend_mode mode) in suspend_targets() argument
2003 int i = t->num_targets; in suspend_targets()
2004 struct dm_target *ti = t->targets; in suspend_targets()
2006 lockdep_assert_held(&t->md->suspend_lock); in suspend_targets()
2027 void dm_table_presuspend_targets(struct dm_table *t) in dm_table_presuspend_targets() argument
2029 if (!t) in dm_table_presuspend_targets()
2032 suspend_targets(t, PRESUSPEND); in dm_table_presuspend_targets()
2035 void dm_table_presuspend_undo_targets(struct dm_table *t) in dm_table_presuspend_undo_targets() argument
2037 if (!t) in dm_table_presuspend_undo_targets()
2040 suspend_targets(t, PRESUSPEND_UNDO); in dm_table_presuspend_undo_targets()
2043 void dm_table_postsuspend_targets(struct dm_table *t) in dm_table_postsuspend_targets() argument
2045 if (!t) in dm_table_postsuspend_targets()
2048 suspend_targets(t, POSTSUSPEND); in dm_table_postsuspend_targets()
2051 int dm_table_resume_targets(struct dm_table *t) in dm_table_resume_targets() argument
2055 lockdep_assert_held(&t->md->suspend_lock); in dm_table_resume_targets()
2057 for (i = 0; i < t->num_targets; i++) { in dm_table_resume_targets()
2058 struct dm_target *ti = t->targets + i; in dm_table_resume_targets()
2066 dm_device_name(t->md), ti->type->name, r); in dm_table_resume_targets()
2071 for (i = 0; i < t->num_targets; i++) { in dm_table_resume_targets()
2072 struct dm_target *ti = t->targets + i; in dm_table_resume_targets()
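
suspend_targets() fans one of three optional hooks out to every target, and the two loops of dm_table_resume_targets() above enforce ordering: every preresume must succeed before any resume hook runs. The suspend dispatch, reconstructed as a sketch:

/* Reconstructed dispatch of suspend_targets() (sketch). */
static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
        int i = t->num_targets;
        struct dm_target *ti = t->targets;

        lockdep_assert_held(&t->md->suspend_lock);

        while (i--) {
                switch (mode) {
                case PRESUSPEND:
                        if (ti->type->presuspend)
                                ti->type->presuspend(ti);
                        break;
                case PRESUSPEND_UNDO:
                        if (ti->type->presuspend_undo)
                                ti->type->presuspend_undo(ti);
                        break;
                case POSTSUSPEND:
                        if (ti->type->postsuspend)
                                ti->type->postsuspend(ti);
                        break;
                }
                ti++;
        }
}
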
2081 void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb) in dm_table_add_target_callbacks() argument
2083 list_add(&cb->list, &t->target_callbacks); in dm_table_add_target_callbacks()
2087 int dm_table_any_congested(struct dm_table *t, int bdi_bits) in dm_table_any_congested() argument
2090 struct list_head *devices = dm_table_get_devices(t); in dm_table_any_congested()
2102 dm_device_name(t->md), in dm_table_any_congested()
2106 list_for_each_entry(cb, &t->target_callbacks, list) in dm_table_any_congested()
2113 struct mapped_device *dm_table_get_md(struct dm_table *t) in dm_table_get_md() argument
2115 return t->md; in dm_table_get_md()
2119 const char *dm_table_device_name(struct dm_table *t) in dm_table_device_name() argument
2121 return dm_device_name(t->md); in dm_table_device_name()
2125 void dm_table_run_md_queue_async(struct dm_table *t) in dm_table_run_md_queue_async() argument
2130 if (!dm_table_request_based(t)) in dm_table_run_md_queue_async()
2133 md = dm_table_get_md(t); in dm_table_run_md_queue_async()
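
dm_table_run_md_queue_async() acts only on request-based tables; the listing stops before the body that actually kicks the queue. Its tail, sketched below; dm_get_md_queue() is an assumed dm-core accessor for the mapped device's request_queue, while blk_mq_run_hw_queues() is the standard blk-mq helper:

/* Sketch of the tail of dm_table_run_md_queue_async(). */
struct mapped_device *md = dm_table_get_md(t);
struct request_queue *queue = dm_get_md_queue(md);  /* assumed accessor */

if (queue)
        blk_mq_run_hw_queues(queue, true);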