Lines matching references to `t` in drivers/md/dm-table.c (Linux kernel)

Cross-reference search output for the identifier `t`, the struct dm_table pointer threaded through the device-mapper table code. Each entry shows the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks the line where `t` is declared as a parameter or a local variable.

60 static inline sector_t *get_node(struct dm_table *t,  in get_node()  argument
63 return t->index[l] + (n * KEYS_PER_NODE); in get_node()
70 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) in high() argument
72 for (; l < t->depth - 1; l++) in high()
75 if (n >= t->counts[l]) in high()
78 return get_node(t, l, n)[KEYS_PER_NODE - 1]; in high()
85 static int setup_btree_index(unsigned int l, struct dm_table *t) in setup_btree_index() argument
90 for (n = 0U; n < t->counts[l]; n++) { in setup_btree_index()
91 node = get_node(t, l, n); in setup_btree_index()
94 node[k] = high(t, l + 1, get_child(n, k)); in setup_btree_index()
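These three helpers implement dm-table's lookup index: a B-tree flattened into one array of sector_t keys per level, with KEYS_PER_NODE keys (one L1 cache line's worth) per node. A minimal userspace model of the layout; "struct table" is a simplified stand-in for the index-related fields of struct dm_table, not the kernel struct:

/*
 * Userspace model of dm-table's flattened btree helpers. Constants
 * mirror dm-table.c; the struct is a simplified stand-in.
 */
#include <stdint.h>

typedef uint64_t sector_t;

#define NODE_SIZE 64				/* assume 64-byte cache lines */
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct table {
	unsigned int depth;		/* levels in the tree */
	unsigned int counts[16];	/* nodes per level */
	sector_t *index[16];		/* flat key array per level */
};

/* Node n of level l is just an offset into that level's key array. */
static inline sector_t *get_node(struct table *t, unsigned int l,
				 unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/* Child k of node n in the implicit (KEYS_PER_NODE + 1)-ary tree. */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Highest sector reachable from node n at level l: follow the
 * rightmost child down to the leaf level, then read the node's last
 * key. A node past the end of a non-full level acts as +infinity,
 * so searches always fall left of it.
 */
static sector_t high(struct table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t)-1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/* Fill level l: key k of each node caches high() of its child k. */
static int setup_btree_index(unsigned int l, struct table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0; n < t->counts[l]; n++) {
		node = get_node(t, l, n);
		for (k = 0; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}
	return 0;
}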
104 static int alloc_targets(struct dm_table *t, unsigned int num) in alloc_targets() argument
120 kvfree(t->highs); in alloc_targets()
122 t->num_allocated = num; in alloc_targets()
123 t->highs = n_highs; in alloc_targets()
124 t->targets = n_targets; in alloc_targets()
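alloc_targets() sizes the two parallel per-target arrays, t->highs (the last sector each target covers) and t->targets, out of a single allocation: the highs come first, the target slots start immediately after them, which is why only t->highs is ever passed to kvfree(). A userspace sketch of that co-allocation layout, with placeholder struct names (the kernel uses kvcalloc()/kvfree() so large tables can fall back to vmalloc):

/*
 * Userspace sketch of alloc_targets' single-allocation layout: one
 * block holds num sector_t "highs" followed by num target slots.
 * "struct target" is a placeholder, not the kernel's dm_target.
 */
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct target { sector_t begin, len; };

struct table {
	unsigned int num_allocated;
	sector_t *highs;	/* highs[i] = last sector of target i */
	struct target *targets;	/* starts right after highs[] */
};

static int alloc_targets(struct table *t, unsigned int num)
{
	/* One allocation for both arrays: highs[num], then targets[num]. */
	sector_t *n_highs = calloc(num, sizeof(struct target) + sizeof(sector_t));
	if (!n_highs)
		return -1;

	struct target *n_targets = (struct target *)(n_highs + num);

	/* Unused high slots read as (sector_t)-1, i.e. "no key here". */
	memset(n_highs, -1, sizeof(*n_highs) * num);

	free(t->highs);		/* NULL on first call; kernel uses kvfree() */
	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;
	return 0;
}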
132 struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); in dm_table_create() local
134 if (!t) in dm_table_create()
137 INIT_LIST_HEAD(&t->devices); in dm_table_create()
138 init_rwsem(&t->devices_lock); in dm_table_create()
146 kfree(t); in dm_table_create()
150 if (alloc_targets(t, num_targets)) { in dm_table_create()
151 kfree(t); in dm_table_create()
155 t->type = DM_TYPE_NONE; in dm_table_create()
156 t->mode = mode; in dm_table_create()
157 t->md = md; in dm_table_create()
158 *result = t; in dm_table_create()
176 static void dm_table_destroy_crypto_profile(struct dm_table *t);
178 void dm_table_destroy(struct dm_table *t) in dm_table_destroy() argument
180 if (!t) in dm_table_destroy()
184 if (t->depth >= 2) in dm_table_destroy()
185 kvfree(t->index[t->depth - 2]); in dm_table_destroy()
188 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_destroy()
189 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_destroy()
197 kvfree(t->highs); in dm_table_destroy()
200 free_devices(&t->devices, t->md); in dm_table_destroy()
202 dm_free_md_mempools(t->mempools); in dm_table_destroy()
204 dm_table_destroy_crypto_profile(t); in dm_table_destroy()
206 kfree(t); in dm_table_destroy()
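dm_table_create() and dm_table_destroy() bracket the table lifecycle; in between, the DM ioctl path adds targets and finalizes the table with dm_table_complete(). A condensed kernel-style sketch of that sequence; it is not a standalone compilable unit, the signatures follow dm-table.c, and the "linear /dev/sda 0" target line is purely illustrative:

/*
 * Kernel-style sketch of the table lifecycle as a caller such as
 * the DM ioctl path drives it. Illustrative only.
 */
static int build_example_table(struct mapped_device *md,
			       struct dm_table **result)
{
	char params[] = "/dev/sda 0";	/* example; parsing modifies it */
	struct dm_table *t;
	int r;

	r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
	if (r)
		return r;

	/* One target mapping sectors 0..1023 onto /dev/sda offset 0. */
	r = dm_table_add_target(t, "linear", 0, 1024, params);
	if (r)
		goto err;

	/* Builds the btree index, settles the queue type, registers
	 * integrity/crypto, allocates md mempools -- the steps under
	 * dm_table_complete() in this listing. */
	r = dm_table_complete(t);
	if (r)
		goto err;

	*result = t;
	return 0;
err:
	dm_table_destroy(t);	/* tolerates a partially built table */
	return r;
}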
342 struct dm_table *t = ti->table; in dm_get_device() local
344 BUG_ON(!t); in dm_get_device()
360 if (dev == disk_devt(t->md->disk)) in dm_get_device()
363 down_write(&t->devices_lock); in dm_get_device()
365 dd = find_device(&t->devices, dev); in dm_get_device()
373 r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev); in dm_get_device()
380 list_add(&dd->list, &t->devices); in dm_get_device()
384 r = upgrade_mode(dd, mode, t->md); in dm_get_device()
390 up_write(&t->devices_lock); in dm_get_device()
395 up_write(&t->devices_lock); in dm_get_device()
432 struct dm_table *t = ti->table; in dm_put_device() local
433 struct list_head *devices = &t->devices; in dm_put_device()
436 down_write(&t->devices_lock); in dm_put_device()
446 dm_device_name(t->md), d->name); in dm_put_device()
450 dm_put_table_device(t->md, d); in dm_put_device()
456 up_write(&t->devices_lock); in dm_put_device()
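dm_get_device() and dm_put_device() are how a target pins its underlying block devices: entries are shared per table on t->devices under t->devices_lock, and opening the same device again upgrades the mode instead of reopening. The usual call sites are a target's ctr/dtr pair. A minimal kernel-style sketch (the "example" target and its context struct are invented for illustration; it compiles only against kernel headers):

#include <linux/device-mapper.h>
#include <linux/slab.h>

struct example_ctx {
	struct dm_dev *dev;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_ctx *ec;
	int r;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ec = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Out of memory";
		return -ENOMEM;
	}

	/* Takes a ref on the device; dm-table tracks it on t->devices. */
	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &ec->dev);
	if (r) {
		ti->error = "Device lookup failed";
		kfree(ec);
		return r;
	}

	ti->private = ec;
	return 0;
}

static void example_dtr(struct dm_target *ti)
{
	struct example_ctx *ec = ti->private;

	dm_put_device(ti, ec->dev);	/* drops the t->devices ref */
	kfree(ec);
}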
463 static int adjoin(struct dm_table *t, struct dm_target *ti) in adjoin() argument
467 if (!t->num_targets) in adjoin()
470 prev = &t->targets[t->num_targets - 1]; in adjoin()
577 static int validate_hardware_logical_block_alignment(struct dm_table *t, in validate_hardware_logical_block_alignment() argument
605 for (i = 0; i < t->num_targets; i++) { in validate_hardware_logical_block_alignment()
606 ti = dm_table_get_target(t, i); in validate_hardware_logical_block_alignment()
634 dm_device_name(t->md), i, in validate_hardware_logical_block_alignment()
644 int dm_table_add_target(struct dm_table *t, const char *type, in dm_table_add_target() argument
651 if (t->singleton) { in dm_table_add_target()
653 dm_device_name(t->md), t->targets->type->name); in dm_table_add_target()
657 BUG_ON(t->num_targets >= t->num_allocated); in dm_table_add_target()
659 ti = t->targets + t->num_targets; in dm_table_add_target()
663 DMERR("%s: zero-length target", dm_device_name(t->md)); in dm_table_add_target()
669 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); in dm_table_add_target()
674 if (t->num_targets) { in dm_table_add_target()
678 t->singleton = true; in dm_table_add_target()
682 !(t->mode & BLK_OPEN_WRITE)) { in dm_table_add_target()
687 if (t->immutable_target_type) { in dm_table_add_target()
688 if (t->immutable_target_type != ti->type) { in dm_table_add_target()
693 if (t->num_targets) { in dm_table_add_target()
697 t->immutable_target_type = ti->type; in dm_table_add_target()
701 t->integrity_added = 1; in dm_table_add_target()
703 ti->table = t; in dm_table_add_target()
711 if (!adjoin(t, ti)) { in dm_table_add_target()
727 t->highs[t->num_targets++] = ti->begin + ti->len - 1; in dm_table_add_target()
731 dm_device_name(t->md), type); in dm_table_add_target()
739 DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r)); in dm_table_add_target()
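dm_table_add_target() parses one table line, runs the target type's constructor, checks with adjoin() that the new target starts exactly where the previous one ended, and records the target's last sector in t->highs[]; that gap-free, sorted highs array is what the btree index is later built from. A userspace sketch of the contiguity rule and the resulting highs values (simplified stand-in structs):

/*
 * Userspace sketch of the contiguity rule adjoin() enforces: each
 * target must start exactly where the previous one ended, so the
 * table tiles the device with no gaps or overlaps.
 */
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct target { sector_t begin, len; };

struct table {
	unsigned int num_targets;
	struct target *targets;
	sector_t *highs;
};

static bool adjoin(struct table *t, struct target *ti)
{
	struct target *prev;

	if (!t->num_targets)
		return ti->begin == 0;	/* first target starts at sector 0 */

	prev = &t->targets[t->num_targets - 1];
	return prev->begin + prev->len == ti->begin;
}

/*
 * On success dm_table_add_target() then records the last sector:
 *   t->highs[t->num_targets++] = ti->begin + ti->len - 1;
 * e.g. targets (0, 1024) and (1024, 2048) yield highs = {1023, 3071}.
 */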
813 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) in dm_table_set_type() argument
815 t->type = type; in dm_table_set_type()
837 static bool dm_table_supports_dax(struct dm_table *t, in dm_table_supports_dax() argument
841 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_supports_dax()
842 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_supports_dax()
868 static int dm_table_determine_type(struct dm_table *t) in dm_table_determine_type() argument
872 struct list_head *devices = dm_table_get_devices(t); in dm_table_determine_type()
873 enum dm_queue_mode live_md_type = dm_get_md_type(t->md); in dm_table_determine_type()
875 if (t->type != DM_TYPE_NONE) { in dm_table_determine_type()
877 if (t->type == DM_TYPE_BIO_BASED) { in dm_table_determine_type()
881 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); in dm_table_determine_type()
885 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_determine_type()
886 ti = dm_table_get_target(t, i); in dm_table_determine_type()
915 t->type = DM_TYPE_BIO_BASED; in dm_table_determine_type()
916 if (dm_table_supports_dax(t, device_not_dax_capable) || in dm_table_determine_type()
918 t->type = DM_TYPE_DAX_BIO_BASED; in dm_table_determine_type()
925 t->type = DM_TYPE_REQUEST_BASED; in dm_table_determine_type()
934 if (t->num_targets > 1) { in dm_table_determine_type()
941 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx); in dm_table_determine_type()
945 t->type = live_table->type; in dm_table_determine_type()
946 dm_put_live_table(t->md, srcu_idx); in dm_table_determine_type()
950 ti = dm_table_get_immutable_target(t); in dm_table_determine_type()
969 enum dm_queue_mode dm_table_get_type(struct dm_table *t) in dm_table_get_type() argument
971 return t->type; in dm_table_get_type()
974 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) in dm_table_get_immutable_target_type() argument
976 return t->immutable_target_type; in dm_table_get_immutable_target_type()
979 struct dm_target *dm_table_get_immutable_target(struct dm_table *t) in dm_table_get_immutable_target() argument
982 if (t->num_targets > 1 || in dm_table_get_immutable_target()
983 !dm_target_is_immutable(t->targets[0].type)) in dm_table_get_immutable_target()
986 return t->targets; in dm_table_get_immutable_target()
989 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) in dm_table_get_wildcard_target() argument
991 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_get_wildcard_target()
992 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_get_wildcard_target()
1001 bool dm_table_bio_based(struct dm_table *t) in dm_table_bio_based() argument
1003 return __table_type_bio_based(dm_table_get_type(t)); in dm_table_bio_based()
1006 bool dm_table_request_based(struct dm_table *t) in dm_table_request_based() argument
1008 return __table_type_request_based(dm_table_get_type(t)); in dm_table_request_based()
1011 static bool dm_table_supports_poll(struct dm_table *t);
1013 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) in dm_table_alloc_md_mempools() argument
1015 enum dm_queue_mode type = dm_table_get_type(t); in dm_table_alloc_md_mempools()
1035 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_alloc_md_mempools()
1036 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_alloc_md_mempools()
1048 dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0)) in dm_table_alloc_md_mempools()
1050 if (t->integrity_supported && in dm_table_alloc_md_mempools()
1056 if (t->integrity_supported && in dm_table_alloc_md_mempools()
1060 t->mempools = pools; in dm_table_alloc_md_mempools()
1068 static int setup_indexes(struct dm_table *t) in setup_indexes() argument
1075 for (i = t->depth - 2; i >= 0; i--) { in setup_indexes()
1076 t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE); in setup_indexes()
1077 total += t->counts[i]; in setup_indexes()
1085 for (i = t->depth - 2; i >= 0; i--) { in setup_indexes()
1086 t->index[i] = indexes; in setup_indexes()
1087 indexes += (KEYS_PER_NODE * t->counts[i]); in setup_indexes()
1088 setup_btree_index(i, t); in setup_indexes()
1097 static int dm_table_build_index(struct dm_table *t) in dm_table_build_index() argument
1103 leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); in dm_table_build_index()
1104 t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); in dm_table_build_index()
1107 t->counts[t->depth - 1] = leaf_nodes; in dm_table_build_index()
1108 t->index[t->depth - 1] = t->highs; in dm_table_build_index()
1110 if (t->depth >= 2) in dm_table_build_index()
1111 r = setup_indexes(t); in dm_table_build_index()
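dm_table_build_index() derives the tree shape from the target count: leaf_nodes = ceil(num_targets / KEYS_PER_NODE), depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE), and setup_indexes() sizes each upper level as the ceiling-divide of the level below. A small runnable demo of that arithmetic (KEYS_PER_NODE = 8 assumes 64-byte cache lines and an 8-byte sector_t):

/*
 * Demo of dm_table_build_index's shape arithmetic. With 8 keys and
 * 9 children per node, 100 targets need 13 leaves and a 3-level
 * tree of 1 + 2 + 13 nodes.
 */
#include <stdio.h>

#define KEYS_PER_NODE 8
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

static unsigned int div_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

/* int_log(n, base): levels needed above n nodes (ceil of log_base n). */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	unsigned int result = 0;

	while (n > 1) {
		n = div_up(n, base);
		result++;
	}
	return result;
}

int main(void)
{
	unsigned int num_targets = 100;
	unsigned int counts[16];

	unsigned int leaves = div_up(num_targets, KEYS_PER_NODE);
	unsigned int depth = 1 + int_log(leaves, CHILDREN_PER_NODE);

	counts[depth - 1] = leaves;
	for (int i = (int)depth - 2; i >= 0; i--)
		counts[i] = div_up(counts[i + 1], CHILDREN_PER_NODE);

	printf("targets=%u leaves=%u depth=%u\n", num_targets, leaves, depth);
	for (unsigned int l = 0; l < depth; l++)
		printf("level %u: %u node(s)\n", l, counts[l]);
	/* prints: level 0: 1, level 1: 2, level 2: 13 */
	return 0;
}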
1125 static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t) in dm_table_get_integrity_disk() argument
1127 struct list_head *devices = dm_table_get_devices(t); in dm_table_get_integrity_disk()
1131 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_get_integrity_disk()
1132 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_get_integrity_disk()
1153 dm_device_name(t->md), in dm_table_get_integrity_disk()
1169 static int dm_table_register_integrity(struct dm_table *t) in dm_table_register_integrity() argument
1171 struct mapped_device *md = t->md; in dm_table_register_integrity()
1175 if (t->integrity_added) in dm_table_register_integrity()
1178 template_disk = dm_table_get_integrity_disk(t); in dm_table_register_integrity()
1183 t->integrity_supported = true; in dm_table_register_integrity()
1199 dm_device_name(t->md), in dm_table_register_integrity()
1205 t->integrity_supported = true; in dm_table_register_integrity()
1234 struct dm_table *t; in dm_keyslot_evict() local
1237 t = dm_get_live_table(md, &srcu_idx); in dm_keyslot_evict()
1238 if (!t) in dm_keyslot_evict()
1241 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_keyslot_evict()
1242 struct dm_target *ti = dm_table_get_target(t, i); in dm_keyslot_evict()
1279 static void dm_table_destroy_crypto_profile(struct dm_table *t) in dm_table_destroy_crypto_profile() argument
1281 dm_destroy_crypto_profile(t->crypto_profile); in dm_table_destroy_crypto_profile()
1282 t->crypto_profile = NULL; in dm_table_destroy_crypto_profile()
1294 static int dm_table_construct_crypto_profile(struct dm_table *t) in dm_table_construct_crypto_profile() argument
1304 dmcp->md = t->md; in dm_table_construct_crypto_profile()
1313 for (i = 0; i < t->num_targets; i++) { in dm_table_construct_crypto_profile()
1314 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_construct_crypto_profile()
1327 if (t->md->queue && in dm_table_construct_crypto_profile()
1329 t->md->queue->crypto_profile)) { in dm_table_construct_crypto_profile()
1356 t->crypto_profile = profile; in dm_table_construct_crypto_profile()
1362 struct dm_table *t) in dm_update_crypto_profile() argument
1364 if (!t->crypto_profile) in dm_update_crypto_profile()
1369 blk_crypto_register(t->crypto_profile, q); in dm_update_crypto_profile()
1372 t->crypto_profile); in dm_update_crypto_profile()
1373 dm_destroy_crypto_profile(t->crypto_profile); in dm_update_crypto_profile()
1375 t->crypto_profile = NULL; in dm_update_crypto_profile()
1380 static int dm_table_construct_crypto_profile(struct dm_table *t) in dm_table_construct_crypto_profile() argument
1389 static void dm_table_destroy_crypto_profile(struct dm_table *t) in dm_table_destroy_crypto_profile() argument
1394 struct dm_table *t) in dm_update_crypto_profile() argument
1404 int dm_table_complete(struct dm_table *t) in dm_table_complete() argument
1408 r = dm_table_determine_type(t); in dm_table_complete()
1414 r = dm_table_build_index(t); in dm_table_complete()
1420 r = dm_table_register_integrity(t); in dm_table_complete()
1426 r = dm_table_construct_crypto_profile(t); in dm_table_complete()
1432 r = dm_table_alloc_md_mempools(t, t->md); in dm_table_complete()
1440 void dm_table_event_callback(struct dm_table *t, in dm_table_event_callback() argument
1444 t->event_fn = fn; in dm_table_event_callback()
1445 t->event_context = context; in dm_table_event_callback()
1449 void dm_table_event(struct dm_table *t) in dm_table_event() argument
1452 if (t->event_fn) in dm_table_event()
1453 t->event_fn(t->event_context); in dm_table_event()
1458 inline sector_t dm_table_get_size(struct dm_table *t) in dm_table_get_size() argument
1460 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; in dm_table_get_size()
1470 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) in dm_table_find_target() argument
1475 if (unlikely(sector >= dm_table_get_size(t))) in dm_table_find_target()
1478 for (l = 0; l < t->depth; l++) { in dm_table_find_target()
1480 node = get_node(t, l, n); in dm_table_find_target()
1487 return &t->targets[(KEYS_PER_NODE * n) + k]; in dm_table_find_target()
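dm_table_find_target() visits one node per level: descend to child get_child(n, k) of the previous match, scan that node's KEYS_PER_NODE keys for the first one >= sector, and after the last level (KEYS_PER_NODE * n) + k indexes t->targets[] directly (the kernel first rejects sector >= dm_table_get_size()). A self-contained demo of the same walk over a two-level index built for 20 contiguous 100-sector targets; the structs are simplified stand-ins, not kernel code:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef uint64_t sector_t;

#define KEYS_PER_NODE 8
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
#define NUM_TARGETS 20
#define LEAVES 3		/* ceil(20 / 8) */

struct table {
	unsigned int depth;
	unsigned int counts[2];
	sector_t *index[2];
	sector_t highs[LEAVES * KEYS_PER_NODE];
};

static sector_t *get_node(struct table *t, unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

static unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/* Highest key reachable from node n at level l; -1 past the end. */
static sector_t high(struct table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);
	if (n >= t->counts[l])
		return (sector_t)-1;
	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/* Same walk as dm_table_find_target(): one node per level. */
static unsigned int find_target(struct table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		sector_t *node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}
	return (KEYS_PER_NODE * n) + k;	/* index into t->targets[] */
}

int main(void)
{
	static struct table t = { .depth = 2, .counts = { 1, LEAVES } };
	static sector_t root[KEYS_PER_NODE];
	unsigned int i, k;

	t.index[0] = root;
	t.index[1] = t.highs;

	/* Target i covers [i*100, i*100+99]; pad unused slots with -1. */
	memset(t.highs, -1, sizeof(t.highs));
	for (i = 0; i < NUM_TARGETS; i++)
		t.highs[i] = (sector_t)(i + 1) * 100 - 1;

	/* setup_btree_index(): root key k caches high() of leaf child k. */
	for (k = 0; k < KEYS_PER_NODE; k++)
		root[k] = high(&t, 1, get_child(0, k));

	printf("sector 0    -> target %u\n", find_target(&t, 0));    /* 0 */
	printf("sector 950  -> target %u\n", find_target(&t, 950));  /* 9 */
	printf("sector 1999 -> target %u\n", find_target(&t, 1999)); /* 19 */
	return 0;
}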
1521 static bool dm_table_any_dev_attr(struct dm_table *t, in dm_table_any_dev_attr() argument
1524 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_any_dev_attr()
1525 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_any_dev_attr()
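dm_table_any_dev_attr() and the dm_table_supports_*() checks below all share one pattern: for each target, call ti->type->iterate_devices() with a small per-device predicate. A kernel-style sketch of that pattern; the callback signature is iterate_devices_callout_fn from include/linux/device-mapper.h, and device_is_rotational mirrors the predicate this file passes to dm_table_any_dev_attr() at line 1984:

/* Per-device predicate: non-zero means "this device has the
 * attribute being scanned for". */
static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	return !bdev_nonrot(dev->bdev);
}

/* Ask every target to run the predicate over its devices; a target
 * without an iterate_devices hook (i.e. one with no data devices)
 * simply cannot report a match. */
static bool any_dev_attr(struct dm_table *t,
			 iterate_devices_callout_fn func, void *data)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, func, data))
			return true;
	}
	return false;
}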
1545 static bool dm_table_supports_poll(struct dm_table *t) in dm_table_supports_poll() argument
1547 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_supports_poll()
1548 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_supports_poll()
1564 bool dm_table_has_no_data_devices(struct dm_table *t) in dm_table_has_no_data_devices() argument
1566 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_has_no_data_devices()
1567 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_has_no_data_devices()
1597 static bool dm_table_supports_zoned_model(struct dm_table *t, in dm_table_supports_zoned_model() argument
1600 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_supports_zoned_model()
1601 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_supports_zoned_model()
1632 static int validate_hardware_zoned_model(struct dm_table *t, in validate_hardware_zoned_model() argument
1639 if (!dm_table_supports_zoned_model(t, zoned_model)) { in validate_hardware_zoned_model()
1641 dm_device_name(t->md)); in validate_hardware_zoned_model()
1649 if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) { in validate_hardware_zoned_model()
1651 dm_device_name(t->md)); in validate_hardware_zoned_model()
1661 int dm_calculate_queue_limits(struct dm_table *t, in dm_calculate_queue_limits() argument
1670 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_calculate_queue_limits()
1671 struct dm_target *ti = dm_table_get_target(t, i); in dm_calculate_queue_limits()
1717 dm_device_name(t->md), in dm_calculate_queue_limits()
1737 if (validate_hardware_zoned_model(t, zoned_model, zone_sectors)) in dm_calculate_queue_limits()
1740 return validate_hardware_logical_block_alignment(t, limits); in dm_calculate_queue_limits()
1748 static void dm_table_verify_integrity(struct dm_table *t) in dm_table_verify_integrity() argument
1752 if (t->integrity_added) in dm_table_verify_integrity()
1755 if (t->integrity_supported) { in dm_table_verify_integrity()
1760 template_disk = dm_table_get_integrity_disk(t); in dm_table_verify_integrity()
1762 blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) in dm_table_verify_integrity()
1766 if (integrity_profile_exists(dm_disk(t->md))) { in dm_table_verify_integrity()
1768 dm_device_name(t->md)); in dm_table_verify_integrity()
1769 blk_integrity_unregister(dm_disk(t->md)); in dm_table_verify_integrity()
1782 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) in dm_table_supports_flush() argument
1790 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_supports_flush()
1791 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_supports_flush()
1843 static bool dm_table_supports_write_zeroes(struct dm_table *t) in dm_table_supports_write_zeroes() argument
1845 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_supports_write_zeroes()
1846 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_supports_write_zeroes()
1865 static bool dm_table_supports_nowait(struct dm_table *t) in dm_table_supports_nowait() argument
1867 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_supports_nowait()
1868 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_supports_nowait()
1887 static bool dm_table_supports_discards(struct dm_table *t) in dm_table_supports_discards() argument
1889 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_supports_discards()
1890 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_supports_discards()
1916 static bool dm_table_supports_secure_erase(struct dm_table *t) in dm_table_supports_secure_erase() argument
1918 for (unsigned int i = 0; i < t->num_targets; i++) { in dm_table_supports_secure_erase()
1919 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_supports_secure_erase()
1939 int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, in dm_table_set_restrictions() argument
1950 if (dm_table_supports_nowait(t)) in dm_table_set_restrictions()
1955 if (!dm_table_supports_discards(t)) { in dm_table_set_restrictions()
1963 if (!dm_table_supports_secure_erase(t)) in dm_table_set_restrictions()
1966 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { in dm_table_set_restrictions()
1968 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA))) in dm_table_set_restrictions()
1973 if (dm_table_supports_dax(t, device_not_dax_capable)) { in dm_table_set_restrictions()
1975 if (dm_table_supports_dax(t, device_not_dax_synchronous_capable)) in dm_table_set_restrictions()
1976 set_dax_synchronous(t->md->dax_dev); in dm_table_set_restrictions()
1980 if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) in dm_table_set_restrictions()
1981 dax_write_cache(t->md->dax_dev, true); in dm_table_set_restrictions()
1984 if (dm_table_any_dev_attr(t, device_is_rotational, NULL)) in dm_table_set_restrictions()
1989 if (!dm_table_supports_write_zeroes(t)) in dm_table_set_restrictions()
1992 dm_table_verify_integrity(t); in dm_table_set_restrictions()
2001 if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL)) in dm_table_set_restrictions()
2013 dm_table_any_dev_attr(t, device_is_not_random, NULL)) in dm_table_set_restrictions()
2021 r = dm_set_zones_restrictions(t, q); in dm_table_set_restrictions()
2028 dm_update_crypto_profile(q, t); in dm_table_set_restrictions()
2029 disk_update_readahead(t->md->disk); in dm_table_set_restrictions()
2038 if (__table_type_bio_based(t->type)) { in dm_table_set_restrictions()
2039 if (dm_table_supports_poll(t)) in dm_table_set_restrictions()
2048 struct list_head *dm_table_get_devices(struct dm_table *t) in dm_table_get_devices() argument
2050 return &t->devices; in dm_table_get_devices()
2053 blk_mode_t dm_table_get_mode(struct dm_table *t) in dm_table_get_mode() argument
2055 return t->mode; in dm_table_get_mode()
2065 static void suspend_targets(struct dm_table *t, enum suspend_mode mode) in suspend_targets() argument
2067 lockdep_assert_held(&t->md->suspend_lock); in suspend_targets()
2069 for (unsigned int i = 0; i < t->num_targets; i++) { in suspend_targets()
2070 struct dm_target *ti = dm_table_get_target(t, i); in suspend_targets()
2089 void dm_table_presuspend_targets(struct dm_table *t) in dm_table_presuspend_targets() argument
2091 if (!t) in dm_table_presuspend_targets()
2094 suspend_targets(t, PRESUSPEND); in dm_table_presuspend_targets()
2097 void dm_table_presuspend_undo_targets(struct dm_table *t) in dm_table_presuspend_undo_targets() argument
2099 if (!t) in dm_table_presuspend_undo_targets()
2102 suspend_targets(t, PRESUSPEND_UNDO); in dm_table_presuspend_undo_targets()
2105 void dm_table_postsuspend_targets(struct dm_table *t) in dm_table_postsuspend_targets() argument
2107 if (!t) in dm_table_postsuspend_targets()
2110 suspend_targets(t, POSTSUSPEND); in dm_table_postsuspend_targets()
2113 int dm_table_resume_targets(struct dm_table *t) in dm_table_resume_targets() argument
2118 lockdep_assert_held(&t->md->suspend_lock); in dm_table_resume_targets()
2120 for (i = 0; i < t->num_targets; i++) { in dm_table_resume_targets()
2121 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_resume_targets()
2129 dm_device_name(t->md), ti->type->name, r); in dm_table_resume_targets()
2134 for (i = 0; i < t->num_targets; i++) { in dm_table_resume_targets()
2135 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_resume_targets()
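suspend_targets() and dm_table_resume_targets() fan these operations out to per-target hooks: presuspend/presuspend_undo/postsuspend on the way down, then two passes on resume, preresume for every target (any failure aborts) followed by resume. A target opts in by filling the matching struct target_type members. A kernel-style sketch; the hook names follow include/linux/device-mapper.h, the bodies are placeholders, and ctr/dtr refer to the earlier dm_get_device() sketch:

static void example_presuspend(struct dm_target *ti)
{
	/* Stop issuing new I/O of your own (timers, workqueues, ...). */
}

static void example_postsuspend(struct dm_target *ti)
{
	/* In-flight I/O has drained; persist metadata here. */
}

static int example_preresume(struct dm_target *ti)
{
	/* Last chance to fail the resume, e.g. metadata won't load. */
	return 0;
}

static void example_resume(struct dm_target *ti)
{
	/* Device is live again; restart background work. */
}

static struct target_type example_target = {
	.name        = "example",
	.version     = {1, 0, 0},
	.module      = THIS_MODULE,
	.ctr         = example_ctr,	/* from the earlier sketch */
	.dtr         = example_dtr,
	.presuspend  = example_presuspend,
	.postsuspend = example_postsuspend,
	.preresume   = example_preresume,
	.resume      = example_resume,
};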
2144 struct mapped_device *dm_table_get_md(struct dm_table *t) in dm_table_get_md() argument
2146 return t->md; in dm_table_get_md()
2150 const char *dm_table_device_name(struct dm_table *t) in dm_table_device_name() argument
2152 return dm_device_name(t->md); in dm_table_device_name()
2156 void dm_table_run_md_queue_async(struct dm_table *t) in dm_table_run_md_queue_async() argument
2158 if (!dm_table_request_based(t)) in dm_table_run_md_queue_async()
2161 if (t->md->queue) in dm_table_run_md_queue_async()
2162 blk_mq_run_hw_queues(t->md->queue, true); in dm_table_run_md_queue_async()