Lines matching references to nullb. Each entry shows the source line number, the matching code, and the enclosing function; lines that declare nullb as a function parameter or a local variable are marked "argument" and "local" respectively.
246 static void null_del_dev(struct nullb *nullb);
248 static struct nullb *null_find_dev_by_name(const char *name);
353 if (!dev->nullb) in nullb_update_nr_hw_queues()
378 set = dev->nullb->tag_set; in nullb_update_nr_hw_queues()
462 null_del_dev(dev->nullb); in nullb_device_power_store()
601 null_del_dev(dev->nullb); in nullb_group_drop_item()
647 static inline int null_cache_active(struct nullb *nullb) in null_cache_active() argument
649 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); in null_cache_active()
847 static void null_free_sector(struct nullb *nullb, sector_t sector, in null_free_sector() argument
855 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in null_free_sector()
868 nullb->dev->curr_cache -= PAGE_SIZE; in null_free_sector()
873 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx, in null_radix_tree_insert() argument
878 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in null_radix_tree_insert()
885 nullb->dev->curr_cache += PAGE_SIZE; in null_radix_tree_insert()
919 static struct nullb_page *__null_lookup_page(struct nullb *nullb, in __null_lookup_page() argument
930 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in __null_lookup_page()
940 static struct nullb_page *null_lookup_page(struct nullb *nullb, in null_lookup_page() argument
946 page = __null_lookup_page(nullb, sector, for_write, true); in null_lookup_page()
949 return __null_lookup_page(nullb, sector, for_write, false); in null_lookup_page()
952 static struct nullb_page *null_insert_page(struct nullb *nullb, in null_insert_page() argument
954 __releases(&nullb->lock) in null_insert_page()
955 __acquires(&nullb->lock) in null_insert_page()
960 t_page = null_lookup_page(nullb, sector, true, ignore_cache); in null_insert_page()
964 spin_unlock_irq(&nullb->lock); in null_insert_page()
973 spin_lock_irq(&nullb->lock); in null_insert_page()
976 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache); in null_insert_page()
983 spin_lock_irq(&nullb->lock); in null_insert_page()
984 return null_lookup_page(nullb, sector, true, ignore_cache); in null_insert_page()
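
The helpers above back a memory-backed nullb device with two radix trees, dev->data for ordinary pages and dev->cache for the write cache: null_lookup_page() probes the cache tree first and falls back to the data tree, and null_radix_tree_insert() adds PAGE_SIZE to curr_cache whenever a page lands in the cache tree. A minimal userspace sketch of that lookup order and cache accounting, assuming plain arrays in place of radix trees and hypothetical names (store, lookup_page, insert_page):

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_IDX     16          /* toy capacity; the driver uses radix trees */
    #define PAGE_SIZE  4096

    struct store {
        void *cache[NR_IDX];       /* stands in for dev->cache                  */
        void *data[NR_IDX];        /* stands in for dev->data                   */
        unsigned long curr_cache;  /* bytes currently held in the cache tree    */
    };

    /* Look in one tree only, like __null_lookup_page(). */
    static void *lookup_one(struct store *s, unsigned long idx, int is_cache)
    {
        return is_cache ? s->cache[idx] : s->data[idx];
    }

    /* Cache first, then data, like null_lookup_page() when the cache is active. */
    static void *lookup_page(struct store *s, unsigned long idx)
    {
        void *p = lookup_one(s, idx, 1);
        return p ? p : lookup_one(s, idx, 0);
    }

    /* Insert into one tree and account cache usage, like null_radix_tree_insert(). */
    static void *insert_page(struct store *s, unsigned long idx, int is_cache)
    {
        void **slot = is_cache ? &s->cache[idx] : &s->data[idx];

        if (!*slot) {
            *slot = malloc(PAGE_SIZE);
            if (is_cache)
                s->curr_cache += PAGE_SIZE;
        }
        return *slot;
    }

    int main(void)
    {
        struct store s = { 0 };

        insert_page(&s, 3, 1);                   /* dirty page sits in the cache */
        insert_page(&s, 5, 0);                   /* clean page sits in data      */
        printf("idx 3 found: %d, idx 5 found: %d, curr_cache: %lu\n",
               lookup_page(&s, 3) != NULL, lookup_page(&s, 5) != NULL,
               s.curr_cache);
        return 0;
    }
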
987 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) in null_flush_cache_page() argument
997 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); in null_flush_cache_page()
1003 ret = radix_tree_delete_item(&nullb->dev->data, in null_flush_cache_page()
1017 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { in null_flush_cache_page()
1021 nullb->dev->blocksize); in null_flush_cache_page()
1029 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page); in null_flush_cache_page()
1031 nullb->dev->curr_cache -= PAGE_SIZE; in null_flush_cache_page()
1036 static int null_make_cache_space(struct nullb *nullb, unsigned long n) in null_make_cache_space() argument
1043 if ((nullb->dev->cache_size * 1024 * 1024) > in null_make_cache_space()
1044 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) in null_make_cache_space()
1047 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache, in null_make_cache_space()
1048 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH); in null_make_cache_space()
1054 nullb->cache_flush_pos = c_pages[i]->page->index; in null_make_cache_space()
1069 err = null_flush_cache_page(nullb, c_pages[i]); in null_make_cache_space()
1078 nullb->cache_flush_pos = 0; in null_make_cache_space()
1081 spin_unlock_irq(&nullb->lock); in null_make_cache_space()
1082 spin_lock_irq(&nullb->lock); in null_make_cache_space()
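
null_make_cache_space() leaves the cache alone while the configured cache_size (in megabytes) still covers curr_cache plus the n incoming bytes, or while nothing is cached at all; otherwise it gang-looks-up at most FREE_BATCH cache pages starting at cache_flush_pos, pushes them back to the data tree via null_flush_cache_page(), and resets cache_flush_pos to 0 when the lookup comes back short. A rough userspace model of that admission check and wrap-around, with hypothetical names and no locking or real page I/O:

    #include <stdio.h>

    #define FREE_BATCH 16
    #define MB         (1024UL * 1024UL)

    struct cache_state {
        unsigned long cache_size_mb;   /* dev->cache_size                       */
        unsigned long curr_cache;      /* bytes currently cached                */
        unsigned long flush_pos;       /* nullb->cache_flush_pos                */
        unsigned long nr_cached;       /* toy stand-in for the cache radix tree */
    };

    /* Flush one page back to the data store; here we only adjust the counters. */
    static void flush_one(struct cache_state *c)
    {
        c->curr_cache -= 4096;
        c->nr_cached--;
    }

    static int make_cache_space(struct cache_state *c, unsigned long n)
    {
        unsigned long i, found;

        /* Enough headroom, or nothing cached at all: no eviction needed. */
        if (c->cache_size_mb * MB > c->curr_cache + n || c->curr_cache == 0)
            return 0;

        /* "Gang lookup": take at most FREE_BATCH pages starting at flush_pos. */
        found = c->nr_cached < FREE_BATCH ? c->nr_cached : FREE_BATCH;
        for (i = 0; i < found; i++) {
            c->flush_pos++;            /* the driver records each page's index */
            flush_one(c);
        }

        /* A short batch means the end of the tree was reached: restart the scan. */
        if (found < FREE_BATCH)
            c->flush_pos = 0;
        return 0;
    }

    int main(void)
    {
        struct cache_state c = { .cache_size_mb = 1, .curr_cache = MB,
                                 .flush_pos = 0, .nr_cached = 256 };

        make_cache_space(&c, 4096);
        printf("curr_cache=%lu flush_pos=%lu\n", c.curr_cache, c.flush_pos);
        return 0;
    }
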
1089 static int copy_to_nullb(struct nullb *nullb, struct page *source, in copy_to_nullb() argument
1098 temp = min_t(size_t, nullb->dev->blocksize, n - count); in copy_to_nullb()
1100 if (null_cache_active(nullb) && !is_fua) in copy_to_nullb()
1101 null_make_cache_space(nullb, PAGE_SIZE); in copy_to_nullb()
1104 t_page = null_insert_page(nullb, sector, in copy_to_nullb()
1105 !null_cache_active(nullb) || is_fua); in copy_to_nullb()
1118 null_free_sector(nullb, sector, true); in copy_to_nullb()
1126 static int copy_from_nullb(struct nullb *nullb, struct page *dest, in copy_from_nullb() argument
1135 temp = min_t(size_t, nullb->dev->blocksize, n - count); in copy_from_nullb()
1138 t_page = null_lookup_page(nullb, sector, false, in copy_from_nullb()
1139 !null_cache_active(nullb)); in copy_from_nullb()
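
copy_to_nullb() and copy_from_nullb() move data in chunks of at most dev->blocksize bytes: each pass handles temp = min(blocksize, n - count) bytes against the page backing the current sector, then advances the sector by temp >> SECTOR_SHIFT. On writes, the target page comes from the cache tree unless caching is off or the write is FUA, and a FUA write also drops any stale cached copy of the sector (the null_free_sector(..., true) call above). A compact sketch of just the chunking arithmetic, with a hypothetical name and SECTOR_SHIFT = 9 as in the kernel:

    #include <stdio.h>

    #define SECTOR_SHIFT 9

    /* Walk n bytes starting at 'sector' in chunks of at most 'blocksize' bytes. */
    static void walk_chunks(unsigned long long sector, unsigned int n,
                            unsigned int blocksize)
    {
        unsigned int count = 0, temp;

        while (count < n) {
            temp = blocksize < n - count ? blocksize : n - count;
            printf("chunk: sector %llu, %u bytes\n", sector, temp);
            sector += temp >> SECTOR_SHIFT;   /* next chunk's starting sector */
            count += temp;
        }
    }

    int main(void)
    {
        walk_chunks(2048, 4096 + 512, 4096);  /* a 4.5 KiB transfer, 4 KiB blocks */
        return 0;
    }
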
1158 static void nullb_fill_pattern(struct nullb *nullb, struct page *page, in nullb_fill_pattern() argument
1171 struct nullb *nullb = dev->nullb; in null_handle_discard() local
1175 spin_lock_irq(&nullb->lock); in null_handle_discard()
1178 null_free_sector(nullb, sector, false); in null_handle_discard()
1179 if (null_cache_active(nullb)) in null_handle_discard()
1180 null_free_sector(nullb, sector, true); in null_handle_discard()
1184 spin_unlock_irq(&nullb->lock); in null_handle_discard()
1189 static int null_handle_flush(struct nullb *nullb) in null_handle_flush() argument
1193 if (!null_cache_active(nullb)) in null_handle_flush()
1196 spin_lock_irq(&nullb->lock); in null_handle_flush()
1198 err = null_make_cache_space(nullb, in null_handle_flush()
1199 nullb->dev->cache_size * 1024 * 1024); in null_handle_flush()
1200 if (err || nullb->dev->curr_cache == 0) in null_handle_flush()
1204 WARN_ON(!radix_tree_empty(&nullb->dev->cache)); in null_handle_flush()
1205 spin_unlock_irq(&nullb->lock); in null_handle_flush()
1209 static int null_transfer(struct nullb *nullb, struct page *page, in null_transfer() argument
1213 struct nullb_device *dev = nullb->dev; in null_transfer()
1219 valid_len = null_zone_valid_read_len(nullb, in null_transfer()
1223 err = copy_from_nullb(nullb, page, off, in null_transfer()
1230 nullb_fill_pattern(nullb, page, len, off); in null_transfer()
1234 err = copy_to_nullb(nullb, page, off, sector, len, is_fua); in null_transfer()
1243 struct nullb *nullb = cmd->nq->dev->nullb; in null_handle_rq() local
1250 spin_lock_irq(&nullb->lock); in null_handle_rq()
1253 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, in null_handle_rq()
1257 spin_unlock_irq(&nullb->lock); in null_handle_rq()
1262 spin_unlock_irq(&nullb->lock); in null_handle_rq()
1270 struct nullb *nullb = cmd->nq->dev->nullb; in null_handle_bio() local
1277 spin_lock_irq(&nullb->lock); in null_handle_bio()
1280 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, in null_handle_bio()
1284 spin_unlock_irq(&nullb->lock); in null_handle_bio()
1289 spin_unlock_irq(&nullb->lock); in null_handle_bio()
1293 static void null_stop_queue(struct nullb *nullb) in null_stop_queue() argument
1295 struct request_queue *q = nullb->q; in null_stop_queue()
1297 if (nullb->dev->queue_mode == NULL_Q_MQ) in null_stop_queue()
1301 static void null_restart_queue_async(struct nullb *nullb) in null_restart_queue_async() argument
1303 struct request_queue *q = nullb->q; in null_restart_queue_async()
1305 if (nullb->dev->queue_mode == NULL_Q_MQ) in null_restart_queue_async()
1312 struct nullb *nullb = dev->nullb; in null_handle_throttled() local
1316 if (!hrtimer_active(&nullb->bw_timer)) in null_handle_throttled()
1317 hrtimer_restart(&nullb->bw_timer); in null_handle_throttled()
1319 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) { in null_handle_throttled()
1320 null_stop_queue(nullb); in null_handle_throttled()
1322 if (atomic_long_read(&nullb->cur_bytes) > 0) in null_handle_throttled()
1323 null_restart_queue_async(nullb); in null_handle_throttled()
1438 struct nullb *nullb = dev->nullb; in null_handle_cmd() local
1448 cmd->error = errno_to_blk_status(null_handle_flush(nullb)); in null_handle_cmd()
1468 struct nullb *nullb = container_of(timer, struct nullb, bw_timer); in nullb_bwtimer_fn() local
1470 unsigned int mbps = nullb->dev->mbps; in nullb_bwtimer_fn()
1472 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps)) in nullb_bwtimer_fn()
1475 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); in nullb_bwtimer_fn()
1476 null_restart_queue_async(nullb); in nullb_bwtimer_fn()
1478 hrtimer_forward_now(&nullb->bw_timer, timer_interval); in nullb_bwtimer_fn()
1483 static void nullb_setup_bwtimer(struct nullb *nullb) in nullb_setup_bwtimer() argument
1487 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in nullb_setup_bwtimer()
1488 nullb->bw_timer.function = nullb_bwtimer_fn; in nullb_setup_bwtimer()
1489 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps)); in nullb_setup_bwtimer()
1490 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); in nullb_setup_bwtimer()
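
The throttling helpers implement a per-tick byte budget: nullb_setup_bwtimer() seeds cur_bytes with mb_per_tick(mbps) and arms an hrtimer, null_handle_throttled() charges each request against the budget and stops the queue when it goes negative, and nullb_bwtimer_fn() refills the budget and restarts the queue on the next tick, doing nothing if the budget was untouched. A userspace sketch of that budget logic, with hypothetical names standing in for the hrtimer and the blk-mq queue stop/start machinery:

    #include <stdio.h>
    #include <stdbool.h>

    struct throttle {
        long cur_bytes;        /* remaining budget for this tick             */
        long per_tick;         /* mb_per_tick(mbps) in the driver            */
        bool stopped;          /* queue stopped, waiting for the next refill */
    };

    /* Charge one request against the budget; stop the queue when it runs dry. */
    static bool submit(struct throttle *t, long bytes)
    {
        t->cur_bytes -= bytes;
        if (t->cur_bytes < 0) {
            t->stopped = true;     /* null_stop_queue()                       */
            return false;          /* tell the caller to back off and retry   */
        }
        return true;
    }

    /* Timer tick: refill the budget and wake the queue, like nullb_bwtimer_fn(). */
    static void tick(struct throttle *t)
    {
        if (t->cur_bytes == t->per_tick)
            return;                /* nothing was consumed since the last tick */
        t->cur_bytes = t->per_tick;
        t->stopped = false;        /* null_restart_queue_async()               */
    }

    int main(void)
    {
        struct throttle t = { .cur_bytes = 8192, .per_tick = 8192 };

        printf("%d\n", submit(&t, 4096));   /* fits in the budget     -> 1 */
        printf("%d\n", submit(&t, 8192));   /* overdraws, queue stops -> 0 */
        tick(&t);
        printf("%d\n", submit(&t, 4096));   /* refilled               -> 1 */
        return 0;
    }
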
1493 static struct nullb_queue *nullb_to_queue(struct nullb *nullb) in nullb_to_queue() argument
1497 if (nullb->nr_queues != 1) in nullb_to_queue()
1498 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); in nullb_to_queue()
1500 return &nullb->queues[index]; in nullb_to_queue()
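
nullb_to_queue() spreads CPUs evenly across the device's queues: with more than one queue, the queue index is the CPU id divided by ceil(nr_cpu_ids / nr_queues), so blocks of consecutive CPU ids share one queue. The same arithmetic as a standalone sketch (hypothetical name cpu_to_queue):

    #include <stdio.h>

    /* Same arithmetic as nullb_to_queue(): consecutive CPUs map to one queue. */
    static unsigned int cpu_to_queue(unsigned int cpu, unsigned int nr_cpu_ids,
                                     unsigned int nr_queues)
    {
        if (nr_queues == 1)
            return 0;
        return cpu / ((nr_cpu_ids + nr_queues - 1) / nr_queues);
    }

    int main(void)
    {
        unsigned int cpu;

        /* 8 CPUs, 3 queues: CPUs 0-2 -> queue 0, 3-5 -> queue 1, 6-7 -> queue 2. */
        for (cpu = 0; cpu < 8; cpu++)
            printf("cpu %u -> queue %u\n", cpu, cpu_to_queue(cpu, 8, 3));
        return 0;
    }
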
1507 struct nullb *nullb = bio->bi_bdev->bd_disk->private_data; in null_submit_bio() local
1508 struct nullb_queue *nq = nullb_to_queue(nullb); in null_submit_bio()
1533 struct nullb *nullb = set->driver_data; in null_map_queues() local
1538 if (nullb) { in null_map_queues()
1539 struct nullb_device *dev = nullb->dev; in null_map_queues()
1694 static void cleanup_queues(struct nullb *nullb) in cleanup_queues() argument
1698 for (i = 0; i < nullb->nr_queues; i++) in cleanup_queues()
1699 cleanup_queue(&nullb->queues[i]); in cleanup_queues()
1701 kfree(nullb->queues); in cleanup_queues()
1707 struct nullb *nullb = nq->dev->nullb; in null_exit_hctx() local
1709 nullb->nr_queues--; in null_exit_hctx()
1712 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) in null_init_queue() argument
1715 nq->queue_depth = nullb->queue_depth; in null_init_queue()
1716 nq->dev = nullb->dev; in null_init_queue()
1724 struct nullb *nullb = hctx->queue->queuedata; in null_init_hctx() local
1732 nq = &nullb->queues[hctx_idx]; in null_init_hctx()
1734 null_init_queue(nullb, nq); in null_init_hctx()
1735 nullb->nr_queues++; in null_init_hctx()
1750 static void null_del_dev(struct nullb *nullb) in null_del_dev() argument
1754 if (!nullb) in null_del_dev()
1757 dev = nullb->dev; in null_del_dev()
1759 ida_simple_remove(&nullb_indexes, nullb->index); in null_del_dev()
1761 list_del_init(&nullb->list); in null_del_dev()
1763 del_gendisk(nullb->disk); in null_del_dev()
1765 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { in null_del_dev()
1766 hrtimer_cancel(&nullb->bw_timer); in null_del_dev()
1767 atomic_long_set(&nullb->cur_bytes, LONG_MAX); in null_del_dev()
1768 null_restart_queue_async(nullb); in null_del_dev()
1771 put_disk(nullb->disk); in null_del_dev()
1773 nullb->tag_set == &nullb->__tag_set) in null_del_dev()
1774 blk_mq_free_tag_set(nullb->tag_set); in null_del_dev()
1775 cleanup_queues(nullb); in null_del_dev()
1776 if (null_cache_active(nullb)) in null_del_dev()
1777 null_free_device_storage(nullb->dev, true); in null_del_dev()
1778 kfree(nullb); in null_del_dev()
1779 dev->nullb = NULL; in null_del_dev()
1782 static void null_config_discard(struct nullb *nullb) in null_config_discard() argument
1784 if (nullb->dev->discard == false) in null_config_discard()
1787 if (!nullb->dev->memory_backed) { in null_config_discard()
1788 nullb->dev->discard = false; in null_config_discard()
1793 if (nullb->dev->zoned) { in null_config_discard()
1794 nullb->dev->discard = false; in null_config_discard()
1799 nullb->q->limits.discard_granularity = nullb->dev->blocksize; in null_config_discard()
1800 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); in null_config_discard()
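
null_config_discard() only keeps discard enabled for memory-backed, non-zoned devices; in either other case it clears dev->discard and returns, otherwise it sets the discard granularity to the block size and the limit to UINT_MAX >> 9 sectors. The enable decision reduces to a small predicate (hypothetical name discard_allowed):

    #include <stdbool.h>
    #include <stdio.h>

    /* Discard is only honoured when it was requested, the device keeps its data
     * in memory, and zoned emulation is not in use. */
    static bool discard_allowed(bool requested, bool memory_backed, bool zoned)
    {
        return requested && memory_backed && !zoned;
    }

    int main(void)
    {
        printf("%d %d\n", discard_allowed(true, true, false),
                          discard_allowed(true, false, false));
        return 0;
    }
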
1837 static int setup_queues(struct nullb *nullb) in setup_queues() argument
1844 nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue), in setup_queues()
1846 if (!nullb->queues) in setup_queues()
1849 nullb->queue_depth = nullb->dev->hw_queue_depth; in setup_queues()
1853 static int init_driver_queues(struct nullb *nullb) in init_driver_queues() argument
1858 for (i = 0; i < nullb->dev->submit_queues; i++) { in init_driver_queues()
1859 nq = &nullb->queues[i]; in init_driver_queues()
1861 null_init_queue(nullb, nq); in init_driver_queues()
1866 nullb->nr_queues++; in init_driver_queues()
1871 static int null_gendisk_register(struct nullb *nullb) in null_gendisk_register() argument
1873 sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT; in null_gendisk_register()
1874 struct gendisk *disk = nullb->disk; in null_gendisk_register()
1879 disk->first_minor = nullb->index; in null_gendisk_register()
1881 if (queue_is_mq(nullb->q)) in null_gendisk_register()
1885 disk->private_data = nullb; in null_gendisk_register()
1886 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); in null_gendisk_register()
1888 if (nullb->dev->zoned) { in null_gendisk_register()
1889 int ret = null_register_zoned_dev(nullb); in null_gendisk_register()
1898 static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) in null_init_tag_set() argument
1905 if (nullb) { in null_init_tag_set()
1906 hw_queues = nullb->dev->submit_queues; in null_init_tag_set()
1907 poll_queues = nullb->dev->poll_queues; in null_init_tag_set()
1908 queue_depth = nullb->dev->hw_queue_depth; in null_init_tag_set()
1909 numa_node = nullb->dev->home_node; in null_init_tag_set()
1910 if (nullb->dev->no_sched) in null_init_tag_set()
1912 if (nullb->dev->shared_tag_bitmap) in null_init_tag_set()
1914 if (nullb->dev->blocking) in null_init_tag_set()
1932 set->driver_data = nullb; in null_init_tag_set()
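
null_init_tag_set() fills the blk-mq tag set from the per-device configuration: submit_queues and poll_queues size the queues, hw_queue_depth sets the depth, home_node picks the NUMA node, and the no_sched, shared_tag_bitmap and blocking knobs each contribute a flag before the device itself is stashed in driver_data. A toy sketch of that conditional flag accumulation (the F_* constants below are hypothetical stand-ins, not the real BLK_MQ_F_* values):

    #include <stdio.h>

    /* Hypothetical stand-ins for the blk-mq flag bits. */
    #define F_NO_SCHED        (1U << 0)
    #define F_TAG_HCTX_SHARED (1U << 1)
    #define F_BLOCKING        (1U << 2)

    struct dev_cfg {
        int no_sched, shared_tag_bitmap, blocking;
    };

    /* Mirror of the conditional flag accumulation in null_init_tag_set(). */
    static unsigned int build_flags(const struct dev_cfg *cfg)
    {
        unsigned int flags = 0;

        if (cfg->no_sched)
            flags |= F_NO_SCHED;
        if (cfg->shared_tag_bitmap)
            flags |= F_TAG_HCTX_SHARED;
        if (cfg->blocking)
            flags |= F_BLOCKING;
        return flags;
    }

    int main(void)
    {
        struct dev_cfg cfg = { .no_sched = 1, .shared_tag_bitmap = 0, .blocking = 1 };

        printf("flags: 0x%x\n", build_flags(&cfg));
        return 0;
    }
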
2017 struct nullb *nullb; in null_add_dev() local
2024 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); in null_add_dev()
2025 if (!nullb) { in null_add_dev()
2029 nullb->dev = dev; in null_add_dev()
2030 dev->nullb = nullb; in null_add_dev()
2032 spin_lock_init(&nullb->lock); in null_add_dev()
2034 rv = setup_queues(nullb); in null_add_dev()
2040 nullb->tag_set = &tag_set; in null_add_dev()
2043 nullb->tag_set = &nullb->__tag_set; in null_add_dev()
2044 rv = null_init_tag_set(nullb, nullb->tag_set); in null_add_dev()
2053 nullb->tag_set->timeout = 5 * HZ; in null_add_dev()
2054 nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb); in null_add_dev()
2055 if (IS_ERR(nullb->disk)) { in null_add_dev()
2056 rv = PTR_ERR(nullb->disk); in null_add_dev()
2059 nullb->q = nullb->disk->queue; in null_add_dev()
2062 nullb->disk = blk_alloc_disk(nullb->dev->home_node); in null_add_dev()
2063 if (!nullb->disk) in null_add_dev()
2066 nullb->q = nullb->disk->queue; in null_add_dev()
2067 rv = init_driver_queues(nullb); in null_add_dev()
2074 nullb_setup_bwtimer(nullb); in null_add_dev()
2078 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); in null_add_dev()
2079 blk_queue_write_cache(nullb->q, true, true); in null_add_dev()
2083 rv = null_init_zoned_dev(dev, nullb->q); in null_add_dev()
2088 nullb->q->queuedata = nullb; in null_add_dev()
2089 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); in null_add_dev()
2090 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); in null_add_dev()
2098 nullb->index = rv; in null_add_dev()
2102 blk_queue_logical_block_size(nullb->q, dev->blocksize); in null_add_dev()
2103 blk_queue_physical_block_size(nullb->q, dev->blocksize); in null_add_dev()
2105 dev->max_sectors = queue_max_hw_sectors(nullb->q); in null_add_dev()
2108 blk_queue_max_hw_sectors(nullb->q, dev->max_sectors); in null_add_dev()
2111 blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1); in null_add_dev()
2113 null_config_discard(nullb); in null_add_dev()
2117 snprintf(nullb->disk_name, sizeof(nullb->disk_name), in null_add_dev()
2120 sprintf(nullb->disk_name, "nullb%d", nullb->index); in null_add_dev()
2123 rv = null_gendisk_register(nullb); in null_add_dev()
2128 list_add_tail(&nullb->list, &nullb_list); in null_add_dev()
2131 pr_info("disk %s created\n", nullb->disk_name); in null_add_dev()
2136 ida_free(&nullb_indexes, nullb->index); in null_add_dev()
2140 put_disk(nullb->disk); in null_add_dev()
2142 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) in null_add_dev()
2143 blk_mq_free_tag_set(nullb->tag_set); in null_add_dev()
2145 cleanup_queues(nullb); in null_add_dev()
2147 kfree(nullb); in null_add_dev()
2148 dev->nullb = NULL; in null_add_dev()
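
null_add_dev() builds the device in stages (queues, tag set, disk and request queue, throttle timer, cache, zoned setup, index allocation, queue limits, gendisk registration), and the error path visible at the end of the listing tears the completed stages down in reverse: ida_free(), put_disk(), blk_mq_free_tag_set(), cleanup_queues(), kfree(). A generic sketch of that goto-based unwind pattern, using hypothetical resources and labels rather than the driver's own:

    #include <stdio.h>
    #include <stdlib.h>

    /* Staged setup with reverse-order unwinding, the shape used by null_add_dev(). */
    static int add_dev(void)
    {
        void *queues, *tags, *disk;
        int rv = -1;

        queues = malloc(64);                 /* stands in for setup_queues()      */
        if (!queues)
            goto out;
        tags = malloc(64);                   /* stands in for null_init_tag_set() */
        if (!tags)
            goto out_cleanup_queues;
        disk = malloc(64);                   /* stands in for blk_mq_alloc_disk() */
        if (!disk)
            goto out_cleanup_tags;

        printf("device added\n");
        return 0;

    out_cleanup_tags:
        free(tags);
    out_cleanup_queues:
        free(queues);
    out:
        return rv;
    }

    int main(void)
    {
        return add_dev() ? 1 : 0;
    }
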
2153 static struct nullb *null_find_dev_by_name(const char *name) in null_find_dev_by_name()
2155 struct nullb *nullb = NULL, *nb; in null_find_dev_by_name() local
2160 nullb = nb; in null_find_dev_by_name()
2166 return nullb; in null_find_dev_by_name()
2187 static void null_destroy_dev(struct nullb *nullb) in null_destroy_dev() argument
2189 struct nullb_device *dev = nullb->dev; in null_destroy_dev()
2191 null_del_dev(nullb); in null_destroy_dev()
2199 struct nullb *nullb; in null_init() local
2267 nullb = list_entry(nullb_list.next, struct nullb, list); in null_init()
2268 null_destroy_dev(nullb); in null_init()
2281 struct nullb *nullb; in null_exit() local
2289 nullb = list_entry(nullb_list.next, struct nullb, list); in null_exit()
2290 null_destroy_dev(nullb); in null_exit()