Lines matching refs: nullb

193 static void null_del_dev(struct nullb *nullb);
326 null_del_dev(dev->nullb); in nullb_device_power_store()
453 null_del_dev(dev->nullb); in nullb_group_drop_item()
492 static inline int null_cache_active(struct nullb *nullb) in null_cache_active() argument
494 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); in null_cache_active()
657 struct nullb *nullb = rq->q->queuedata; in null_softirq_done_fn() local
659 if (nullb->dev->queue_mode == NULL_Q_MQ) in null_softirq_done_fn()
701 static void null_free_sector(struct nullb *nullb, sector_t sector, in null_free_sector() argument
709 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in null_free_sector()
722 nullb->dev->curr_cache -= PAGE_SIZE; in null_free_sector()
727 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx, in null_radix_tree_insert() argument
732 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in null_radix_tree_insert()
739 nullb->dev->curr_cache += PAGE_SIZE; in null_radix_tree_insert()
773 static struct nullb_page *__null_lookup_page(struct nullb *nullb, in __null_lookup_page() argument
784 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in __null_lookup_page()
794 static struct nullb_page *null_lookup_page(struct nullb *nullb, in null_lookup_page() argument
800 page = __null_lookup_page(nullb, sector, for_write, true); in null_lookup_page()
803 return __null_lookup_page(nullb, sector, for_write, false); in null_lookup_page()
806 static struct nullb_page *null_insert_page(struct nullb *nullb, in null_insert_page() argument
808 __releases(&nullb->lock) in null_insert_page()
809 __acquires(&nullb->lock) in null_insert_page()
814 t_page = null_lookup_page(nullb, sector, true, ignore_cache); in null_insert_page()
818 spin_unlock_irq(&nullb->lock); in null_insert_page()
827 spin_lock_irq(&nullb->lock); in null_insert_page()
830 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache); in null_insert_page()
837 spin_lock_irq(&nullb->lock); in null_insert_page()
838 return null_lookup_page(nullb, sector, true, ignore_cache); in null_insert_page()
841 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) in null_flush_cache_page() argument
851 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); in null_flush_cache_page()
857 ret = radix_tree_delete_item(&nullb->dev->data, in null_flush_cache_page()
871 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { in null_flush_cache_page()
875 nullb->dev->blocksize); in null_flush_cache_page()
883 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page); in null_flush_cache_page()
885 nullb->dev->curr_cache -= PAGE_SIZE; in null_flush_cache_page()
890 static int null_make_cache_space(struct nullb *nullb, unsigned long n) in null_make_cache_space() argument
897 if ((nullb->dev->cache_size * 1024 * 1024) > in null_make_cache_space()
898 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) in null_make_cache_space()
901 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache, in null_make_cache_space()
902 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH); in null_make_cache_space()
908 nullb->cache_flush_pos = c_pages[i]->page->index; in null_make_cache_space()
923 err = null_flush_cache_page(nullb, c_pages[i]); in null_make_cache_space()
932 nullb->cache_flush_pos = 0; in null_make_cache_space()
935 spin_unlock_irq(&nullb->lock); in null_make_cache_space()
936 spin_lock_irq(&nullb->lock); in null_make_cache_space()
943 static int copy_to_nullb(struct nullb *nullb, struct page *source, in copy_to_nullb() argument
952 temp = min_t(size_t, nullb->dev->blocksize, n - count); in copy_to_nullb()
954 if (null_cache_active(nullb) && !is_fua) in copy_to_nullb()
955 null_make_cache_space(nullb, PAGE_SIZE); in copy_to_nullb()
958 t_page = null_insert_page(nullb, sector, in copy_to_nullb()
959 !null_cache_active(nullb) || is_fua); in copy_to_nullb()
972 null_free_sector(nullb, sector, true); in copy_to_nullb()
980 static int copy_from_nullb(struct nullb *nullb, struct page *dest, in copy_from_nullb() argument
989 temp = min_t(size_t, nullb->dev->blocksize, n - count); in copy_from_nullb()
992 t_page = null_lookup_page(nullb, sector, false, in copy_from_nullb()
993 !null_cache_active(nullb)); in copy_from_nullb()
1012 static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n) in null_handle_discard() argument
1016 spin_lock_irq(&nullb->lock); in null_handle_discard()
1018 temp = min_t(size_t, n, nullb->dev->blocksize); in null_handle_discard()
1019 null_free_sector(nullb, sector, false); in null_handle_discard()
1020 if (null_cache_active(nullb)) in null_handle_discard()
1021 null_free_sector(nullb, sector, true); in null_handle_discard()
1025 spin_unlock_irq(&nullb->lock); in null_handle_discard()
1028 static int null_handle_flush(struct nullb *nullb) in null_handle_flush() argument
1032 if (!null_cache_active(nullb)) in null_handle_flush()
1035 spin_lock_irq(&nullb->lock); in null_handle_flush()
1037 err = null_make_cache_space(nullb, in null_handle_flush()
1038 nullb->dev->cache_size * 1024 * 1024); in null_handle_flush()
1039 if (err || nullb->dev->curr_cache == 0) in null_handle_flush()
1043 WARN_ON(!radix_tree_empty(&nullb->dev->cache)); in null_handle_flush()
1044 spin_unlock_irq(&nullb->lock); in null_handle_flush()
1048 static int null_transfer(struct nullb *nullb, struct page *page, in null_transfer() argument
1055 err = copy_from_nullb(nullb, page, off, sector, len); in null_transfer()
1059 err = copy_to_nullb(nullb, page, off, sector, len, is_fua); in null_transfer()
1068 struct nullb *nullb = cmd->nq->dev->nullb; in null_handle_rq() local
1078 null_handle_discard(nullb, sector, blk_rq_bytes(rq)); in null_handle_rq()
1082 spin_lock_irq(&nullb->lock); in null_handle_rq()
1085 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, in null_handle_rq()
1089 spin_unlock_irq(&nullb->lock); in null_handle_rq()
1094 spin_unlock_irq(&nullb->lock); in null_handle_rq()
1102 struct nullb *nullb = cmd->nq->dev->nullb; in null_handle_bio() local
1112 null_handle_discard(nullb, sector, in null_handle_bio()
1117 spin_lock_irq(&nullb->lock); in null_handle_bio()
1120 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, in null_handle_bio()
1124 spin_unlock_irq(&nullb->lock); in null_handle_bio()
1129 spin_unlock_irq(&nullb->lock); in null_handle_bio()
1133 static void null_stop_queue(struct nullb *nullb) in null_stop_queue() argument
1135 struct request_queue *q = nullb->q; in null_stop_queue()
1137 if (nullb->dev->queue_mode == NULL_Q_MQ) in null_stop_queue()
1146 static void null_restart_queue_async(struct nullb *nullb) in null_restart_queue_async() argument
1148 struct request_queue *q = nullb->q; in null_restart_queue_async()
1151 if (nullb->dev->queue_mode == NULL_Q_MQ) in null_restart_queue_async()
1160 static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd) in cmd_report_zone() argument
1166 cmd->error = null_zone_report(nullb, cmd->bio); in cmd_report_zone()
1171 cmd->error = null_zone_report(nullb, cmd->rq->bio); in cmd_report_zone()
1182 struct nullb *nullb = dev->nullb; in null_handle_cmd() local
1185 if (cmd_report_zone(nullb, cmd)) in null_handle_cmd()
1191 if (!hrtimer_active(&nullb->bw_timer)) in null_handle_cmd()
1192 hrtimer_restart(&nullb->bw_timer); in null_handle_cmd()
1195 &nullb->cur_bytes) < 0) { in null_handle_cmd()
1196 null_stop_queue(nullb); in null_handle_cmd()
1198 if (atomic_long_read(&nullb->cur_bytes) > 0) in null_handle_cmd()
1199 null_restart_queue_async(nullb); in null_handle_cmd()
1201 struct request_queue *q = nullb->q; in null_handle_cmd()
1214 if (nullb->dev->badblocks.shift != -1) { in null_handle_cmd()
1231 if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector, in null_handle_cmd()
1241 err = null_handle_flush(nullb); in null_handle_cmd()
1246 err = null_handle_flush(nullb); in null_handle_cmd()
1304 struct nullb *nullb = container_of(timer, struct nullb, bw_timer); in nullb_bwtimer_fn() local
1306 unsigned int mbps = nullb->dev->mbps; in nullb_bwtimer_fn()
1308 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps)) in nullb_bwtimer_fn()
1311 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); in nullb_bwtimer_fn()
1312 null_restart_queue_async(nullb); in nullb_bwtimer_fn()
1314 hrtimer_forward_now(&nullb->bw_timer, timer_interval); in nullb_bwtimer_fn()
1319 static void nullb_setup_bwtimer(struct nullb *nullb) in nullb_setup_bwtimer() argument
1323 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in nullb_setup_bwtimer()
1324 nullb->bw_timer.function = nullb_bwtimer_fn; in nullb_setup_bwtimer()
1325 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps)); in nullb_setup_bwtimer()
1326 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); in nullb_setup_bwtimer()
1329 static struct nullb_queue *nullb_to_queue(struct nullb *nullb) in nullb_to_queue() argument
1333 if (nullb->nr_queues != 1) in nullb_to_queue()
1334 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); in nullb_to_queue()
1336 return &nullb->queues[index]; in nullb_to_queue()
1341 struct nullb *nullb = q->queuedata; in null_queue_bio() local
1342 struct nullb_queue *nq = nullb_to_queue(nullb); in null_queue_bio()
1361 struct nullb *nullb = q->queuedata; in null_rq_prep_fn() local
1362 struct nullb_queue *nq = nullb_to_queue(nullb); in null_rq_prep_fn()
1470 static void cleanup_queues(struct nullb *nullb) in cleanup_queues() argument
1474 for (i = 0; i < nullb->nr_queues; i++) in cleanup_queues()
1475 cleanup_queue(&nullb->queues[i]); in cleanup_queues()
1477 kfree(nullb->queues); in cleanup_queues()
1480 static void null_del_dev(struct nullb *nullb) in null_del_dev() argument
1482 struct nullb_device *dev = nullb->dev; in null_del_dev()
1484 ida_simple_remove(&nullb_indexes, nullb->index); in null_del_dev()
1486 list_del_init(&nullb->list); in null_del_dev()
1488 del_gendisk(nullb->disk); in null_del_dev()
1490 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { in null_del_dev()
1491 hrtimer_cancel(&nullb->bw_timer); in null_del_dev()
1492 atomic_long_set(&nullb->cur_bytes, LONG_MAX); in null_del_dev()
1493 null_restart_queue_async(nullb); in null_del_dev()
1496 blk_cleanup_queue(nullb->q); in null_del_dev()
1498 nullb->tag_set == &nullb->__tag_set) in null_del_dev()
1499 blk_mq_free_tag_set(nullb->tag_set); in null_del_dev()
1500 put_disk(nullb->disk); in null_del_dev()
1501 cleanup_queues(nullb); in null_del_dev()
1502 if (null_cache_active(nullb)) in null_del_dev()
1503 null_free_device_storage(nullb->dev, true); in null_del_dev()
1504 kfree(nullb); in null_del_dev()
1505 dev->nullb = NULL; in null_del_dev()
1508 static void null_config_discard(struct nullb *nullb) in null_config_discard() argument
1510 if (nullb->dev->discard == false) in null_config_discard()
1512 nullb->q->limits.discard_granularity = nullb->dev->blocksize; in null_config_discard()
1513 nullb->q->limits.discard_alignment = nullb->dev->blocksize; in null_config_discard()
1514 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); in null_config_discard()
1515 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q); in null_config_discard()
1533 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) in null_init_queue() argument
1535 BUG_ON(!nullb); in null_init_queue()
1539 nq->queue_depth = nullb->queue_depth; in null_init_queue()
1540 nq->dev = nullb->dev; in null_init_queue()
1543 static void null_init_queues(struct nullb *nullb) in null_init_queues() argument
1545 struct request_queue *q = nullb->q; in null_init_queues()
1553 nq = &nullb->queues[i]; in null_init_queues()
1555 null_init_queue(nullb, nq); in null_init_queues()
1556 nullb->nr_queues++; in null_init_queues()
1586 static int setup_queues(struct nullb *nullb) in setup_queues() argument
1588 nullb->queues = kcalloc(nullb->dev->submit_queues, in setup_queues()
1591 if (!nullb->queues) in setup_queues()
1594 nullb->nr_queues = 0; in setup_queues()
1595 nullb->queue_depth = nullb->dev->hw_queue_depth; in setup_queues()
1600 static int init_driver_queues(struct nullb *nullb) in init_driver_queues() argument
1605 for (i = 0; i < nullb->dev->submit_queues; i++) { in init_driver_queues()
1606 nq = &nullb->queues[i]; in init_driver_queues()
1608 null_init_queue(nullb, nq); in init_driver_queues()
1613 nullb->nr_queues++; in init_driver_queues()
1618 static int null_gendisk_register(struct nullb *nullb) in null_gendisk_register() argument
1623 disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node); in null_gendisk_register()
1626 size = (sector_t)nullb->dev->size * 1024 * 1024ULL; in null_gendisk_register()
1631 disk->first_minor = nullb->index; in null_gendisk_register()
1633 disk->private_data = nullb; in null_gendisk_register()
1634 disk->queue = nullb->q; in null_gendisk_register()
1635 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); in null_gendisk_register()
1641 static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) in null_init_tag_set() argument
1644 set->nr_hw_queues = nullb ? nullb->dev->submit_queues : in null_init_tag_set()
1646 set->queue_depth = nullb ? nullb->dev->hw_queue_depth : in null_init_tag_set()
1648 set->numa_node = nullb ? nullb->dev->home_node : g_home_node; in null_init_tag_set()
1655 if ((nullb && nullb->dev->blocking) || g_blocking) in null_init_tag_set()
1717 struct nullb *nullb; in null_add_dev() local
1722 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); in null_add_dev()
1723 if (!nullb) { in null_add_dev()
1727 nullb->dev = dev; in null_add_dev()
1728 dev->nullb = nullb; in null_add_dev()
1730 spin_lock_init(&nullb->lock); in null_add_dev()
1732 rv = setup_queues(nullb); in null_add_dev()
1738 nullb->tag_set = &tag_set; in null_add_dev()
1741 nullb->tag_set = &nullb->__tag_set; in null_add_dev()
1742 rv = null_init_tag_set(nullb, nullb->tag_set); in null_add_dev()
1751 nullb->tag_set->timeout = 5 * HZ; in null_add_dev()
1752 nullb->q = blk_mq_init_queue(nullb->tag_set); in null_add_dev()
1753 if (IS_ERR(nullb->q)) { in null_add_dev()
1757 null_init_queues(nullb); in null_add_dev()
1759 nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node, in null_add_dev()
1761 if (!nullb->q) { in null_add_dev()
1765 blk_queue_make_request(nullb->q, null_queue_bio); in null_add_dev()
1766 rv = init_driver_queues(nullb); in null_add_dev()
1770 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, in null_add_dev()
1772 if (!nullb->q) { in null_add_dev()
1780 blk_queue_prep_rq(nullb->q, null_rq_prep_fn); in null_add_dev()
1781 blk_queue_softirq_done(nullb->q, null_softirq_done_fn); in null_add_dev()
1782 blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn); in null_add_dev()
1783 nullb->q->rq_timeout = 5 * HZ; in null_add_dev()
1784 rv = init_driver_queues(nullb); in null_add_dev()
1791 nullb_setup_bwtimer(nullb); in null_add_dev()
1795 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); in null_add_dev()
1796 blk_queue_write_cache(nullb->q, true, true); in null_add_dev()
1797 blk_queue_flush_queueable(nullb->q, true); in null_add_dev()
1805 blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects); in null_add_dev()
1806 nullb->q->limits.zoned = BLK_ZONED_HM; in null_add_dev()
1809 nullb->q->queuedata = nullb; in null_add_dev()
1810 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); in null_add_dev()
1811 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); in null_add_dev()
1814 nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); in null_add_dev()
1815 dev->index = nullb->index; in null_add_dev()
1818 blk_queue_logical_block_size(nullb->q, dev->blocksize); in null_add_dev()
1819 blk_queue_physical_block_size(nullb->q, dev->blocksize); in null_add_dev()
1821 null_config_discard(nullb); in null_add_dev()
1823 sprintf(nullb->disk_name, "nullb%d", nullb->index); in null_add_dev()
1825 rv = null_gendisk_register(nullb); in null_add_dev()
1830 list_add_tail(&nullb->list, &nullb_list); in null_add_dev()
1838 blk_cleanup_queue(nullb->q); in null_add_dev()
1840 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) in null_add_dev()
1841 blk_mq_free_tag_set(nullb->tag_set); in null_add_dev()
1843 cleanup_queues(nullb); in null_add_dev()
1845 kfree(nullb); in null_add_dev()
1854 struct nullb *nullb; in null_init() local
1918 nullb = list_entry(nullb_list.next, struct nullb, list); in null_init()
1919 dev = nullb->dev; in null_init()
1920 null_del_dev(nullb); in null_init()
1934 struct nullb *nullb; in null_exit() local
1944 nullb = list_entry(nullb_list.next, struct nullb, list); in null_exit()
1945 dev = nullb->dev; in null_exit()
1946 null_del_dev(nullb); in null_exit()