Lines Matching +full:blocking +full:- +full:io
1 // SPDX-License-Identifier: GPL-2.0-only
39 * CACHE: Device is using a write-back cache.
65 #define NULLB_PAGE_LOCK (MAP_SZ - 1)
66 #define NULLB_PAGE_FREE (MAP_SZ - 2)
86 MODULE_PARM_DESC(no_sched, "No io scheduler");
103 * Documentation/fault-injection/fault-injection.rst.
126 return -EINVAL; in null_param_store_val()
129 return -EINVAL; in null_param_store_val()
165 module_param_named(blocking, g_blocking, bool, 0444);
166 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
170 MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
174 MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");
190 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
 202 MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
206 MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");
 210 MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
 214 MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
 222 MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");
 226 MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two: Default: 256");
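
A minimal illustration of the pattern behind these descriptions: each tunable is a file-scope variable exposed read-only through module_param_named() and documented with MODULE_PARM_DESC(). The parameter name example_param is hypothetical and not part of null_blk.

#include <linux/moduleparam.h>

/* hypothetical tunable, shown only to illustrate the declaration pattern */
static bool g_example_param;
module_param_named(example_param, g_example_param, bool, 0444);
MODULE_PARM_DESC(example_param, "Illustrative flag. Default: false");
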
320 to_nullb_device(item)->NAME, page); \
336 else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
337 ret = -EBUSY; \
340 dev->NAME = new_value; \
353 if (!dev->nullb) in nullb_update_nr_hw_queues()
360 return -EINVAL; in nullb_update_nr_hw_queues()
363 * Make sure that null_init_hctx() does not access nullb->queues[] past in nullb_update_nr_hw_queues()
367 return -EINVAL; in nullb_update_nr_hw_queues()
373 dev->prev_submit_queues = dev->submit_queues; in nullb_update_nr_hw_queues()
374 dev->prev_poll_queues = dev->poll_queues; in nullb_update_nr_hw_queues()
375 dev->submit_queues = submit_queues; in nullb_update_nr_hw_queues()
376 dev->poll_queues = poll_queues; in nullb_update_nr_hw_queues()
378 set = dev->nullb->tag_set; in nullb_update_nr_hw_queues()
381 ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM; in nullb_update_nr_hw_queues()
385 dev->submit_queues = dev->prev_submit_queues; in nullb_update_nr_hw_queues()
386 dev->poll_queues = dev->prev_poll_queues; in nullb_update_nr_hw_queues()
395 return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues); in nullb_apply_submit_queues()
401 return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues); in nullb_apply_poll_queues()
415 NULLB_DEVICE_ATTR(blocking, bool, NULL);
433 return nullb_device_bool_attr_show(to_nullb_device(item)->power, page); in nullb_device_power_show()
447 if (!dev->power && newp) { in nullb_device_power_store()
448 if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) in nullb_device_power_store()
452 clear_bit(NULLB_DEV_FL_UP, &dev->flags); in nullb_device_power_store()
456 set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); in nullb_device_power_store()
457 dev->power = newp; in nullb_device_power_store()
458 } else if (dev->power && !newp) { in nullb_device_power_store()
459 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { in nullb_device_power_store()
461 dev->power = newp; in nullb_device_power_store()
462 null_del_dev(dev->nullb); in nullb_device_power_store()
465 clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); in nullb_device_power_store()
477 return badblocks_show(&t_dev->badblocks, page, 0); in nullb_device_badblocks_show()
490 return -ENOMEM; in nullb_device_badblocks_store()
494 ret = -EINVAL; in nullb_device_badblocks_store()
495 if (buf[0] != '+' && buf[0] != '-') in nullb_device_badblocks_store()
497 tmp = strchr(&buf[1], '-'); in nullb_device_badblocks_store()
507 ret = -EINVAL; in nullb_device_badblocks_store()
511 cmpxchg(&t_dev->badblocks.shift, -1, 0); in nullb_device_badblocks_store()
513 ret = badblocks_set(&t_dev->badblocks, start, in nullb_device_badblocks_store()
514 end - start + 1, 1); in nullb_device_badblocks_store()
516 ret = badblocks_clear(&t_dev->badblocks, start, in nullb_device_badblocks_store()
517 end - start + 1); in nullb_device_badblocks_store()
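
The store routine above accepts ranges written as "+start-end" to mark sectors bad and "-start-end" to clear them again. A small userspace sketch of that parsing, with strtoull standing in for the kernel's string-to-integer helpers and the function name chosen for illustration only:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static bool parse_badblocks_range(const char *buf, char *op,
				  unsigned long long *start,
				  unsigned long long *end)
{
	const char *dash;

	if (buf[0] != '+' && buf[0] != '-')
		return false;
	dash = strchr(buf + 1, '-');	/* separator between start and end */
	if (!dash)
		return false;
	*op = buf[0];
	*start = strtoull(buf + 1, NULL, 10);
	*end = strtoull(dash + 1, NULL, 10);
	return *start <= *end;		/* reject inverted ranges */
}

int main(void)
{
	char op;
	unsigned long long start, end;

	if (parse_badblocks_range("+0-1023", &op, &start, &end))
		printf("%c range covering %llu sectors\n", op, end - start + 1);
	return 0;
}
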
600 fault_config_init(&dev->timeout_config, "timeout_inject"); in nullb_add_fault_config()
601 fault_config_init(&dev->requeue_config, "requeue_inject"); in nullb_add_fault_config()
602 fault_config_init(&dev->init_hctx_fault_config, "init_hctx_fault_inject"); in nullb_add_fault_config()
604 configfs_add_default_group(&dev->timeout_config.group, &dev->group); in nullb_add_fault_config()
605 configfs_add_default_group(&dev->requeue_config.group, &dev->group); in nullb_add_fault_config()
606 configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group); in nullb_add_fault_config()
623 return ERR_PTR(-EEXIST); in nullb_group_make_group()
627 return ERR_PTR(-ENOMEM); in nullb_group_make_group()
629 config_group_init_type_name(&dev->group, name, &nullb_device_type); in nullb_group_make_group()
632 return &dev->group; in nullb_group_make_group()
640 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { in nullb_group_drop_item()
642 dev->power = false; in nullb_group_drop_item()
643 null_del_dev(dev->nullb); in nullb_group_drop_item()
653 "badblocks,blocking,blocksize,cache_size," in memb_group_features_show()
691 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); in null_cache_active()
703 dev->timeout_config.attr = null_timeout_attr; in null_alloc_dev()
704 dev->requeue_config.attr = null_requeue_attr; in null_alloc_dev()
705 dev->init_hctx_fault_config.attr = null_init_hctx_attr; in null_alloc_dev()
708 INIT_RADIX_TREE(&dev->data, GFP_ATOMIC); in null_alloc_dev()
709 INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC); in null_alloc_dev()
710 if (badblocks_init(&dev->badblocks, 0)) { in null_alloc_dev()
715 dev->size = g_gb * 1024; in null_alloc_dev()
716 dev->completion_nsec = g_completion_nsec; in null_alloc_dev()
717 dev->submit_queues = g_submit_queues; in null_alloc_dev()
718 dev->prev_submit_queues = g_submit_queues; in null_alloc_dev()
719 dev->poll_queues = g_poll_queues; in null_alloc_dev()
720 dev->prev_poll_queues = g_poll_queues; in null_alloc_dev()
721 dev->home_node = g_home_node; in null_alloc_dev()
722 dev->queue_mode = g_queue_mode; in null_alloc_dev()
723 dev->blocksize = g_bs; in null_alloc_dev()
724 dev->max_sectors = g_max_sectors; in null_alloc_dev()
725 dev->irqmode = g_irqmode; in null_alloc_dev()
726 dev->hw_queue_depth = g_hw_queue_depth; in null_alloc_dev()
727 dev->blocking = g_blocking; in null_alloc_dev()
728 dev->memory_backed = g_memory_backed; in null_alloc_dev()
729 dev->discard = g_discard; in null_alloc_dev()
730 dev->cache_size = g_cache_size; in null_alloc_dev()
731 dev->mbps = g_mbps; in null_alloc_dev()
732 dev->use_per_node_hctx = g_use_per_node_hctx; in null_alloc_dev()
733 dev->zoned = g_zoned; in null_alloc_dev()
734 dev->zone_size = g_zone_size; in null_alloc_dev()
735 dev->zone_capacity = g_zone_capacity; in null_alloc_dev()
736 dev->zone_nr_conv = g_zone_nr_conv; in null_alloc_dev()
737 dev->zone_max_open = g_zone_max_open; in null_alloc_dev()
738 dev->zone_max_active = g_zone_max_active; in null_alloc_dev()
739 dev->virt_boundary = g_virt_boundary; in null_alloc_dev()
740 dev->no_sched = g_no_sched; in null_alloc_dev()
741 dev->shared_tag_bitmap = g_shared_tag_bitmap; in null_alloc_dev()
751 badblocks_exit(&dev->badblocks); in null_free_dev()
757 clear_bit_unlock(tag, nq->tag_map); in put_tag()
759 if (waitqueue_active(&nq->wait)) in put_tag()
760 wake_up(&nq->wait); in put_tag()
768 tag = find_first_zero_bit(nq->tag_map, nq->queue_depth); in get_tag()
769 if (tag >= nq->queue_depth) in get_tag()
770 return -1U; in get_tag()
771 } while (test_and_set_bit_lock(tag, nq->tag_map)); in get_tag()
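
These fragments come from a lock-free tag allocator: find a zero bit in the queue's tag bitmap and try to claim it with test_and_set_bit_lock(), retrying if another CPU claims it first. Below is a userspace analogue of the same loop using C11 atomics and a fixed 64-tag depth; it is illustrative only and not the kernel API.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t tag_map;	/* bit i set => tag i is in use */

static int get_tag(void)
{
	uint64_t old, new;
	int tag;

	do {
		old = atomic_load_explicit(&tag_map, memory_order_relaxed);
		if (old == UINT64_MAX)
			return -1;			/* every tag is busy */
		tag = __builtin_ctzll(~old);		/* lowest clear bit */
		new = old | (UINT64_C(1) << tag);
	} while (!atomic_compare_exchange_weak_explicit(&tag_map, &old, new,
							memory_order_acquire,
							memory_order_relaxed));
	return tag;
}

static void put_tag(int tag)
{
	/* release ordering so the finished command is visible to the next owner */
	atomic_fetch_and_explicit(&tag_map, ~(UINT64_C(1) << tag),
				  memory_order_release);
}
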
778 put_tag(cmd->nq, cmd->tag); in free_cmd()
789 if (tag != -1U) { in __alloc_cmd()
790 cmd = &nq->cmds[tag]; in __alloc_cmd()
791 cmd->tag = tag; in __alloc_cmd()
792 cmd->error = BLK_STS_OK; in __alloc_cmd()
793 cmd->nq = nq; in __alloc_cmd()
794 if (nq->dev->irqmode == NULL_IRQ_TIMER) { in __alloc_cmd()
795 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, in __alloc_cmd()
797 cmd->timer.function = null_cmd_timer_expired; in __alloc_cmd()
817 cmd->bio = bio; in alloc_cmd()
820 prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE); in alloc_cmd()
822 finish_wait(&nq->wait, &wait); in alloc_cmd()
828 int queue_mode = cmd->nq->dev->queue_mode; in end_cmd()
832 blk_mq_end_request(cmd->rq, cmd->error); in end_cmd()
835 cmd->bio->bi_status = cmd->error; in end_cmd()
836 bio_endio(cmd->bio); in end_cmd()
852 ktime_t kt = cmd->nq->dev->completion_nsec; in null_cmd_end_timer()
854 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); in null_cmd_end_timer()
870 t_page->page = alloc_pages(GFP_NOIO, 0); in null_alloc_page()
871 if (!t_page->page) { in null_alloc_page()
876 memset(t_page->bitmap, 0, sizeof(t_page->bitmap)); in null_alloc_page()
882 __set_bit(NULLB_PAGE_FREE, t_page->bitmap); in null_free_page()
883 if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap)) in null_free_page()
885 __free_page(t_page->page); in null_free_page()
891 int size = MAP_SZ - 2; in null_page_empty()
893 return find_first_bit(page->bitmap, size) == size; in null_page_empty()
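
The two reserved bits defined near the top of the listing (NULLB_PAGE_FREE and NULLB_PAGE_LOCK) sit above the per-sector bits of each backing page, which is why null_page_empty() only scans the low MAP_SZ - 2 bits. A userspace sketch of the same bookkeeping, assuming 4 KiB pages and 512-byte sectors (eight sector bits plus the two flag bits):

#include <stdbool.h>
#include <stdint.h>

#define SECTORS_PER_PAGE 8
#define PAGE_FREE_BIT	 SECTORS_PER_PAGE	/* page may be reclaimed */
#define PAGE_LOCK_BIT	 (SECTORS_PER_PAGE + 1)	/* page is being flushed */

struct backing_page {
	uint16_t bitmap;		/* low bits: which sectors hold valid data */
	unsigned char data[4096];
};

/* empty when no sector bit below the flag bits is set */
static bool page_empty(const struct backing_page *p)
{
	return (p->bitmap & ((1u << SECTORS_PER_PAGE) - 1)) == 0;
}

static void mark_sector_valid(struct backing_page *p, unsigned int sector_in_page)
{
	p->bitmap |= 1u << sector_in_page;
}
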
904 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in null_free_sector()
910 __clear_bit(sector_bit, t_page->bitmap); in null_free_sector()
917 nullb->dev->curr_cache -= PAGE_SIZE; in null_free_sector()
927 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in null_radix_tree_insert()
932 WARN_ON(!t_page || t_page->page->index != idx); in null_radix_tree_insert()
934 nullb->dev->curr_cache += PAGE_SIZE; in null_radix_tree_insert()
946 root = is_cache ? &dev->cache : &dev->data; in null_free_device_storage()
955 pos = t_pages[i]->page->index; in null_free_device_storage()
965 dev->curr_cache = 0; in null_free_device_storage()
979 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in __null_lookup_page()
981 WARN_ON(t_page && t_page->page->index != idx); in __null_lookup_page()
983 if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap))) in __null_lookup_page()
1003 __releases(&nullb->lock) in null_insert_page()
1004 __acquires(&nullb->lock) in null_insert_page()
1013 spin_unlock_irq(&nullb->lock); in null_insert_page()
1022 spin_lock_irq(&nullb->lock); in null_insert_page()
1024 t_page->page->index = idx; in null_insert_page()
1032 spin_lock_irq(&nullb->lock); in null_insert_page()
1044 idx = c_page->page->index; in null_flush_cache_page()
1048 __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap); in null_flush_cache_page()
1049 if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) { in null_flush_cache_page()
1052 ret = radix_tree_delete_item(&nullb->dev->data, in null_flush_cache_page()
1060 return -ENOMEM; in null_flush_cache_page()
1062 src = kmap_local_page(c_page->page); in null_flush_cache_page()
1063 dst = kmap_local_page(t_page->page); in null_flush_cache_page()
1066 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { in null_flush_cache_page()
1067 if (test_bit(i, c_page->bitmap)) { in null_flush_cache_page()
1070 nullb->dev->blocksize); in null_flush_cache_page()
1071 __set_bit(i, t_page->bitmap); in null_flush_cache_page()
1078 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page); in null_flush_cache_page()
1080 nullb->dev->curr_cache -= PAGE_SIZE; in null_flush_cache_page()
1092 if ((nullb->dev->cache_size * 1024 * 1024) > in null_make_cache_space()
1093 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) in null_make_cache_space()
1096 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache, in null_make_cache_space()
1097 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH); in null_make_cache_space()
1103 nullb->cache_flush_pos = c_pages[i]->page->index; in null_make_cache_space()
1108 if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap)) in null_make_cache_space()
1111 __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap); in null_make_cache_space()
1127 nullb->cache_flush_pos = 0; in null_make_cache_space()
1130 spin_unlock_irq(&nullb->lock); in null_make_cache_space()
1131 spin_lock_irq(&nullb->lock); in null_make_cache_space()
1146 temp = min_t(size_t, nullb->dev->blocksize, n - count); in copy_to_nullb()
1155 return -ENOSPC; in copy_to_nullb()
1157 memcpy_page(t_page->page, offset, source, off + count, temp); in copy_to_nullb()
1159 __set_bit(sector & SECTOR_MASK, t_page->bitmap); in copy_to_nullb()
1178 temp = min_t(size_t, nullb->dev->blocksize, n - count); in copy_from_nullb()
1185 memcpy_page(dest, off + count, t_page->page, offset, in copy_from_nullb()
1205 struct nullb *nullb = dev->nullb; in null_handle_discard()
1209 spin_lock_irq(&nullb->lock); in null_handle_discard()
1211 temp = min_t(size_t, n, dev->blocksize); in null_handle_discard()
1216 n -= temp; in null_handle_discard()
1218 spin_unlock_irq(&nullb->lock); in null_handle_discard()
1230 spin_lock_irq(&nullb->lock); in null_handle_flush()
1233 nullb->dev->cache_size * 1024 * 1024); in null_handle_flush()
1234 if (err || nullb->dev->curr_cache == 0) in null_handle_flush()
1238 WARN_ON(!radix_tree_empty(&nullb->dev->cache)); in null_handle_flush()
1239 spin_unlock_irq(&nullb->lock); in null_handle_flush()
1247 struct nullb_device *dev = nullb->dev; in null_transfer()
1252 if (dev->zoned) in null_transfer()
1260 len -= valid_len; in null_transfer()
1276 struct request *rq = cmd->rq; in null_handle_rq()
1277 struct nullb *nullb = cmd->nq->dev->nullb; in null_handle_rq()
1284 spin_lock_irq(&nullb->lock); in null_handle_rq()
1289 rq->cmd_flags & REQ_FUA); in null_handle_rq()
1291 spin_unlock_irq(&nullb->lock); in null_handle_rq()
1296 spin_unlock_irq(&nullb->lock); in null_handle_rq()
1303 struct bio *bio = cmd->bio; in null_handle_bio()
1304 struct nullb *nullb = cmd->nq->dev->nullb; in null_handle_bio()
1307 sector_t sector = bio->bi_iter.bi_sector; in null_handle_bio()
1311 spin_lock_irq(&nullb->lock); in null_handle_bio()
1316 bio->bi_opf & REQ_FUA); in null_handle_bio()
1318 spin_unlock_irq(&nullb->lock); in null_handle_bio()
1323 spin_unlock_irq(&nullb->lock); in null_handle_bio()
1329 struct request_queue *q = nullb->q; in null_stop_queue()
1331 if (nullb->dev->queue_mode == NULL_Q_MQ) in null_stop_queue()
1337 struct request_queue *q = nullb->q; in null_restart_queue_async()
1339 if (nullb->dev->queue_mode == NULL_Q_MQ) in null_restart_queue_async()
1345 struct nullb_device *dev = cmd->nq->dev; in null_handle_throttled()
1346 struct nullb *nullb = dev->nullb; in null_handle_throttled()
1348 struct request *rq = cmd->rq; in null_handle_throttled()
1350 if (!hrtimer_active(&nullb->bw_timer)) in null_handle_throttled()
1351 hrtimer_restart(&nullb->bw_timer); in null_handle_throttled()
1353 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) { in null_handle_throttled()
1356 if (atomic_long_read(&nullb->cur_bytes) > 0) in null_handle_throttled()
1368 struct badblocks *bb = &cmd->nq->dev->badblocks; in null_handle_badblocks()
1383 struct nullb_device *dev = cmd->nq->dev; in null_handle_memory_backed()
1389 if (dev->queue_mode == NULL_Q_BIO) in null_handle_memory_backed()
1399 struct nullb_device *dev = cmd->nq->dev; in nullb_zero_read_cmd_buffer()
1402 if (dev->memory_backed) in nullb_zero_read_cmd_buffer()
1405 if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) { in nullb_zero_read_cmd_buffer()
1406 zero_fill_bio(cmd->bio); in nullb_zero_read_cmd_buffer()
1407 } else if (req_op(cmd->rq) == REQ_OP_READ) { in nullb_zero_read_cmd_buffer()
1408 __rq_for_each_bio(bio, cmd->rq) in nullb_zero_read_cmd_buffer()
1418 * data buffers of read commands. Zero-initialize these buffers in nullb_complete_cmd()
1425 /* Complete IO by inline, softirq or timer */ in nullb_complete_cmd()
1426 switch (cmd->nq->dev->irqmode) { in nullb_complete_cmd()
1428 switch (cmd->nq->dev->queue_mode) { in nullb_complete_cmd()
1430 blk_mq_complete_request(cmd->rq); in nullb_complete_cmd()
1452 struct nullb_device *dev = cmd->nq->dev; in null_process_cmd()
1455 if (dev->badblocks.shift != -1) { in null_process_cmd()
1461 if (dev->memory_backed) in null_process_cmd()
1470 struct nullb_device *dev = cmd->nq->dev; in null_handle_cmd()
1471 struct nullb *nullb = dev->nullb; in null_handle_cmd()
1474 if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { in null_handle_cmd()
1481 cmd->error = errno_to_blk_status(null_handle_flush(nullb)); in null_handle_cmd()
1485 if (dev->zoned) in null_handle_cmd()
1491 if (cmd->error == BLK_STS_OK) in null_handle_cmd()
1492 cmd->error = sts; in null_handle_cmd()
1503 unsigned int mbps = nullb->dev->mbps; in nullb_bwtimer_fn()
1505 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps)) in nullb_bwtimer_fn()
1508 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); in nullb_bwtimer_fn()
1511 hrtimer_forward_now(&nullb->bw_timer, timer_interval); in nullb_bwtimer_fn()
1520 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in nullb_setup_bwtimer()
1521 nullb->bw_timer.function = nullb_bwtimer_fn; in nullb_setup_bwtimer()
1522 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps)); in nullb_setup_bwtimer()
1523 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); in nullb_setup_bwtimer()
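
The bandwidth-limit fragments above implement a simple per-tick budget: a timer refills cur_bytes to mb_per_tick(mbps), each request subtracts its size, and the queue is stopped once the budget goes negative until the next refill. A userspace sketch of that logic; the tick length and helper names are assumptions for illustration.

#include <stdatomic.h>
#include <stdbool.h>

#define TICK_MS 8				/* assumed timer interval */

static _Atomic long long cur_bytes;		/* bytes still allowed this tick */

static long long mb_per_tick(unsigned int mbps)
{
	return (long long)mbps * 1024 * 1024 * TICK_MS / 1000;
}

/* timer callback: refill the budget and let a stopped queue run again */
static void bw_timer_fn(unsigned int mbps, bool *queue_stopped)
{
	atomic_store(&cur_bytes, mb_per_tick(mbps));
	*queue_stopped = false;
}

/* per-request accounting: returns true if the request must wait for a refill */
static bool throttle_request(long long request_bytes, bool *queue_stopped)
{
	if (atomic_fetch_sub(&cur_bytes, request_bytes) - request_bytes < 0) {
		*queue_stopped = true;
		return true;
	}
	return false;
}
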
1530 if (nullb->nr_queues != 1) in nullb_to_queue()
1531 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); in nullb_to_queue()
1533 return &nullb->queues[index]; in nullb_to_queue()
1538 sector_t sector = bio->bi_iter.bi_sector; in null_submit_bio()
1540 struct nullb *nullb = bio->bi_bdev->bd_disk->private_data; in null_submit_bio()
1551 struct nullb_device *dev = cmd->nq->dev; in should_timeout_request()
1553 return should_fail(&dev->timeout_config.attr, 1); in should_timeout_request()
1559 struct nullb_device *dev = cmd->nq->dev; in should_requeue_request()
1561 return should_fail(&dev->requeue_config.attr, 1); in should_requeue_request()
1566 return should_fail(&dev->init_hctx_fault_config.attr, 1); in should_init_hctx_fail()
1590 struct nullb *nullb = set->driver_data; in null_map_queues()
1596 struct nullb_device *dev = nullb->dev; in null_map_queues()
1604 if (set->nr_hw_queues == in null_map_queues()
1605 dev->submit_queues + dev->poll_queues) { in null_map_queues()
1606 submit_queues = dev->submit_queues; in null_map_queues()
1607 poll_queues = dev->poll_queues; in null_map_queues()
1608 } else if (set->nr_hw_queues == in null_map_queues()
1609 dev->prev_submit_queues + dev->prev_poll_queues) { in null_map_queues()
1610 submit_queues = dev->prev_submit_queues; in null_map_queues()
1611 poll_queues = dev->prev_poll_queues; in null_map_queues()
1614 set->nr_hw_queues); in null_map_queues()
1621 for (i = 0, qoff = 0; i < set->nr_maps; i++) { in null_map_queues()
1622 struct blk_mq_queue_map *map = &set->map[i]; in null_map_queues()
1626 map->nr_queues = submit_queues; in null_map_queues()
1629 map->nr_queues = 0; in null_map_queues()
1632 map->nr_queues = poll_queues; in null_map_queues()
1635 map->queue_offset = qoff; in null_map_queues()
1636 qoff += map->nr_queues; in null_map_queues()
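
In null_map_queues() the tag set's hardware queues are split across the blk-mq map types: the default map receives the submit queues, the read map receives none, and the poll map receives the poll queues, with queue_offset advancing past each map. A simplified stand-alone sketch of that split; the enum and struct are stand-ins, not the real blk-mq types.

enum { MAP_DEFAULT, MAP_READ, MAP_POLL, NR_MAPS };

struct queue_map {
	unsigned int nr_queues;
	unsigned int queue_offset;
};

static void map_queues(struct queue_map map[NR_MAPS],
		       unsigned int submit_queues, unsigned int poll_queues)
{
	unsigned int i, qoff = 0;

	for (i = 0; i < NR_MAPS; i++) {
		map[i].nr_queues = (i == MAP_DEFAULT) ? submit_queues :
				   (i == MAP_POLL)    ? poll_queues : 0;
		map[i].queue_offset = qoff;
		qoff += map[i].nr_queues;
	}
}
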
1643 struct nullb_queue *nq = hctx->driver_data; in null_poll()
1648 spin_lock(&nq->poll_lock); in null_poll()
1649 list_splice_init(&nq->poll_list, &list); in null_poll()
1652 spin_unlock(&nq->poll_lock); in null_poll()
1659 list_del_init(&req->queuelist); in null_poll()
1661 cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req), in null_poll()
1663 if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error, in null_poll()
1674 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in null_timeout_rq()
1677 if (hctx->type == HCTX_TYPE_POLL) { in null_timeout_rq()
1678 struct nullb_queue *nq = hctx->driver_data; in null_timeout_rq()
1680 spin_lock(&nq->poll_lock); in null_timeout_rq()
1683 spin_unlock(&nq->poll_lock); in null_timeout_rq()
1686 list_del_init(&rq->queuelist); in null_timeout_rq()
1687 spin_unlock(&nq->poll_lock); in null_timeout_rq()
1693 * If the device is marked as blocking (i.e. memory backed or zoned in null_timeout_rq()
1699 cmd->error = BLK_STS_TIMEOUT; in null_timeout_rq()
1700 if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL) in null_timeout_rq()
1708 struct request *rq = bd->rq; in null_queue_rq()
1710 struct nullb_queue *nq = hctx->driver_data; in null_queue_rq()
1713 const bool is_poll = hctx->type == HCTX_TYPE_POLL; in null_queue_rq()
1715 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in null_queue_rq()
1717 if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) { in null_queue_rq()
1718 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in null_queue_rq()
1719 cmd->timer.function = null_cmd_timer_expired; in null_queue_rq()
1721 cmd->rq = rq; in null_queue_rq()
1722 cmd->error = BLK_STS_OK; in null_queue_rq()
1723 cmd->nq = nq; in null_queue_rq()
1724 cmd->fake_timeout = should_timeout_request(rq) || in null_queue_rq()
1725 blk_should_fake_timeout(rq->q); in null_queue_rq()
1734 nq->requeue_selection++; in null_queue_rq()
1735 if (nq->requeue_selection & 1) in null_queue_rq()
1742 spin_lock(&nq->poll_lock); in null_queue_rq()
1743 list_add_tail(&rq->queuelist, &nq->poll_list); in null_queue_rq()
1744 spin_unlock(&nq->poll_lock); in null_queue_rq()
1747 if (cmd->fake_timeout) in null_queue_rq()
1755 bitmap_free(nq->tag_map); in cleanup_queue()
1756 kfree(nq->cmds); in cleanup_queue()
1763 for (i = 0; i < nullb->nr_queues; i++) in cleanup_queues()
1764 cleanup_queue(&nullb->queues[i]); in cleanup_queues()
1766 kfree(nullb->queues); in cleanup_queues()
1771 struct nullb_queue *nq = hctx->driver_data; in null_exit_hctx()
1772 struct nullb *nullb = nq->dev->nullb; in null_exit_hctx()
1774 nullb->nr_queues--; in null_exit_hctx()
1779 init_waitqueue_head(&nq->wait); in null_init_queue()
1780 nq->queue_depth = nullb->queue_depth; in null_init_queue()
1781 nq->dev = nullb->dev; in null_init_queue()
1782 INIT_LIST_HEAD(&nq->poll_list); in null_init_queue()
1783 spin_lock_init(&nq->poll_lock); in null_init_queue()
1789 struct nullb *nullb = hctx->queue->queuedata; in null_init_hctx()
1792 if (should_init_hctx_fail(nullb->dev)) in null_init_hctx()
1793 return -EFAULT; in null_init_hctx()
1795 nq = &nullb->queues[hctx_idx]; in null_init_hctx()
1796 hctx->driver_data = nq; in null_init_hctx()
1798 nullb->nr_queues++; in null_init_hctx()
1820 dev = nullb->dev; in null_del_dev()
1822 ida_simple_remove(&nullb_indexes, nullb->index); in null_del_dev()
1824 list_del_init(&nullb->list); in null_del_dev()
1826 del_gendisk(nullb->disk); in null_del_dev()
1828 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { in null_del_dev()
1829 hrtimer_cancel(&nullb->bw_timer); in null_del_dev()
1830 atomic_long_set(&nullb->cur_bytes, LONG_MAX); in null_del_dev()
1834 put_disk(nullb->disk); in null_del_dev()
1835 if (dev->queue_mode == NULL_Q_MQ && in null_del_dev()
1836 nullb->tag_set == &nullb->__tag_set) in null_del_dev()
1837 blk_mq_free_tag_set(nullb->tag_set); in null_del_dev()
1840 null_free_device_storage(nullb->dev, true); in null_del_dev()
1842 dev->nullb = NULL; in null_del_dev()
1847 if (nullb->dev->discard == false) in null_config_discard()
1850 if (!nullb->dev->memory_backed) { in null_config_discard()
1851 nullb->dev->discard = false; in null_config_discard()
1856 if (nullb->dev->zoned) { in null_config_discard()
1857 nullb->dev->discard = false; in null_config_discard()
1862 nullb->q->limits.discard_granularity = nullb->dev->blocksize; in null_config_discard()
1863 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); in null_config_discard()
1882 nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL); in setup_commands()
1883 if (!nq->cmds) in setup_commands()
1884 return -ENOMEM; in setup_commands()
1886 nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL); in setup_commands()
1887 if (!nq->tag_map) { in setup_commands()
1888 kfree(nq->cmds); in setup_commands()
1889 return -ENOMEM; in setup_commands()
1892 for (i = 0; i < nq->queue_depth; i++) { in setup_commands()
1893 cmd = &nq->cmds[i]; in setup_commands()
1894 cmd->tag = -1U; in setup_commands()
1907 nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue), in setup_queues()
1909 if (!nullb->queues) in setup_queues()
1910 return -ENOMEM; in setup_queues()
1912 nullb->queue_depth = nullb->dev->hw_queue_depth; in setup_queues()
1921 for (i = 0; i < nullb->dev->submit_queues; i++) { in init_driver_queues()
1922 nq = &nullb->queues[i]; in init_driver_queues()
1929 nullb->nr_queues++; in init_driver_queues()
1936 sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT; in null_gendisk_register()
1937 struct gendisk *disk = nullb->disk; in null_gendisk_register()
1941 disk->major = null_major; in null_gendisk_register()
1942 disk->first_minor = nullb->index; in null_gendisk_register()
1943 disk->minors = 1; in null_gendisk_register()
1944 if (queue_is_mq(nullb->q)) in null_gendisk_register()
1945 disk->fops = &null_rq_ops; in null_gendisk_register()
1947 disk->fops = &null_bio_ops; in null_gendisk_register()
1948 disk->private_data = nullb; in null_gendisk_register()
1949 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); in null_gendisk_register()
1951 if (nullb->dev->zoned) { in null_gendisk_register()
1969 hw_queues = nullb->dev->submit_queues; in null_init_tag_set()
1970 poll_queues = nullb->dev->poll_queues; in null_init_tag_set()
1971 queue_depth = nullb->dev->hw_queue_depth; in null_init_tag_set()
1972 numa_node = nullb->dev->home_node; in null_init_tag_set()
1973 if (nullb->dev->no_sched) in null_init_tag_set()
1975 if (nullb->dev->shared_tag_bitmap) in null_init_tag_set()
1977 if (nullb->dev->blocking) in null_init_tag_set()
1992 set->ops = &null_mq_ops; in null_init_tag_set()
1993 set->cmd_size = sizeof(struct nullb_cmd); in null_init_tag_set()
1994 set->flags = flags; in null_init_tag_set()
1995 set->driver_data = nullb; in null_init_tag_set()
1996 set->nr_hw_queues = hw_queues; in null_init_tag_set()
1997 set->queue_depth = queue_depth; in null_init_tag_set()
1998 set->numa_node = numa_node; in null_init_tag_set()
2000 set->nr_hw_queues += poll_queues; in null_init_tag_set()
2001 set->nr_maps = 3; in null_init_tag_set()
2003 set->nr_maps = 1; in null_init_tag_set()
2011 if (dev->queue_mode == NULL_Q_RQ) { in null_validate_conf()
2012 pr_err("legacy IO path is no longer available\n"); in null_validate_conf()
2013 return -EINVAL; in null_validate_conf()
2016 dev->blocksize = round_down(dev->blocksize, 512); in null_validate_conf()
2017 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); in null_validate_conf()
2019 if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) { in null_validate_conf()
2020 if (dev->submit_queues != nr_online_nodes) in null_validate_conf()
2021 dev->submit_queues = nr_online_nodes; in null_validate_conf()
2022 } else if (dev->submit_queues > nr_cpu_ids) in null_validate_conf()
2023 dev->submit_queues = nr_cpu_ids; in null_validate_conf()
2024 else if (dev->submit_queues == 0) in null_validate_conf()
2025 dev->submit_queues = 1; in null_validate_conf()
2026 dev->prev_submit_queues = dev->submit_queues; in null_validate_conf()
2028 if (dev->poll_queues > g_poll_queues) in null_validate_conf()
2029 dev->poll_queues = g_poll_queues; in null_validate_conf()
2030 dev->prev_poll_queues = dev->poll_queues; in null_validate_conf()
2032 dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ); in null_validate_conf()
2033 dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER); in null_validate_conf()
2035 /* Do memory allocation, so set blocking */ in null_validate_conf()
2036 if (dev->memory_backed) in null_validate_conf()
2037 dev->blocking = true; in null_validate_conf()
2039 dev->cache_size = 0; in null_validate_conf()
2040 dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024, in null_validate_conf()
2041 dev->cache_size); in null_validate_conf()
2042 dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps); in null_validate_conf()
2044 if (dev->queue_mode == NULL_Q_BIO) in null_validate_conf()
2045 dev->mbps = 0; in null_validate_conf()
2047 if (dev->zoned && in null_validate_conf()
2048 (!dev->zone_size || !is_power_of_2(dev->zone_size))) { in null_validate_conf()
2049 pr_err("zone_size must be power-of-two\n"); in null_validate_conf()
2050 return -EINVAL; in null_validate_conf()
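
null_validate_conf() above normalizes the configuration before the device is created: the block size is rounded down to a multiple of 512 and clamped to [512, 4096], and a zoned device must use a non-zero, power-of-two zone size. A stand-alone sketch of those two checks:

#include <stdbool.h>
#include <stdio.h>

static unsigned int clamp_blocksize(unsigned int bs)
{
	bs -= bs % 512;			/* round_down(bs, 512) */
	if (bs < 512)
		bs = 512;
	if (bs > 4096)
		bs = 4096;
	return bs;
}

static bool zone_size_valid(unsigned long zone_size_mb)
{
	/* is_power_of_2(): non-zero and exactly one bit set */
	return zone_size_mb && (zone_size_mb & (zone_size_mb - 1)) == 0;
}

int main(void)
{
	printf("%u\n", clamp_blocksize(520));	/* prints 512 */
	printf("%d\n", zone_size_valid(256));	/* prints 1 */
	return 0;
}
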
2065 attr->verbose = 0; in __null_setup_fault()
2092 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); in null_add_dev()
2094 rv = -ENOMEM; in null_add_dev()
2097 nullb->dev = dev; in null_add_dev()
2098 dev->nullb = nullb; in null_add_dev()
2100 spin_lock_init(&nullb->lock); in null_add_dev()
2106 if (dev->queue_mode == NULL_Q_MQ) { in null_add_dev()
2108 nullb->tag_set = &tag_set; in null_add_dev()
2111 nullb->tag_set = &nullb->__tag_set; in null_add_dev()
2112 rv = null_init_tag_set(nullb, nullb->tag_set); in null_add_dev()
2118 nullb->tag_set->timeout = 5 * HZ; in null_add_dev()
2119 nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb); in null_add_dev()
2120 if (IS_ERR(nullb->disk)) { in null_add_dev()
2121 rv = PTR_ERR(nullb->disk); in null_add_dev()
2124 nullb->q = nullb->disk->queue; in null_add_dev()
2125 } else if (dev->queue_mode == NULL_Q_BIO) { in null_add_dev()
2126 rv = -ENOMEM; in null_add_dev()
2127 nullb->disk = blk_alloc_disk(nullb->dev->home_node); in null_add_dev()
2128 if (!nullb->disk) in null_add_dev()
2131 nullb->q = nullb->disk->queue; in null_add_dev()
2137 if (dev->mbps) { in null_add_dev()
2138 set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags); in null_add_dev()
2142 if (dev->cache_size > 0) { in null_add_dev()
2143 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); in null_add_dev()
2144 blk_queue_write_cache(nullb->q, true, true); in null_add_dev()
2147 if (dev->zoned) { in null_add_dev()
2148 rv = null_init_zoned_dev(dev, nullb->q); in null_add_dev()
2153 nullb->q->queuedata = nullb; in null_add_dev()
2154 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); in null_add_dev()
2162 nullb->index = rv; in null_add_dev()
2163 dev->index = rv; in null_add_dev()
2166 blk_queue_logical_block_size(nullb->q, dev->blocksize); in null_add_dev()
2167 blk_queue_physical_block_size(nullb->q, dev->blocksize); in null_add_dev()
2168 if (!dev->max_sectors) in null_add_dev()
2169 dev->max_sectors = queue_max_hw_sectors(nullb->q); in null_add_dev()
2170 dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS); in null_add_dev()
2171 blk_queue_max_hw_sectors(nullb->q, dev->max_sectors); in null_add_dev()
2173 if (dev->virt_boundary) in null_add_dev()
2174 blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1); in null_add_dev()
2178 if (config_item_name(&dev->group.cg_item)) { in null_add_dev()
2180 snprintf(nullb->disk_name, sizeof(nullb->disk_name), in null_add_dev()
2181 "%s", config_item_name(&dev->group.cg_item)); in null_add_dev()
2183 sprintf(nullb->disk_name, "nullb%d", nullb->index); in null_add_dev()
2191 list_add_tail(&nullb->list, &nullb_list); in null_add_dev()
2194 pr_info("disk %s created\n", nullb->disk_name); in null_add_dev()
2199 ida_free(&nullb_indexes, nullb->index); in null_add_dev()
2203 put_disk(nullb->disk); in null_add_dev()
2205 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) in null_add_dev()
2206 blk_mq_free_tag_set(nullb->tag_set); in null_add_dev()
2211 dev->nullb = NULL; in null_add_dev()
2222 if (strcmp(nb->disk_name, name) == 0) { in null_find_dev_by_name()
2239 return -ENOMEM; in null_create_dev()
2252 struct nullb_device *dev = nullb->dev; in null_destroy_dev()
2283 return -EINVAL; in null_init()
2286 pr_err("legacy IO path is no longer available\n"); in null_init()
2287 return -EINVAL; in null_init()