Lines matching +full:blocking +full:-io (null_blk block driver)

1 // SPDX-License-Identifier: GPL-2.0-only
14 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
16 #define SECTOR_MASK (PAGE_SECTORS - 1)
40 * CACHE: Device is using a write-back cache.
66 #define NULLB_PAGE_LOCK (MAP_SZ - 1)
67 #define NULLB_PAGE_FREE (MAP_SZ - 2)
89 MODULE_PARM_DESC(no_sched, "No io scheduler");
102 * Documentation/fault-injection/fault-injection.rst.
125 return -EINVAL; in null_param_store_val()
128 return -EINVAL; in null_param_store_val()
160 module_param_named(blocking, g_blocking, bool, 0444);
161 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
165 MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
169 MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");
185 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
197 MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: …
201 MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");
205 MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two: Defa…
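The module parameters matched above (no_sched, blocking, shared_tags, shared_tag_bitmap, irqmode, use_per_node_hctx, zoned, zone_size) are all load-time knobs. A purely illustrative load line, with example values rather than the driver's defaults, exercising the blocking and timer-completion paths:

/*
 * Illustrative only (example values, not defaults):
 *
 *   modprobe null_blk queue_mode=2 blocking=1 irqmode=2 \
 *            completion_nsec=10000 zoned=1 zone_size=256
 *
 * queue_mode=2 selects blk-mq, blocking=1 registers the tag set with
 * BLK_MQ_F_BLOCKING, and irqmode=2 completes each request from an
 * hrtimer after completion_nsec nanoseconds.
 */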
298 to_nullb_device(item)->NAME, page); \
314 else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
315 ret = -EBUSY; \
318 dev->NAME = new_value; \
326 struct nullb *nullb = dev->nullb; in nullb_apply_submit_queues()
333 * Make sure that null_init_hctx() does not access nullb->queues[] past in nullb_apply_submit_queues()
337 return -EINVAL; in nullb_apply_submit_queues()
338 set = nullb->tag_set; in nullb_apply_submit_queues()
340 return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM; in nullb_apply_submit_queues()
352 NULLB_DEVICE_ATTR(blocking, bool, NULL);
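Lines 298, 314, 315 and 318 above are fragments of the NULLB_DEVICE_ATTR() macro body. Pieced together from those fragments (a sketch of how the macro expands for the blocking attribute, not a verbatim copy of the file), the generated store handler looks roughly like this:

static ssize_t nullb_device_blocking_store(struct config_item *item,
                                           const char *page, size_t count)
{
        /* third macro argument was NULL, so there is no apply callback */
        int (*apply_fn)(struct nullb_device *dev, bool new_value) = NULL;
        struct nullb_device *dev = to_nullb_device(item);
        bool new_value = 0;
        int ret;

        ret = nullb_device_bool_attr_store(&new_value, page, count);
        if (ret < 0)
                return ret;

        if (apply_fn)
                ret = apply_fn(dev, new_value);
        else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags))
                ret = -EBUSY;           /* no changes on a configured device */
        if (ret < 0)
                return ret;

        dev->blocking = new_value;      /* line 318: dev->NAME = new_value */
        return count;
}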
367 return nullb_device_bool_attr_show(to_nullb_device(item)->power, page); in nullb_device_power_show()
381 if (!dev->power && newp) { in nullb_device_power_store()
382 if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) in nullb_device_power_store()
385 clear_bit(NULLB_DEV_FL_UP, &dev->flags); in nullb_device_power_store()
386 return -ENOMEM; in nullb_device_power_store()
389 set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); in nullb_device_power_store()
390 dev->power = newp; in nullb_device_power_store()
391 } else if (dev->power && !newp) { in nullb_device_power_store()
392 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { in nullb_device_power_store()
394 dev->power = newp; in nullb_device_power_store()
395 null_del_dev(dev->nullb); in nullb_device_power_store()
398 clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); in nullb_device_power_store()
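For context, the power attribute handled in lines 367-398 is the configfs switch that actually creates and destroys a device. The sequence below is illustrative and assumes configfs is mounted at /sys/kernel/config (per Documentation/block/null_blk.rst):

/*
 *   mkdir /sys/kernel/config/nullb/nullb1
 *   echo 1 > /sys/kernel/config/nullb/nullb1/power    # null_add_dev()
 *   echo 0 > /sys/kernel/config/nullb/nullb1/power    # null_del_dev()
 *
 * Writing 1 sets NULLB_DEV_FL_UP and NULLB_DEV_FL_CONFIGURED; writing 0
 * clears NULLB_DEV_FL_UP and tears the device down, as in lines 391-398.
 */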
410 return badblocks_show(&t_dev->badblocks, page, 0); in nullb_device_badblocks_show()
423 return -ENOMEM; in nullb_device_badblocks_store()
427 ret = -EINVAL; in nullb_device_badblocks_store()
428 if (buf[0] != '+' && buf[0] != '-') in nullb_device_badblocks_store()
430 tmp = strchr(&buf[1], '-'); in nullb_device_badblocks_store()
440 ret = -EINVAL; in nullb_device_badblocks_store()
444 cmpxchg(&t_dev->badblocks.shift, -1, 0); in nullb_device_badblocks_store()
446 ret = badblocks_set(&t_dev->badblocks, start, in nullb_device_badblocks_store()
447 end - start + 1, 1); in nullb_device_badblocks_store()
449 ret = badblocks_clear(&t_dev->badblocks, start, in nullb_device_badblocks_store()
450 end - start + 1); in nullb_device_badblocks_store()
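The parsing in lines 427-450 expects a leading '+' or '-' followed by an inclusive start-end sector range. Two illustrative writes (example values) to the badblocks attribute:

/*
 *   "+503-511"  ->  badblocks_set(&t_dev->badblocks, 503, 9, 1)
 *   "-503-511"  ->  badblocks_clear(&t_dev->badblocks, 503, 9)
 *
 * '+' marks sectors start..end bad, '-' clears them again; the length
 * passed down is end - start + 1 (here 511 - 503 + 1 = 9).
 */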
511 return ERR_PTR(-ENOMEM); in nullb_group_make_item()
513 config_item_init_type_name(&dev->item, name, &nullb_device_type); in nullb_group_make_item()
515 return &dev->item; in nullb_group_make_item()
523 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { in nullb_group_drop_item()
525 dev->power = false; in nullb_group_drop_item()
526 null_del_dev(dev->nullb); in nullb_group_drop_item()
568 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); in null_cache_active()
578 INIT_RADIX_TREE(&dev->data, GFP_ATOMIC); in null_alloc_dev()
579 INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC); in null_alloc_dev()
580 if (badblocks_init(&dev->badblocks, 0)) { in null_alloc_dev()
585 dev->size = g_gb * 1024; in null_alloc_dev()
586 dev->completion_nsec = g_completion_nsec; in null_alloc_dev()
587 dev->submit_queues = g_submit_queues; in null_alloc_dev()
588 dev->home_node = g_home_node; in null_alloc_dev()
589 dev->queue_mode = g_queue_mode; in null_alloc_dev()
590 dev->blocksize = g_bs; in null_alloc_dev()
591 dev->irqmode = g_irqmode; in null_alloc_dev()
592 dev->hw_queue_depth = g_hw_queue_depth; in null_alloc_dev()
593 dev->blocking = g_blocking; in null_alloc_dev()
594 dev->use_per_node_hctx = g_use_per_node_hctx; in null_alloc_dev()
595 dev->zoned = g_zoned; in null_alloc_dev()
596 dev->zone_size = g_zone_size; in null_alloc_dev()
597 dev->zone_capacity = g_zone_capacity; in null_alloc_dev()
598 dev->zone_nr_conv = g_zone_nr_conv; in null_alloc_dev()
599 dev->zone_max_open = g_zone_max_open; in null_alloc_dev()
600 dev->zone_max_active = g_zone_max_active; in null_alloc_dev()
610 badblocks_exit(&dev->badblocks); in null_free_dev()
616 clear_bit_unlock(tag, nq->tag_map); in put_tag()
618 if (waitqueue_active(&nq->wait)) in put_tag()
619 wake_up(&nq->wait); in put_tag()
627 tag = find_first_zero_bit(nq->tag_map, nq->queue_depth); in get_tag()
628 if (tag >= nq->queue_depth) in get_tag()
629 return -1U; in get_tag()
630 } while (test_and_set_bit_lock(tag, nq->tag_map)); in get_tag()
637 put_tag(cmd->nq, cmd->tag); in free_cmd()
648 if (tag != -1U) { in __alloc_cmd()
649 cmd = &nq->cmds[tag]; in __alloc_cmd()
650 cmd->tag = tag; in __alloc_cmd()
651 cmd->error = BLK_STS_OK; in __alloc_cmd()
652 cmd->nq = nq; in __alloc_cmd()
653 if (nq->dev->irqmode == NULL_IRQ_TIMER) { in __alloc_cmd()
654 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, in __alloc_cmd()
656 cmd->timer.function = null_cmd_timer_expired; in __alloc_cmd()
674 prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE); in alloc_cmd()
682 finish_wait(&nq->wait, &wait); in alloc_cmd()
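Lines 616-682 are the bio-mode tag allocator. Reassembled from those fragments (a sketch of how they fit together rather than a verbatim excerpt), the lock-free bitmap scheme is:

/* Claim a free tag: find a clear bit, then take it with an atomic
 * test-and-set; retry if another CPU won the race.  -1U means the
 * queue is full and alloc_cmd() must sleep on nq->wait. */
static unsigned int get_tag(struct nullb_queue *nq)
{
        unsigned int tag;

        do {
                tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
                if (tag >= nq->queue_depth)
                        return -1U;
        } while (test_and_set_bit_lock(tag, nq->tag_map));

        return tag;
}

/* Release a tag and wake any waiter sleeping in alloc_cmd(). */
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
        clear_bit_unlock(tag, nq->tag_map);

        if (waitqueue_active(&nq->wait))
                wake_up(&nq->wait);
}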
688 int queue_mode = cmd->nq->dev->queue_mode; in end_cmd()
692 blk_mq_end_request(cmd->rq, cmd->error); in end_cmd()
695 cmd->bio->bi_status = cmd->error; in end_cmd()
696 bio_endio(cmd->bio); in end_cmd()
712 ktime_t kt = cmd->nq->dev->completion_nsec; in null_cmd_end_timer()
714 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); in null_cmd_end_timer()
730 t_page->page = alloc_pages(gfp_flags, 0); in null_alloc_page()
731 if (!t_page->page) in null_alloc_page()
734 memset(t_page->bitmap, 0, sizeof(t_page->bitmap)); in null_alloc_page()
744 __set_bit(NULLB_PAGE_FREE, t_page->bitmap); in null_free_page()
745 if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap)) in null_free_page()
747 __free_page(t_page->page); in null_free_page()
753 int size = MAP_SZ - 2; in null_page_empty()
755 return find_first_bit(page->bitmap, size) == size; in null_page_empty()
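Lines 66-67 together with 744-755 imply the per-page bitmap layout used by the memory backend. A compact summary (inferred from those lines, with MAP_SZ assumed to be PAGE_SECTORS plus the two control bits):

/*
 * t_page->bitmap:
 *   bits 0 .. MAP_SZ-3   one bit per sector of the page that holds data
 *   bit  MAP_SZ-2        NULLB_PAGE_FREE  - free the page once unlocked
 *   bit  MAP_SZ-1        NULLB_PAGE_LOCK  - page is busy (being flushed)
 *
 * null_page_empty() therefore scans only the low MAP_SZ-2 sector bits,
 * and null_free_page() defers the __free_page() while LOCK is set.
 */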
766 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in null_free_sector()
772 __clear_bit(sector_bit, t_page->bitmap); in null_free_sector()
779 nullb->dev->curr_cache -= PAGE_SIZE; in null_free_sector()
789 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in null_radix_tree_insert()
794 WARN_ON(!t_page || t_page->page->index != idx); in null_radix_tree_insert()
796 nullb->dev->curr_cache += PAGE_SIZE; in null_radix_tree_insert()
808 root = is_cache ? &dev->cache : &dev->data; in null_free_device_storage()
817 pos = t_pages[i]->page->index; in null_free_device_storage()
827 dev->curr_cache = 0; in null_free_device_storage()
841 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in __null_lookup_page()
843 WARN_ON(t_page && t_page->page->index != idx); in __null_lookup_page()
845 if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap))) in __null_lookup_page()
865 __releases(&nullb->lock) in null_insert_page()
866 __acquires(&nullb->lock) in null_insert_page()
875 spin_unlock_irq(&nullb->lock); in null_insert_page()
884 spin_lock_irq(&nullb->lock); in null_insert_page()
886 t_page->page->index = idx; in null_insert_page()
894 spin_lock_irq(&nullb->lock); in null_insert_page()
906 idx = c_page->page->index; in null_flush_cache_page()
910 __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap); in null_flush_cache_page()
911 if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) { in null_flush_cache_page()
914 ret = radix_tree_delete_item(&nullb->dev->data, in null_flush_cache_page()
922 return -ENOMEM; in null_flush_cache_page()
924 src = kmap_atomic(c_page->page); in null_flush_cache_page()
925 dst = kmap_atomic(t_page->page); in null_flush_cache_page()
928 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { in null_flush_cache_page()
929 if (test_bit(i, c_page->bitmap)) { in null_flush_cache_page()
932 nullb->dev->blocksize); in null_flush_cache_page()
933 __set_bit(i, t_page->bitmap); in null_flush_cache_page()
940 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page); in null_flush_cache_page()
942 nullb->dev->curr_cache -= PAGE_SIZE; in null_flush_cache_page()
954 if ((nullb->dev->cache_size * 1024 * 1024) > in null_make_cache_space()
955 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) in null_make_cache_space()
958 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache, in null_make_cache_space()
959 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH); in null_make_cache_space()
965 nullb->cache_flush_pos = c_pages[i]->page->index; in null_make_cache_space()
970 if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap)) in null_make_cache_space()
973 __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap); in null_make_cache_space()
989 nullb->cache_flush_pos = 0; in null_make_cache_space()
992 spin_unlock_irq(&nullb->lock); in null_make_cache_space()
993 spin_lock_irq(&nullb->lock); in null_make_cache_space()
1009 temp = min_t(size_t, nullb->dev->blocksize, n - count); in copy_to_nullb()
1018 return -ENOSPC; in copy_to_nullb()
1021 dst = kmap_atomic(t_page->page); in copy_to_nullb()
1026 __set_bit(sector & SECTOR_MASK, t_page->bitmap); in copy_to_nullb()
1046 temp = min_t(size_t, nullb->dev->blocksize, n - count); in copy_from_nullb()
1057 src = kmap_atomic(t_page->page); in copy_from_nullb()
1083 spin_lock_irq(&nullb->lock); in null_handle_discard()
1085 temp = min_t(size_t, n, nullb->dev->blocksize); in null_handle_discard()
1090 n -= temp; in null_handle_discard()
1092 spin_unlock_irq(&nullb->lock); in null_handle_discard()
1102 spin_lock_irq(&nullb->lock); in null_handle_flush()
1105 nullb->dev->cache_size * 1024 * 1024); in null_handle_flush()
1106 if (err || nullb->dev->curr_cache == 0) in null_handle_flush()
1110 WARN_ON(!radix_tree_empty(&nullb->dev->cache)); in null_handle_flush()
1111 spin_unlock_irq(&nullb->lock); in null_handle_flush()
1119 struct nullb_device *dev = nullb->dev; in null_transfer()
1124 if (dev->zoned) in null_transfer()
1132 len -= valid_len; in null_transfer()
1148 struct request *rq = cmd->rq; in null_handle_rq()
1149 struct nullb *nullb = cmd->nq->dev->nullb; in null_handle_rq()
1163 spin_lock_irq(&nullb->lock); in null_handle_rq()
1168 rq->cmd_flags & REQ_FUA); in null_handle_rq()
1170 spin_unlock_irq(&nullb->lock); in null_handle_rq()
1175 spin_unlock_irq(&nullb->lock); in null_handle_rq()
1182 struct bio *bio = cmd->bio; in null_handle_bio()
1183 struct nullb *nullb = cmd->nq->dev->nullb; in null_handle_bio()
1190 sector = bio->bi_iter.bi_sector; in null_handle_bio()
1198 spin_lock_irq(&nullb->lock); in null_handle_bio()
1203 bio->bi_opf & REQ_FUA); in null_handle_bio()
1205 spin_unlock_irq(&nullb->lock); in null_handle_bio()
1210 spin_unlock_irq(&nullb->lock); in null_handle_bio()
1216 struct request_queue *q = nullb->q; in null_stop_queue()
1218 if (nullb->dev->queue_mode == NULL_Q_MQ) in null_stop_queue()
1224 struct request_queue *q = nullb->q; in null_restart_queue_async()
1226 if (nullb->dev->queue_mode == NULL_Q_MQ) in null_restart_queue_async()
1232 struct nullb_device *dev = cmd->nq->dev; in null_handle_throttled()
1233 struct nullb *nullb = dev->nullb; in null_handle_throttled()
1235 struct request *rq = cmd->rq; in null_handle_throttled()
1237 if (!hrtimer_active(&nullb->bw_timer)) in null_handle_throttled()
1238 hrtimer_restart(&nullb->bw_timer); in null_handle_throttled()
1240 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) { in null_handle_throttled()
1243 if (atomic_long_read(&nullb->cur_bytes) > 0) in null_handle_throttled()
1255 struct badblocks *bb = &cmd->nq->dev->badblocks; in null_handle_badblocks()
1268 struct nullb_device *dev = cmd->nq->dev; in null_handle_memory_backed()
1271 if (dev->queue_mode == NULL_Q_BIO) in null_handle_memory_backed()
1281 struct nullb_device *dev = cmd->nq->dev; in nullb_zero_read_cmd_buffer()
1284 if (dev->memory_backed) in nullb_zero_read_cmd_buffer()
1287 if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) { in nullb_zero_read_cmd_buffer()
1288 zero_fill_bio(cmd->bio); in nullb_zero_read_cmd_buffer()
1289 } else if (req_op(cmd->rq) == REQ_OP_READ) { in nullb_zero_read_cmd_buffer()
1290 __rq_for_each_bio(bio, cmd->rq) in nullb_zero_read_cmd_buffer()
1300 * data buffers of read commands. Zero-initialize these buffers in nullb_complete_cmd()
1307 /* Complete IO by inline, softirq or timer */ in nullb_complete_cmd()
1308 switch (cmd->nq->dev->irqmode) { in nullb_complete_cmd()
1310 switch (cmd->nq->dev->queue_mode) { in nullb_complete_cmd()
1312 if (likely(!blk_should_fake_timeout(cmd->rq->q))) in nullb_complete_cmd()
1313 blk_mq_complete_request(cmd->rq); in nullb_complete_cmd()
1336 struct nullb_device *dev = cmd->nq->dev; in null_process_cmd()
1339 if (dev->badblocks.shift != -1) { in null_process_cmd()
1345 if (dev->memory_backed) in null_process_cmd()
1354 struct nullb_device *dev = cmd->nq->dev; in null_handle_cmd()
1355 struct nullb *nullb = dev->nullb; in null_handle_cmd()
1358 if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { in null_handle_cmd()
1365 cmd->error = errno_to_blk_status(null_handle_flush(nullb)); in null_handle_cmd()
1369 if (dev->zoned) in null_handle_cmd()
1370 cmd->error = null_process_zoned_cmd(cmd, op, in null_handle_cmd()
1373 cmd->error = null_process_cmd(cmd, op, sector, nr_sectors); in null_handle_cmd()
1384 unsigned int mbps = nullb->dev->mbps; in nullb_bwtimer_fn()
1386 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps)) in nullb_bwtimer_fn()
1389 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); in nullb_bwtimer_fn()
1392 hrtimer_forward_now(&nullb->bw_timer, timer_interval); in nullb_bwtimer_fn()
1401 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in nullb_setup_bwtimer()
1402 nullb->bw_timer.function = nullb_bwtimer_fn; in nullb_setup_bwtimer()
1403 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps)); in nullb_setup_bwtimer()
1404 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); in nullb_setup_bwtimer()
1411 if (nullb->nr_queues != 1) in nullb_to_queue()
1412 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); in nullb_to_queue()
1414 return &nullb->queues[index]; in nullb_to_queue()
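The mapping in line 1412 spreads CPUs over the submission queues with a ceiling division. A worked example with assumed values (not from the file):

/*
 * nr_cpu_ids = 8, nullb->nr_queues = 3:
 *   per_queue = (8 + 3 - 1) / 3 = 3
 *   CPU 0-2 -> queues[0], CPU 3-5 -> queues[1], CPU 6-7 -> queues[2]
 *
 * With nr_queues == 1 the division is skipped and index stays 0.
 */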
1419 sector_t sector = bio->bi_iter.bi_sector; in null_submit_bio()
1421 struct nullb *nullb = bio->bi_disk->private_data; in null_submit_bio()
1426 cmd->bio = bio; in null_submit_bio()
1460 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); in null_queue_rq()
1461 struct nullb_queue *nq = hctx->driver_data; in null_queue_rq()
1462 sector_t nr_sectors = blk_rq_sectors(bd->rq); in null_queue_rq()
1463 sector_t sector = blk_rq_pos(bd->rq); in null_queue_rq()
1465 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in null_queue_rq()
1467 if (nq->dev->irqmode == NULL_IRQ_TIMER) { in null_queue_rq()
1468 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in null_queue_rq()
1469 cmd->timer.function = null_cmd_timer_expired; in null_queue_rq()
1471 cmd->rq = bd->rq; in null_queue_rq()
1472 cmd->error = BLK_STS_OK; in null_queue_rq()
1473 cmd->nq = nq; in null_queue_rq()
1475 blk_mq_start_request(bd->rq); in null_queue_rq()
1477 if (should_requeue_request(bd->rq)) { in null_queue_rq()
1482 nq->requeue_selection++; in null_queue_rq()
1483 if (nq->requeue_selection & 1) in null_queue_rq()
1486 blk_mq_requeue_request(bd->rq, true); in null_queue_rq()
1490 if (should_timeout_request(bd->rq)) in null_queue_rq()
1493 return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq)); in null_queue_rq()
1498 kfree(nq->tag_map); in cleanup_queue()
1499 kfree(nq->cmds); in cleanup_queue()
1506 for (i = 0; i < nullb->nr_queues; i++) in cleanup_queues()
1507 cleanup_queue(&nullb->queues[i]); in cleanup_queues()
1509 kfree(nullb->queues); in cleanup_queues()
1514 struct nullb_queue *nq = hctx->driver_data; in null_exit_hctx()
1515 struct nullb *nullb = nq->dev->nullb; in null_exit_hctx()
1517 nullb->nr_queues--; in null_exit_hctx()
1522 init_waitqueue_head(&nq->wait); in null_init_queue()
1523 nq->queue_depth = nullb->queue_depth; in null_init_queue()
1524 nq->dev = nullb->dev; in null_init_queue()
1530 struct nullb *nullb = hctx->queue->queuedata; in null_init_hctx()
1535 return -EFAULT; in null_init_hctx()
1538 nq = &nullb->queues[hctx_idx]; in null_init_hctx()
1539 hctx->driver_data = nq; in null_init_hctx()
1541 nullb->nr_queues++; in null_init_hctx()
1561 dev = nullb->dev; in null_del_dev()
1563 ida_simple_remove(&nullb_indexes, nullb->index); in null_del_dev()
1565 list_del_init(&nullb->list); in null_del_dev()
1567 del_gendisk(nullb->disk); in null_del_dev()
1569 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { in null_del_dev()
1570 hrtimer_cancel(&nullb->bw_timer); in null_del_dev()
1571 atomic_long_set(&nullb->cur_bytes, LONG_MAX); in null_del_dev()
1575 blk_cleanup_queue(nullb->q); in null_del_dev()
1576 if (dev->queue_mode == NULL_Q_MQ && in null_del_dev()
1577 nullb->tag_set == &nullb->__tag_set) in null_del_dev()
1578 blk_mq_free_tag_set(nullb->tag_set); in null_del_dev()
1579 put_disk(nullb->disk); in null_del_dev()
1582 null_free_device_storage(nullb->dev, true); in null_del_dev()
1584 dev->nullb = NULL; in null_del_dev()
1589 if (nullb->dev->discard == false) in null_config_discard()
1592 if (nullb->dev->zoned) { in null_config_discard()
1593 nullb->dev->discard = false; in null_config_discard()
1598 nullb->q->limits.discard_granularity = nullb->dev->blocksize; in null_config_discard()
1599 nullb->q->limits.discard_alignment = nullb->dev->blocksize; in null_config_discard()
1600 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); in null_config_discard()
1601 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q); in null_config_discard()
1620 nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL); in setup_commands()
1621 if (!nq->cmds) in setup_commands()
1622 return -ENOMEM; in setup_commands()
1624 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; in setup_commands()
1625 nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL); in setup_commands()
1626 if (!nq->tag_map) { in setup_commands()
1627 kfree(nq->cmds); in setup_commands()
1628 return -ENOMEM; in setup_commands()
1631 for (i = 0; i < nq->queue_depth; i++) { in setup_commands()
1632 cmd = &nq->cmds[i]; in setup_commands()
1633 cmd->tag = -1U; in setup_commands()
1641 nullb->queues = kcalloc(nr_cpu_ids, sizeof(struct nullb_queue), in setup_queues()
1643 if (!nullb->queues) in setup_queues()
1644 return -ENOMEM; in setup_queues()
1646 nullb->queue_depth = nullb->dev->hw_queue_depth; in setup_queues()
1656 for (i = 0; i < nullb->dev->submit_queues; i++) { in init_driver_queues()
1657 nq = &nullb->queues[i]; in init_driver_queues()
1664 nullb->nr_queues++; in init_driver_queues()
1671 sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT; in null_gendisk_register()
1674 disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node); in null_gendisk_register()
1676 return -ENOMEM; in null_gendisk_register()
1679 disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; in null_gendisk_register()
1680 disk->major = null_major; in null_gendisk_register()
1681 disk->first_minor = nullb->index; in null_gendisk_register()
1682 if (queue_is_mq(nullb->q)) in null_gendisk_register()
1683 disk->fops = &null_rq_ops; in null_gendisk_register()
1685 disk->fops = &null_bio_ops; in null_gendisk_register()
1686 disk->private_data = nullb; in null_gendisk_register()
1687 disk->queue = nullb->q; in null_gendisk_register()
1688 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); in null_gendisk_register()
1690 if (nullb->dev->zoned) { in null_gendisk_register()
1703 set->ops = &null_mq_ops; in null_init_tag_set()
1704 set->nr_hw_queues = nullb ? nullb->dev->submit_queues : in null_init_tag_set()
1706 set->queue_depth = nullb ? nullb->dev->hw_queue_depth : in null_init_tag_set()
1708 set->numa_node = nullb ? nullb->dev->home_node : g_home_node; in null_init_tag_set()
1709 set->cmd_size = sizeof(struct nullb_cmd); in null_init_tag_set()
1710 set->flags = BLK_MQ_F_SHOULD_MERGE; in null_init_tag_set()
1712 set->flags |= BLK_MQ_F_NO_SCHED; in null_init_tag_set()
1714 set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; in null_init_tag_set()
1715 set->driver_data = NULL; in null_init_tag_set()
1717 if ((nullb && nullb->dev->blocking) || g_blocking) in null_init_tag_set()
1718 set->flags |= BLK_MQ_F_BLOCKING; in null_init_tag_set()
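Lines 1717-1718 are where the blocking knob from the search term becomes BLK_MQ_F_BLOCKING on the tag set, and line 1465 is the matching might_sleep_if() assertion in ->queue_rq(). As a summary (my gloss, not text from the file):

/*
 * BLK_MQ_F_BLOCKING tells blk-mq that ->queue_rq() may sleep, so the
 * core dispatches from process context under SRCU instead of plain RCU.
 * null_blk also forces blocking on for memory-backed devices (lines
 * 1739-1741), since servicing a request may have to allocate backing
 * pages.
 */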
1725 dev->blocksize = round_down(dev->blocksize, 512); in null_validate_conf()
1726 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); in null_validate_conf()
1728 if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) { in null_validate_conf()
1729 if (dev->submit_queues != nr_online_nodes) in null_validate_conf()
1730 dev->submit_queues = nr_online_nodes; in null_validate_conf()
1731 } else if (dev->submit_queues > nr_cpu_ids) in null_validate_conf()
1732 dev->submit_queues = nr_cpu_ids; in null_validate_conf()
1733 else if (dev->submit_queues == 0) in null_validate_conf()
1734 dev->submit_queues = 1; in null_validate_conf()
1736 dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ); in null_validate_conf()
1737 dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER); in null_validate_conf()
1739 /* Do memory allocation, so set blocking */ in null_validate_conf()
1740 if (dev->memory_backed) in null_validate_conf()
1741 dev->blocking = true; in null_validate_conf()
1743 dev->cache_size = 0; in null_validate_conf()
1744 dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024, in null_validate_conf()
1745 dev->cache_size); in null_validate_conf()
1746 dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps); in null_validate_conf()
1748 if (dev->queue_mode == NULL_Q_BIO) in null_validate_conf()
1749 dev->mbps = 0; in null_validate_conf()
1751 if (dev->zoned && in null_validate_conf()
1752 (!dev->zone_size || !is_power_of_2(dev->zone_size))) { in null_validate_conf()
1753 pr_err("zone_size must be power-of-two\n"); in null_validate_conf()
1754 return -EINVAL; in null_validate_conf()
1769 attr->verbose = 0; in __null_setup_fault()
1796 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); in null_add_dev()
1798 rv = -ENOMEM; in null_add_dev()
1801 nullb->dev = dev; in null_add_dev()
1802 dev->nullb = nullb; in null_add_dev()
1804 spin_lock_init(&nullb->lock); in null_add_dev()
1810 if (dev->queue_mode == NULL_Q_MQ) { in null_add_dev()
1812 nullb->tag_set = &tag_set; in null_add_dev()
1815 nullb->tag_set = &nullb->__tag_set; in null_add_dev()
1816 rv = null_init_tag_set(nullb, nullb->tag_set); in null_add_dev()
1825 nullb->tag_set->timeout = 5 * HZ; in null_add_dev()
1826 nullb->q = blk_mq_init_queue_data(nullb->tag_set, nullb); in null_add_dev()
1827 if (IS_ERR(nullb->q)) { in null_add_dev()
1828 rv = -ENOMEM; in null_add_dev()
1831 } else if (dev->queue_mode == NULL_Q_BIO) { in null_add_dev()
1832 nullb->q = blk_alloc_queue(dev->home_node); in null_add_dev()
1833 if (!nullb->q) { in null_add_dev()
1834 rv = -ENOMEM; in null_add_dev()
1842 if (dev->mbps) { in null_add_dev()
1843 set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags); in null_add_dev()
1847 if (dev->cache_size > 0) { in null_add_dev()
1848 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); in null_add_dev()
1849 blk_queue_write_cache(nullb->q, true, true); in null_add_dev()
1852 if (dev->zoned) { in null_add_dev()
1853 rv = null_init_zoned_dev(dev, nullb->q); in null_add_dev()
1858 nullb->q->queuedata = nullb; in null_add_dev()
1859 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); in null_add_dev()
1860 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); in null_add_dev()
1863 nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); in null_add_dev()
1864 dev->index = nullb->index; in null_add_dev()
1867 blk_queue_logical_block_size(nullb->q, dev->blocksize); in null_add_dev()
1868 blk_queue_physical_block_size(nullb->q, dev->blocksize); in null_add_dev()
1872 sprintf(nullb->disk_name, "nullb%d", nullb->index); in null_add_dev()
1879 list_add_tail(&nullb->list, &nullb_list); in null_add_dev()
1886 blk_cleanup_queue(nullb->q); in null_add_dev()
1888 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) in null_add_dev()
1889 blk_mq_free_tag_set(nullb->tag_set); in null_add_dev()
1894 dev->nullb = NULL; in null_add_dev()
1918 pr_err("legacy IO path no longer available\n"); in null_init()
1919 return -EINVAL; in null_init()
1956 ret = -ENOMEM; in null_init()
1972 dev = nullb->dev; in null_init()
1998 dev = nullb->dev; in null_exit()