Lines Matching refs:nbd
66 struct nbd_device *nbd; member
128 struct nbd_device *nbd; member
142 #define nbd_name(nbd) ((nbd)->disk->disk_name) argument
152 static int nbd_dev_dbg_init(struct nbd_device *nbd);
153 static void nbd_dev_dbg_close(struct nbd_device *nbd);
154 static void nbd_config_put(struct nbd_device *nbd);
158 static void nbd_disconnect_and_put(struct nbd_device *nbd);
160 static inline struct device *nbd_to_dev(struct nbd_device *nbd) in nbd_to_dev() argument
162 return disk_to_dev(nbd->disk); in nbd_to_dev()
210 struct nbd_device *nbd = (struct nbd_device *)disk->private_data; in pid_show() local
212 return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); in pid_show()
220 static void nbd_dev_remove(struct nbd_device *nbd) in nbd_dev_remove() argument
222 struct gendisk *disk = nbd->disk; in nbd_dev_remove()
229 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_remove()
240 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete) in nbd_dev_remove()
241 complete(nbd->destroy_complete); in nbd_dev_remove()
243 kfree(nbd); in nbd_dev_remove()
246 static void nbd_put(struct nbd_device *nbd) in nbd_put() argument
248 if (refcount_dec_and_mutex_lock(&nbd->refs, in nbd_put()
250 idr_remove(&nbd_index_idr, nbd->index); in nbd_put()
251 nbd_dev_remove(nbd); in nbd_put()
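
The nbd_put() lines above show the last-reference teardown: refcount_dec_and_mutex_lock() takes the index mutex atomically with the count reaching zero, so an idr lookup can never race with nbd_dev_remove(). A minimal userspace sketch of the same dec-and-lock discipline, assuming C11 atomics and pthreads; all names here (dec_and_lock, dev_put, registry) are illustrative, not driver code:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct dev {
	atomic_int refs;
	int index;
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev *registry[16];        /* stands in for nbd_index_idr */

/* refcount_dec_and_mutex_lock() shape: returns 1 with the lock held
 * only if this caller dropped the count to zero */
static int dec_and_lock(atomic_int *r, pthread_mutex_t *m)
{
	int v = atomic_load(r);

	while (v > 1)                   /* fast path: not the last ref */
		if (atomic_compare_exchange_weak(r, &v, v - 1))
			return 0;
	pthread_mutex_lock(m);          /* might be the last ref */
	if (atomic_fetch_sub(r, 1) == 1)
		return 1;               /* caller now owns teardown */
	pthread_mutex_unlock(m);
	return 0;
}

static void dev_put(struct dev *d)
{
	if (dec_and_lock(&d->refs, &registry_lock)) {
		registry[d->index] = NULL;      /* idr_remove() equivalent */
		pthread_mutex_unlock(&registry_lock);
		free(d);                        /* nbd_dev_remove() equivalent */
	}
}
```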
262 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, in nbd_mark_nsock_dead() argument
265 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) { in nbd_mark_nsock_dead()
270 args->index = nbd->index; in nbd_mark_nsock_dead()
276 if (atomic_dec_return(&nbd->config->live_connections) == 0) { in nbd_mark_nsock_dead()
278 &nbd->config->runtime_flags)) { in nbd_mark_nsock_dead()
280 &nbd->config->runtime_flags); in nbd_mark_nsock_dead()
281 dev_info(nbd_to_dev(nbd), in nbd_mark_nsock_dead()
291 static void nbd_size_clear(struct nbd_device *nbd) in nbd_size_clear() argument
293 if (nbd->config->bytesize) { in nbd_size_clear()
294 set_capacity(nbd->disk, 0); in nbd_size_clear()
295 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); in nbd_size_clear()
299 static void nbd_size_update(struct nbd_device *nbd, bool start) in nbd_size_update() argument
301 struct nbd_config *config = nbd->config; in nbd_size_update()
302 struct block_device *bdev = bdget_disk(nbd->disk, 0); in nbd_size_update()
306 nbd->disk->queue->limits.discard_granularity = config->blksize; in nbd_size_update()
307 nbd->disk->queue->limits.discard_alignment = config->blksize; in nbd_size_update()
308 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); in nbd_size_update()
310 blk_queue_logical_block_size(nbd->disk->queue, config->blksize); in nbd_size_update()
311 blk_queue_physical_block_size(nbd->disk->queue, config->blksize); in nbd_size_update()
312 set_capacity(nbd->disk, nr_sectors); in nbd_size_update()
319 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); in nbd_size_update()
322 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); in nbd_size_update()
325 static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, in nbd_size_set() argument
328 struct nbd_config *config = nbd->config; in nbd_size_set()
331 if (nbd->task_recv != NULL) in nbd_size_set()
332 nbd_size_update(nbd, false); in nbd_size_set()
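
nbd_size_update() translates the byte-based config into what the block layer wants: queue limits in the configured block size and a capacity in 512-byte sectors for set_capacity(). A hedged userspace sketch of that arithmetic (the values are examples, not defaults):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t blksize    = 4096;            /* config->blksize */
	uint64_t nr_blocks  = 262144;          /* e.g. from NBD_SET_SIZE_BLOCKS */
	uint64_t bytesize   = blksize * nr_blocks;
	uint64_t nr_sectors = bytesize >> 9;   /* what set_capacity() receives */

	printf("bytesize=%llu nr_sectors=%llu\n",
	       (unsigned long long)bytesize,
	       (unsigned long long)nr_sectors);
	return 0;
}
```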
339 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req, in nbd_complete_rq()
348 static void sock_shutdown(struct nbd_device *nbd) in sock_shutdown() argument
350 struct nbd_config *config = nbd->config; in sock_shutdown()
361 nbd_mark_nsock_dead(nbd, nsock, 0); in sock_shutdown()
364 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); in sock_shutdown()
387 struct nbd_device *nbd = cmd->nbd; in nbd_xmit_timeout() local
393 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_xmit_timeout()
398 config = nbd->config; in nbd_xmit_timeout()
401 (config->num_connections == 1 && nbd->tag_set.timeout)) { in nbd_xmit_timeout()
402 dev_err_ratelimited(nbd_to_dev(nbd), in nbd_xmit_timeout()
424 nbd_mark_nsock_dead(nbd, nsock, 1); in nbd_xmit_timeout()
429 nbd_config_put(nbd); in nbd_xmit_timeout()
434 if (!nbd->tag_set.timeout) { in nbd_xmit_timeout()
441 …dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n", in nbd_xmit_timeout()
451 nbd_config_put(nbd); in nbd_xmit_timeout()
456 nbd_config_put(nbd); in nbd_xmit_timeout()
460 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n"); in nbd_xmit_timeout()
464 sock_shutdown(nbd); in nbd_xmit_timeout()
465 nbd_config_put(nbd); in nbd_xmit_timeout()
474 static int sock_xmit(struct nbd_device *nbd, int index, int send, in sock_xmit() argument
477 struct nbd_config *config = nbd->config; in sock_xmit()
484 dev_err_ratelimited(disk_to_dev(nbd->disk), in sock_xmit()
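
sock_xmit() is the driver's single transmit/receive primitive: it loops until the whole kvec has been moved, tracking partial progress so a resend can resume at the right offset. A userspace analogue built on sendmsg(2); this is a sketch of the looping shape, not the driver's code:

```c
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <errno.h>
#include <stddef.h>

/* keep sending until the whole buffer has gone, as sock_xmit()
 * does with its kvec; returns bytes sent, or -1 on a clean error */
static ssize_t xmit_all(int fd, void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		struct iovec iov = {
			.iov_base = (char *)buf + done,
			.iov_len  = len - done,
		};
		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
		ssize_t n = sendmsg(fd, &msg, MSG_NOSIGNAL);

		if (n < 0) {
			if (errno == EINTR)
				continue;
			return done ? (ssize_t)done : -1;
		}
		done += (size_t)n;
	}
	return (ssize_t)done;
}
```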
530 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) in nbd_send_cmd() argument
533 struct nbd_config *config = nbd->config; in nbd_send_cmd()
554 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
590 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd)); in nbd_send_cmd()
592 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", in nbd_send_cmd()
595 result = sock_xmit(nbd, index, 1, &from, in nbd_send_cmd()
612 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
630 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", in nbd_send_cmd()
641 result = sock_xmit(nbd, index, 1, &from, flags, &sent); in nbd_send_cmd()
653 dev_err(disk_to_dev(nbd->disk), in nbd_send_cmd()
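
nbd_send_cmd() serializes each request as a fixed big-endian header (struct nbd_request from the uapi <linux/nbd.h>) followed, for writes, by the payload; the 8-byte handle is what nbd_read_stat() later uses to match the reply. Building the same header from userspace with the uapi definitions:

```c
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htonl() */
#include <endian.h>      /* htobe64() */
#include <linux/nbd.h>   /* struct nbd_request, NBD_REQUEST_MAGIC, NBD_CMD_* */

static void fill_read_request(struct nbd_request *req,
			      const char cookie[8],
			      uint64_t offset, uint32_t length)
{
	memset(req, 0, sizeof(*req));
	req->magic = htonl(NBD_REQUEST_MAGIC);
	req->type  = htonl(NBD_CMD_READ);
	memcpy(req->handle, cookie, 8);   /* echoed back in struct nbd_reply */
	req->from  = htobe64(offset);
	req->len   = htonl(length);
}
```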
677 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) in nbd_read_stat() argument
679 struct nbd_config *config = nbd->config; in nbd_read_stat()
693 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); in nbd_read_stat()
696 dev_err(disk_to_dev(nbd->disk), in nbd_read_stat()
702 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", in nbd_read_stat()
710 if (hwq < nbd->tag_set.nr_hw_queues) in nbd_read_stat()
711 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], in nbd_read_stat()
714 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", in nbd_read_stat()
723 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", in nbd_read_stat()
729 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n", in nbd_read_stat()
735 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", in nbd_read_stat()
741 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", in nbd_read_stat()
747 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); in nbd_read_stat()
754 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); in nbd_read_stat()
756 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", in nbd_read_stat()
771 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", in nbd_read_stat()
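
The lookup above (blk_mq_unique_tag_to_hwq()/blk_mq_tag_to_rq() plus the cmd_cookie comparison) works because the handle sent with each request packs the blk-mq tag and a reuse-detection cookie into one 64-bit value. In this driver version the unpacking appears to be tag in the low 32 bits, cookie in the high 32; a sketch of that arithmetic:

```c
#include <stdint.h>

/* assumed layout: | cmd_cookie (32) | blk-mq unique tag (32) | */
static uint32_t handle_to_tag(uint64_t handle)
{
	return (uint32_t)handle;
}

static uint32_t handle_to_cookie(uint64_t handle)
{
	return (uint32_t)(handle >> 32);
}
```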
786 struct nbd_device *nbd = args->nbd; in recv_work() local
787 struct nbd_config *config = nbd->config; in recv_work()
792 cmd = nbd_read_stat(nbd, args->index); in recv_work()
797 nbd_mark_nsock_dead(nbd, nsock, 1); in recv_work()
806 nbd_config_put(nbd); in recv_work()
824 static void nbd_clear_que(struct nbd_device *nbd) in nbd_clear_que() argument
826 blk_mq_quiesce_queue(nbd->disk->queue); in nbd_clear_que()
827 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); in nbd_clear_que()
828 blk_mq_unquiesce_queue(nbd->disk->queue); in nbd_clear_que()
829 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); in nbd_clear_que()
832 static int find_fallback(struct nbd_device *nbd, int index) in find_fallback() argument
834 struct nbd_config *config = nbd->config; in find_fallback()
843 dev_err_ratelimited(disk_to_dev(nbd->disk), in find_fallback()
866 dev_err_ratelimited(disk_to_dev(nbd->disk), in find_fallback()
875 static int wait_for_reconnect(struct nbd_device *nbd) in wait_for_reconnect() argument
877 struct nbd_config *config = nbd->config; in wait_for_reconnect()
890 struct nbd_device *nbd = cmd->nbd; in nbd_handle_cmd() local
895 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_handle_cmd()
896 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
901 config = nbd->config; in nbd_handle_cmd()
904 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
906 nbd_config_put(nbd); in nbd_handle_cmd()
916 index = find_fallback(nbd, index); in nbd_handle_cmd()
919 if (wait_for_reconnect(nbd)) { in nbd_handle_cmd()
929 sock_shutdown(nbd); in nbd_handle_cmd()
930 nbd_config_put(nbd); in nbd_handle_cmd()
952 ret = nbd_send_cmd(nbd, cmd, index); in nbd_handle_cmd()
954 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
956 nbd_mark_nsock_dead(nbd, nsock, 1); in nbd_handle_cmd()
962 nbd_config_put(nbd); in nbd_handle_cmd()
999 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, in nbd_get_socket() argument
1010 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); in nbd_get_socket()
1019 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, in nbd_add_socket() argument
1022 struct nbd_config *config = nbd->config; in nbd_add_socket()
1028 sock = nbd_get_socket(nbd, arg, &err); in nbd_add_socket()
1032 if (!netlink && !nbd->task_setup && in nbd_add_socket()
1034 nbd->task_setup = current; in nbd_add_socket()
1037 (nbd->task_setup != current || in nbd_add_socket()
1039 dev_err(disk_to_dev(nbd->disk), in nbd_add_socket()
1078 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) in nbd_reconnect_socket() argument
1080 struct nbd_config *config = nbd->config; in nbd_reconnect_socket()
1086 sock = nbd_get_socket(nbd, arg, &err); in nbd_reconnect_socket()
1108 if (nbd->tag_set.timeout) in nbd_reconnect_socket()
1109 sock->sk->sk_sndtimeo = nbd->tag_set.timeout; in nbd_reconnect_socket()
1111 refcount_inc(&nbd->config_refs); in nbd_reconnect_socket()
1118 args->nbd = nbd; in nbd_reconnect_socket()
1128 queue_work(nbd->recv_workq, &args->work); in nbd_reconnect_socket()
1146 static void nbd_parse_flags(struct nbd_device *nbd) in nbd_parse_flags() argument
1148 struct nbd_config *config = nbd->config; in nbd_parse_flags()
1150 set_disk_ro(nbd->disk, true); in nbd_parse_flags()
1152 set_disk_ro(nbd->disk, false); in nbd_parse_flags()
1154 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue); in nbd_parse_flags()
1157 blk_queue_write_cache(nbd->disk->queue, true, true); in nbd_parse_flags()
1159 blk_queue_write_cache(nbd->disk->queue, true, false); in nbd_parse_flags()
1162 blk_queue_write_cache(nbd->disk->queue, false, false); in nbd_parse_flags()
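
nbd_parse_flags() maps the server's transmission flags onto block-layer features: NBD_FLAG_READ_ONLY sets the disk read-only, NBD_FLAG_SEND_TRIM enables discard, and NBD_FLAG_SEND_FLUSH with or without NBD_FLAG_SEND_FUA selects the write-cache mode. Decoding the same uapi bits from userspace:

```c
#include <stdio.h>
#include <linux/nbd.h>   /* NBD_FLAG_* transmission flags */

static void describe_flags(unsigned int flags)
{
	printf("ro=%d trim=%d flush=%d fua=%d\n",
	       !!(flags & NBD_FLAG_READ_ONLY),
	       !!(flags & NBD_FLAG_SEND_TRIM),
	       !!(flags & NBD_FLAG_SEND_FLUSH),
	       !!(flags & NBD_FLAG_SEND_FUA));
}
```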
1165 static void send_disconnects(struct nbd_device *nbd) in send_disconnects() argument
1167 struct nbd_config *config = nbd->config; in send_disconnects()
1181 ret = sock_xmit(nbd, i, 1, &from, 0, NULL); in send_disconnects()
1183 dev_err(disk_to_dev(nbd->disk), in send_disconnects()
1189 static int nbd_disconnect(struct nbd_device *nbd) in nbd_disconnect() argument
1191 struct nbd_config *config = nbd->config; in nbd_disconnect()
1193 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); in nbd_disconnect()
1195 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags); in nbd_disconnect()
1196 send_disconnects(nbd); in nbd_disconnect()
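
send_disconnects() writes one NBD_CMD_DISC header per live connection; the disconnect request carries no offset, length, or payload. A sketch of that header, using the same uapi definitions as the read-request example above:

```c
#include <string.h>
#include <arpa/inet.h>
#include <linux/nbd.h>

static struct nbd_request make_disc_request(void)
{
	struct nbd_request req;

	memset(&req, 0, sizeof(req));          /* no handle/offset/len needed */
	req.magic = htonl(NBD_REQUEST_MAGIC);
	req.type  = htonl(NBD_CMD_DISC);
	return req;
}
```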
1200 static void nbd_clear_sock(struct nbd_device *nbd) in nbd_clear_sock() argument
1202 sock_shutdown(nbd); in nbd_clear_sock()
1203 nbd_clear_que(nbd); in nbd_clear_sock()
1204 nbd->task_setup = NULL; in nbd_clear_sock()
1207 static void nbd_config_put(struct nbd_device *nbd) in nbd_config_put() argument
1209 if (refcount_dec_and_mutex_lock(&nbd->config_refs, in nbd_config_put()
1210 &nbd->config_lock)) { in nbd_config_put()
1211 struct nbd_config *config = nbd->config; in nbd_config_put()
1212 nbd_dev_dbg_close(nbd); in nbd_config_put()
1213 nbd_size_clear(nbd); in nbd_config_put()
1216 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); in nbd_config_put()
1217 nbd->task_recv = NULL; in nbd_config_put()
1218 nbd_clear_sock(nbd); in nbd_config_put()
1227 kfree(nbd->config); in nbd_config_put()
1228 nbd->config = NULL; in nbd_config_put()
1230 if (nbd->recv_workq) in nbd_config_put()
1231 destroy_workqueue(nbd->recv_workq); in nbd_config_put()
1232 nbd->recv_workq = NULL; in nbd_config_put()
1234 nbd->tag_set.timeout = 0; in nbd_config_put()
1235 nbd->disk->queue->limits.discard_granularity = 0; in nbd_config_put()
1236 nbd->disk->queue->limits.discard_alignment = 0; in nbd_config_put()
1237 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); in nbd_config_put()
1238 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue); in nbd_config_put()
1240 mutex_unlock(&nbd->config_lock); in nbd_config_put()
1241 nbd_put(nbd); in nbd_config_put()
1246 static int nbd_start_device(struct nbd_device *nbd) in nbd_start_device() argument
1248 struct nbd_config *config = nbd->config; in nbd_start_device()
1252 if (nbd->task_recv) in nbd_start_device()
1258 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n"); in nbd_start_device()
1262 nbd->recv_workq = alloc_workqueue("knbd%d-recv", in nbd_start_device()
1264 WQ_UNBOUND, 0, nbd->index); in nbd_start_device()
1265 if (!nbd->recv_workq) { in nbd_start_device()
1266 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); in nbd_start_device()
1270 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); in nbd_start_device()
1271 nbd->task_recv = current; in nbd_start_device()
1273 nbd_parse_flags(nbd); in nbd_start_device()
1275 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr); in nbd_start_device()
1277 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); in nbd_start_device()
1282 nbd_dev_dbg_init(nbd); in nbd_start_device()
1288 sock_shutdown(nbd); in nbd_start_device()
1298 flush_workqueue(nbd->recv_workq); in nbd_start_device()
1302 if (nbd->tag_set.timeout) in nbd_start_device()
1304 nbd->tag_set.timeout; in nbd_start_device()
1306 refcount_inc(&nbd->config_refs); in nbd_start_device()
1308 args->nbd = nbd; in nbd_start_device()
1310 queue_work(nbd->recv_workq, &args->work); in nbd_start_device()
1312 nbd_size_update(nbd, true); in nbd_start_device()
1316 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev) in nbd_start_device_ioctl() argument
1318 struct nbd_config *config = nbd->config; in nbd_start_device_ioctl()
1321 ret = nbd_start_device(nbd); in nbd_start_device_ioctl()
1326 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); in nbd_start_device_ioctl()
1327 mutex_unlock(&nbd->config_lock); in nbd_start_device_ioctl()
1331 sock_shutdown(nbd); in nbd_start_device_ioctl()
1332 flush_workqueue(nbd->recv_workq); in nbd_start_device_ioctl()
1334 mutex_lock(&nbd->config_lock); in nbd_start_device_ioctl()
1344 static void nbd_clear_sock_ioctl(struct nbd_device *nbd, in nbd_clear_sock_ioctl() argument
1347 sock_shutdown(nbd); in nbd_clear_sock_ioctl()
1351 &nbd->config->runtime_flags)) in nbd_clear_sock_ioctl()
1352 nbd_config_put(nbd); in nbd_clear_sock_ioctl()
1363 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout) in nbd_set_cmd_timeout() argument
1365 nbd->tag_set.timeout = timeout * HZ; in nbd_set_cmd_timeout()
1367 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); in nbd_set_cmd_timeout()
1369 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ); in nbd_set_cmd_timeout()
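
nbd_set_cmd_timeout() converts a seconds value to jiffies for blk_queue_rq_timeout() and, when userspace passes 0, falls back to a 30-second request timeout. From userspace this path is reached through the NBD_SET_TIMEOUT ioctl; a minimal sketch:

```c
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nbd.h>

static int set_nbd_timeout(const char *dev, unsigned long seconds)
{
	int fd = open(dev, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, NBD_SET_TIMEOUT, seconds); /* 0 => kernel's 30 s default */
	close(fd);
	return ret;
}
```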
1373 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, in __nbd_ioctl() argument
1376 struct nbd_config *config = nbd->config; in __nbd_ioctl()
1380 return nbd_disconnect(nbd); in __nbd_ioctl()
1382 nbd_clear_sock_ioctl(nbd, bdev); in __nbd_ioctl()
1385 return nbd_add_socket(nbd, arg, false); in __nbd_ioctl()
1391 nbd_size_set(nbd, arg, in __nbd_ioctl()
1395 nbd_size_set(nbd, config->blksize, in __nbd_ioctl()
1399 nbd_size_set(nbd, config->blksize, arg); in __nbd_ioctl()
1402 nbd_set_cmd_timeout(nbd, arg); in __nbd_ioctl()
1409 return nbd_start_device_ioctl(nbd, bdev); in __nbd_ioctl()
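
The dispatch in __nbd_ioctl() is the legacy control interface. A client typically sets the geometry, hands over a connected socket, then issues NBD_DO_IT, which blocks inside nbd_start_device_ioctl() until the device is disconnected. A hedged sketch with error handling trimmed:

```c
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nbd.h>

static int run_legacy_nbd(const char *dev, int sock_fd,
			  unsigned long blksize, unsigned long nr_blocks)
{
	int nbd_fd = open(dev, O_RDWR);
	int ret;

	if (nbd_fd < 0)
		return -1;

	ioctl(nbd_fd, NBD_SET_BLKSIZE, blksize);      /* -> nbd_size_set() */
	ioctl(nbd_fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
	ioctl(nbd_fd, NBD_SET_SOCK, sock_fd);         /* -> nbd_add_socket() */

	ret = ioctl(nbd_fd, NBD_DO_IT);               /* blocks until disconnect */
	close(nbd_fd);
	return ret;
}
```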
1429 struct nbd_device *nbd = bdev->bd_disk->private_data; in nbd_ioctl() local
1430 struct nbd_config *config = nbd->config; in nbd_ioctl()
1442 mutex_lock(&nbd->config_lock); in nbd_ioctl()
1449 error = __nbd_ioctl(bdev, nbd, cmd, arg); in nbd_ioctl()
1451 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n"); in nbd_ioctl()
1452 mutex_unlock(&nbd->config_lock); in nbd_ioctl()
1474 struct nbd_device *nbd; in nbd_open() local
1478 nbd = bdev->bd_disk->private_data; in nbd_open()
1479 if (!nbd) { in nbd_open()
1483 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_open()
1487 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_open()
1490 mutex_lock(&nbd->config_lock); in nbd_open()
1491 if (refcount_inc_not_zero(&nbd->config_refs)) { in nbd_open()
1492 mutex_unlock(&nbd->config_lock); in nbd_open()
1495 config = nbd->config = nbd_alloc_config(); in nbd_open()
1498 mutex_unlock(&nbd->config_lock); in nbd_open()
1501 refcount_set(&nbd->config_refs, 1); in nbd_open()
1502 refcount_inc(&nbd->refs); in nbd_open()
1503 mutex_unlock(&nbd->config_lock); in nbd_open()
1505 } else if (nbd_disconnected(nbd->config)) { in nbd_open()
1515 struct nbd_device *nbd = disk->private_data; in nbd_release() local
1518 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && in nbd_release()
1520 nbd_disconnect_and_put(nbd); in nbd_release()
1523 nbd_config_put(nbd); in nbd_release()
1524 nbd_put(nbd); in nbd_release()
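
nbd_open() uses a get-or-create pattern for nbd->config: a lockless refcount_inc_not_zero() fast path, then config_lock to allocate, with a second inc_not_zero() under the lock to catch a racing opener. The same shape in userspace C; all names here are illustrative:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct cfg { int placeholder; };

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cfg *cfg;                 /* stands in for nbd->config */
static atomic_int cfg_refs;             /* stands in for nbd->config_refs */

/* refcount_inc_not_zero(): take a reference only if one already exists */
static int inc_not_zero(atomic_int *r)
{
	int v = atomic_load(r);

	while (v)
		if (atomic_compare_exchange_weak(r, &v, v + 1))
			return 1;
	return 0;
}

static struct cfg *cfg_get(void)
{
	if (inc_not_zero(&cfg_refs))
		return cfg;                      /* lockless fast path */
	pthread_mutex_lock(&cfg_lock);
	if (inc_not_zero(&cfg_refs)) {           /* raced with another opener */
		pthread_mutex_unlock(&cfg_lock);
		return cfg;
	}
	cfg = calloc(1, sizeof(*cfg));           /* nbd_alloc_config() role */
	if (cfg)
		atomic_store(&cfg_refs, 1);
	pthread_mutex_unlock(&cfg_lock);
	return cfg;
}
```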
1540 struct nbd_device *nbd = s->private; in nbd_dbg_tasks_show() local
1542 if (nbd->task_recv) in nbd_dbg_tasks_show()
1543 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv)); in nbd_dbg_tasks_show()
1562 struct nbd_device *nbd = s->private; in nbd_dbg_flags_show() local
1563 u32 flags = nbd->config->flags; in nbd_dbg_flags_show()
1595 static int nbd_dev_dbg_init(struct nbd_device *nbd) in nbd_dev_dbg_init() argument
1598 struct nbd_config *config = nbd->config; in nbd_dev_dbg_init()
1603 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); in nbd_dev_dbg_init()
1605 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", in nbd_dev_dbg_init()
1606 nbd_name(nbd)); in nbd_dev_dbg_init()
1611 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops); in nbd_dev_dbg_init()
1613 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); in nbd_dev_dbg_init()
1615 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); in nbd_dev_dbg_init()
1620 static void nbd_dev_dbg_close(struct nbd_device *nbd) in nbd_dev_dbg_close() argument
1622 debugfs_remove_recursive(nbd->config->dbg_dir); in nbd_dev_dbg_close()
1645 static int nbd_dev_dbg_init(struct nbd_device *nbd) in nbd_dev_dbg_init() argument
1650 static void nbd_dev_dbg_close(struct nbd_device *nbd) in nbd_dev_dbg_close() argument
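
The debugfs side is standard boilerplate: one per-device directory under the driver's root, read-only files, debugfs_remove_recursive() on teardown, and empty stubs when CONFIG_DEBUG_FS is off (the two one-line definitions above). A kernel-side sketch of the same calls; the mydev_* names are illustrative:

```c
#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *mydev_dbg_init(const char *name, void *priv,
				     const struct file_operations *fops,
				     u32 *timeout)
{
	/* parent NULL = debugfs root; nbd uses a driver-wide nbd_dbg_dir */
	struct dentry *dir = debugfs_create_dir(name, NULL);

	if (IS_ERR_OR_NULL(dir))
		return NULL;
	debugfs_create_file("tasks", 0444, dir, priv, fops);
	debugfs_create_u32("timeout", 0444, dir, timeout);
	return dir;     /* pair with debugfs_remove_recursive(dir) */
}
```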
1669 cmd->nbd = set->driver_data; in nbd_init_request()
1684 struct nbd_device *nbd; in nbd_dev_add() local
1689 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL); in nbd_dev_add()
1690 if (!nbd) in nbd_dev_add()
1698 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1, in nbd_dev_add()
1703 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); in nbd_dev_add()
1710 nbd->index = index; in nbd_dev_add()
1711 nbd->disk = disk; in nbd_dev_add()
1712 nbd->tag_set.ops = &nbd_mq_ops; in nbd_dev_add()
1713 nbd->tag_set.nr_hw_queues = 1; in nbd_dev_add()
1714 nbd->tag_set.queue_depth = 128; in nbd_dev_add()
1715 nbd->tag_set.numa_node = NUMA_NO_NODE; in nbd_dev_add()
1716 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); in nbd_dev_add()
1717 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | in nbd_dev_add()
1719 nbd->tag_set.driver_data = nbd; in nbd_dev_add()
1720 nbd->destroy_complete = NULL; in nbd_dev_add()
1722 err = blk_mq_alloc_tag_set(&nbd->tag_set); in nbd_dev_add()
1726 q = blk_mq_init_queue(&nbd->tag_set); in nbd_dev_add()
1746 mutex_init(&nbd->config_lock); in nbd_dev_add()
1747 refcount_set(&nbd->config_refs, 0); in nbd_dev_add()
1748 refcount_set(&nbd->refs, 1); in nbd_dev_add()
1749 INIT_LIST_HEAD(&nbd->list); in nbd_dev_add()
1753 disk->private_data = nbd; in nbd_dev_add()
1760 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_add()
1766 kfree(nbd); in nbd_dev_add()
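
nbd_dev_add() follows the standard blk-mq bring-up of this kernel era: populate a blk_mq_tag_set, blk_mq_alloc_tag_set(), then create the queue from the set, freeing the set again if queue creation fails. A minimal kernel-side sketch of that sequence (mydev_* names are illustrative, error handling trimmed to the essentials):

```c
#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/errno.h>

static int mydev_init_queue(struct blk_mq_tag_set *set,
			    const struct blk_mq_ops *ops,
			    unsigned int cmd_size, void *drvdata)
{
	struct request_queue *q;
	int err;

	set->ops = ops;
	set->nr_hw_queues = 1;     /* nbd grows this later with
				      blk_mq_update_nr_hw_queues() */
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = cmd_size;  /* per-request driver payload */
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	set->driver_data = drvdata;

	err = blk_mq_alloc_tag_set(set);
	if (err)
		return err;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(q);
	}
	return 0;
}
```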
1773 struct nbd_device *nbd = ptr; in find_free_cb() local
1776 if (!refcount_read(&nbd->config_refs)) { in find_free_cb()
1777 *found = nbd; in find_free_cb()
1809 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd) in nbd_genl_size_set() argument
1811 struct nbd_config *config = nbd->config; in nbd_genl_size_set()
1829 nbd_size_set(nbd, bsize, div64_u64(bytes, bsize)); in nbd_genl_size_set()
1836 struct nbd_device *nbd = NULL; in nbd_genl_connect() local
1858 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd); in nbd_genl_connect()
1867 nbd = idr_find(&nbd_index_idr, new_index); in nbd_genl_connect()
1870 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_connect()
1871 if (!nbd) { in nbd_genl_connect()
1878 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_connect()
1881 if (!nbd) { in nbd_genl_connect()
1888 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && in nbd_genl_connect()
1889 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) { in nbd_genl_connect()
1890 nbd->destroy_complete = &destroy_complete; in nbd_genl_connect()
1898 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_connect()
1908 mutex_lock(&nbd->config_lock); in nbd_genl_connect()
1909 if (refcount_read(&nbd->config_refs)) { in nbd_genl_connect()
1910 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1911 nbd_put(nbd); in nbd_genl_connect()
1917 if (WARN_ON(nbd->config)) { in nbd_genl_connect()
1918 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1919 nbd_put(nbd); in nbd_genl_connect()
1922 config = nbd->config = nbd_alloc_config(); in nbd_genl_connect()
1923 if (!nbd->config) { in nbd_genl_connect()
1924 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1925 nbd_put(nbd); in nbd_genl_connect()
1929 refcount_set(&nbd->config_refs, 1); in nbd_genl_connect()
1932 ret = nbd_genl_size_set(info, nbd); in nbd_genl_connect()
1937 nbd_set_cmd_timeout(nbd, in nbd_genl_connect()
1952 set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags); in nbd_genl_connect()
1955 clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags); in nbd_genl_connect()
1988 ret = nbd_add_socket(nbd, fd, true); in nbd_genl_connect()
1993 ret = nbd_start_device(nbd); in nbd_genl_connect()
1995 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1998 refcount_inc(&nbd->config_refs); in nbd_genl_connect()
1999 nbd_connect_reply(info, nbd->index); in nbd_genl_connect()
2001 nbd_config_put(nbd); in nbd_genl_connect()
2003 nbd_put(nbd); in nbd_genl_connect()
2007 static void nbd_disconnect_and_put(struct nbd_device *nbd) in nbd_disconnect_and_put() argument
2009 mutex_lock(&nbd->config_lock); in nbd_disconnect_and_put()
2010 nbd_disconnect(nbd); in nbd_disconnect_and_put()
2011 nbd_clear_sock(nbd); in nbd_disconnect_and_put()
2012 mutex_unlock(&nbd->config_lock); in nbd_disconnect_and_put()
2018 flush_workqueue(nbd->recv_workq); in nbd_disconnect_and_put()
2020 &nbd->config->runtime_flags)) in nbd_disconnect_and_put()
2021 nbd_config_put(nbd); in nbd_disconnect_and_put()
2026 struct nbd_device *nbd; in nbd_genl_disconnect() local
2038 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_disconnect()
2039 if (!nbd) { in nbd_genl_disconnect()
2045 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_disconnect()
2052 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_genl_disconnect()
2053 nbd_put(nbd); in nbd_genl_disconnect()
2056 nbd_disconnect_and_put(nbd); in nbd_genl_disconnect()
2057 nbd_config_put(nbd); in nbd_genl_disconnect()
2058 nbd_put(nbd); in nbd_genl_disconnect()
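
nbd_genl_disconnect() and its siblings are reached over the "nbd" generic-netlink family (uapi linux/nbd-netlink.h). A sketch of sending NBD_CMD_DISCONNECT for one device index with libnl-3, assuming libnl-genl is available; this is not the nbd-client implementation:

```c
#include <stdint.h>
#include <linux/nbd-netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int nbd_nl_disconnect(uint32_t index)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = NULL;
	int family, ret = -1;

	if (!sk)
		return -1;
	if (genl_connect(sk))
		goto out;
	family = genl_ctrl_resolve(sk, NBD_GENL_FAMILY_NAME); /* "nbd" */
	if (family < 0)
		goto out;

	msg = nlmsg_alloc();
	if (!msg)
		goto out;
	if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
			 NBD_CMD_DISCONNECT, 0))
		goto out;
	nla_put_u32(msg, NBD_ATTR_INDEX, index);   /* which /dev/nbdX */

	ret = nl_send_auto(sk, msg) < 0 ? -1 : 0;
out:
	nlmsg_free(msg);        /* NULL-safe */
	nl_socket_free(sk);
	return ret;
}
```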
2064 struct nbd_device *nbd = NULL; in nbd_genl_reconfigure() local
2079 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_reconfigure()
2080 if (!nbd) { in nbd_genl_reconfigure()
2086 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_reconfigure()
2094 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_genl_reconfigure()
2095 dev_err(nbd_to_dev(nbd), in nbd_genl_reconfigure()
2097 nbd_put(nbd); in nbd_genl_reconfigure()
2101 mutex_lock(&nbd->config_lock); in nbd_genl_reconfigure()
2102 config = nbd->config; in nbd_genl_reconfigure()
2104 !nbd->task_recv) { in nbd_genl_reconfigure()
2105 dev_err(nbd_to_dev(nbd), in nbd_genl_reconfigure()
2111 ret = nbd_genl_size_set(info, nbd); in nbd_genl_reconfigure()
2116 nbd_set_cmd_timeout(nbd, in nbd_genl_reconfigure()
2129 set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags); in nbd_genl_reconfigure()
2133 refcount_inc(&nbd->refs); in nbd_genl_reconfigure()
2134 clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags); in nbd_genl_reconfigure()
2171 ret = nbd_reconnect_socket(nbd, fd); in nbd_genl_reconfigure()
2177 dev_info(nbd_to_dev(nbd), "reconnected socket\n"); in nbd_genl_reconfigure()
2181 mutex_unlock(&nbd->config_lock); in nbd_genl_reconfigure()
2182 nbd_config_put(nbd); in nbd_genl_reconfigure()
2183 nbd_put(nbd); in nbd_genl_reconfigure()
2185 nbd_put(nbd); in nbd_genl_reconfigure()
2229 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply) in populate_nbd_status() argument
2242 if (refcount_read(&nbd->config_refs)) in populate_nbd_status()
2247 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index); in populate_nbd_status()
2260 struct nbd_device *nbd = ptr; in status_cb() local
2261 return populate_nbd_status(nbd, (struct sk_buff *)data); in status_cb()
2300 struct nbd_device *nbd; in nbd_genl_status() local
2301 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_status()
2302 if (nbd) { in nbd_genl_status()
2303 ret = populate_nbd_status(nbd, reply); in nbd_genl_status()
2425 struct nbd_device *nbd = ptr; in nbd_exit_cb() local
2427 list_add_tail(&nbd->list, list); in nbd_exit_cb()
2433 struct nbd_device *nbd; in nbd_cleanup() local
2443 nbd = list_first_entry(&del_list, struct nbd_device, list); in nbd_cleanup()
2444 list_del_init(&nbd->list); in nbd_cleanup()
2445 if (refcount_read(&nbd->refs) != 1) in nbd_cleanup()
2447 nbd_put(nbd); in nbd_cleanup()