Lines matching refs:nbd: every source line in the Linux NBD block driver (drivers/block/nbd.c) that references the identifier "nbd", annotated with the enclosing function.
69 struct nbd_device *nbd; member
144 struct nbd_device *nbd; member
158 #define nbd_name(nbd) ((nbd)->disk->disk_name) argument
166 static int nbd_dev_dbg_init(struct nbd_device *nbd);
167 static void nbd_dev_dbg_close(struct nbd_device *nbd);
168 static void nbd_config_put(struct nbd_device *nbd);
172 static void nbd_disconnect_and_put(struct nbd_device *nbd);
174 static inline struct device *nbd_to_dev(struct nbd_device *nbd) in nbd_to_dev() argument
176 return disk_to_dev(nbd->disk); in nbd_to_dev()
224 struct nbd_device *nbd = (struct nbd_device *)disk->private_data; in pid_show() local
226 return sprintf(buf, "%d\n", nbd->pid); in pid_show()
238 struct nbd_device *nbd = (struct nbd_device *)disk->private_data; in backend_show() local
240 return sprintf(buf, "%s\n", nbd->backend ?: ""); in backend_show()
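The two sysfs show callbacks matched above are small enough to reassemble in full. A sketch of pid_show(); the declarations around the matched lines are filled in from the standard sysfs show signature, so treat the glue as assumed rather than quoted:

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	/* pid of the userspace process serving this device, 0 if none */
	return sprintf(buf, "%d\n", nbd->pid);
}

backend_show() has the same shape, printing nbd->backend ?: "" instead.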
248 static void nbd_dev_remove(struct nbd_device *nbd) in nbd_dev_remove() argument
250 struct gendisk *disk = nbd->disk; in nbd_dev_remove()
254 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_remove()
261 idr_remove(&nbd_index_idr, nbd->index); in nbd_dev_remove()
263 destroy_workqueue(nbd->recv_workq); in nbd_dev_remove()
264 kfree(nbd); in nbd_dev_remove()
272 static void nbd_put(struct nbd_device *nbd) in nbd_put() argument
274 if (!refcount_dec_and_test(&nbd->refs)) in nbd_put()
278 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) in nbd_put()
279 queue_work(nbd_del_wq, &nbd->remove_work); in nbd_put()
281 nbd_dev_remove(nbd); in nbd_put()
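nbd_put() is matched almost line for line, so the drop-side refcount pattern reassembles directly; only the early return and the else branch are filled in here:

static void nbd_put(struct nbd_device *nbd)
{
	if (!refcount_dec_and_test(&nbd->refs))
		return;			/* other holders remain */

	/* last reference: defer removal to the delete workqueue when the
	 * device is marked destroy-on-disconnect, otherwise tear down now */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
		queue_work(nbd_del_wq, &nbd->remove_work);
	else
		nbd_dev_remove(nbd);
}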
290 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, in nbd_mark_nsock_dead() argument
293 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) { in nbd_mark_nsock_dead()
298 args->index = nbd->index; in nbd_mark_nsock_dead()
304 if (atomic_dec_return(&nbd->config->live_connections) == 0) { in nbd_mark_nsock_dead()
306 &nbd->config->runtime_flags)) { in nbd_mark_nsock_dead()
308 &nbd->config->runtime_flags); in nbd_mark_nsock_dead()
309 dev_info(nbd_to_dev(nbd), in nbd_mark_nsock_dead()
319 static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, in nbd_set_size() argument
328 nbd->config->bytesize = bytesize; in nbd_set_size()
329 nbd->config->blksize_bits = __ffs(blksize); in nbd_set_size()
331 if (!nbd->pid) in nbd_set_size()
334 if (nbd->config->flags & NBD_FLAG_SEND_TRIM) { in nbd_set_size()
335 nbd->disk->queue->limits.discard_granularity = blksize; in nbd_set_size()
336 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); in nbd_set_size()
338 blk_queue_logical_block_size(nbd->disk->queue, blksize); in nbd_set_size()
339 blk_queue_physical_block_size(nbd->disk->queue, blksize); in nbd_set_size()
342 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); in nbd_set_size()
343 if (!set_capacity_and_notify(nbd->disk, bytesize >> 9)) in nbd_set_size()
344 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); in nbd_set_size()
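The nbd_set_size() hits above cover the whole body except the initial blksize validation (which never names "nbd" and so is not matched). A reassembled sketch; the max_part guard is assumed from the module parameter of that name:

static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
			loff_t blksize)
{
	/* (blksize defaulting and validation elided; it doesn't touch nbd) */

	nbd->config->bytesize = bytesize;
	nbd->config->blksize_bits = __ffs(blksize);

	if (!nbd->pid)		/* no server attached yet: just record sizes */
		return 0;

	if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, blksize);
	blk_queue_physical_block_size(nbd->disk->queue, blksize);

	if (max_part)		/* module parameter; assumed from context */
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	/* send a change uevent ourselves if set_capacity_and_notify() didn't */
	if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	return 0;
}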
352 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req, in nbd_complete_rq()
361 static void sock_shutdown(struct nbd_device *nbd) in sock_shutdown() argument
363 struct nbd_config *config = nbd->config; in sock_shutdown()
374 nbd_mark_nsock_dead(nbd, nsock, 0); in sock_shutdown()
377 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); in sock_shutdown()
399 struct nbd_device *nbd = cmd->nbd; in nbd_xmit_timeout() local
410 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_xmit_timeout()
416 config = nbd->config; in nbd_xmit_timeout()
419 (config->num_connections == 1 && nbd->tag_set.timeout)) { in nbd_xmit_timeout()
420 dev_err_ratelimited(nbd_to_dev(nbd), in nbd_xmit_timeout()
442 nbd_mark_nsock_dead(nbd, nsock, 1); in nbd_xmit_timeout()
447 nbd_config_put(nbd); in nbd_xmit_timeout()
452 if (!nbd->tag_set.timeout) { in nbd_xmit_timeout()
459 …dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n", in nbd_xmit_timeout()
469 nbd_config_put(nbd); in nbd_xmit_timeout()
474 nbd_config_put(nbd); in nbd_xmit_timeout()
478 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n"); in nbd_xmit_timeout()
483 sock_shutdown(nbd); in nbd_xmit_timeout()
484 nbd_config_put(nbd); in nbd_xmit_timeout()
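nbd_xmit_timeout() shows the reference discipline used throughout the driver: any path that dereferences nbd->config first takes a reference with refcount_inc_not_zero() and pairs every exit with nbd_config_put(). A condensed sketch of that shape only; the retry, multi-connection, and error-reporting branches of the real handler are elided:

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		/* config already gone: the device is being torn down, so
		 * fail the request (real code sets BLK_STS_TIMEOUT first) */
		return BLK_EH_DONE;
	}
	config = nbd->config;

	/* ... decide whether to retry on another connection, mark this
	 * socket dead with nbd_mark_nsock_dead(nbd, nsock, 1), or give up
	 * and shut everything down with sock_shutdown(nbd) ... */

	nbd_config_put(nbd);	/* every exit pairs with the inc above */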
494 static int sock_xmit(struct nbd_device *nbd, int index, int send, in sock_xmit() argument
497 struct nbd_config *config = nbd->config; in sock_xmit()
504 dev_err_ratelimited(disk_to_dev(nbd->disk), in sock_xmit()
550 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) in nbd_send_cmd() argument
553 struct nbd_config *config = nbd->config; in nbd_send_cmd()
574 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
610 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd)); in nbd_send_cmd()
612 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", in nbd_send_cmd()
615 result = sock_xmit(nbd, index, 1, &from, in nbd_send_cmd()
632 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
650 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", in nbd_send_cmd()
661 result = sock_xmit(nbd, index, 1, &from, flags, &sent); in nbd_send_cmd()
673 dev_err(disk_to_dev(nbd->disk), in nbd_send_cmd()
696 static int nbd_read_reply(struct nbd_device *nbd, int index, in nbd_read_reply() argument
705 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); in nbd_read_reply()
707 if (!nbd_disconnected(nbd->config)) in nbd_read_reply()
708 dev_err(disk_to_dev(nbd->disk), in nbd_read_reply()
714 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", in nbd_read_reply()
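nbd_read_reply() pairs a blocking MSG_WAITALL receive with a magic check. Reassembled below; the kvec/iov_iter setup and the "Receive control failed" message are filled in from the usual pattern (the iov_iter direction constant differs across kernel versions, so read that line as a sketch):

static int nbd_read_reply(struct nbd_device *nbd, int index,
			  struct nbd_reply *reply)
{
	struct kvec iov = { .iov_base = reply, .iov_len = sizeof(*reply) };
	struct iov_iter to;
	int result;

	reply->magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(*reply));	/* ITER_DEST on newer kernels */
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result < 0) {
		if (!nbd_disconnected(nbd->config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return result;
	}

	if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply->magic));
		return -EPROTO;
	}
	return 0;
}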
723 static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index, in nbd_handle_reply() argument
737 if (hwq < nbd->tag_set.nr_hw_queues) in nbd_handle_reply()
738 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], in nbd_handle_reply()
741 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", in nbd_handle_reply()
750 dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)", in nbd_handle_reply()
756 dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)", in nbd_handle_reply()
762 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", in nbd_handle_reply()
768 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n", in nbd_handle_reply()
774 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", in nbd_handle_reply()
780 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", in nbd_handle_reply()
786 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); in nbd_handle_reply()
794 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); in nbd_handle_reply()
796 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", in nbd_handle_reply()
804 if (nbd_disconnected(nbd->config)) { in nbd_handle_reply()
811 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", in nbd_handle_reply()
826 struct nbd_device *nbd = args->nbd; in recv_work() local
827 struct nbd_config *config = nbd->config; in recv_work()
828 struct request_queue *q = nbd->disk->queue; in recv_work()
836 if (nbd_read_reply(nbd, args->index, &reply)) in recv_work()
846 dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n", in recv_work()
851 cmd = nbd_handle_reply(nbd, args->index, &reply); in recv_work()
873 nbd_mark_nsock_dead(nbd, nsock, 1); in recv_work()
876 nbd_config_put(nbd); in recv_work()
902 static void nbd_clear_que(struct nbd_device *nbd) in nbd_clear_que() argument
904 blk_mq_quiesce_queue(nbd->disk->queue); in nbd_clear_que()
905 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); in nbd_clear_que()
906 blk_mq_unquiesce_queue(nbd->disk->queue); in nbd_clear_que()
907 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); in nbd_clear_que()
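All four lines of nbd_clear_que() are matched, so it reassembles verbatim: quiesce the queue so no new completions race in, fail every in-flight command via the tag-set iterator, then unquiesce:

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}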
910 static int find_fallback(struct nbd_device *nbd, int index) in find_fallback() argument
912 struct nbd_config *config = nbd->config; in find_fallback()
921 dev_err_ratelimited(disk_to_dev(nbd->disk), in find_fallback()
944 dev_err_ratelimited(disk_to_dev(nbd->disk), in find_fallback()
953 static int wait_for_reconnect(struct nbd_device *nbd) in wait_for_reconnect() argument
955 struct nbd_config *config = nbd->config; in wait_for_reconnect()
972 struct nbd_device *nbd = cmd->nbd; in nbd_handle_cmd() local
977 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_handle_cmd()
978 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
982 config = nbd->config; in nbd_handle_cmd()
985 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
987 nbd_config_put(nbd); in nbd_handle_cmd()
996 index = find_fallback(nbd, index); in nbd_handle_cmd()
999 if (wait_for_reconnect(nbd)) { in nbd_handle_cmd()
1009 sock_shutdown(nbd); in nbd_handle_cmd()
1010 nbd_config_put(nbd); in nbd_handle_cmd()
1031 ret = nbd_send_cmd(nbd, cmd, index); in nbd_handle_cmd()
1039 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
1041 nbd_mark_nsock_dead(nbd, nsock, 1); in nbd_handle_cmd()
1047 nbd_config_put(nbd); in nbd_handle_cmd()
1084 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, in nbd_get_socket() argument
1095 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); in nbd_get_socket()
1104 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, in nbd_add_socket() argument
1107 struct nbd_config *config = nbd->config; in nbd_add_socket()
1113 sock = nbd_get_socket(nbd, arg, &err); in nbd_add_socket()
1121 blk_mq_freeze_queue(nbd->disk->queue); in nbd_add_socket()
1123 if (!netlink && !nbd->task_setup && in nbd_add_socket()
1125 nbd->task_setup = current; in nbd_add_socket()
1128 (nbd->task_setup != current || in nbd_add_socket()
1130 dev_err(disk_to_dev(nbd->disk), in nbd_add_socket()
1161 blk_mq_unfreeze_queue(nbd->disk->queue); in nbd_add_socket()
1166 blk_mq_unfreeze_queue(nbd->disk->queue); in nbd_add_socket()
1171 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) in nbd_reconnect_socket() argument
1173 struct nbd_config *config = nbd->config; in nbd_reconnect_socket()
1179 sock = nbd_get_socket(nbd, arg, &err); in nbd_reconnect_socket()
1201 if (nbd->tag_set.timeout) in nbd_reconnect_socket()
1202 sock->sk->sk_sndtimeo = nbd->tag_set.timeout; in nbd_reconnect_socket()
1204 refcount_inc(&nbd->config_refs); in nbd_reconnect_socket()
1211 args->nbd = nbd; in nbd_reconnect_socket()
1221 queue_work(nbd->recv_workq, &args->work); in nbd_reconnect_socket()
1232 static void nbd_bdev_reset(struct nbd_device *nbd) in nbd_bdev_reset() argument
1234 if (disk_openers(nbd->disk) > 1) in nbd_bdev_reset()
1236 set_capacity(nbd->disk, 0); in nbd_bdev_reset()
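nbd_bdev_reset() is two matched lines plus one fill: the early return implied by the disk_openers() guard on line 1234. Capacity is only zeroed when nobody else holds the block device open:

static void nbd_bdev_reset(struct nbd_device *nbd)
{
	if (disk_openers(nbd->disk) > 1)
		return;
	set_capacity(nbd->disk, 0);
}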
1239 static void nbd_parse_flags(struct nbd_device *nbd) in nbd_parse_flags() argument
1241 struct nbd_config *config = nbd->config; in nbd_parse_flags()
1243 set_disk_ro(nbd->disk, true); in nbd_parse_flags()
1245 set_disk_ro(nbd->disk, false); in nbd_parse_flags()
1248 blk_queue_write_cache(nbd->disk->queue, true, true); in nbd_parse_flags()
1250 blk_queue_write_cache(nbd->disk->queue, true, false); in nbd_parse_flags()
1253 blk_queue_write_cache(nbd->disk->queue, false, false); in nbd_parse_flags()
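The three blk_queue_write_cache() combinations in nbd_parse_flags() map NBD protocol flags onto the block layer's writeback-cache/FUA model. The conditions below are filled in from the standard NBD flags (NBD_FLAG_READ_ONLY, NBD_FLAG_SEND_FLUSH, NBD_FLAG_SEND_FUA) and should be read as a sketch:

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);

	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	} else {
		blk_queue_write_cache(nbd->disk->queue, false, false);
	}
}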
1256 static void send_disconnects(struct nbd_device *nbd) in send_disconnects() argument
1258 struct nbd_config *config = nbd->config; in send_disconnects()
1272 ret = sock_xmit(nbd, i, 1, &from, 0, NULL); in send_disconnects()
1274 dev_err(disk_to_dev(nbd->disk), in send_disconnects()
1280 static int nbd_disconnect(struct nbd_device *nbd) in nbd_disconnect() argument
1282 struct nbd_config *config = nbd->config; in nbd_disconnect()
1284 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); in nbd_disconnect()
1286 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags); in nbd_disconnect()
1287 send_disconnects(nbd); in nbd_disconnect()
1291 static void nbd_clear_sock(struct nbd_device *nbd) in nbd_clear_sock() argument
1293 sock_shutdown(nbd); in nbd_clear_sock()
1294 nbd_clear_que(nbd); in nbd_clear_sock()
1295 nbd->task_setup = NULL; in nbd_clear_sock()
1298 static void nbd_config_put(struct nbd_device *nbd) in nbd_config_put() argument
1300 if (refcount_dec_and_mutex_lock(&nbd->config_refs, in nbd_config_put()
1301 &nbd->config_lock)) { in nbd_config_put()
1302 struct nbd_config *config = nbd->config; in nbd_config_put()
1303 nbd_dev_dbg_close(nbd); in nbd_config_put()
1304 invalidate_disk(nbd->disk); in nbd_config_put()
1305 if (nbd->config->bytesize) in nbd_config_put()
1306 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); in nbd_config_put()
1309 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); in nbd_config_put()
1310 nbd->pid = 0; in nbd_config_put()
1313 device_remove_file(disk_to_dev(nbd->disk), &backend_attr); in nbd_config_put()
1314 kfree(nbd->backend); in nbd_config_put()
1315 nbd->backend = NULL; in nbd_config_put()
1317 nbd_clear_sock(nbd); in nbd_config_put()
1326 kfree(nbd->config); in nbd_config_put()
1327 nbd->config = NULL; in nbd_config_put()
1329 nbd->tag_set.timeout = 0; in nbd_config_put()
1330 nbd->disk->queue->limits.discard_granularity = 0; in nbd_config_put()
1331 blk_queue_max_discard_sectors(nbd->disk->queue, 0); in nbd_config_put()
1333 mutex_unlock(&nbd->config_lock); in nbd_config_put()
1334 nbd_put(nbd); in nbd_config_put()
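nbd_config_put() is the drop side of config_refs, and line 1300 shows the key primitive: refcount_dec_and_mutex_lock() only takes config_lock when this was the last reference, so the teardown runs exactly once and is serialized against concurrent opens. A skeleton; the individual teardown steps are the matched lines above:

static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		/* last reference: remove the debugfs dir and sysfs files,
		 * kill the sockets (nbd_clear_sock), free nbd->config, and
		 * reset the timeout and discard limits */
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);	/* the config held a device reference */
	}
}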
1339 static int nbd_start_device(struct nbd_device *nbd) in nbd_start_device() argument
1341 struct nbd_config *config = nbd->config; in nbd_start_device()
1345 if (nbd->pid) in nbd_start_device()
1351 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n"); in nbd_start_device()
1355 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); in nbd_start_device()
1356 nbd->pid = task_pid_nr(current); in nbd_start_device()
1358 nbd_parse_flags(nbd); in nbd_start_device()
1360 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr); in nbd_start_device()
1362 dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n"); in nbd_start_device()
1367 nbd_dev_dbg_init(nbd); in nbd_start_device()
1373 sock_shutdown(nbd); in nbd_start_device()
1383 flush_workqueue(nbd->recv_workq); in nbd_start_device()
1387 if (nbd->tag_set.timeout) in nbd_start_device()
1389 nbd->tag_set.timeout; in nbd_start_device()
1391 refcount_inc(&nbd->config_refs); in nbd_start_device()
1393 args->nbd = nbd; in nbd_start_device()
1395 queue_work(nbd->recv_workq, &args->work); in nbd_start_device()
1397 return nbd_set_size(nbd, config->bytesize, nbd_blksize(config)); in nbd_start_device()
1400 static int nbd_start_device_ioctl(struct nbd_device *nbd) in nbd_start_device_ioctl() argument
1402 struct nbd_config *config = nbd->config; in nbd_start_device_ioctl()
1405 ret = nbd_start_device(nbd); in nbd_start_device_ioctl()
1410 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); in nbd_start_device_ioctl()
1411 mutex_unlock(&nbd->config_lock); in nbd_start_device_ioctl()
1415 sock_shutdown(nbd); in nbd_start_device_ioctl()
1416 nbd_clear_que(nbd); in nbd_start_device_ioctl()
1419 flush_workqueue(nbd->recv_workq); in nbd_start_device_ioctl()
1420 mutex_lock(&nbd->config_lock); in nbd_start_device_ioctl()
1421 nbd_bdev_reset(nbd); in nbd_start_device_ioctl()
1430 static void nbd_clear_sock_ioctl(struct nbd_device *nbd, in nbd_clear_sock_ioctl() argument
1433 nbd_clear_sock(nbd); in nbd_clear_sock_ioctl()
1435 nbd_bdev_reset(nbd); in nbd_clear_sock_ioctl()
1437 &nbd->config->runtime_flags)) in nbd_clear_sock_ioctl()
1438 nbd_config_put(nbd); in nbd_clear_sock_ioctl()
1441 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout) in nbd_set_cmd_timeout() argument
1443 nbd->tag_set.timeout = timeout * HZ; in nbd_set_cmd_timeout()
1445 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); in nbd_set_cmd_timeout()
1447 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ); in nbd_set_cmd_timeout()
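nbd_set_cmd_timeout() is nearly fully matched; the only fill is the if/else, which follows from the two blk_queue_rq_timeout() calls on lines 1445 and 1447. A zero timeout leaves tag_set.timeout at 0, so nbd_xmit_timeout() keeps resetting the timer (see line 452), and the 30-second queue timeout merely controls how often that check fires:

static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
	nbd->tag_set.timeout = timeout * HZ;
	if (timeout)
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	else
		blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}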
1451 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, in __nbd_ioctl() argument
1454 struct nbd_config *config = nbd->config; in __nbd_ioctl()
1459 return nbd_disconnect(nbd); in __nbd_ioctl()
1461 nbd_clear_sock_ioctl(nbd, bdev); in __nbd_ioctl()
1464 return nbd_add_socket(nbd, arg, false); in __nbd_ioctl()
1466 return nbd_set_size(nbd, config->bytesize, arg); in __nbd_ioctl()
1468 return nbd_set_size(nbd, arg, nbd_blksize(config)); in __nbd_ioctl()
1472 return nbd_set_size(nbd, bytesize, nbd_blksize(config)); in __nbd_ioctl()
1474 nbd_set_cmd_timeout(nbd, arg); in __nbd_ioctl()
1481 return nbd_start_device_ioctl(nbd); in __nbd_ioctl()
1501 struct nbd_device *nbd = bdev->bd_disk->private_data; in nbd_ioctl() local
1502 struct nbd_config *config = nbd->config; in nbd_ioctl()
1514 mutex_lock(&nbd->config_lock); in nbd_ioctl()
1521 error = __nbd_ioctl(bdev, nbd, cmd, arg); in nbd_ioctl()
1523 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n"); in nbd_ioctl()
1524 mutex_unlock(&nbd->config_lock); in nbd_ioctl()
1551 struct nbd_device *nbd; in nbd_open() local
1555 nbd = bdev->bd_disk->private_data; in nbd_open()
1556 if (!nbd) { in nbd_open()
1560 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_open()
1564 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_open()
1567 mutex_lock(&nbd->config_lock); in nbd_open()
1568 if (refcount_inc_not_zero(&nbd->config_refs)) { in nbd_open()
1569 mutex_unlock(&nbd->config_lock); in nbd_open()
1575 mutex_unlock(&nbd->config_lock); in nbd_open()
1578 nbd->config = config; in nbd_open()
1579 refcount_set(&nbd->config_refs, 1); in nbd_open()
1580 refcount_inc(&nbd->refs); in nbd_open()
1581 mutex_unlock(&nbd->config_lock); in nbd_open()
1584 } else if (nbd_disconnected(nbd->config)) { in nbd_open()
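nbd_open() contains the allocation-side counterpart to nbd_config_put(): a lockless fast path, then a double-checked retry under config_lock before installing a fresh config. Condensed from the matched lines; error handling and the surrounding open logic are elided, and nbd_alloc_config() is inferred (it never names "nbd", so it cannot appear in this listing):

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		/* re-check under the lock: another opener may have won */
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			return 0;	/* sketch; real code jumps to its out label */
		}
		config = nbd_alloc_config();	/* allocation error handling elided */
		nbd->config = config;
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);	/* the config pins the device */
		mutex_unlock(&nbd->config_lock);
	}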
1595 struct nbd_device *nbd = disk->private_data; in nbd_release() local
1597 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && in nbd_release()
1599 nbd_disconnect_and_put(nbd); in nbd_release()
1601 nbd_config_put(nbd); in nbd_release()
1602 nbd_put(nbd); in nbd_release()
1618 struct nbd_device *nbd = s->private; in nbd_dbg_tasks_show() local
1620 if (nbd->pid) in nbd_dbg_tasks_show()
1621 seq_printf(s, "recv: %d\n", nbd->pid); in nbd_dbg_tasks_show()
1630 struct nbd_device *nbd = s->private; in nbd_dbg_flags_show() local
1631 u32 flags = nbd->config->flags; in nbd_dbg_flags_show()
1653 static int nbd_dev_dbg_init(struct nbd_device *nbd) in nbd_dev_dbg_init() argument
1656 struct nbd_config *config = nbd->config; in nbd_dev_dbg_init()
1661 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); in nbd_dev_dbg_init()
1663 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", in nbd_dev_dbg_init()
1664 nbd_name(nbd)); in nbd_dev_dbg_init()
1669 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops); in nbd_dev_dbg_init()
1671 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); in nbd_dev_dbg_init()
1673 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops); in nbd_dev_dbg_init()
1678 static void nbd_dev_dbg_close(struct nbd_device *nbd) in nbd_dev_dbg_close() argument
1680 debugfs_remove_recursive(nbd->config->dbg_dir); in nbd_dev_dbg_close()
1703 static int nbd_dev_dbg_init(struct nbd_device *nbd) in nbd_dev_dbg_init() argument
1708 static void nbd_dev_dbg_close(struct nbd_device *nbd) in nbd_dev_dbg_close() argument
1727 cmd->nbd = set->driver_data; in nbd_init_request()
1742 struct nbd_device *nbd; in nbd_dev_add() local
1746 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL); in nbd_dev_add()
1747 if (!nbd) in nbd_dev_add()
1750 nbd->tag_set.ops = &nbd_mq_ops; in nbd_dev_add()
1751 nbd->tag_set.nr_hw_queues = 1; in nbd_dev_add()
1752 nbd->tag_set.queue_depth = 128; in nbd_dev_add()
1753 nbd->tag_set.numa_node = NUMA_NO_NODE; in nbd_dev_add()
1754 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); in nbd_dev_add()
1755 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | in nbd_dev_add()
1757 nbd->tag_set.driver_data = nbd; in nbd_dev_add()
1758 INIT_WORK(&nbd->remove_work, nbd_dev_remove_work); in nbd_dev_add()
1759 nbd->backend = NULL; in nbd_dev_add()
1761 err = blk_mq_alloc_tag_set(&nbd->tag_set); in nbd_dev_add()
1767 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1, in nbd_dev_add()
1772 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); in nbd_dev_add()
1776 nbd->index = index; in nbd_dev_add()
1781 disk = blk_mq_alloc_disk(&nbd->tag_set, NULL); in nbd_dev_add()
1786 nbd->disk = disk; in nbd_dev_add()
1788 nbd->recv_workq = alloc_workqueue("nbd%d-recv", in nbd_dev_add()
1790 WQ_UNBOUND, 0, nbd->index); in nbd_dev_add()
1791 if (!nbd->recv_workq) { in nbd_dev_add()
1792 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); in nbd_dev_add()
1809 mutex_init(&nbd->config_lock); in nbd_dev_add()
1810 refcount_set(&nbd->config_refs, 0); in nbd_dev_add()
1815 refcount_set(&nbd->refs, 0); in nbd_dev_add()
1816 INIT_LIST_HEAD(&nbd->list); in nbd_dev_add()
1821 disk->private_data = nbd; in nbd_dev_add()
1830 refcount_set(&nbd->refs, refs); in nbd_dev_add()
1832 return nbd; in nbd_dev_add()
1835 destroy_workqueue(nbd->recv_workq); in nbd_dev_add()
1843 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_add()
1845 kfree(nbd); in nbd_dev_add()
1852 struct nbd_device *nbd; in nbd_find_get_unused() local
1857 idr_for_each_entry(&nbd_index_idr, nbd, id) { in nbd_find_get_unused()
1858 if (refcount_read(&nbd->config_refs) || in nbd_find_get_unused()
1859 test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) in nbd_find_get_unused()
1861 if (refcount_inc_not_zero(&nbd->refs)) in nbd_find_get_unused()
1862 return nbd; in nbd_find_get_unused()
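nbd_find_get_unused() walks the device IDR and shows the lookup-side refcount pattern: a device only counts as found if refcount_inc_not_zero() succeeds, which cleanly loses the race against nbd_put() dropping the last reference. Reassembled, with the loop's continue and the final return filled in:

static struct nbd_device *nbd_find_get_unused(void)
{
	struct nbd_device *nbd;
	int id;

	idr_for_each_entry(&nbd_index_idr, nbd, id) {
		if (refcount_read(&nbd->config_refs) ||
		    test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
			continue;	/* in use, or already being destroyed */
		if (refcount_inc_not_zero(&nbd->refs))
			return nbd;
	}
	return NULL;
}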
1895 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd) in nbd_genl_size_set() argument
1897 struct nbd_config *config = nbd->config; in nbd_genl_size_set()
1908 return nbd_set_size(nbd, bytes, bsize); in nbd_genl_size_set()
1914 struct nbd_device *nbd; in nbd_genl_connect() local
1947 nbd = nbd_find_get_unused(); in nbd_genl_connect()
1949 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_connect()
1950 if (nbd) { in nbd_genl_connect()
1951 if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && in nbd_genl_connect()
1952 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) || in nbd_genl_connect()
1953 !refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_connect()
1963 if (!nbd) { in nbd_genl_connect()
1964 nbd = nbd_dev_add(index, 2); in nbd_genl_connect()
1965 if (IS_ERR(nbd)) { in nbd_genl_connect()
1967 return PTR_ERR(nbd); in nbd_genl_connect()
1971 mutex_lock(&nbd->config_lock); in nbd_genl_connect()
1972 if (refcount_read(&nbd->config_refs)) { in nbd_genl_connect()
1973 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1974 nbd_put(nbd); in nbd_genl_connect()
1980 if (WARN_ON(nbd->config)) { in nbd_genl_connect()
1981 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1982 nbd_put(nbd); in nbd_genl_connect()
1987 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1988 nbd_put(nbd); in nbd_genl_connect()
1992 nbd->config = config; in nbd_genl_connect()
1993 refcount_set(&nbd->config_refs, 1); in nbd_genl_connect()
1996 ret = nbd_genl_size_set(info, nbd); in nbd_genl_connect()
2001 nbd_set_cmd_timeout(nbd, in nbd_genl_connect()
2023 &nbd->flags)) in nbd_genl_connect()
2027 &nbd->flags)) in nbd_genl_connect()
2028 refcount_inc(&nbd->refs); in nbd_genl_connect()
2061 ret = nbd_add_socket(nbd, fd, true); in nbd_genl_connect()
2066 ret = nbd_start_device(nbd); in nbd_genl_connect()
2070 nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], in nbd_genl_connect()
2072 if (!nbd->backend) { in nbd_genl_connect()
2077 ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr); in nbd_genl_connect()
2079 dev_err(disk_to_dev(nbd->disk), in nbd_genl_connect()
2085 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
2088 refcount_inc(&nbd->config_refs); in nbd_genl_connect()
2089 nbd_connect_reply(info, nbd->index); in nbd_genl_connect()
2091 nbd_config_put(nbd); in nbd_genl_connect()
2093 nbd_put(nbd); in nbd_genl_connect()
2097 static void nbd_disconnect_and_put(struct nbd_device *nbd) in nbd_disconnect_and_put() argument
2099 mutex_lock(&nbd->config_lock); in nbd_disconnect_and_put()
2100 nbd_disconnect(nbd); in nbd_disconnect_and_put()
2101 sock_shutdown(nbd); in nbd_disconnect_and_put()
2102 wake_up(&nbd->config->conn_wait); in nbd_disconnect_and_put()
2107 flush_workqueue(nbd->recv_workq); in nbd_disconnect_and_put()
2108 nbd_clear_que(nbd); in nbd_disconnect_and_put()
2109 nbd->task_setup = NULL; in nbd_disconnect_and_put()
2110 mutex_unlock(&nbd->config_lock); in nbd_disconnect_and_put()
2113 &nbd->config->runtime_flags)) in nbd_disconnect_and_put()
2114 nbd_config_put(nbd); in nbd_disconnect_and_put()
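nbd_disconnect_and_put() strings the earlier pieces together in disconnect order; most lines are matched above, and only the runtime-flag name in the final test_and_clear_bit() is filled in here (assumed, not quoted):

static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	sock_shutdown(nbd);
	wake_up(&nbd->config->conn_wait);
	/* make sure recv_work has finished before cancelling in-flight I/O */
	flush_workqueue(nbd->recv_workq);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
	mutex_unlock(&nbd->config_lock);

	/* flag name assumed from the runtime_flags test on line 2113 */
	if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}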
2119 struct nbd_device *nbd; in nbd_genl_disconnect() local
2131 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_disconnect()
2132 if (!nbd) { in nbd_genl_disconnect()
2137 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_disconnect()
2143 if (!refcount_inc_not_zero(&nbd->config_refs)) in nbd_genl_disconnect()
2145 nbd_disconnect_and_put(nbd); in nbd_genl_disconnect()
2146 nbd_config_put(nbd); in nbd_genl_disconnect()
2148 nbd_put(nbd); in nbd_genl_disconnect()
2154 struct nbd_device *nbd = NULL; in nbd_genl_reconfigure() local
2169 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_reconfigure()
2170 if (!nbd) { in nbd_genl_reconfigure()
2175 if (nbd->backend) { in nbd_genl_reconfigure()
2178 nbd->backend)) { in nbd_genl_reconfigure()
2180 dev_err(nbd_to_dev(nbd), in nbd_genl_reconfigure()
2182 nbd->backend); in nbd_genl_reconfigure()
2187 dev_err(nbd_to_dev(nbd), "must specify backend\n"); in nbd_genl_reconfigure()
2191 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_reconfigure()
2198 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_genl_reconfigure()
2199 dev_err(nbd_to_dev(nbd), in nbd_genl_reconfigure()
2201 nbd_put(nbd); in nbd_genl_reconfigure()
2205 mutex_lock(&nbd->config_lock); in nbd_genl_reconfigure()
2206 config = nbd->config; in nbd_genl_reconfigure()
2208 !nbd->pid) { in nbd_genl_reconfigure()
2209 dev_err(nbd_to_dev(nbd), in nbd_genl_reconfigure()
2215 ret = nbd_genl_size_set(info, nbd); in nbd_genl_reconfigure()
2220 nbd_set_cmd_timeout(nbd, in nbd_genl_reconfigure()
2231 &nbd->flags)) in nbd_genl_reconfigure()
2235 &nbd->flags)) in nbd_genl_reconfigure()
2236 refcount_inc(&nbd->refs); in nbd_genl_reconfigure()
2273 ret = nbd_reconnect_socket(nbd, fd); in nbd_genl_reconfigure()
2279 dev_info(nbd_to_dev(nbd), "reconnected socket\n"); in nbd_genl_reconfigure()
2283 mutex_unlock(&nbd->config_lock); in nbd_genl_reconfigure()
2284 nbd_config_put(nbd); in nbd_genl_reconfigure()
2285 nbd_put(nbd); in nbd_genl_reconfigure()
2287 nbd_put(nbd); in nbd_genl_reconfigure()
2332 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply) in populate_nbd_status() argument
2345 if (refcount_read(&nbd->config_refs)) in populate_nbd_status()
2350 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index); in populate_nbd_status()
2363 struct nbd_device *nbd = ptr; in status_cb() local
2364 return populate_nbd_status(nbd, (struct sk_buff *)data); in status_cb()
2403 struct nbd_device *nbd; in nbd_genl_status() local
2404 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_status()
2405 if (nbd) { in nbd_genl_status()
2406 ret = populate_nbd_status(nbd, reply); in nbd_genl_status()
2533 struct nbd_device *nbd = ptr; in nbd_exit_cb() local
2536 if (refcount_read(&nbd->refs)) in nbd_exit_cb()
2537 list_add_tail(&nbd->list, list); in nbd_exit_cb()
2544 struct nbd_device *nbd; in nbd_cleanup() local
2560 nbd = list_first_entry(&del_list, struct nbd_device, list); in nbd_cleanup()
2561 list_del_init(&nbd->list); in nbd_cleanup()
2562 if (refcount_read(&nbd->config_refs)) in nbd_cleanup()
2564 refcount_read(&nbd->config_refs)); in nbd_cleanup()
2565 if (refcount_read(&nbd->refs) != 1) in nbd_cleanup()
2567 nbd_put(nbd); in nbd_cleanup()
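The module-exit path matched at the end pairs nbd_exit_cb() (collect every device that still holds references onto a list) with a drain loop in nbd_cleanup(). A sketch of that loop; the pr_err() messages are paraphrased, not quoted:

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->config_refs))
			pr_err("possibly leaking nbd->config_refs (ref %d)\n",
			       refcount_read(&nbd->config_refs));
		if (refcount_read(&nbd->refs) != 1)
			pr_err("possibly leaking a device\n");
		nbd_put(nbd);	/* drop the reference nbd_exit_cb saw */
	}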