Lines matching refs:nbd in drivers/block/nbd.c (Linux NBD block driver cross-reference)
67 struct nbd_device *nbd; member
136 struct nbd_device *nbd; member
150 #define nbd_name(nbd) ((nbd)->disk->disk_name) argument
160 static int nbd_dev_dbg_init(struct nbd_device *nbd);
161 static void nbd_dev_dbg_close(struct nbd_device *nbd);
162 static void nbd_config_put(struct nbd_device *nbd);
166 static void nbd_disconnect_and_put(struct nbd_device *nbd);
168 static inline struct device *nbd_to_dev(struct nbd_device *nbd) in nbd_to_dev() argument
170 return disk_to_dev(nbd->disk); in nbd_to_dev()
218 struct nbd_device *nbd = (struct nbd_device *)disk->private_data; in pid_show() local
220 return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); in pid_show()
232 struct nbd_device *nbd = (struct nbd_device *)disk->private_data; in backend_show() local
234 return sprintf(buf, "%s\n", nbd->backend ?: ""); in backend_show()
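The pid_show()/backend_show() entries above are standard read-only sysfs attributes hung off the gendisk's struct device, fetching driver state through disk->private_data. A minimal sketch of the same shape, with a hypothetical attribute name "demo" (the driver's real attributes are pid_attr and backend_attr, and it uses sprintf() rather than the newer sysfs_emit()):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            /* real code reaches driver state via dev_to_disk(dev)->private_data,
             * as pid_show() does above */
            return sysfs_emit(buf, "%d\n", 42);
    }
    static const struct device_attribute demo_attr = __ATTR(demo, 0444, demo_show, NULL);
    /* paired with device_create_file(disk_to_dev(disk), &demo_attr) on setup
     * and device_remove_file() on teardown, as in nbd_config_put() below */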
242 static void nbd_dev_remove(struct nbd_device *nbd) in nbd_dev_remove() argument
244 struct gendisk *disk = nbd->disk; in nbd_dev_remove()
248 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_remove()
255 idr_remove(&nbd_index_idr, nbd->index); in nbd_dev_remove()
258 kfree(nbd); in nbd_dev_remove()
266 static void nbd_put(struct nbd_device *nbd) in nbd_put() argument
268 if (!refcount_dec_and_test(&nbd->refs)) in nbd_put()
272 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) in nbd_put()
273 queue_work(nbd_del_wq, &nbd->remove_work); in nbd_put()
275 nbd_dev_remove(nbd); in nbd_put()
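The nbd_put() entries show the device's last-reference teardown: refcount_dec_and_test() drops a reference, and once the count hits zero the device is either freed inline or, when DESTROY_ON_DISCONNECT is set, handed to a dedicated delete workqueue so removal runs out of the caller's context. A minimal compile sketch of that pattern, with hypothetical names (demo_device, demo_del_wq, demo_remove):

    #include <linux/refcount.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct demo_device {                    /* hypothetical stand-in for struct nbd_device */
            refcount_t refs;
            unsigned long flags;
            struct work_struct remove_work;
    };
    #define DEMO_DESTROY_ON_DISCONNECT 0

    static struct workqueue_struct *demo_del_wq;    /* created once at module init */

    static void demo_remove(struct demo_device *d)
    {
            kfree(d);                       /* the real path also frees disk and tag set */
    }

    static void demo_put(struct demo_device *d)
    {
            if (!refcount_dec_and_test(&d->refs))
                    return;                 /* other holders remain */
            /* last reference: defer to the delete workqueue or free inline */
            if (test_bit(DEMO_DESTROY_ON_DISCONNECT, &d->flags))
                    queue_work(demo_del_wq, &d->remove_work);
            else
                    demo_remove(d);
    }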
284 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, in nbd_mark_nsock_dead() argument
287 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) { in nbd_mark_nsock_dead()
292 args->index = nbd->index; in nbd_mark_nsock_dead()
298 if (atomic_dec_return(&nbd->config->live_connections) == 0) { in nbd_mark_nsock_dead()
300 &nbd->config->runtime_flags)) { in nbd_mark_nsock_dead()
302 &nbd->config->runtime_flags); in nbd_mark_nsock_dead()
303 dev_info(nbd_to_dev(nbd), in nbd_mark_nsock_dead()
313 static void nbd_size_clear(struct nbd_device *nbd) in nbd_size_clear() argument
315 if (nbd->config->bytesize) { in nbd_size_clear()
316 set_capacity(nbd->disk, 0); in nbd_size_clear()
317 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); in nbd_size_clear()
321 static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, in nbd_set_size() argument
329 nbd->config->bytesize = bytesize; in nbd_set_size()
330 nbd->config->blksize_bits = __ffs(blksize); in nbd_set_size()
332 if (!nbd->task_recv) in nbd_set_size()
335 if (nbd->config->flags & NBD_FLAG_SEND_TRIM) { in nbd_set_size()
336 nbd->disk->queue->limits.discard_granularity = blksize; in nbd_set_size()
337 nbd->disk->queue->limits.discard_alignment = blksize; in nbd_set_size()
338 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); in nbd_set_size()
340 blk_queue_logical_block_size(nbd->disk->queue, blksize); in nbd_set_size()
341 blk_queue_physical_block_size(nbd->disk->queue, blksize); in nbd_set_size()
344 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); in nbd_set_size()
345 if (!set_capacity_and_notify(nbd->disk, bytesize >> 9)) in nbd_set_size()
346 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); in nbd_set_size()
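nbd_set_size() pushes the server-advertised geometry into the request queue and then publishes the capacity; note that set_capacity_and_notify() takes 512-byte sectors, hence the bytesize >> 9. A hedged sketch of just those steps (the discard-limit handling shown above is omitted):

    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/kobject.h>

    static void demo_set_size(struct gendisk *disk, loff_t bytesize,
                              unsigned int blksize)
    {
            blk_queue_logical_block_size(disk->queue, blksize);
            blk_queue_physical_block_size(disk->queue, blksize);

            set_bit(GD_NEED_PART_SCAN, &disk->state);  /* rescan partitions on next open */
            /* force a KOBJ_CHANGE uevent even when the helper saw no size change */
            if (!set_capacity_and_notify(disk, bytesize >> 9))
                    kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
    }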
354 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req, in nbd_complete_rq()
363 static void sock_shutdown(struct nbd_device *nbd) in sock_shutdown() argument
365 struct nbd_config *config = nbd->config; in sock_shutdown()
376 nbd_mark_nsock_dead(nbd, nsock, 0); in sock_shutdown()
379 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); in sock_shutdown()
402 struct nbd_device *nbd = cmd->nbd; in nbd_xmit_timeout() local
408 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_xmit_timeout()
413 config = nbd->config; in nbd_xmit_timeout()
416 (config->num_connections == 1 && nbd->tag_set.timeout)) { in nbd_xmit_timeout()
417 dev_err_ratelimited(nbd_to_dev(nbd), in nbd_xmit_timeout()
439 nbd_mark_nsock_dead(nbd, nsock, 1); in nbd_xmit_timeout()
444 nbd_config_put(nbd); in nbd_xmit_timeout()
449 if (!nbd->tag_set.timeout) { in nbd_xmit_timeout()
456 …dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n", in nbd_xmit_timeout()
466 nbd_config_put(nbd); in nbd_xmit_timeout()
471 nbd_config_put(nbd); in nbd_xmit_timeout()
475 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n"); in nbd_xmit_timeout()
479 sock_shutdown(nbd); in nbd_xmit_timeout()
480 nbd_config_put(nbd); in nbd_xmit_timeout()
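nbd_xmit_timeout() is the driver's blk-mq .timeout callback; its two exits are the generic ones every such handler chooses between: BLK_EH_RESET_TIMER to re-arm the timer and keep waiting, or completing the request and returning BLK_EH_DONE. A skeleton under the signature this kernel generation uses (demo_should_retry() is a hypothetical predicate; the real handler also records BLK_STS_IOERR in its per-command state before completing):

    #include <linux/blk-mq.h>

    static bool demo_should_retry(struct request *req)
    {
            return false;                   /* placeholder policy */
    }

    static enum blk_eh_timer_return demo_timeout(struct request *req, bool reserved)
    {
            if (demo_should_retry(req))
                    return BLK_EH_RESET_TIMER;  /* give it another timeout period */

            blk_mq_complete_request(req);       /* fail the request */
            return BLK_EH_DONE;
    }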
489 static int sock_xmit(struct nbd_device *nbd, int index, int send, in sock_xmit() argument
492 struct nbd_config *config = nbd->config; in sock_xmit()
499 dev_err_ratelimited(disk_to_dev(nbd->disk), in sock_xmit()
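sock_xmit() is the single choke point for socket I/O; the send and receive paths above all funnel through it. The essential kernel-socket pattern it wraps, sketched with kernel_sendmsg() and a retry loop for short sends (the real function drives an iov_iter and additionally handles MSG_DONTWAIT, signals, and partial-send bookkeeping):

    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/errno.h>

    static int demo_sock_send(struct socket *sock, void *data, size_t len)
    {
            struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
            struct kvec iov;
            char *buf = data;
            int sent;

            while (len) {
                    iov.iov_base = buf;
                    iov.iov_len = len;
                    sent = kernel_sendmsg(sock, &msg, &iov, 1, len);
                    if (sent <= 0)
                            return sent ? sent : -EPIPE;  /* error or closed peer */
                    buf += sent;            /* advance past a short send */
                    len -= sent;
            }
            return 0;
    }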
545 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) in nbd_send_cmd() argument
548 struct nbd_config *config = nbd->config; in nbd_send_cmd()
569 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
605 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd)); in nbd_send_cmd()
607 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", in nbd_send_cmd()
610 result = sock_xmit(nbd, index, 1, &from, in nbd_send_cmd()
627 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
645 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", in nbd_send_cmd()
656 result = sock_xmit(nbd, index, 1, &from, flags, &sent); in nbd_send_cmd()
668 dev_err(disk_to_dev(nbd->disk), in nbd_send_cmd()
692 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) in nbd_read_stat() argument
694 struct nbd_config *config = nbd->config; in nbd_read_stat()
708 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); in nbd_read_stat()
711 dev_err(disk_to_dev(nbd->disk), in nbd_read_stat()
717 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", in nbd_read_stat()
725 if (hwq < nbd->tag_set.nr_hw_queues) in nbd_read_stat()
726 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], in nbd_read_stat()
729 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", in nbd_read_stat()
738 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", in nbd_read_stat()
744 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n", in nbd_read_stat()
750 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", in nbd_read_stat()
756 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", in nbd_read_stat()
762 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); in nbd_read_stat()
769 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); in nbd_read_stat()
771 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", in nbd_read_stat()
786 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", in nbd_read_stat()
801 struct nbd_device *nbd = args->nbd; in recv_work() local
802 struct nbd_config *config = nbd->config; in recv_work()
807 cmd = nbd_read_stat(nbd, args->index); in recv_work()
812 nbd_mark_nsock_dead(nbd, nsock, 1); in recv_work()
821 nbd_config_put(nbd); in recv_work()
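recv_work() runs on nbd->recv_workq, one item per connection, and gets its device and connection index from an argument struct recovered with container_of(). The canonical shape of that workqueue pattern, with hypothetical names:

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct demo_args {
            struct work_struct work;
            int index;                      /* which connection this worker serves */
    };

    static void demo_recv_work(struct work_struct *work)
    {
            struct demo_args *args = container_of(work, struct demo_args, work);

            pr_info("receiver for connection %d\n", args->index);
            kfree(args);                    /* the worker owns and frees its arguments */
    }
    /* queued as: INIT_WORK(&args->work, demo_recv_work);
     *            queue_work(recv_workq, &args->work); */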
843 static void nbd_clear_que(struct nbd_device *nbd) in nbd_clear_que() argument
845 blk_mq_quiesce_queue(nbd->disk->queue); in nbd_clear_que()
846 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); in nbd_clear_que()
847 blk_mq_unquiesce_queue(nbd->disk->queue); in nbd_clear_que()
848 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); in nbd_clear_que()
851 static int find_fallback(struct nbd_device *nbd, int index) in find_fallback() argument
853 struct nbd_config *config = nbd->config; in find_fallback()
862 dev_err_ratelimited(disk_to_dev(nbd->disk), in find_fallback()
885 dev_err_ratelimited(disk_to_dev(nbd->disk), in find_fallback()
894 static int wait_for_reconnect(struct nbd_device *nbd) in wait_for_reconnect() argument
896 struct nbd_config *config = nbd->config; in wait_for_reconnect()
909 struct nbd_device *nbd = cmd->nbd; in nbd_handle_cmd() local
914 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_handle_cmd()
915 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
920 config = nbd->config; in nbd_handle_cmd()
923 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
925 nbd_config_put(nbd); in nbd_handle_cmd()
935 index = find_fallback(nbd, index); in nbd_handle_cmd()
938 if (wait_for_reconnect(nbd)) { in nbd_handle_cmd()
948 sock_shutdown(nbd); in nbd_handle_cmd()
949 nbd_config_put(nbd); in nbd_handle_cmd()
971 ret = nbd_send_cmd(nbd, cmd, index); in nbd_handle_cmd()
973 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
975 nbd_mark_nsock_dead(nbd, nsock, 1); in nbd_handle_cmd()
981 nbd_config_put(nbd); in nbd_handle_cmd()
1018 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, in nbd_get_socket() argument
1029 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); in nbd_get_socket()
1038 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, in nbd_add_socket() argument
1041 struct nbd_config *config = nbd->config; in nbd_add_socket()
1047 sock = nbd_get_socket(nbd, arg, &err); in nbd_add_socket()
1055 blk_mq_freeze_queue(nbd->disk->queue); in nbd_add_socket()
1057 if (!netlink && !nbd->task_setup && in nbd_add_socket()
1059 nbd->task_setup = current; in nbd_add_socket()
1062 (nbd->task_setup != current || in nbd_add_socket()
1064 dev_err(disk_to_dev(nbd->disk), in nbd_add_socket()
1095 blk_mq_unfreeze_queue(nbd->disk->queue); in nbd_add_socket()
1100 blk_mq_unfreeze_queue(nbd->disk->queue); in nbd_add_socket()
1105 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) in nbd_reconnect_socket() argument
1107 struct nbd_config *config = nbd->config; in nbd_reconnect_socket()
1113 sock = nbd_get_socket(nbd, arg, &err); in nbd_reconnect_socket()
1135 if (nbd->tag_set.timeout) in nbd_reconnect_socket()
1136 sock->sk->sk_sndtimeo = nbd->tag_set.timeout; in nbd_reconnect_socket()
1138 refcount_inc(&nbd->config_refs); in nbd_reconnect_socket()
1145 args->nbd = nbd; in nbd_reconnect_socket()
1155 queue_work(nbd->recv_workq, &args->work); in nbd_reconnect_socket()
1173 static void nbd_parse_flags(struct nbd_device *nbd) in nbd_parse_flags() argument
1175 struct nbd_config *config = nbd->config; in nbd_parse_flags()
1177 set_disk_ro(nbd->disk, true); in nbd_parse_flags()
1179 set_disk_ro(nbd->disk, false); in nbd_parse_flags()
1181 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue); in nbd_parse_flags()
1184 blk_queue_write_cache(nbd->disk->queue, true, true); in nbd_parse_flags()
1186 blk_queue_write_cache(nbd->disk->queue, true, false); in nbd_parse_flags()
1189 blk_queue_write_cache(nbd->disk->queue, false, false); in nbd_parse_flags()
1192 static void send_disconnects(struct nbd_device *nbd) in send_disconnects() argument
1194 struct nbd_config *config = nbd->config; in send_disconnects()
1208 ret = sock_xmit(nbd, i, 1, &from, 0, NULL); in send_disconnects()
1210 dev_err(disk_to_dev(nbd->disk), in send_disconnects()
1216 static int nbd_disconnect(struct nbd_device *nbd) in nbd_disconnect() argument
1218 struct nbd_config *config = nbd->config; in nbd_disconnect()
1220 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); in nbd_disconnect()
1222 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags); in nbd_disconnect()
1223 send_disconnects(nbd); in nbd_disconnect()
1227 static void nbd_clear_sock(struct nbd_device *nbd) in nbd_clear_sock() argument
1229 sock_shutdown(nbd); in nbd_clear_sock()
1230 nbd_clear_que(nbd); in nbd_clear_sock()
1231 nbd->task_setup = NULL; in nbd_clear_sock()
1234 static void nbd_config_put(struct nbd_device *nbd) in nbd_config_put() argument
1236 if (refcount_dec_and_mutex_lock(&nbd->config_refs, in nbd_config_put()
1237 &nbd->config_lock)) { in nbd_config_put()
1238 struct nbd_config *config = nbd->config; in nbd_config_put()
1239 nbd_dev_dbg_close(nbd); in nbd_config_put()
1240 nbd_size_clear(nbd); in nbd_config_put()
1243 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); in nbd_config_put()
1244 nbd->task_recv = NULL; in nbd_config_put()
1247 device_remove_file(disk_to_dev(nbd->disk), &backend_attr); in nbd_config_put()
1248 kfree(nbd->backend); in nbd_config_put()
1249 nbd->backend = NULL; in nbd_config_put()
1251 nbd_clear_sock(nbd); in nbd_config_put()
1260 kfree(nbd->config); in nbd_config_put()
1261 nbd->config = NULL; in nbd_config_put()
1263 if (nbd->recv_workq) in nbd_config_put()
1264 destroy_workqueue(nbd->recv_workq); in nbd_config_put()
1265 nbd->recv_workq = NULL; in nbd_config_put()
1267 nbd->tag_set.timeout = 0; in nbd_config_put()
1268 nbd->disk->queue->limits.discard_granularity = 0; in nbd_config_put()
1269 nbd->disk->queue->limits.discard_alignment = 0; in nbd_config_put()
1270 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); in nbd_config_put()
1271 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue); in nbd_config_put()
1273 mutex_unlock(&nbd->config_lock); in nbd_config_put()
1274 nbd_put(nbd); in nbd_config_put()
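The teardown in nbd_config_put() hinges on refcount_dec_and_mutex_lock(): it only takes the mutex when the count actually drops to zero, so the final holder frees the config with the lock held and cannot race a concurrent nbd_open(). A minimal sketch, again with hypothetical names:

    #include <linux/mutex.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct demo_dev {                       /* hypothetical stand-in */
            struct mutex config_lock;
            refcount_t config_refs;
            void *config;
    };

    static void demo_config_put(struct demo_dev *d)
    {
            if (refcount_dec_and_mutex_lock(&d->config_refs, &d->config_lock)) {
                    kfree(d->config);       /* last reference: free under the lock */
                    d->config = NULL;
                    mutex_unlock(&d->config_lock);
            }
    }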
1279 static int nbd_start_device(struct nbd_device *nbd) in nbd_start_device() argument
1281 struct nbd_config *config = nbd->config; in nbd_start_device()
1285 if (nbd->task_recv) in nbd_start_device()
1291 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n"); in nbd_start_device()
1295 nbd->recv_workq = alloc_workqueue("knbd%d-recv", in nbd_start_device()
1297 WQ_UNBOUND, 0, nbd->index); in nbd_start_device()
1298 if (!nbd->recv_workq) { in nbd_start_device()
1299 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); in nbd_start_device()
1303 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); in nbd_start_device()
1304 nbd->task_recv = current; in nbd_start_device()
1306 nbd_parse_flags(nbd); in nbd_start_device()
1308 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr); in nbd_start_device()
1310 dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n"); in nbd_start_device()
1315 nbd_dev_dbg_init(nbd); in nbd_start_device()
1321 sock_shutdown(nbd); in nbd_start_device()
1331 flush_workqueue(nbd->recv_workq); in nbd_start_device()
1335 if (nbd->tag_set.timeout) in nbd_start_device()
1337 nbd->tag_set.timeout; in nbd_start_device()
1339 refcount_inc(&nbd->config_refs); in nbd_start_device()
1341 args->nbd = nbd; in nbd_start_device()
1343 queue_work(nbd->recv_workq, &args->work); in nbd_start_device()
1345 return nbd_set_size(nbd, config->bytesize, nbd_blksize(config)); in nbd_start_device()
1348 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev) in nbd_start_device_ioctl() argument
1350 struct nbd_config *config = nbd->config; in nbd_start_device_ioctl()
1353 ret = nbd_start_device(nbd); in nbd_start_device_ioctl()
1358 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); in nbd_start_device_ioctl()
1359 mutex_unlock(&nbd->config_lock); in nbd_start_device_ioctl()
1363 sock_shutdown(nbd); in nbd_start_device_ioctl()
1364 flush_workqueue(nbd->recv_workq); in nbd_start_device_ioctl()
1366 mutex_lock(&nbd->config_lock); in nbd_start_device_ioctl()
1376 static void nbd_clear_sock_ioctl(struct nbd_device *nbd, in nbd_clear_sock_ioctl() argument
1379 sock_shutdown(nbd); in nbd_clear_sock_ioctl()
1383 &nbd->config->runtime_flags)) in nbd_clear_sock_ioctl()
1384 nbd_config_put(nbd); in nbd_clear_sock_ioctl()
1387 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout) in nbd_set_cmd_timeout() argument
1389 nbd->tag_set.timeout = timeout * HZ; in nbd_set_cmd_timeout()
1391 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); in nbd_set_cmd_timeout()
1393 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ); in nbd_set_cmd_timeout()
1397 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, in __nbd_ioctl() argument
1400 struct nbd_config *config = nbd->config; in __nbd_ioctl()
1405 return nbd_disconnect(nbd); in __nbd_ioctl()
1407 nbd_clear_sock_ioctl(nbd, bdev); in __nbd_ioctl()
1410 return nbd_add_socket(nbd, arg, false); in __nbd_ioctl()
1412 return nbd_set_size(nbd, config->bytesize, arg); in __nbd_ioctl()
1414 return nbd_set_size(nbd, arg, nbd_blksize(config)); in __nbd_ioctl()
1418 return nbd_set_size(nbd, bytesize, nbd_blksize(config)); in __nbd_ioctl()
1420 nbd_set_cmd_timeout(nbd, arg); in __nbd_ioctl()
1427 return nbd_start_device_ioctl(nbd, bdev); in __nbd_ioctl()
1447 struct nbd_device *nbd = bdev->bd_disk->private_data; in nbd_ioctl() local
1448 struct nbd_config *config = nbd->config; in nbd_ioctl()
1460 mutex_lock(&nbd->config_lock); in nbd_ioctl()
1467 error = __nbd_ioctl(bdev, nbd, cmd, arg); in nbd_ioctl()
1469 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n"); in nbd_ioctl()
1470 mutex_unlock(&nbd->config_lock); in nbd_ioctl()
1492 struct nbd_device *nbd; in nbd_open() local
1496 nbd = bdev->bd_disk->private_data; in nbd_open()
1497 if (!nbd) { in nbd_open()
1501 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_open()
1505 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_open()
1508 mutex_lock(&nbd->config_lock); in nbd_open()
1509 if (refcount_inc_not_zero(&nbd->config_refs)) { in nbd_open()
1510 mutex_unlock(&nbd->config_lock); in nbd_open()
1513 config = nbd->config = nbd_alloc_config(); in nbd_open()
1516 mutex_unlock(&nbd->config_lock); in nbd_open()
1519 refcount_set(&nbd->config_refs, 1); in nbd_open()
1520 refcount_inc(&nbd->refs); in nbd_open()
1521 mutex_unlock(&nbd->config_lock); in nbd_open()
1524 } else if (nbd_disconnected(nbd->config)) { in nbd_open()
1535 struct nbd_device *nbd = disk->private_data; in nbd_release() local
1537 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && in nbd_release()
1539 nbd_disconnect_and_put(nbd); in nbd_release()
1541 nbd_config_put(nbd); in nbd_release()
1542 nbd_put(nbd); in nbd_release()
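nbd_open() pairs with that teardown: it takes a device reference, then either grabs an existing config reference or allocates a fresh config under config_lock, re-checking after taking the lock because another opener may have raced in. A sketch of that get-or-create dance, reusing struct demo_dev from the earlier sketch:

    static void *demo_config_get_or_alloc(struct demo_dev *d)
    {
            /* fast path: someone already holds a config reference */
            if (refcount_inc_not_zero(&d->config_refs))
                    return d->config;

            mutex_lock(&d->config_lock);
            /* re-check under the lock: another opener may have won the race */
            if (refcount_inc_not_zero(&d->config_refs)) {
                    mutex_unlock(&d->config_lock);
                    return d->config;
            }
            d->config = kzalloc(64, GFP_KERNEL);  /* arbitrary demo payload */
            if (d->config)
                    refcount_set(&d->config_refs, 1);
            mutex_unlock(&d->config_lock);
            return d->config;
    }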
1558 struct nbd_device *nbd = s->private; in nbd_dbg_tasks_show() local
1560 if (nbd->task_recv) in nbd_dbg_tasks_show()
1561 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv)); in nbd_dbg_tasks_show()
1570 struct nbd_device *nbd = s->private; in nbd_dbg_flags_show() local
1571 u32 flags = nbd->config->flags; in nbd_dbg_flags_show()
1593 static int nbd_dev_dbg_init(struct nbd_device *nbd) in nbd_dev_dbg_init() argument
1596 struct nbd_config *config = nbd->config; in nbd_dev_dbg_init()
1601 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); in nbd_dev_dbg_init()
1603 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", in nbd_dev_dbg_init()
1604 nbd_name(nbd)); in nbd_dev_dbg_init()
1609 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops); in nbd_dev_dbg_init()
1611 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); in nbd_dev_dbg_init()
1613 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops); in nbd_dev_dbg_init()
1618 static void nbd_dev_dbg_close(struct nbd_device *nbd) in nbd_dev_dbg_close() argument
1620 debugfs_remove_recursive(nbd->config->dbg_dir); in nbd_dev_dbg_close()
1643 static int nbd_dev_dbg_init(struct nbd_device *nbd) in nbd_dev_dbg_init() argument
1648 static void nbd_dev_dbg_close(struct nbd_device *nbd) in nbd_dev_dbg_close() argument
1667 cmd->nbd = set->driver_data; in nbd_init_request()
1682 struct nbd_device *nbd; in nbd_dev_add() local
1686 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL); in nbd_dev_add()
1687 if (!nbd) in nbd_dev_add()
1690 nbd->tag_set.ops = &nbd_mq_ops; in nbd_dev_add()
1691 nbd->tag_set.nr_hw_queues = 1; in nbd_dev_add()
1692 nbd->tag_set.queue_depth = 128; in nbd_dev_add()
1693 nbd->tag_set.numa_node = NUMA_NO_NODE; in nbd_dev_add()
1694 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); in nbd_dev_add()
1695 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | in nbd_dev_add()
1697 nbd->tag_set.driver_data = nbd; in nbd_dev_add()
1698 INIT_WORK(&nbd->remove_work, nbd_dev_remove_work); in nbd_dev_add()
1699 nbd->backend = NULL; in nbd_dev_add()
1701 err = blk_mq_alloc_tag_set(&nbd->tag_set); in nbd_dev_add()
1707 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1, in nbd_dev_add()
1712 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); in nbd_dev_add()
1716 nbd->index = index; in nbd_dev_add()
1721 disk = blk_mq_alloc_disk(&nbd->tag_set, NULL); in nbd_dev_add()
1726 nbd->disk = disk; in nbd_dev_add()
1741 mutex_init(&nbd->config_lock); in nbd_dev_add()
1742 refcount_set(&nbd->config_refs, 0); in nbd_dev_add()
1747 refcount_set(&nbd->refs, 0); in nbd_dev_add()
1748 INIT_LIST_HEAD(&nbd->list); in nbd_dev_add()
1763 disk->private_data = nbd; in nbd_dev_add()
1770 refcount_set(&nbd->refs, refs); in nbd_dev_add()
1772 return nbd; in nbd_dev_add()
1779 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_add()
1781 kfree(nbd); in nbd_dev_add()
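nbd_dev_add() follows the standard blk-mq bring-up order: fill in and allocate the tag set first, derive the gendisk from it, and unwind in reverse on failure. A skeleton of that order (demo ops pointer assumed to be a filled-in struct blk_mq_ops; the real driver also sets cmd_size for its per-request struct nbd_cmd and extra tag-set flags):

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>
    #include <linux/string.h>
    #include <linux/err.h>
    #include <linux/numa.h>

    static struct gendisk *demo_disk_create(struct blk_mq_tag_set *set,
                                            const struct blk_mq_ops *ops,
                                            void *driver_data)
    {
            struct gendisk *disk;
            int err;

            memset(set, 0, sizeof(*set));
            set->ops = ops;
            set->nr_hw_queues = 1;
            set->queue_depth = 128;
            set->numa_node = NUMA_NO_NODE;
            set->flags = BLK_MQ_F_SHOULD_MERGE;
            set->driver_data = driver_data;

            err = blk_mq_alloc_tag_set(set);
            if (err)
                    return ERR_PTR(err);

            disk = blk_mq_alloc_disk(set, driver_data);
            if (IS_ERR(disk))
                    blk_mq_free_tag_set(set);  /* unwind in reverse order */
            return disk;
    }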
1788 struct nbd_device *nbd; in nbd_find_get_unused() local
1793 idr_for_each_entry(&nbd_index_idr, nbd, id) { in nbd_find_get_unused()
1794 if (refcount_read(&nbd->config_refs) || in nbd_find_get_unused()
1795 test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) in nbd_find_get_unused()
1797 if (refcount_inc_not_zero(&nbd->refs)) in nbd_find_get_unused()
1798 return nbd; in nbd_find_get_unused()
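nbd_find_get_unused() scans the global IDR for a device with no active config, skipping busy entries; idr_for_each_entry() is the standard iteration macro. A sketch of that scan, reusing struct demo_dev (the real function also skips DESTROY_ON_DISCONNECT devices and only returns an entry after refcount_inc_not_zero() succeeds on it):

    #include <linux/idr.h>

    static DEFINE_IDR(demo_idr);

    static struct demo_dev *demo_find_unused(void)
    {
            struct demo_dev *d;
            int id;

            idr_for_each_entry(&demo_idr, d, id) {  /* id receives each slot */
                    if (refcount_read(&d->config_refs))
                            continue;               /* in use, keep scanning */
                    return d;
            }
            return NULL;
    }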
1831 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd) in nbd_genl_size_set() argument
1833 struct nbd_config *config = nbd->config; in nbd_genl_size_set()
1844 return nbd_set_size(nbd, bytes, bsize); in nbd_genl_size_set()
1850 struct nbd_device *nbd; in nbd_genl_connect() local
1872 nbd = nbd_find_get_unused(); in nbd_genl_connect()
1874 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_connect()
1875 if (nbd) { in nbd_genl_connect()
1876 if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && in nbd_genl_connect()
1877 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) || in nbd_genl_connect()
1878 !refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_connect()
1888 if (!nbd) { in nbd_genl_connect()
1889 nbd = nbd_dev_add(index, 2); in nbd_genl_connect()
1890 if (IS_ERR(nbd)) { in nbd_genl_connect()
1892 return PTR_ERR(nbd); in nbd_genl_connect()
1896 mutex_lock(&nbd->config_lock); in nbd_genl_connect()
1897 if (refcount_read(&nbd->config_refs)) { in nbd_genl_connect()
1898 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1899 nbd_put(nbd); in nbd_genl_connect()
1905 if (WARN_ON(nbd->config)) { in nbd_genl_connect()
1906 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1907 nbd_put(nbd); in nbd_genl_connect()
1910 config = nbd->config = nbd_alloc_config(); in nbd_genl_connect()
1911 if (!nbd->config) { in nbd_genl_connect()
1912 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1913 nbd_put(nbd); in nbd_genl_connect()
1917 refcount_set(&nbd->config_refs, 1); in nbd_genl_connect()
1920 ret = nbd_genl_size_set(info, nbd); in nbd_genl_connect()
1925 nbd_set_cmd_timeout(nbd, in nbd_genl_connect()
1947 &nbd->flags)) in nbd_genl_connect()
1951 &nbd->flags)) in nbd_genl_connect()
1952 refcount_inc(&nbd->refs); in nbd_genl_connect()
1985 ret = nbd_add_socket(nbd, fd, true); in nbd_genl_connect()
1990 ret = nbd_start_device(nbd); in nbd_genl_connect()
1994 nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], in nbd_genl_connect()
1996 if (!nbd->backend) { in nbd_genl_connect()
2001 ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr); in nbd_genl_connect()
2003 dev_err(disk_to_dev(nbd->disk), in nbd_genl_connect()
2009 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
2012 refcount_inc(&nbd->config_refs); in nbd_genl_connect()
2013 nbd_connect_reply(info, nbd->index); in nbd_genl_connect()
2015 nbd_config_put(nbd); in nbd_genl_connect()
2017 nbd_put(nbd); in nbd_genl_connect()
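nbd_genl_connect() is a generic-netlink doit() handler; all of its inputs arrive as optional attributes in info->attrs[], read with the nla_get_*() accessors. The usual access pattern in any such handler, sketched with a hypothetical attribute id DEMO_ATTR_SIZE:

    #include <net/genetlink.h>
    #include <linux/errno.h>

    enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_SIZE, __DEMO_ATTR_MAX };

    static int demo_genl_doit(struct sk_buff *skb, struct genl_info *info)
    {
            u64 size = 0;

            if (info->attrs[DEMO_ATTR_SIZE])        /* attributes may be absent */
                    size = nla_get_u64(info->attrs[DEMO_ATTR_SIZE]);

            return size ? 0 : -EINVAL;
    }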
2021 static void nbd_disconnect_and_put(struct nbd_device *nbd) in nbd_disconnect_and_put() argument
2023 mutex_lock(&nbd->config_lock); in nbd_disconnect_and_put()
2024 nbd_disconnect(nbd); in nbd_disconnect_and_put()
2025 sock_shutdown(nbd); in nbd_disconnect_and_put()
2032 if (nbd->recv_workq) in nbd_disconnect_and_put()
2033 flush_workqueue(nbd->recv_workq); in nbd_disconnect_and_put()
2034 nbd_clear_que(nbd); in nbd_disconnect_and_put()
2035 nbd->task_setup = NULL; in nbd_disconnect_and_put()
2036 mutex_unlock(&nbd->config_lock); in nbd_disconnect_and_put()
2039 &nbd->config->runtime_flags)) in nbd_disconnect_and_put()
2040 nbd_config_put(nbd); in nbd_disconnect_and_put()
2045 struct nbd_device *nbd; in nbd_genl_disconnect() local
2057 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_disconnect()
2058 if (!nbd) { in nbd_genl_disconnect()
2064 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_disconnect()
2071 if (!refcount_inc_not_zero(&nbd->config_refs)) in nbd_genl_disconnect()
2073 nbd_disconnect_and_put(nbd); in nbd_genl_disconnect()
2074 nbd_config_put(nbd); in nbd_genl_disconnect()
2076 nbd_put(nbd); in nbd_genl_disconnect()
2082 struct nbd_device *nbd = NULL; in nbd_genl_reconfigure() local
2097 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_reconfigure()
2098 if (!nbd) { in nbd_genl_reconfigure()
2104 if (nbd->backend) { in nbd_genl_reconfigure()
2107 nbd->backend)) { in nbd_genl_reconfigure()
2109 dev_err(nbd_to_dev(nbd), in nbd_genl_reconfigure()
2111 nbd->backend); in nbd_genl_reconfigure()
2116 dev_err(nbd_to_dev(nbd), "must specify backend\n"); in nbd_genl_reconfigure()
2120 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_reconfigure()
2128 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_genl_reconfigure()
2129 dev_err(nbd_to_dev(nbd), in nbd_genl_reconfigure()
2131 nbd_put(nbd); in nbd_genl_reconfigure()
2135 mutex_lock(&nbd->config_lock); in nbd_genl_reconfigure()
2136 config = nbd->config; in nbd_genl_reconfigure()
2138 !nbd->task_recv) { in nbd_genl_reconfigure()
2139 dev_err(nbd_to_dev(nbd), in nbd_genl_reconfigure()
2145 ret = nbd_genl_size_set(info, nbd); in nbd_genl_reconfigure()
2150 nbd_set_cmd_timeout(nbd, in nbd_genl_reconfigure()
2161 &nbd->flags)) in nbd_genl_reconfigure()
2165 &nbd->flags)) in nbd_genl_reconfigure()
2166 refcount_inc(&nbd->refs); in nbd_genl_reconfigure()
2203 ret = nbd_reconnect_socket(nbd, fd); in nbd_genl_reconfigure()
2209 dev_info(nbd_to_dev(nbd), "reconnected socket\n"); in nbd_genl_reconfigure()
2213 mutex_unlock(&nbd->config_lock); in nbd_genl_reconfigure()
2214 nbd_config_put(nbd); in nbd_genl_reconfigure()
2215 nbd_put(nbd); in nbd_genl_reconfigure()
2217 nbd_put(nbd); in nbd_genl_reconfigure()
2261 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply) in populate_nbd_status() argument
2274 if (refcount_read(&nbd->config_refs)) in populate_nbd_status()
2279 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index); in populate_nbd_status()
2292 struct nbd_device *nbd = ptr; in status_cb() local
2293 return populate_nbd_status(nbd, (struct sk_buff *)data); in status_cb()
2332 struct nbd_device *nbd; in nbd_genl_status() local
2333 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_status()
2334 if (nbd) { in nbd_genl_status()
2335 ret = populate_nbd_status(nbd, reply); in nbd_genl_status()
2462 struct nbd_device *nbd = ptr; in nbd_exit_cb() local
2465 if (refcount_read(&nbd->refs)) in nbd_exit_cb()
2466 list_add_tail(&nbd->list, list); in nbd_exit_cb()
2473 struct nbd_device *nbd; in nbd_cleanup() local
2483 nbd = list_first_entry(&del_list, struct nbd_device, list); in nbd_cleanup()
2484 list_del_init(&nbd->list); in nbd_cleanup()
2485 if (refcount_read(&nbd->refs) != 1) in nbd_cleanup()
2487 nbd_put(nbd); in nbd_cleanup()